1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows:
17 *
18 * (1) If caching for the FCB hasn't been initiated, do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written, obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If the page is being read, or is not being completely overwritten, and
25 * it is not up to date, then read its data from the underlying medium. If the
26 * read fails, call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page by calling CcReleaseCachePage.
31 */
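/*
 * Illustrative sketch only (kept as a comment, it is not compiled): how a
 * read dispatch routine might drive the ROS-specific interfaces implemented
 * in this file.  MyFsdReadSegment and MyFsdReadRaw are hypothetical names;
 * the calls map onto CcRosInitializeFileCache, CcRosRequestCacheSegment and
 * CcRosReleaseCacheSegment below, and FileOffset must be a multiple of the
 * BCB's CacheSegmentSize.
 *
 *   NTSTATUS MyFsdReadSegment(PFILE_OBJECT FileObject, PBCB Bcb,
 *                             ULONG FileOffset, PVOID Buffer)
 *   {
 *       PVOID BaseAddress;
 *       BOOLEAN UptoDate;
 *       PCACHE_SEGMENT CacheSeg;
 *       NTSTATUS Status;
 *
 *       Status = CcRosRequestCacheSegment(Bcb, FileOffset, &BaseAddress,
 *                                         &UptoDate, &CacheSeg);
 *       if (!NT_SUCCESS(Status))
 *           return Status;
 *
 *       if (!UptoDate)
 *       {
 *           Status = MyFsdReadRaw(FileObject, FileOffset, BaseAddress,
 *                                 Bcb->CacheSegmentSize);
 *           if (!NT_SUCCESS(Status))
 *           {
 *               CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
 *               return Status;
 *           }
 *       }
 *
 *       RtlCopyMemory(Buffer, BaseAddress, Bcb->CacheSegmentSize);
 *       return CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
 *   }
 */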
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocates/deallocates space from this
47 * block via a bitmap. If CACHE_BITMAP is used, the size of the MDL mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
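/*
 * Rough sketch of that scheme (only meaningful with CACHE_BITMAP defined);
 * it mirrors the #ifdef CACHE_BITMAP paths in CcRosCreateCacheSegment and
 * CcRosInternalFreeCacheSegment below.  'Pages' stands for the segment size
 * in pages:
 *
 *   StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap,
 *                                           Pages, CiCacheSegMappingRegionHint);
 *   BaseAddress    = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
 *   ...map the segment's pages at BaseAddress, and on release...
 *   RtlClearBits(&CiCacheSegMappingRegionAllocMap, StartingOffset, Pages);
 */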
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
73
74 #if defined(__GNUC__)
75 /* void * alloca(size_t size); */
76 #elif defined(_MSC_VER)
77 void* _alloca(size_t size);
78 #else
79 #error Unknown compiler for alloca intrinsic stack allocation "function"
80 #endif
81
82 #if defined(DBG) || defined(KDBG)
83 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
84 {
85 ++cs->ReferenceCount;
86 if ( cs->Bcb->Trace )
87 {
88 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
89 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
90 }
91 }
92 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
93 {
94 --cs->ReferenceCount;
95 if ( cs->Bcb->Trace )
96 {
97 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
98 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
99 }
100 }
101 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
102 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
103 #else
104 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
105 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
106 #endif
107
108 NTSTATUS
109 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
110
111
112 /* FUNCTIONS *****************************************************************/
113
114 VOID
115 NTAPI
116 CcRosTraceCacheMap (
117 PBCB Bcb,
118 BOOLEAN Trace )
119 {
120 #if defined(DBG) || defined(KDBG)
121 KIRQL oldirql;
122 PLIST_ENTRY current_entry;
123 PCACHE_SEGMENT current;
124
125 if ( !Bcb )
126 return;
127
128 Bcb->Trace = Trace;
129
130 if ( Trace )
131 {
132 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
133
134 KeAcquireGuardedMutex(&ViewLock);
135 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
136
137 current_entry = Bcb->BcbSegmentListHead.Flink;
138 while (current_entry != &Bcb->BcbSegmentListHead)
139 {
140 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
141 current_entry = current_entry->Flink;
142
143 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
144 current, current->ReferenceCount, current->Dirty, current->PageOut );
145 }
146 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
147 KeReleaseGuardedMutex(&ViewLock);
148 }
149 else
150 {
151 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
152 }
153
154 #else
155     UNREFERENCED_PARAMETER(Bcb);
156     UNREFERENCED_PARAMETER(Trace);
157 #endif
158 }
159
160 NTSTATUS
161 NTAPI
162 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
163 {
164 NTSTATUS Status;
165 KIRQL oldIrql;
166
167 Status = WriteCacheSegment(CacheSegment);
168 if (NT_SUCCESS(Status))
169 {
170 KeAcquireGuardedMutex(&ViewLock);
171 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
172
173 CacheSegment->Dirty = FALSE;
174 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
175 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
176 CcRosCacheSegmentDecRefCount ( CacheSegment );
177
178 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
179 KeReleaseGuardedMutex(&ViewLock);
180 }
181
182 return(Status);
183 }
184
185 NTSTATUS
186 NTAPI
187 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
188 {
189 PLIST_ENTRY current_entry;
190 PCACHE_SEGMENT current;
191 ULONG PagesPerSegment;
192 BOOLEAN Locked;
193 NTSTATUS Status;
194 static ULONG WriteCount[4] = {0, 0, 0, 0};
195 ULONG NewTarget;
196
197 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
198
199 (*Count) = 0;
200
201 KeAcquireGuardedMutex(&ViewLock);
202
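    /*
     * WriteCount[] spreads write-back of the dirty-page backlog over several
     * invocations: the four-slot window is shifted by one on every call, any
     * excess of DirtyPageCount over the outstanding quota is split in quarters
     * across the slots, this call's share is WriteCount[0], and whatever part
     * of that share is not flushed below is carried over into WriteCount[1].
     */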
203 WriteCount[0] = WriteCount[1];
204 WriteCount[1] = WriteCount[2];
205 WriteCount[2] = WriteCount[3];
206 WriteCount[3] = 0;
207
208 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
209
210 if (NewTarget < DirtyPageCount)
211 {
212 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
213 WriteCount[0] += NewTarget;
214 WriteCount[1] += NewTarget;
215 WriteCount[2] += NewTarget;
216 WriteCount[3] += NewTarget;
217 }
218
219 NewTarget = WriteCount[0];
220
221 Target = max(NewTarget, Target);
222
223 current_entry = DirtySegmentListHead.Flink;
224 if (current_entry == &DirtySegmentListHead)
225 {
226 DPRINT("No Dirty pages\n");
227 }
228
229 while (current_entry != &DirtySegmentListHead && Target > 0)
230 {
231 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
232 DirtySegmentListEntry);
233 current_entry = current_entry->Flink;
234
235 Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
236 current->Bcb->LazyWriteContext, FALSE);
237 if (!Locked)
238 {
239 continue;
240 }
241
242 Locked = ExTryToAcquirePushLockExclusive(&current->Lock);
243 if (!Locked)
244 {
245 current->Bcb->Callbacks->ReleaseFromLazyWrite(
246 current->Bcb->LazyWriteContext);
247
248 continue;
249 }
250
251 ASSERT(current->Dirty);
252 if (current->ReferenceCount > 1)
253 {
254 ExReleasePushLock(&current->Lock);
255 current->Bcb->Callbacks->ReleaseFromLazyWrite(
256 current->Bcb->LazyWriteContext);
257 continue;
258 }
259
260 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
261
262 KeReleaseGuardedMutex(&ViewLock);
263
264 Status = CcRosFlushCacheSegment(current);
265
266 ExReleasePushLock(&current->Lock);
267 current->Bcb->Callbacks->ReleaseFromLazyWrite(
268 current->Bcb->LazyWriteContext);
269
270 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
271 {
272 DPRINT1("CC: Failed to flush cache segment.\n");
273 }
274 else
275 {
276 (*Count) += PagesPerSegment;
277 Target -= PagesPerSegment;
278 }
279
280 KeAcquireGuardedMutex(&ViewLock);
281 current_entry = DirtySegmentListHead.Flink;
282 }
283
284 if (*Count < NewTarget)
285 {
286 WriteCount[1] += (NewTarget - *Count);
287 }
288
289 KeReleaseGuardedMutex(&ViewLock);
290
291 DPRINT("CcRosFlushDirtyPages() finished\n");
292 return(STATUS_SUCCESS);
293 }
294
295 NTSTATUS
296 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
297 /*
298 * FUNCTION: Try to free some memory from the file cache.
299 * ARGUMENTS:
300 * Target - The number of pages to be freed.
301  *       Priority - The priority of the free operation (currently unused).
302 * NrFreed - Points to a variable where the number of pages
303 * actually freed is returned.
304 */
305 {
306 PLIST_ENTRY current_entry;
307 PCACHE_SEGMENT current;
308 ULONG PagesPerSegment;
309 ULONG PagesFreed;
310 KIRQL oldIrql;
311 LIST_ENTRY FreeList;
312
313 DPRINT("CcRosTrimCache(Target %d)\n", Target);
314
315 *NrFreed = 0;
316
317 InitializeListHead(&FreeList);
318
319 KeAcquireGuardedMutex(&ViewLock);
320 current_entry = CacheSegmentLRUListHead.Flink;
321 while (current_entry != &CacheSegmentLRUListHead && Target > 0)
322 {
323 NTSTATUS Status;
324
325 Status = STATUS_SUCCESS;
326 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
327 CacheSegmentLRUListEntry);
328 current_entry = current_entry->Flink;
329
330 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
331
332 if (current->MappedCount > 0 && !current->Dirty && !current->PageOut)
333 {
334 ULONG i;
335
336 CcRosCacheSegmentIncRefCount(current);
337 current->PageOut = TRUE;
338 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
339 KeReleaseGuardedMutex(&ViewLock);
340 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
341 {
342 PFN_TYPE Page;
343 Page = (PFN_TYPE)(MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT);
344 Status = MmPageOutPhysicalAddress(Page);
345 }
346 KeAcquireGuardedMutex(&ViewLock);
347 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
348 CcRosCacheSegmentDecRefCount(current);
349 }
350
351 if (current->ReferenceCount == 0)
352 {
353 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
354 // PagesFreed = PagesPerSegment;
355 PagesFreed = min(PagesPerSegment, Target);
356 Target -= PagesFreed;
357 (*NrFreed) += PagesFreed;
358 }
359
360 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
361 }
362
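    /*
     * Second pass over the LRU list: clear the PageOut flag set above and move
     * every segment whose reference count has dropped to zero onto a local
     * free list, so the actual release happens only after ViewLock and the
     * per-BCB spinlocks have been dropped.
     */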
363 current_entry = CacheSegmentLRUListHead.Flink;
364 while (current_entry != &CacheSegmentLRUListHead)
365 {
366 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
367 CacheSegmentLRUListEntry);
368 current->PageOut = FALSE;
369 current_entry = current_entry->Flink;
370
371 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
372 if (current->ReferenceCount == 0)
373 {
374 RemoveEntryList(&current->BcbSegmentListEntry);
375 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
376 RemoveEntryList(&current->CacheSegmentListEntry);
377 RemoveEntryList(&current->CacheSegmentLRUListEntry);
378 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
379 }
380 else
381 {
382 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
383 }
384 }
385
386 KeReleaseGuardedMutex(&ViewLock);
387
388 while (!IsListEmpty(&FreeList))
389 {
390 current_entry = RemoveHeadList(&FreeList);
391 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
392 BcbSegmentListEntry);
393 CcRosInternalFreeCacheSegment(current);
394 }
395
396 return(STATUS_SUCCESS);
397 }
398
399 NTSTATUS
400 NTAPI
401 CcRosReleaseCacheSegment(PBCB Bcb,
402 PCACHE_SEGMENT CacheSeg,
403 BOOLEAN Valid,
404 BOOLEAN Dirty,
405 BOOLEAN Mapped)
406 {
407 BOOLEAN WasDirty = CacheSeg->Dirty;
408 KIRQL oldIrql;
409
410 ASSERT(Bcb);
411
412 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
413 Bcb, CacheSeg, Valid);
414
415 CacheSeg->Valid = Valid;
416 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
417
418 KeAcquireGuardedMutex(&ViewLock);
419 if (!WasDirty && CacheSeg->Dirty)
420 {
421 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
422 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
423 }
424 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
425 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
426
427 if (Mapped)
428 {
429 CacheSeg->MappedCount++;
430 }
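    /*
     * Reference accounting: drop the reference taken when the segment was
     * obtained, add one the first time the segment is mapped, and add one
     * when it becomes dirty, so mapped or dirty segments cannot be freed
     * from under us.
     */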
431 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
432 CcRosCacheSegmentDecRefCount(CacheSeg);
433 if (Mapped && CacheSeg->MappedCount == 1)
434 {
435 CcRosCacheSegmentIncRefCount(CacheSeg);
436 }
437 if (!WasDirty && CacheSeg->Dirty)
438 {
439 CcRosCacheSegmentIncRefCount(CacheSeg);
440 }
441 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
442 KeReleaseGuardedMutex(&ViewLock);
443 ExReleasePushLock(&CacheSeg->Lock);
444
445 return(STATUS_SUCCESS);
446 }
447
448 /* Returns with Cache Segment Lock Held! */
449 PCACHE_SEGMENT
450 NTAPI
451 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
452 {
453 PLIST_ENTRY current_entry;
454 PCACHE_SEGMENT current;
455 KIRQL oldIrql;
456
457 ASSERT(Bcb);
458
459     DPRINT("CcRosLookupCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
460
461 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
462 current_entry = Bcb->BcbSegmentListHead.Flink;
463 while (current_entry != &Bcb->BcbSegmentListHead)
464 {
465 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
466 BcbSegmentListEntry);
467 if (current->FileOffset <= FileOffset &&
468 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
469 {
470 CcRosCacheSegmentIncRefCount(current);
471 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
472 ExAcquirePushLockExclusive(&current->Lock);
473 return(current);
474 }
475 current_entry = current_entry->Flink;
476 }
477 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
478 return(NULL);
479 }
480
481 NTSTATUS
482 NTAPI
483 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
484 {
485 PCACHE_SEGMENT CacheSeg;
486 KIRQL oldIrql;
487
488 ASSERT(Bcb);
489
490 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
491
492 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
493 if (CacheSeg == NULL)
494 {
495 KeBugCheck(CACHE_MANAGER);
496 }
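    /*
     * The lookup above took a reference.  If the segment was clean, keep it
     * as the reference held on behalf of the dirty list; if the segment was
     * already dirty, such a reference exists, so release the lookup one.
     */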
497 if (!CacheSeg->Dirty)
498 {
499 KeAcquireGuardedMutex(&ViewLock);
500 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
501 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
502 KeReleaseGuardedMutex(&ViewLock);
503 }
504 else
505 {
506 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
507 CcRosCacheSegmentDecRefCount(CacheSeg);
508 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
509 }
510
511
512 CacheSeg->Dirty = TRUE;
513 ExReleasePushLock(&CacheSeg->Lock);
514
515 return(STATUS_SUCCESS);
516 }
517
518 NTSTATUS
519 NTAPI
520 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
521 {
522 PCACHE_SEGMENT CacheSeg;
523 BOOLEAN WasDirty;
524 KIRQL oldIrql;
525
526 ASSERT(Bcb);
527
528 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
529 Bcb, FileOffset, NowDirty);
530
531 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
532 if (CacheSeg == NULL)
533 {
534 return(STATUS_UNSUCCESSFUL);
535 }
536
537 WasDirty = CacheSeg->Dirty;
538 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
539
540 CacheSeg->MappedCount--;
541
542 if (!WasDirty && NowDirty)
543 {
544 KeAcquireGuardedMutex(&ViewLock);
545 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
546 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
547 KeReleaseGuardedMutex(&ViewLock);
548 }
549
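    /*
     * Drop the reference taken by the lookup above, pick up a dirty-list
     * reference if the segment just became dirty, and drop the mapping
     * reference once the last mapping is gone.
     */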
550 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
551 CcRosCacheSegmentDecRefCount(CacheSeg);
552 if (!WasDirty && NowDirty)
553 {
554 CcRosCacheSegmentIncRefCount(CacheSeg);
555 }
556 if (CacheSeg->MappedCount == 0)
557 {
558 CcRosCacheSegmentDecRefCount(CacheSeg);
559 }
560 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
561
562 ExReleasePushLock(&CacheSeg->Lock);
563 return(STATUS_SUCCESS);
564 }
565
566 static
567 NTSTATUS
568 CcRosCreateCacheSegment(PBCB Bcb,
569 ULONG FileOffset,
570 PCACHE_SEGMENT* CacheSeg)
571 {
572 PCACHE_SEGMENT current;
573 PCACHE_SEGMENT previous;
574 PLIST_ENTRY current_entry;
575 NTSTATUS Status;
576 KIRQL oldIrql;
577 #ifdef CACHE_BITMAP
578 ULONG StartingOffset;
579 #endif
580 PHYSICAL_ADDRESS BoundaryAddressMultiple;
581
582 ASSERT(Bcb);
583
584 DPRINT("CcRosCreateCacheSegment()\n");
585
586 BoundaryAddressMultiple.QuadPart = 0;
587 if (FileOffset >= Bcb->FileSize.u.LowPart)
588 {
589         *CacheSeg = NULL;
590 return STATUS_INVALID_PARAMETER;
591 }
592
593 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
594 current->Valid = FALSE;
595 current->Dirty = FALSE;
596 current->PageOut = FALSE;
597 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
598 current->Bcb = Bcb;
599 #if defined(DBG) || defined(KDBG)
600 if ( Bcb->Trace )
601 {
602 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
603 }
604 #endif
605 current->MappedCount = 0;
606 current->DirtySegmentListEntry.Flink = NULL;
607 current->DirtySegmentListEntry.Blink = NULL;
608 current->ReferenceCount = 1;
609 ExInitializePushLock((PULONG_PTR)&current->Lock);
610 ExAcquirePushLockExclusive(&current->Lock);
611 KeAcquireGuardedMutex(&ViewLock);
612
613 *CacheSeg = current;
614     /* There is a window between the call to CcRosLookupCacheSegment
615      * and CcRosCreateCacheSegment. We must check whether a segment for
616      * this file offset already exists. If one does, we release our newly
617      * created segment and return the existing one.
618 */
619 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
620 current_entry = Bcb->BcbSegmentListHead.Flink;
621 previous = NULL;
622 while (current_entry != &Bcb->BcbSegmentListHead)
623 {
624 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
625 BcbSegmentListEntry);
626 if (current->FileOffset <= FileOffset &&
627 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
628 {
629 CcRosCacheSegmentIncRefCount(current);
630 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
631 #if defined(DBG) || defined(KDBG)
632 if ( Bcb->Trace )
633 {
634 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
635 Bcb,
636 (*CacheSeg),
637 current );
638 }
639 #endif
640 ExReleasePushLock(&(*CacheSeg)->Lock);
641 KeReleaseGuardedMutex(&ViewLock);
642 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
643 *CacheSeg = current;
644 ExAcquirePushLockExclusive(&current->Lock);
645 return STATUS_SUCCESS;
646 }
647 if (current->FileOffset < FileOffset)
648 {
649 if (previous == NULL)
650 {
651 previous = current;
652 }
653 else
654 {
655 if (previous->FileOffset < current->FileOffset)
656 {
657 previous = current;
658 }
659 }
660 }
661 current_entry = current_entry->Flink;
662 }
663 /* There was no existing segment. */
664 current = *CacheSeg;
665 if (previous)
666 {
667 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
668 }
669 else
670 {
671 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
672 }
673 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
674 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
675 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
676 KeReleaseGuardedMutex(&ViewLock);
677 #ifdef CACHE_BITMAP
678 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
679
680 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
681
682 if (StartingOffset == 0xffffffff)
683 {
684 DPRINT1("Out of CacheSeg mapping space\n");
685 KeBugCheck(CACHE_MANAGER);
686 }
687
688 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
689
690 if (CiCacheSegMappingRegionHint == StartingOffset)
691 {
692 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
693 }
694
695 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
696 #else
697 MmLockAddressSpace(MmGetKernelAddressSpace());
698 current->BaseAddress = NULL;
699 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
700 MEMORY_AREA_CACHE_SEGMENT,
701 &current->BaseAddress,
702 Bcb->CacheSegmentSize,
703 PAGE_READWRITE,
704 (PMEMORY_AREA*)&current->MemoryArea,
705 FALSE,
706 0,
707 BoundaryAddressMultiple);
708 MmUnlockAddressSpace(MmGetKernelAddressSpace());
709 if (!NT_SUCCESS(Status))
710 {
711 KeBugCheck(CACHE_MANAGER);
712 }
713 #endif
714
715 /* Create a virtual mapping for this memory area */
716 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
717 MC_CACHE, PAGE_READWRITE);
718
719 return(STATUS_SUCCESS);
720 }
721
722 NTSTATUS
723 NTAPI
724 CcRosGetCacheSegmentChain(PBCB Bcb,
725 ULONG FileOffset,
726 ULONG Length,
727 PCACHE_SEGMENT* CacheSeg)
728 {
729 PCACHE_SEGMENT current;
730 ULONG i;
731 PCACHE_SEGMENT* CacheSegList;
732 PCACHE_SEGMENT Previous = NULL;
733
734 ASSERT(Bcb);
735
736 DPRINT("CcRosGetCacheSegmentChain()\n");
737
738 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
739
740 #if defined(__GNUC__)
741 CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
742 (Length / Bcb->CacheSegmentSize));
743 #elif defined(_MSC_VER)
744 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
745 (Length / Bcb->CacheSegmentSize));
746 #else
747 #error Unknown compiler for alloca intrinsic stack allocation "function"
748 #endif
749
750 /*
751 * Look for a cache segment already mapping the same data.
752 */
753 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
754 {
755 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
756 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
757 if (current != NULL)
758 {
759 CacheSegList[i] = current;
760 }
761 else
762 {
763 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
764 CacheSegList[i] = current;
765 }
766 }
767
768 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
769 {
770 if (i == 0)
771 {
772 *CacheSeg = CacheSegList[i];
773 Previous = CacheSegList[i];
774 }
775 else
776 {
777 Previous->NextInChain = CacheSegList[i];
778 Previous = CacheSegList[i];
779 }
780 }
781 ASSERT(Previous);
782 Previous->NextInChain = NULL;
783
784 return(STATUS_SUCCESS);
785 }
786
787 NTSTATUS
788 NTAPI
789 CcRosGetCacheSegment(PBCB Bcb,
790 ULONG FileOffset,
791 PULONG BaseOffset,
792 PVOID* BaseAddress,
793 PBOOLEAN UptoDate,
794 PCACHE_SEGMENT* CacheSeg)
795 {
796 PCACHE_SEGMENT current;
797 NTSTATUS Status;
798
799 ASSERT(Bcb);
800
801 DPRINT("CcRosGetCacheSegment()\n");
802
803 /*
804 * Look for a cache segment already mapping the same data.
805 */
806 current = CcRosLookupCacheSegment(Bcb, FileOffset);
807 if (current == NULL)
808 {
809 /*
810 * Otherwise create a new segment.
811 */
812 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
813 if (!NT_SUCCESS(Status))
814 {
815 return Status;
816 }
817 }
818 /*
819 * Return information about the segment to the caller.
820 */
821 *UptoDate = current->Valid;
822 *BaseAddress = current->BaseAddress;
823 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
824 *CacheSeg = current;
825 *BaseOffset = current->FileOffset;
826 return(STATUS_SUCCESS);
827 }
828
829 NTSTATUS NTAPI
830 CcRosRequestCacheSegment(PBCB Bcb,
831 ULONG FileOffset,
832 PVOID* BaseAddress,
833 PBOOLEAN UptoDate,
834 PCACHE_SEGMENT* CacheSeg)
835 /*
836 * FUNCTION: Request a page mapping for a BCB
837 */
838 {
839 ULONG BaseOffset;
840
841 ASSERT(Bcb);
842
843 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
844 {
845         DPRINT1("Bad fileoffset %x should be multiple of %x\n",
846 FileOffset, Bcb->CacheSegmentSize);
847 KeBugCheck(CACHE_MANAGER);
848 }
849
850 return(CcRosGetCacheSegment(Bcb,
851 FileOffset,
852 &BaseOffset,
853 BaseAddress,
854 UptoDate,
855 CacheSeg));
856 }
857 #ifdef CACHE_BITMAP
858 #else
859 static VOID
860 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
861 PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
862 {
863 ASSERT(SwapEntry == 0);
864 if (Page != 0)
865 {
866 MmReleasePageMemoryConsumer(MC_CACHE, Page);
867 }
868 }
869 #endif
870 NTSTATUS
871 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
872 /*
873 * FUNCTION: Releases a cache segment associated with a BCB
874 */
875 {
876 #ifdef CACHE_BITMAP
877 ULONG i;
878 ULONG RegionSize;
879 ULONG Base;
880 PFN_TYPE Page;
881 KIRQL oldIrql;
882 #endif
883 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
884 #if defined(DBG) || defined(KDBG)
885 if ( CacheSeg->Bcb->Trace )
886 {
887 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
888 }
889 #endif
890 #ifdef CACHE_BITMAP
891 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
892
893 /* Unmap all the pages. */
894 for (i = 0; i < RegionSize; i++)
895 {
896 MmDeleteVirtualMapping(NULL,
897 CacheSeg->BaseAddress + (i * PAGE_SIZE),
898 FALSE,
899 NULL,
900 &Page);
901 MmReleasePageMemoryConsumer(MC_CACHE, Page);
902 }
903
904 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
905 /* Deallocate all the pages used. */
906 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
907
908 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
909
910 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
911
912 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
913 #else
914 MmLockAddressSpace(MmGetKernelAddressSpace());
915 MmFreeMemoryArea(MmGetKernelAddressSpace(),
916 CacheSeg->MemoryArea,
917 CcFreeCachePage,
918 NULL);
919 MmUnlockAddressSpace(MmGetKernelAddressSpace());
920 #endif
921 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
922 return(STATUS_SUCCESS);
923 }
924
925 NTSTATUS
926 NTAPI
927 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
928 {
929 NTSTATUS Status;
930 KIRQL oldIrql;
931
932 ASSERT(Bcb);
933
934 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
935 Bcb, CacheSeg);
936
937 KeAcquireGuardedMutex(&ViewLock);
938 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
939 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
940 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
941 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
942 if (CacheSeg->Dirty)
943 {
944 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
945 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
946
947 }
948 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
949 KeReleaseGuardedMutex(&ViewLock);
950
951 Status = CcRosInternalFreeCacheSegment(CacheSeg);
952 return(Status);
953 }
954
955 /*
956 * @implemented
957 */
958 VOID NTAPI
959 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
960 IN PLARGE_INTEGER FileOffset OPTIONAL,
961 IN ULONG Length,
962 OUT PIO_STATUS_BLOCK IoStatus)
963 {
964 PBCB Bcb;
965 LARGE_INTEGER Offset;
966 PCACHE_SEGMENT current;
967 NTSTATUS Status;
968 KIRQL oldIrql;
969
970 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
971 SectionObjectPointers, FileOffset, Length, IoStatus);
972
973 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
974 {
975 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
976 ASSERT(Bcb);
977 if (FileOffset)
978 {
979 Offset = *FileOffset;
980 }
981 else
982 {
983 Offset.QuadPart = (LONGLONG)0;
984 Length = Bcb->FileSize.u.LowPart;
985 }
986
987 if (IoStatus)
988 {
989 IoStatus->Status = STATUS_SUCCESS;
990 IoStatus->Information = 0;
991 }
992
993 while (Length > 0)
994 {
995 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
996 if (current != NULL)
997 {
998 if (current->Dirty)
999 {
1000 Status = CcRosFlushCacheSegment(current);
1001 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1002 {
1003 IoStatus->Status = Status;
1004 }
1005 }
1006 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1007 ExReleasePushLock(&current->Lock);
1008 CcRosCacheSegmentDecRefCount(current);
1009 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1010 }
1011
1012 Offset.QuadPart += Bcb->CacheSegmentSize;
1013 if (Length > Bcb->CacheSegmentSize)
1014 {
1015 Length -= Bcb->CacheSegmentSize;
1016 }
1017 else
1018 {
1019 Length = 0;
1020 }
1021 }
1022 }
1023 else
1024 {
1025 if (IoStatus)
1026 {
1027 IoStatus->Status = STATUS_INVALID_PARAMETER;
1028 }
1029 }
1030 }
1031
1032 NTSTATUS
1033 NTAPI
1034 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1035 /*
1036 * FUNCTION: Releases the BCB associated with a file object
1037 */
1038 {
1039 PLIST_ENTRY current_entry;
1040 PCACHE_SEGMENT current;
1041 NTSTATUS Status;
1042 LIST_ENTRY FreeList;
1043 KIRQL oldIrql;
1044
1045 ASSERT(Bcb);
1046
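    /*
     * Called and returns with ViewLock held.  Take a temporary reference so
     * the BCB stays alive while ViewLock is dropped around the flush below.
     */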
1047 Bcb->RefCount++;
1048 KeReleaseGuardedMutex(&ViewLock);
1049
1050 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1051
1052 KeAcquireGuardedMutex(&ViewLock);
1053 Bcb->RefCount--;
1054 if (Bcb->RefCount == 0)
1055 {
1056 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1057 {
1058 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1059 Bcb->BcbRemoveListEntry.Flink = NULL;
1060 }
1061
1062 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1063
1064 /*
1065 * Release all cache segments.
1066 */
1067 InitializeListHead(&FreeList);
1068 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1069 current_entry = Bcb->BcbSegmentListHead.Flink;
1070 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1071 {
1072 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1073 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1074 RemoveEntryList(&current->CacheSegmentListEntry);
1075 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1076 if (current->Dirty)
1077 {
1078 RemoveEntryList(&current->DirtySegmentListEntry);
1079 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1080 DPRINT1("Freeing dirty segment\n");
1081 }
1082 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1083 }
1084 #if defined(DBG) || defined(KDBG)
1085 Bcb->Trace = FALSE;
1086 #endif
1087 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1088
1089 KeReleaseGuardedMutex(&ViewLock);
1090 ObDereferenceObject (Bcb->FileObject);
1091
1092 while (!IsListEmpty(&FreeList))
1093 {
1094 current_entry = RemoveTailList(&FreeList);
1095 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1096 Status = CcRosInternalFreeCacheSegment(current);
1097 }
1098 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1099 KeAcquireGuardedMutex(&ViewLock);
1100 }
1101 return(STATUS_SUCCESS);
1102 }
1103
1104 VOID
1105 NTAPI
1106 CcRosReferenceCache(PFILE_OBJECT FileObject)
1107 {
1108 PBCB Bcb;
1109 KeAcquireGuardedMutex(&ViewLock);
1110 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1111 ASSERT(Bcb);
1112 if (Bcb->RefCount == 0)
1113 {
1114 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1115 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1116 Bcb->BcbRemoveListEntry.Flink = NULL;
1117
1118 }
1119 else
1120 {
1121 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1122 }
1123 Bcb->RefCount++;
1124 KeReleaseGuardedMutex(&ViewLock);
1125 }
1126
1127 VOID
1128 NTAPI
1129 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1130 {
1131 PBCB Bcb;
1132 DPRINT("CcRosSetRemoveOnClose()\n");
1133 KeAcquireGuardedMutex(&ViewLock);
1134 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1135 if (Bcb)
1136 {
1137 Bcb->RemoveOnClose = TRUE;
1138 if (Bcb->RefCount == 0)
1139 {
1140 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1141 }
1142 }
1143 KeReleaseGuardedMutex(&ViewLock);
1144 }
1145
1146
1147 VOID
1148 NTAPI
1149 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1150 {
1151 PBCB Bcb;
1152 KeAcquireGuardedMutex(&ViewLock);
1153 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1154 ASSERT(Bcb);
1155 if (Bcb->RefCount > 0)
1156 {
1157 Bcb->RefCount--;
1158 if (Bcb->RefCount == 0)
1159 {
1160 MmFreeSectionSegments(Bcb->FileObject);
1161 CcRosDeleteFileCache(FileObject, Bcb);
1162 }
1163 }
1164 KeReleaseGuardedMutex(&ViewLock);
1165 }
1166
1167 NTSTATUS NTAPI
1168 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1169 /*
1170 * FUNCTION: Called by the file system when a handle to a file object
1171 * has been closed.
1172 */
1173 {
1174 PBCB Bcb;
1175
1176 KeAcquireGuardedMutex(&ViewLock);
1177
1178 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1179 {
1180 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1181 if (FileObject->PrivateCacheMap != NULL)
1182 {
1183 FileObject->PrivateCacheMap = NULL;
1184 if (Bcb->RefCount > 0)
1185 {
1186 Bcb->RefCount--;
1187 if (Bcb->RefCount == 0)
1188 {
1189 MmFreeSectionSegments(Bcb->FileObject);
1190 CcRosDeleteFileCache(FileObject, Bcb);
1191 }
1192 }
1193 }
1194 }
1195 KeReleaseGuardedMutex(&ViewLock);
1196 return(STATUS_SUCCESS);
1197 }
1198
1199 NTSTATUS
1200 NTAPI
1201 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1202 {
1203 PBCB Bcb;
1204 NTSTATUS Status;
1205
1206 KeAcquireGuardedMutex(&ViewLock);
1207
1208 ASSERT(FileObject->SectionObjectPointer);
1209 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1210 if (Bcb == NULL)
1211 {
1212 Status = STATUS_UNSUCCESSFUL;
1213 }
1214 else
1215 {
1216 if (FileObject->PrivateCacheMap == NULL)
1217 {
1218 FileObject->PrivateCacheMap = Bcb;
1219 Bcb->RefCount++;
1220 }
1221 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1222 {
1223 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1224 Bcb->BcbRemoveListEntry.Flink = NULL;
1225 }
1226 Status = STATUS_SUCCESS;
1227 }
1228 KeReleaseGuardedMutex(&ViewLock);
1229
1230 return Status;
1231 }
1232
1233
1234 NTSTATUS NTAPI
1235 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1236 ULONG CacheSegmentSize,
1237 PCACHE_MANAGER_CALLBACKS CallBacks,
1238 PVOID LazyWriterContext)
1239 /*
1240 * FUNCTION: Initializes a BCB for a file object
1241 */
1242 {
1243 PBCB Bcb;
1244
1245 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1246 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1247 FileObject, Bcb, CacheSegmentSize);
1248
1249 KeAcquireGuardedMutex(&ViewLock);
1250 if (Bcb == NULL)
1251 {
1252 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1253 if (Bcb == NULL)
1254 {
1255 KeReleaseGuardedMutex(&ViewLock);
1256 return(STATUS_UNSUCCESSFUL);
1257 }
1258 memset(Bcb, 0, sizeof(BCB));
1259 ObReferenceObjectByPointer(FileObject,
1260 FILE_ALL_ACCESS,
1261 NULL,
1262 KernelMode);
1263 Bcb->FileObject = FileObject;
1264 Bcb->CacheSegmentSize = CacheSegmentSize;
1265 Bcb->Callbacks = CallBacks;
1266 Bcb->LazyWriteContext = LazyWriterContext;
1267 if (FileObject->FsContext)
1268 {
1269 Bcb->AllocationSize =
1270 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1271 Bcb->FileSize =
1272 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1273 }
1274 KeInitializeSpinLock(&Bcb->BcbLock);
1275 InitializeListHead(&Bcb->BcbSegmentListHead);
1276 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1277 }
1278 if (FileObject->PrivateCacheMap == NULL)
1279 {
1280 FileObject->PrivateCacheMap = Bcb;
1281 Bcb->RefCount++;
1282 }
1283 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1284 {
1285 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1286 Bcb->BcbRemoveListEntry.Flink = NULL;
1287 }
1288 KeReleaseGuardedMutex(&ViewLock);
1289
1290 return(STATUS_SUCCESS);
1291 }
1292
1293 /*
1294 * @implemented
1295 */
1296 PFILE_OBJECT NTAPI
1297 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1298 {
1299 PBCB Bcb;
1300 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1301 {
1302 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1303 ASSERT(Bcb);
1304 return Bcb->FileObject;
1305 }
1306 return NULL;
1307 }
1308
1309 VOID
1310 INIT_FUNCTION
1311 NTAPI
1312 CcInitView(VOID)
1313 {
1314 #ifdef CACHE_BITMAP
1315 PMEMORY_AREA marea;
1316 PVOID Buffer;
1317     PHYSICAL_ADDRESS BoundaryAddressMultiple;
     NTSTATUS Status;
1318 #endif
1319
1320 DPRINT("CcInitView()\n");
1321 #ifdef CACHE_BITMAP
1322 BoundaryAddressMultiple.QuadPart = 0;
1323 CiCacheSegMappingRegionHint = 0;
1324 CiCacheSegMappingRegionBase = NULL;
1325
1326 MmLockAddressSpace(MmGetKernelAddressSpace());
1327
1328 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1329 MEMORY_AREA_CACHE_SEGMENT,
1330 &CiCacheSegMappingRegionBase,
1331 CI_CACHESEG_MAPPING_REGION_SIZE,
1332 PAGE_READWRITE,
1333 &marea,
1334 FALSE,
1335 0,
1336 BoundaryAddressMultiple);
1337 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1338 if (!NT_SUCCESS(Status))
1339 {
1340 KeBugCheck(CACHE_MANAGER);
1341 }
1342
1343 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1344
1345 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1346 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1347
1348 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1349 #endif
1350 InitializeListHead(&CacheSegmentListHead);
1351 InitializeListHead(&DirtySegmentListHead);
1352 InitializeListHead(&CacheSegmentLRUListHead);
1353 InitializeListHead(&ClosedListHead);
1354 KeInitializeGuardedMutex(&ViewLock);
1355 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1356 NULL,
1357 NULL,
1358 0,
1359 sizeof(INTERNAL_BCB),
1360 TAG_IBCB,
1361 20);
1362 ExInitializeNPagedLookasideList (&BcbLookasideList,
1363 NULL,
1364 NULL,
1365 0,
1366 sizeof(BCB),
1367 TAG_BCB,
1368 20);
1369 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1370 NULL,
1371 NULL,
1372 0,
1373 sizeof(CACHE_SEGMENT),
1374 TAG_CSEG,
1375 20);
1376
1377 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1378
1379 CcInitCacheZeroPage();
1380
1381 }
1382
1383 /* EOF */
1384
1385
1386
1387
1388
1389
1390
1391