1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18  * (1) If caching for the FCB hasn't been initiated, then do so by calling
19  * CcInitializeFileCache.
20  *
21  * (2) For each 4k region being read or written, obtain a cache page by
22  * calling CcRequestCachePage.
23  *
24  * (3) If the page is being read, or is not being completely overwritten, and
25  * it is not up to date, then read its data from the underlying medium. If the
26  * read fails, call CcReleaseCachePage with VALID set to FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
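/*
 * Illustrative sketch only (kept in a comment, not compiled): roughly how a
 * single, segment-aligned read could follow steps (2)-(5) above using the
 * helpers defined in this file. ReadCacheSegment() is assumed to be the
 * read-side counterpart of the WriteCacheSegment() routine used below;
 * SegmentOffset must be a multiple of Bcb->CacheSegmentSize and Length must
 * not exceed it.
 *
 *   NTSTATUS ExampleCachedRead(PBCB Bcb, ULONG SegmentOffset,
 *                              PVOID Buffer, ULONG Length)
 *   {
 *       PVOID BaseAddress;
 *       BOOLEAN UptoDate;
 *       PCACHE_SEGMENT CacheSeg;
 *       NTSTATUS Status;
 *
 *       // (2) Obtain the cache segment covering this file offset.
 *       Status = CcRosRequestCacheSegment(Bcb, SegmentOffset, &BaseAddress,
 *                                         &UptoDate, &CacheSeg);
 *       if (!NT_SUCCESS(Status))
 *           return Status;
 *
 *       // (3) If the segment is not up to date, read it from the medium.
 *       if (!UptoDate)
 *       {
 *           Status = ReadCacheSegment(CacheSeg);
 *           if (!NT_SUCCESS(Status))
 *           {
 *               // Release with Valid == FALSE and fail the request.
 *               CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
 *               return Status;
 *           }
 *       }
 *
 *       // (4) Copy the cached data out to the caller's buffer.
 *       RtlCopyMemory(Buffer, BaseAddress, Length);
 *
 *       // (5) Release the cache segment: now valid, not dirtied, not mapped.
 *       return CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
 *   }
 */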
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46  * within the kernel address space and allocates/deallocates space from this
47  * block via a bitmap. If CACHE_BITMAP is used, the size of the MDL mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
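
/*
 * A minimal sketch of the RTL_BITMAP discipline this implies (mirroring the
 * CACHE_BITMAP code paths further below): a run of pages is reserved with
 * RtlFindClearBitsAndSet() and given back with RtlClearBits().
 *
 *   ULONG Start = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap,
 *                                        SizeInPages,
 *                                        CiCacheSegMappingRegionHint);
 *   if (Start != 0xffffffff)
 *       BaseAddress = (PUCHAR)CiCacheSegMappingRegionBase + Start * PAGE_SIZE;
 *   // ... later, when the segment is freed:
 *   RtlClearBits(&CiCacheSegMappingRegionAllocMap, Start, SizeInPages);
 */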
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
73 #if DBG
74 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
75 {
76 ++cs->ReferenceCount;
77 if ( cs->Bcb->Trace )
78 {
79 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
80 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
81 }
82 }
83 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
84 {
85 --cs->ReferenceCount;
86 if ( cs->Bcb->Trace )
87 {
88 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
89 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
90 }
91 }
92 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
93 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
94 #else
95 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
96 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
97 #endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
105 VOID
106 NTAPI
107 CcRosTraceCacheMap (
108 PBCB Bcb,
109 BOOLEAN Trace )
110 {
111 #if DBG
112 KIRQL oldirql;
113 PLIST_ENTRY current_entry;
114 PCACHE_SEGMENT current;
115
116 if ( !Bcb )
117 return;
118
119 Bcb->Trace = Trace;
120
121 if ( Trace )
122 {
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
124
125 KeAcquireGuardedMutex(&ViewLock);
126 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
127
128 current_entry = Bcb->BcbSegmentListHead.Flink;
129 while (current_entry != &Bcb->BcbSegmentListHead)
130 {
131 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
132 current_entry = current_entry->Flink;
133
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
135 current, current->ReferenceCount, current->Dirty, current->PageOut );
136 }
137 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
138 KeReleaseGuardedMutex(&ViewLock);
139 }
140 else
141 {
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
143 }
144
145 #else
146     UNREFERENCED_PARAMETER(Bcb);
147     UNREFERENCED_PARAMETER(Trace);
148 #endif
149 }
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
154 {
155 NTSTATUS Status;
156 KIRQL oldIrql;
157
158 Status = WriteCacheSegment(CacheSegment);
159 if (NT_SUCCESS(Status))
160 {
161 KeAcquireGuardedMutex(&ViewLock);
162 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
163
164 CacheSegment->Dirty = FALSE;
165 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
166 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
167 CcRosCacheSegmentDecRefCount ( CacheSegment );
168
169 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
170 KeReleaseGuardedMutex(&ViewLock);
171 }
172
173 return(Status);
174 }
175
176 NTSTATUS
177 NTAPI
178 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
179 {
180 PLIST_ENTRY current_entry;
181 PCACHE_SEGMENT current;
182 ULONG PagesPerSegment;
183 BOOLEAN Locked;
184 NTSTATUS Status;
185 static ULONG WriteCount[4] = {0, 0, 0, 0};
186 ULONG NewTarget;
187
188 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
189
190 (*Count) = 0;
191
192 KeAcquireGuardedMutex(&ViewLock);
193
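    /* Age the scheduled write counts and, if more pages are dirty than are
       already scheduled, spread the shortfall over the next four passes;
       this pass writes at least WriteCount[0] pages. */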
194 WriteCount[0] = WriteCount[1];
195 WriteCount[1] = WriteCount[2];
196 WriteCount[2] = WriteCount[3];
197 WriteCount[3] = 0;
198
199 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
200
201 if (NewTarget < DirtyPageCount)
202 {
203 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
204 WriteCount[0] += NewTarget;
205 WriteCount[1] += NewTarget;
206 WriteCount[2] += NewTarget;
207 WriteCount[3] += NewTarget;
208 }
209
210 NewTarget = WriteCount[0];
211
212 Target = max(NewTarget, Target);
213
214 current_entry = DirtySegmentListHead.Flink;
215 if (current_entry == &DirtySegmentListHead)
216 {
217 DPRINT("No Dirty pages\n");
218 }
219
220 while (current_entry != &DirtySegmentListHead && Target > 0)
221 {
222 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
223 DirtySegmentListEntry);
224 current_entry = current_entry->Flink;
225
226 Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
227 current->Bcb->LazyWriteContext, FALSE);
228 if (!Locked)
229 {
230 continue;
231 }
232
233 Locked = ExTryToAcquirePushLockExclusive(&current->Lock);
234 if (!Locked)
235 {
236 current->Bcb->Callbacks->ReleaseFromLazyWrite(
237 current->Bcb->LazyWriteContext);
238
239 continue;
240 }
241
242 ASSERT(current->Dirty);
243 if (current->ReferenceCount > 1)
244 {
245 ExReleasePushLock(&current->Lock);
246 current->Bcb->Callbacks->ReleaseFromLazyWrite(
247 current->Bcb->LazyWriteContext);
248 continue;
249 }
250
251 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
252
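    /* ViewLock is dropped around the actual write; the dirty list may change
       in the meantime, so the scan restarts from its head afterwards. */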
253 KeReleaseGuardedMutex(&ViewLock);
254
255 Status = CcRosFlushCacheSegment(current);
256
257 ExReleasePushLock(&current->Lock);
258 current->Bcb->Callbacks->ReleaseFromLazyWrite(
259 current->Bcb->LazyWriteContext);
260
261 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
262 {
263 DPRINT1("CC: Failed to flush cache segment.\n");
264 }
265 else
266 {
267 (*Count) += PagesPerSegment;
268 Target -= PagesPerSegment;
269 }
270
271 KeAcquireGuardedMutex(&ViewLock);
272 current_entry = DirtySegmentListHead.Flink;
273 }
274
275 if (*Count < NewTarget)
276 {
277 WriteCount[1] += (NewTarget - *Count);
278 }
279
280 KeReleaseGuardedMutex(&ViewLock);
281
282 DPRINT("CcRosFlushDirtyPages() finished\n");
283 return(STATUS_SUCCESS);
284 }
285
286 NTSTATUS
287 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
288 /*
289 * FUNCTION: Try to free some memory from the file cache.
290 * ARGUMENTS:
291 * Target - The number of pages to be freed.
292  * Priority - The priority of the free operation (currently unused).
293 * NrFreed - Points to a variable where the number of pages
294 * actually freed is returned.
295 */
296 {
297 PLIST_ENTRY current_entry;
298 PCACHE_SEGMENT current;
299 ULONG PagesPerSegment;
300 ULONG PagesFreed;
301 KIRQL oldIrql;
302 LIST_ENTRY FreeList;
303
304 DPRINT("CcRosTrimCache(Target %d)\n", Target);
305
306 *NrFreed = 0;
307
308 InitializeListHead(&FreeList);
309
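    /* First pass: page out idle mapped segments and count the pages that could
       be reclaimed; the second pass below unlinks the now-unreferenced
       segments, which are freed once all locks have been dropped. */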
310 KeAcquireGuardedMutex(&ViewLock);
311 current_entry = CacheSegmentLRUListHead.Flink;
312 while (current_entry != &CacheSegmentLRUListHead && Target > 0)
313 {
314 NTSTATUS Status;
315
316 Status = STATUS_SUCCESS;
317 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
318 CacheSegmentLRUListEntry);
319 current_entry = current_entry->Flink;
320
321 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
322
323 if (current->MappedCount > 0 && !current->Dirty && !current->PageOut)
324 {
325 ULONG i;
326
327 CcRosCacheSegmentIncRefCount(current);
328 current->PageOut = TRUE;
329 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
330 KeReleaseGuardedMutex(&ViewLock);
331 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
332 {
333 PFN_TYPE Page;
334 Page = (PFN_TYPE)(MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT);
335 Status = MmPageOutPhysicalAddress(Page);
336 }
337 KeAcquireGuardedMutex(&ViewLock);
338 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
339 CcRosCacheSegmentDecRefCount(current);
340 }
341
342 if (current->ReferenceCount == 0)
343 {
344 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
345 // PagesFreed = PagesPerSegment;
346 PagesFreed = min(PagesPerSegment, Target);
347 Target -= PagesFreed;
348 (*NrFreed) += PagesFreed;
349 }
350
351 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
352 }
353
354 current_entry = CacheSegmentLRUListHead.Flink;
355 while (current_entry != &CacheSegmentLRUListHead)
356 {
357 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
358 CacheSegmentLRUListEntry);
359 current->PageOut = FALSE;
360 current_entry = current_entry->Flink;
361
362 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
363 if (current->ReferenceCount == 0)
364 {
365 RemoveEntryList(&current->BcbSegmentListEntry);
366 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
367 RemoveEntryList(&current->CacheSegmentListEntry);
368 RemoveEntryList(&current->CacheSegmentLRUListEntry);
369 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
370 }
371 else
372 {
373 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
374 }
375 }
376
377 KeReleaseGuardedMutex(&ViewLock);
378
379 while (!IsListEmpty(&FreeList))
380 {
381 current_entry = RemoveHeadList(&FreeList);
382 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
383 BcbSegmentListEntry);
384 CcRosInternalFreeCacheSegment(current);
385 }
386
387 return(STATUS_SUCCESS);
388 }
389
390 NTSTATUS
391 NTAPI
392 CcRosReleaseCacheSegment(PBCB Bcb,
393 PCACHE_SEGMENT CacheSeg,
394 BOOLEAN Valid,
395 BOOLEAN Dirty,
396 BOOLEAN Mapped)
397 {
398 BOOLEAN WasDirty = CacheSeg->Dirty;
399 KIRQL oldIrql;
400
401 ASSERT(Bcb);
402
403 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
404 Bcb, CacheSeg, Valid);
405
406 CacheSeg->Valid = Valid;
407 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
408
409 KeAcquireGuardedMutex(&ViewLock);
410 if (!WasDirty && CacheSeg->Dirty)
411 {
412 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
413 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
414 }
415 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
416 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
417
418 if (Mapped)
419 {
420 CacheSeg->MappedCount++;
421 }
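    /* Drop the caller's reference; a segment keeps one extra reference while
       it is mapped and another while it is dirty. */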
422 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
423 CcRosCacheSegmentDecRefCount(CacheSeg);
424 if (Mapped && CacheSeg->MappedCount == 1)
425 {
426 CcRosCacheSegmentIncRefCount(CacheSeg);
427 }
428 if (!WasDirty && CacheSeg->Dirty)
429 {
430 CcRosCacheSegmentIncRefCount(CacheSeg);
431 }
432 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
433 KeReleaseGuardedMutex(&ViewLock);
434 ExReleasePushLock(&CacheSeg->Lock);
435
436 return(STATUS_SUCCESS);
437 }
438
439 /* Returns with Cache Segment Lock Held! */
440 PCACHE_SEGMENT
441 NTAPI
442 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
443 {
444 PLIST_ENTRY current_entry;
445 PCACHE_SEGMENT current;
446 KIRQL oldIrql;
447
448 ASSERT(Bcb);
449
450 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
451
452 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
453 current_entry = Bcb->BcbSegmentListHead.Flink;
454 while (current_entry != &Bcb->BcbSegmentListHead)
455 {
456 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
457 BcbSegmentListEntry);
458 if (current->FileOffset <= FileOffset &&
459 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
460 {
461 CcRosCacheSegmentIncRefCount(current);
462 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
463 ExAcquirePushLockExclusive(&current->Lock);
464 return(current);
465 }
466 current_entry = current_entry->Flink;
467 }
468 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
469 return(NULL);
470 }
471
472 NTSTATUS
473 NTAPI
474 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
475 {
476 PCACHE_SEGMENT CacheSeg;
477 KIRQL oldIrql;
478
479 ASSERT(Bcb);
480
481 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
482
483 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
484 if (CacheSeg == NULL)
485 {
486 KeBugCheck(CACHE_MANAGER);
487 }
488 if (!CacheSeg->Dirty)
489 {
490 KeAcquireGuardedMutex(&ViewLock);
491 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
492 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
493 KeReleaseGuardedMutex(&ViewLock);
494 }
495 else
496 {
497 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
498 CcRosCacheSegmentDecRefCount(CacheSeg);
499 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
500 }
501
502
503 CacheSeg->Dirty = TRUE;
504 ExReleasePushLock(&CacheSeg->Lock);
505
506 return(STATUS_SUCCESS);
507 }
508
509 NTSTATUS
510 NTAPI
511 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
512 {
513 PCACHE_SEGMENT CacheSeg;
514 BOOLEAN WasDirty;
515 KIRQL oldIrql;
516
517 ASSERT(Bcb);
518
519 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
520 Bcb, FileOffset, NowDirty);
521
522 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
523 if (CacheSeg == NULL)
524 {
525 return(STATUS_UNSUCCESSFUL);
526 }
527
528 WasDirty = CacheSeg->Dirty;
529 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
530
531 CacheSeg->MappedCount--;
532
533 if (!WasDirty && NowDirty)
534 {
535 KeAcquireGuardedMutex(&ViewLock);
536 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
537 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
538 KeReleaseGuardedMutex(&ViewLock);
539 }
540
541 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
542 CcRosCacheSegmentDecRefCount(CacheSeg);
543 if (!WasDirty && NowDirty)
544 {
545 CcRosCacheSegmentIncRefCount(CacheSeg);
546 }
547 if (CacheSeg->MappedCount == 0)
548 {
549 CcRosCacheSegmentDecRefCount(CacheSeg);
550 }
551 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
552
553 ExReleasePushLock(&CacheSeg->Lock);
554 return(STATUS_SUCCESS);
555 }
556
557 static
558 NTSTATUS
559 CcRosCreateCacheSegment(PBCB Bcb,
560 ULONG FileOffset,
561 PCACHE_SEGMENT* CacheSeg)
562 {
563 PCACHE_SEGMENT current;
564 PCACHE_SEGMENT previous;
565 PLIST_ENTRY current_entry;
566 NTSTATUS Status;
567 KIRQL oldIrql;
568 #ifdef CACHE_BITMAP
569 ULONG StartingOffset;
570 #endif
571 PHYSICAL_ADDRESS BoundaryAddressMultiple;
572
573 ASSERT(Bcb);
574
575 DPRINT("CcRosCreateCacheSegment()\n");
576
577 BoundaryAddressMultiple.QuadPart = 0;
578 if (FileOffset >= Bcb->FileSize.u.LowPart)
579 {
580         *CacheSeg = NULL;
581 return STATUS_INVALID_PARAMETER;
582 }
583
584 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
585 current->Valid = FALSE;
586 current->Dirty = FALSE;
587 current->PageOut = FALSE;
588 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
589 current->Bcb = Bcb;
590 #if DBG
591 if ( Bcb->Trace )
592 {
593 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
594 }
595 #endif
596 current->MappedCount = 0;
597 current->DirtySegmentListEntry.Flink = NULL;
598 current->DirtySegmentListEntry.Blink = NULL;
599 current->ReferenceCount = 1;
600 ExInitializePushLock((PULONG_PTR)&current->Lock);
601 ExAcquirePushLockExclusive(&current->Lock);
602 KeAcquireGuardedMutex(&ViewLock);
603
604 *CacheSeg = current;
605     /* There is a window between the call to CcRosLookupCacheSegment
606      * and CcRosCreateCacheSegment. We must check whether a segment for
607      * this file offset already exists. If one does, we release our
608      * newly created segment and return the existing one.
609      */
610 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
611 current_entry = Bcb->BcbSegmentListHead.Flink;
612 previous = NULL;
613 while (current_entry != &Bcb->BcbSegmentListHead)
614 {
615 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
616 BcbSegmentListEntry);
617 if (current->FileOffset <= FileOffset &&
618 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
619 {
620 CcRosCacheSegmentIncRefCount(current);
621 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
622 #if DBG
623 if ( Bcb->Trace )
624 {
625 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
626 Bcb,
627 (*CacheSeg),
628 current );
629 }
630 #endif
631 ExReleasePushLock(&(*CacheSeg)->Lock);
632 KeReleaseGuardedMutex(&ViewLock);
633 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
634 *CacheSeg = current;
635 ExAcquirePushLockExclusive(&current->Lock);
636 return STATUS_SUCCESS;
637 }
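        /* Remember the segment with the largest FileOffset below ours so the
           new segment can be inserted after it, keeping the per-BCB list
           sorted by FileOffset. */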
638 if (current->FileOffset < FileOffset)
639 {
640 if (previous == NULL)
641 {
642 previous = current;
643 }
644 else
645 {
646 if (previous->FileOffset < current->FileOffset)
647 {
648 previous = current;
649 }
650 }
651 }
652 current_entry = current_entry->Flink;
653 }
654 /* There was no existing segment. */
655 current = *CacheSeg;
656 if (previous)
657 {
658 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
659 }
660 else
661 {
662 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
663 }
664 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
665 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
666 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
667 KeReleaseGuardedMutex(&ViewLock);
668 #ifdef CACHE_BITMAP
669 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
670
671 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
672
673 if (StartingOffset == 0xffffffff)
674 {
675 DPRINT1("Out of CacheSeg mapping space\n");
676 KeBugCheck(CACHE_MANAGER);
677 }
678
679 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
680
681 if (CiCacheSegMappingRegionHint == StartingOffset)
682 {
683 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
684 }
685
686 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
687 #else
688 MmLockAddressSpace(MmGetKernelAddressSpace());
689 current->BaseAddress = NULL;
690 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
691 MEMORY_AREA_CACHE_SEGMENT,
692 &current->BaseAddress,
693 Bcb->CacheSegmentSize,
694 PAGE_READWRITE,
695 (PMEMORY_AREA*)&current->MemoryArea,
696 FALSE,
697 0,
698 BoundaryAddressMultiple);
699 MmUnlockAddressSpace(MmGetKernelAddressSpace());
700 if (!NT_SUCCESS(Status))
701 {
702 KeBugCheck(CACHE_MANAGER);
703 }
704 #endif
705
706 /* Create a virtual mapping for this memory area */
707 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
708 MC_CACHE, PAGE_READWRITE);
709
710 return(STATUS_SUCCESS);
711 }
712
713 NTSTATUS
714 NTAPI
715 CcRosGetCacheSegmentChain(PBCB Bcb,
716 ULONG FileOffset,
717 ULONG Length,
718 PCACHE_SEGMENT* CacheSeg)
719 {
720 PCACHE_SEGMENT current;
721 ULONG i;
722 PCACHE_SEGMENT* CacheSegList;
723 PCACHE_SEGMENT Previous = NULL;
724
725 ASSERT(Bcb);
726
727 DPRINT("CcRosGetCacheSegmentChain()\n");
728
729 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
730
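    /* Stack-allocate a temporary array holding one segment pointer per
       cache-segment-sized chunk of the request. */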
731 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
732 (Length / Bcb->CacheSegmentSize));
733
734 /*
735 * Look for a cache segment already mapping the same data.
736 */
737 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
738 {
739 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
740 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
741 if (current != NULL)
742 {
743 CacheSegList[i] = current;
744 }
745 else
746 {
747 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
748 CacheSegList[i] = current;
749 }
750 }
751
752 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
753 {
754 if (i == 0)
755 {
756 *CacheSeg = CacheSegList[i];
757 Previous = CacheSegList[i];
758 }
759 else
760 {
761 Previous->NextInChain = CacheSegList[i];
762 Previous = CacheSegList[i];
763 }
764 }
765 ASSERT(Previous);
766 Previous->NextInChain = NULL;
767
768 return(STATUS_SUCCESS);
769 }
770
771 NTSTATUS
772 NTAPI
773 CcRosGetCacheSegment(PBCB Bcb,
774 ULONG FileOffset,
775 PULONG BaseOffset,
776 PVOID* BaseAddress,
777 PBOOLEAN UptoDate,
778 PCACHE_SEGMENT* CacheSeg)
779 {
780 PCACHE_SEGMENT current;
781 NTSTATUS Status;
782
783 ASSERT(Bcb);
784
785 DPRINT("CcRosGetCacheSegment()\n");
786
787 /*
788 * Look for a cache segment already mapping the same data.
789 */
790 current = CcRosLookupCacheSegment(Bcb, FileOffset);
791 if (current == NULL)
792 {
793 /*
794 * Otherwise create a new segment.
795 */
796 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
797 if (!NT_SUCCESS(Status))
798 {
799 return Status;
800 }
801 }
802 /*
803 * Return information about the segment to the caller.
804 */
805 *UptoDate = current->Valid;
806 *BaseAddress = current->BaseAddress;
807 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
808 *CacheSeg = current;
809 *BaseOffset = current->FileOffset;
810 return(STATUS_SUCCESS);
811 }
812
813 NTSTATUS NTAPI
814 CcRosRequestCacheSegment(PBCB Bcb,
815 ULONG FileOffset,
816 PVOID* BaseAddress,
817 PBOOLEAN UptoDate,
818 PCACHE_SEGMENT* CacheSeg)
819 /*
820 * FUNCTION: Request a page mapping for a BCB
821 */
822 {
823 ULONG BaseOffset;
824
825 ASSERT(Bcb);
826
827 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
828 {
829 DPRINT1("Bad fileoffset %x should be multiple of %x",
830 FileOffset, Bcb->CacheSegmentSize);
831 KeBugCheck(CACHE_MANAGER);
832 }
833
834 return(CcRosGetCacheSegment(Bcb,
835 FileOffset,
836 &BaseOffset,
837 BaseAddress,
838 UptoDate,
839 CacheSeg));
840 }
841 #ifndef CACHE_BITMAP
843 static VOID
844 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
845 PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
846 {
847 ASSERT(SwapEntry == 0);
848 if (Page != 0)
849 {
850 MmReleasePageMemoryConsumer(MC_CACHE, Page);
851 }
852 }
853 #endif
854 NTSTATUS
855 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
856 /*
857 * FUNCTION: Releases a cache segment associated with a BCB
858 */
859 {
860 #ifdef CACHE_BITMAP
861 ULONG i;
862 ULONG RegionSize;
863 ULONG Base;
864 PFN_TYPE Page;
865 KIRQL oldIrql;
866 #endif
867 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
868 #if DBG
869 if ( CacheSeg->Bcb->Trace )
870 {
871 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
872 }
873 #endif
874 #ifdef CACHE_BITMAP
875 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
876
877 /* Unmap all the pages. */
878 for (i = 0; i < RegionSize; i++)
879 {
880 MmDeleteVirtualMapping(NULL,
881 CacheSeg->BaseAddress + (i * PAGE_SIZE),
882 FALSE,
883 NULL,
884 &Page);
885 MmReleasePageMemoryConsumer(MC_CACHE, Page);
886 }
887
888 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
889 /* Deallocate all the pages used. */
890 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
891
892 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
893
894 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
895
896 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
897 #else
898 MmLockAddressSpace(MmGetKernelAddressSpace());
899 MmFreeMemoryArea(MmGetKernelAddressSpace(),
900 CacheSeg->MemoryArea,
901 CcFreeCachePage,
902 NULL);
903 MmUnlockAddressSpace(MmGetKernelAddressSpace());
904 #endif
905 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
906 return(STATUS_SUCCESS);
907 }
908
909 NTSTATUS
910 NTAPI
911 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
912 {
913 NTSTATUS Status;
914 KIRQL oldIrql;
915
916 ASSERT(Bcb);
917
918 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
919 Bcb, CacheSeg);
920
921 KeAcquireGuardedMutex(&ViewLock);
922 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
923 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
924 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
925 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
926 if (CacheSeg->Dirty)
927 {
928 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
929 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
930
931 }
932 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
933 KeReleaseGuardedMutex(&ViewLock);
934
935 Status = CcRosInternalFreeCacheSegment(CacheSeg);
936 return(Status);
937 }
938
939 /*
940 * @implemented
941 */
942 VOID NTAPI
943 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
944 IN PLARGE_INTEGER FileOffset OPTIONAL,
945 IN ULONG Length,
946 OUT PIO_STATUS_BLOCK IoStatus)
947 {
948 PBCB Bcb;
949 LARGE_INTEGER Offset;
950 PCACHE_SEGMENT current;
951 NTSTATUS Status;
952 KIRQL oldIrql;
953
954 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
955 SectionObjectPointers, FileOffset, Length, IoStatus);
956
957 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
958 {
959 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
960 ASSERT(Bcb);
961 if (FileOffset)
962 {
963 Offset = *FileOffset;
964 }
965 else
966 {
967 Offset.QuadPart = (LONGLONG)0;
968 Length = Bcb->FileSize.u.LowPart;
969 }
970
971 if (IoStatus)
972 {
973 IoStatus->Status = STATUS_SUCCESS;
974 IoStatus->Information = 0;
975 }
976
977 while (Length > 0)
978 {
979 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
980 if (current != NULL)
981 {
982 if (current->Dirty)
983 {
984 Status = CcRosFlushCacheSegment(current);
985 if (!NT_SUCCESS(Status) && IoStatus != NULL)
986 {
987 IoStatus->Status = Status;
988 }
989 }
990 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
991 ExReleasePushLock(&current->Lock);
992 CcRosCacheSegmentDecRefCount(current);
993 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
994 }
995
996 Offset.QuadPart += Bcb->CacheSegmentSize;
997 if (Length > Bcb->CacheSegmentSize)
998 {
999 Length -= Bcb->CacheSegmentSize;
1000 }
1001 else
1002 {
1003 Length = 0;
1004 }
1005 }
1006 }
1007 else
1008 {
1009 if (IoStatus)
1010 {
1011 IoStatus->Status = STATUS_INVALID_PARAMETER;
1012 }
1013 }
1014 }
1015
1016 NTSTATUS
1017 NTAPI
1018 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1019 /*
1020 * FUNCTION: Releases the BCB associated with a file object
1021 */
1022 {
1023 PLIST_ENTRY current_entry;
1024 PCACHE_SEGMENT current;
1025 NTSTATUS Status;
1026 LIST_ENTRY FreeList;
1027 KIRQL oldIrql;
1028
1029 ASSERT(Bcb);
1030
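    /* Keep a reference on the BCB so it stays valid while ViewLock is
       released for the flush below. */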
1031 Bcb->RefCount++;
1032 KeReleaseGuardedMutex(&ViewLock);
1033
1034 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1035
1036 KeAcquireGuardedMutex(&ViewLock);
1037 Bcb->RefCount--;
1038 if (Bcb->RefCount == 0)
1039 {
1040 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1041 {
1042 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1043 Bcb->BcbRemoveListEntry.Flink = NULL;
1044 }
1045
1046 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1047
1048 /*
1049 * Release all cache segments.
1050 */
1051 InitializeListHead(&FreeList);
1052 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1053 current_entry = Bcb->BcbSegmentListHead.Flink;
1054 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1055 {
1056 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1057 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1058 RemoveEntryList(&current->CacheSegmentListEntry);
1059 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1060 if (current->Dirty)
1061 {
1062 RemoveEntryList(&current->DirtySegmentListEntry);
1063 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1064 DPRINT1("Freeing dirty segment\n");
1065 }
1066 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1067 }
1068 #if DBG
1069 Bcb->Trace = FALSE;
1070 #endif
1071 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1072
1073 KeReleaseGuardedMutex(&ViewLock);
1074 ObDereferenceObject (Bcb->FileObject);
1075
1076 while (!IsListEmpty(&FreeList))
1077 {
1078 current_entry = RemoveTailList(&FreeList);
1079 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1080 Status = CcRosInternalFreeCacheSegment(current);
1081 }
1082 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1083 KeAcquireGuardedMutex(&ViewLock);
1084 }
1085 return(STATUS_SUCCESS);
1086 }
1087
1088 VOID
1089 NTAPI
1090 CcRosReferenceCache(PFILE_OBJECT FileObject)
1091 {
1092 PBCB Bcb;
1093 KeAcquireGuardedMutex(&ViewLock);
1094 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1095 ASSERT(Bcb);
1096 if (Bcb->RefCount == 0)
1097 {
1098 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1099 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1100 Bcb->BcbRemoveListEntry.Flink = NULL;
1101
1102 }
1103 else
1104 {
1105 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1106 }
1107 Bcb->RefCount++;
1108 KeReleaseGuardedMutex(&ViewLock);
1109 }
1110
1111 VOID
1112 NTAPI
1113 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1114 {
1115 PBCB Bcb;
1116 DPRINT("CcRosSetRemoveOnClose()\n");
1117 KeAcquireGuardedMutex(&ViewLock);
1118 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1119 if (Bcb)
1120 {
1121 Bcb->RemoveOnClose = TRUE;
1122 if (Bcb->RefCount == 0)
1123 {
1124 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1125 }
1126 }
1127 KeReleaseGuardedMutex(&ViewLock);
1128 }
1129
1130
1131 VOID
1132 NTAPI
1133 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1134 {
1135 PBCB Bcb;
1136 KeAcquireGuardedMutex(&ViewLock);
1137 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1138 ASSERT(Bcb);
1139 if (Bcb->RefCount > 0)
1140 {
1141 Bcb->RefCount--;
1142 if (Bcb->RefCount == 0)
1143 {
1144 MmFreeSectionSegments(Bcb->FileObject);
1145 CcRosDeleteFileCache(FileObject, Bcb);
1146 }
1147 }
1148 KeReleaseGuardedMutex(&ViewLock);
1149 }
1150
1151 NTSTATUS NTAPI
1152 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1153 /*
1154 * FUNCTION: Called by the file system when a handle to a file object
1155 * has been closed.
1156 */
1157 {
1158 PBCB Bcb;
1159
1160 KeAcquireGuardedMutex(&ViewLock);
1161
1162 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1163 {
1164 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1165 if (FileObject->PrivateCacheMap != NULL)
1166 {
1167 FileObject->PrivateCacheMap = NULL;
1168 if (Bcb->RefCount > 0)
1169 {
1170 Bcb->RefCount--;
1171 if (Bcb->RefCount == 0)
1172 {
1173 MmFreeSectionSegments(Bcb->FileObject);
1174 CcRosDeleteFileCache(FileObject, Bcb);
1175 }
1176 }
1177 }
1178 }
1179 KeReleaseGuardedMutex(&ViewLock);
1180 return(STATUS_SUCCESS);
1181 }
1182
1183 NTSTATUS
1184 NTAPI
1185 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1186 {
1187 PBCB Bcb;
1188 NTSTATUS Status;
1189
1190 KeAcquireGuardedMutex(&ViewLock);
1191
1192 ASSERT(FileObject->SectionObjectPointer);
1193 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1194 if (Bcb == NULL)
1195 {
1196 Status = STATUS_UNSUCCESSFUL;
1197 }
1198 else
1199 {
1200 if (FileObject->PrivateCacheMap == NULL)
1201 {
1202 FileObject->PrivateCacheMap = Bcb;
1203 Bcb->RefCount++;
1204 }
1205 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1206 {
1207 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1208 Bcb->BcbRemoveListEntry.Flink = NULL;
1209 }
1210 Status = STATUS_SUCCESS;
1211 }
1212 KeReleaseGuardedMutex(&ViewLock);
1213
1214 return Status;
1215 }
1216
1217
1218 NTSTATUS NTAPI
1219 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1220 ULONG CacheSegmentSize,
1221 PCACHE_MANAGER_CALLBACKS CallBacks,
1222 PVOID LazyWriterContext)
1223 /*
1224 * FUNCTION: Initializes a BCB for a file object
1225 */
1226 {
1227 PBCB Bcb;
1228
1229 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1230 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1231 FileObject, Bcb, CacheSegmentSize);
1232
1233 KeAcquireGuardedMutex(&ViewLock);
1234 if (Bcb == NULL)
1235 {
1236 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1237 if (Bcb == NULL)
1238 {
1239 KeReleaseGuardedMutex(&ViewLock);
1240 return(STATUS_UNSUCCESSFUL);
1241 }
1242 memset(Bcb, 0, sizeof(BCB));
1243 ObReferenceObjectByPointer(FileObject,
1244 FILE_ALL_ACCESS,
1245 NULL,
1246 KernelMode);
1247 Bcb->FileObject = FileObject;
1248 Bcb->CacheSegmentSize = CacheSegmentSize;
1249 Bcb->Callbacks = CallBacks;
1250 Bcb->LazyWriteContext = LazyWriterContext;
1251 if (FileObject->FsContext)
1252 {
1253 Bcb->AllocationSize =
1254 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1255 Bcb->FileSize =
1256 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1257 }
1258 KeInitializeSpinLock(&Bcb->BcbLock);
1259 InitializeListHead(&Bcb->BcbSegmentListHead);
1260 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1261 }
1262 if (FileObject->PrivateCacheMap == NULL)
1263 {
1264 FileObject->PrivateCacheMap = Bcb;
1265 Bcb->RefCount++;
1266 }
1267 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1268 {
1269 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1270 Bcb->BcbRemoveListEntry.Flink = NULL;
1271 }
1272 KeReleaseGuardedMutex(&ViewLock);
1273
1274 return(STATUS_SUCCESS);
1275 }
1276
1277 /*
1278 * @implemented
1279 */
1280 PFILE_OBJECT NTAPI
1281 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1282 {
1283 PBCB Bcb;
1284 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1285 {
1286 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1287 ASSERT(Bcb);
1288 return Bcb->FileObject;
1289 }
1290 return NULL;
1291 }
1292
1293 VOID
1294 INIT_FUNCTION
1295 NTAPI
1296 CcInitView(VOID)
1297 {
1298 #ifdef CACHE_BITMAP
1299 PMEMORY_AREA marea;
1300 PVOID Buffer;
1301     PHYSICAL_ADDRESS BoundaryAddressMultiple;
    NTSTATUS Status;
1302 #endif
1303
1304 DPRINT("CcInitView()\n");
1305 #ifdef CACHE_BITMAP
1306 BoundaryAddressMultiple.QuadPart = 0;
1307 CiCacheSegMappingRegionHint = 0;
1308 CiCacheSegMappingRegionBase = NULL;
1309
1310 MmLockAddressSpace(MmGetKernelAddressSpace());
1311
1312 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1313 MEMORY_AREA_CACHE_SEGMENT,
1314 &CiCacheSegMappingRegionBase,
1315 CI_CACHESEG_MAPPING_REGION_SIZE,
1316 PAGE_READWRITE,
1317 &marea,
1318 FALSE,
1319 0,
1320 BoundaryAddressMultiple);
1321 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1322 if (!NT_SUCCESS(Status))
1323 {
1324 KeBugCheck(CACHE_MANAGER);
1325 }
1326
1327 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1328 if (!Buffer)
1329 {
1330 KeBugCheck(CACHE_MANAGER);
1331 }
1332
1333 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1334 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1335
1336 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1337 #endif
1338 InitializeListHead(&CacheSegmentListHead);
1339 InitializeListHead(&DirtySegmentListHead);
1340 InitializeListHead(&CacheSegmentLRUListHead);
1341 InitializeListHead(&ClosedListHead);
1342 KeInitializeGuardedMutex(&ViewLock);
1343 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1344 NULL,
1345 NULL,
1346 0,
1347 sizeof(INTERNAL_BCB),
1348 TAG_IBCB,
1349 20);
1350 ExInitializeNPagedLookasideList (&BcbLookasideList,
1351 NULL,
1352 NULL,
1353 0,
1354 sizeof(BCB),
1355 TAG_BCB,
1356 20);
1357 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1358 NULL,
1359 NULL,
1360 0,
1361 sizeof(CACHE_SEGMENT),
1362 TAG_CSEG,
1363 20);
1364
1365 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1366
1367 CcInitCacheZeroPage();
1368
1369 }
1370
1371 /* EOF */