/* [NTOS]: Remove useless variables in kernel code that were set, but never actually used.
 * Source: reactos.git / reactos / ntoskrnl / cc / view.c */
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then so do by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
/* Segments holding unwritten (dirty) data; guarded by ViewLock. */
static LIST_ENTRY DirtySegmentListHead;
/* All cache segments in existence; guarded by ViewLock. */
static LIST_ENTRY CacheSegmentListHead;
/* Cache segments in least-recently-used order, used by CcRosTrimCache. */
static LIST_ENTRY CacheSegmentLRUListHead;
/* BCBs whose reference count has dropped to zero, pending deletion. */
static LIST_ENTRY ClosedListHead;
/* Total number of dirty pages currently held by the cache. */
ULONG DirtyPageCount=0;

/* Serializes access to the global lists above and to BCB RefCounts. */
KGUARDED_MUTEX ViewLock;

#ifdef CACHE_BITMAP
#define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)

/* Base address of the single large region cache views are mapped into. */
static PVOID CiCacheSegMappingRegionBase = NULL;
/* One bit per page of the mapping region; a set bit means "in use". */
static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
/* Bit index hint at which to start searching for free mapping space. */
static ULONG CiCacheSegMappingRegionHint;
/* Protects the allocation bitmap and the hint. */
static KSPIN_LOCK CiCacheSegMappingRegionLock;
#endif

/* Lookaside lists for fast allocation of iBCB, BCB and segment structures. */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST BcbLookasideList;
static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
#if DBG
/*
 * Increment a cache segment's reference count; when tracing is enabled
 * on the owning BCB, log the new count together with the call site.
 */
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    ++cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
/*
 * Decrement a cache segment's reference count, traced like the increment.
 */
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    --cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
/* The macros capture the caller's __FILE__/__LINE__ for the trace output. */
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Release builds: plain, untraced reference count adjustments. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
/*
 * Enable or disable debug tracing for a cache map and dump the current
 * state of all of its segments when enabling. No-op in release builds.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if ( !Bcb )
        return;

    Bcb->Trace = Trace;

    if ( Trace )
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );

        /* Lock order: ViewLock first, then the per-BCB spin lock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        /* Walk every segment of this cache map and report its state. */
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
    }

#else
    /* Self-assignments keep release compilers from warning about
     * unused parameters. */
    Bcb = Bcb;
    Trace = Trace;
#endif
}
150
/*
 * Write a dirty cache segment's data back to its file. On success the
 * segment is removed from the dirty list, the global dirty page count is
 * reduced, and the extra reference held for being dirty is dropped.
 */
NTSTATUS
NTAPI
CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    /* Write the data to the backing medium first... */
    Status = WriteCacheSegment(CacheSegment);
    if (NT_SUCCESS(Status))
    {
        /* ...then clear the dirty state under both locks.
         * Lock order: ViewLock before BcbLock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);

        CacheSegment->Dirty = FALSE;
        RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
        DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
        /* Drop the reference taken when the segment became dirty. */
        CcRosCacheSegmentDecRefCount ( CacheSegment );

        KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return(Status);
}
175
/*
 * Flush up to Target dirty pages back to disk; *Count receives the number
 * of pages actually written. The effective target is boosted by a small
 * sliding-window quota (WriteCount) so that dirty data keeps draining
 * across calls even when individual callers ask for little.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages(ULONG Target, PULONG Count)
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    /* Per-call write quotas; static so they persist across invocations. */
    static ULONG WriteCount[4] = {0, 0, 0, 0};
    ULONG NewTarget;

    DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);

    (*Count) = 0;

    KeAcquireGuardedMutex(&ViewLock);

    /* Age the quota window by one slot. */
    WriteCount[0] = WriteCount[1];
    WriteCount[1] = WriteCount[2];
    WriteCount[2] = WriteCount[3];
    WriteCount[3] = 0;

    NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];

    if (NewTarget < DirtyPageCount)
    {
        /* Spread the outstanding dirty pages over the next four calls. */
        NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
        WriteCount[0] += NewTarget;
        WriteCount[1] += NewTarget;
        WriteCount[2] += NewTarget;
        WriteCount[3] += NewTarget;
    }

    NewTarget = WriteCount[0];

    /* Never flush less than the accumulated quota demands. */
    Target = max(NewTarget, Target);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (current_entry != &DirtySegmentListHead && Target > 0)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Skip segments whose filesystem refuses the lazy write... */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, FALSE);
        if (!Locked)
        {
            continue;
        }

        /* ...or whose push lock is currently contended. */
        Locked = ExTryToAcquirePushLockExclusive(&current->Lock);
        if (!Locked)
        {
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);

            continue;
        }

        ASSERT(current->Dirty);
        /* A dirty segment holds one reference itself; more than one means
         * someone else is using it right now, so leave it alone. */
        if (current->ReferenceCount > 1)
        {
            ExReleasePushLock(&current->Lock);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            continue;
        }

        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;

        /* Drop the view lock around the actual (blocking) write. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        ExReleasePushLock(&current->Lock);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* The list may have changed while unlocked: restart from the head. */
        KeAcquireGuardedMutex(&ViewLock);
        current_entry = DirtySegmentListHead.Flink;
    }

    /* Carry any unmet quota forward into a future window slot. */
    if (*Count < NewTarget)
    {
        WriteCount[1] += (NewTarget - *Count);
    }

    KeReleaseGuardedMutex(&ViewLock);

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
285
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;

    DPRINT("CcRosTrimCache(Target %d)\n", Target);

    *NrFreed = 0;

    InitializeListHead(&FreeList);

    /* First pass over the LRU list: try to page mapped, clean segments out
     * of the address spaces they are mapped into, and count the pages that
     * the now-unreferenced segments will yield. */
    KeAcquireGuardedMutex(&ViewLock);
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead && Target > 0)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        if (current->MappedCount > 0 && !current->Dirty && !current->PageOut)
        {
            ULONG i;

            /* Hold an extra reference and mark the segment while both
             * locks are dropped for the blocking page-out calls. */
            CcRosCacheSegmentIncRefCount(current);
            current->PageOut = TRUE;
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
            {
                PFN_NUMBER Page;
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT);
                MmPageOutPhysicalAddress(Page);
            }
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
            CcRosCacheSegmentDecRefCount(current);
        }

        /* Unreferenced segments will be reclaimed by the second pass. */
        if (current->ReferenceCount == 0)
        {
            PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    /* Second pass: unlink every unreferenced segment from all lists and
     * collect it on FreeList; clear the PageOut marker on the others. */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current->PageOut = FALSE;
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        if (current->ReferenceCount == 0)
        {
            RemoveEntryList(&current->BcbSegmentListEntry);
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            /* The BcbSegmentListEntry is reused as the FreeList link. */
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
        else
        {
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
        }
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Release the collected segments outside of all locks. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    return(STATUS_SUCCESS);
}
386
/*
 * Release a cache segment obtained via lookup/create: update its Valid and
 * Dirty state, its dirty-list membership and LRU position, drop the
 * caller's reference and finally release the segment's push lock.
 */
NTSTATUS
NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
                         PCACHE_SEGMENT CacheSeg,
                         BOOLEAN Valid,
                         BOOLEAN Dirty,
                         BOOLEAN Mapped)
{
    BOOLEAN WasDirty = CacheSeg->Dirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
           Bcb, CacheSeg, Valid);

    CacheSeg->Valid = Valid;
    /* Dirtiness is sticky: it is only cleared by a flush. */
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    KeAcquireGuardedMutex(&ViewLock);
    if (!WasDirty && CacheSeg->Dirty)
    {
        /* Segment just became dirty: account for it on the dirty list. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }
    /* Move the segment to the most-recently-used end of the LRU list. */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the caller's reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but keep one reference for the first mapping... */
    if (Mapped && CacheSeg->MappedCount == 1)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and one for having just become dirty. */
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    ExReleasePushLock(&CacheSeg->Lock);

    return(STATUS_SUCCESS);
}
435
/* Returns with Cache Segment Lock Held! */
/*
 * Find the cache segment of Bcb that covers FileOffset. On success the
 * segment is returned with its reference count incremented and its push
 * lock held exclusively; NULL is returned when no segment covers the
 * offset. The BcbLock is dropped before the (potentially blocking) push
 * lock acquisition — the reference keeps the segment alive meanwhile.
 */
PCACHE_SEGMENT
NTAPI
CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);

    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    current_entry = Bcb->BcbSegmentListHead.Flink;
    while (current_entry != &Bcb->BcbSegmentListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        if (current->FileOffset <= FileOffset &&
            (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
        {
            CcRosCacheSegmentIncRefCount(current);
            KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
            ExAcquirePushLockExclusive(&current->Lock);
            return(current);
        }
        current_entry = current_entry->Flink;
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    return(NULL);
}
468
/*
 * Mark the cache segment covering FileOffset as dirty. The segment must
 * already exist — the routine bugchecks otherwise. A previously clean
 * segment keeps the reference taken by the lookup as its "dirty"
 * reference; if the segment was dirty already, that lookup reference is
 * dropped again.
 */
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);

    /* Lookup returns the segment referenced and push-locked. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }
    if (!CacheSeg->Dirty)
    {
        /* Becoming dirty: enter the dirty list and update the counter. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        /* Already dirty: the dirty reference exists, drop the lookup one. */
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        CcRosCacheSegmentDecRefCount(CacheSeg);
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    }

    CacheSeg->Dirty = TRUE;
    ExReleasePushLock(&CacheSeg->Lock);

    return(STATUS_SUCCESS);
}
505
/*
 * Remove one mapping of the cache segment covering FileOffset, optionally
 * marking it dirty. Drops the mapping reference when the last mapping
 * goes away. Returns STATUS_UNSUCCESSFUL when no segment covers the
 * offset.
 */
NTSTATUS
NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
           Bcb, FileOffset, NowDirty);

    /* Lookup returns the segment referenced and push-locked. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* Segment just became dirty: account for it on the dirty list. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }

    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the lookup reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...take the dirty reference if the segment just became dirty... */
    if (!WasDirty && NowDirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and release the mapping reference on the last unmap. */
    if (CacheSeg->MappedCount == 0)
    {
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

    ExReleasePushLock(&CacheSeg->Lock);
    return(STATUS_SUCCESS);
}
553
554 static
555 NTSTATUS
556 CcRosCreateCacheSegment(PBCB Bcb,
557 ULONG FileOffset,
558 PCACHE_SEGMENT* CacheSeg)
559 {
560 PCACHE_SEGMENT current;
561 PCACHE_SEGMENT previous;
562 PLIST_ENTRY current_entry;
563 NTSTATUS Status;
564 KIRQL oldIrql;
565 #ifdef CACHE_BITMAP
566 ULONG StartingOffset;
567 #endif
568 PHYSICAL_ADDRESS BoundaryAddressMultiple;
569
570 ASSERT(Bcb);
571
572 DPRINT("CcRosCreateCacheSegment()\n");
573
574 BoundaryAddressMultiple.QuadPart = 0;
575 if (FileOffset >= Bcb->FileSize.u.LowPart)
576 {
577 CacheSeg = NULL;
578 return STATUS_INVALID_PARAMETER;
579 }
580
581 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
582 current->Valid = FALSE;
583 current->Dirty = FALSE;
584 current->PageOut = FALSE;
585 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
586 current->Bcb = Bcb;
587 #if DBG
588 if ( Bcb->Trace )
589 {
590 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
591 }
592 #endif
593 current->MappedCount = 0;
594 current->DirtySegmentListEntry.Flink = NULL;
595 current->DirtySegmentListEntry.Blink = NULL;
596 current->ReferenceCount = 1;
597 ExInitializePushLock((PULONG_PTR)&current->Lock);
598 ExAcquirePushLockExclusive(&current->Lock);
599 KeAcquireGuardedMutex(&ViewLock);
600
601 *CacheSeg = current;
602 /* There is window between the call to CcRosLookupCacheSegment
603 * and CcRosCreateCacheSegment. We must check if a segment on
604 * the fileoffset exist. If there exist a segment, we release
605 * our new created segment and return the existing one.
606 */
607 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
608 current_entry = Bcb->BcbSegmentListHead.Flink;
609 previous = NULL;
610 while (current_entry != &Bcb->BcbSegmentListHead)
611 {
612 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
613 BcbSegmentListEntry);
614 if (current->FileOffset <= FileOffset &&
615 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
616 {
617 CcRosCacheSegmentIncRefCount(current);
618 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
619 #if DBG
620 if ( Bcb->Trace )
621 {
622 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
623 Bcb,
624 (*CacheSeg),
625 current );
626 }
627 #endif
628 ExReleasePushLock(&(*CacheSeg)->Lock);
629 KeReleaseGuardedMutex(&ViewLock);
630 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
631 *CacheSeg = current;
632 ExAcquirePushLockExclusive(&current->Lock);
633 return STATUS_SUCCESS;
634 }
635 if (current->FileOffset < FileOffset)
636 {
637 if (previous == NULL)
638 {
639 previous = current;
640 }
641 else
642 {
643 if (previous->FileOffset < current->FileOffset)
644 {
645 previous = current;
646 }
647 }
648 }
649 current_entry = current_entry->Flink;
650 }
651 /* There was no existing segment. */
652 current = *CacheSeg;
653 if (previous)
654 {
655 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
656 }
657 else
658 {
659 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
660 }
661 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
662 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
663 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
664 KeReleaseGuardedMutex(&ViewLock);
665 #ifdef CACHE_BITMAP
666 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
667
668 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
669
670 if (StartingOffset == 0xffffffff)
671 {
672 DPRINT1("Out of CacheSeg mapping space\n");
673 KeBugCheck(CACHE_MANAGER);
674 }
675
676 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
677
678 if (CiCacheSegMappingRegionHint == StartingOffset)
679 {
680 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
681 }
682
683 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
684 #else
685 MmLockAddressSpace(MmGetKernelAddressSpace());
686 current->BaseAddress = NULL;
687 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
688 MEMORY_AREA_CACHE_SEGMENT,
689 &current->BaseAddress,
690 Bcb->CacheSegmentSize,
691 PAGE_READWRITE,
692 (PMEMORY_AREA*)&current->MemoryArea,
693 FALSE,
694 0,
695 BoundaryAddressMultiple);
696 MmUnlockAddressSpace(MmGetKernelAddressSpace());
697 if (!NT_SUCCESS(Status))
698 {
699 KeBugCheck(CACHE_MANAGER);
700 }
701 #endif
702
703 /* Create a virtual mapping for this memory area */
704 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
705 MC_CACHE, PAGE_READWRITE);
706
707 return(STATUS_SUCCESS);
708 }
709
/*
 * Collect (creating where necessary) the cache segments that cover
 * [FileOffset, FileOffset + Length), linking them through their
 * NextInChain fields. Length is rounded up to whole segments; the head
 * of the NULL-terminated chain is returned through *CacheSeg. Every
 * segment in the chain is referenced and push-locked.
 */
NTSTATUS
NTAPI
CcRosGetCacheSegmentChain(PBCB Bcb,
                          ULONG FileOffset,
                          ULONG Length,
                          PCACHE_SEGMENT* CacheSeg)
{
    PCACHE_SEGMENT current;
    ULONG i;
    PCACHE_SEGMENT* CacheSegList;
    PCACHE_SEGMENT Previous = NULL;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegmentChain()\n");

    Length = ROUND_UP(Length, Bcb->CacheSegmentSize);

    /* NOTE(review): stack allocation sized by the request — assumes the
     * caller's Length is small enough not to overflow the kernel stack. */
    CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
                           (Length / Bcb->CacheSegmentSize));

    /*
     * Look for a cache segment already mapping the same data.
     */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
        current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
        if (current != NULL)
        {
            CacheSegList[i] = current;
        }
        else
        {
            /* NOTE(review): the status of CcRosCreateCacheSegment is not
             * checked here; on failure 'current' would be invalid. */
            CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
            CacheSegList[i] = current;
        }
    }

    /* Link the collected segments into a NULL-terminated chain. */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        if (i == 0)
        {
            *CacheSeg = CacheSegList[i];
            Previous = CacheSegList[i];
        }
        else
        {
            Previous->NextInChain = CacheSegList[i];
            Previous = CacheSegList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return(STATUS_SUCCESS);
}
767
768 NTSTATUS
769 NTAPI
770 CcRosGetCacheSegment(PBCB Bcb,
771 ULONG FileOffset,
772 PULONG BaseOffset,
773 PVOID* BaseAddress,
774 PBOOLEAN UptoDate,
775 PCACHE_SEGMENT* CacheSeg)
776 {
777 PCACHE_SEGMENT current;
778 NTSTATUS Status;
779
780 ASSERT(Bcb);
781
782 DPRINT("CcRosGetCacheSegment()\n");
783
784 /*
785 * Look for a cache segment already mapping the same data.
786 */
787 current = CcRosLookupCacheSegment(Bcb, FileOffset);
788 if (current == NULL)
789 {
790 /*
791 * Otherwise create a new segment.
792 */
793 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
794 if (!NT_SUCCESS(Status))
795 {
796 return Status;
797 }
798 }
799 /*
800 * Return information about the segment to the caller.
801 */
802 *UptoDate = current->Valid;
803 *BaseAddress = current->BaseAddress;
804 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
805 *CacheSeg = current;
806 *BaseOffset = current->FileOffset;
807 return(STATUS_SUCCESS);
808 }
809
810 NTSTATUS NTAPI
811 CcRosRequestCacheSegment(PBCB Bcb,
812 ULONG FileOffset,
813 PVOID* BaseAddress,
814 PBOOLEAN UptoDate,
815 PCACHE_SEGMENT* CacheSeg)
816 /*
817 * FUNCTION: Request a page mapping for a BCB
818 */
819 {
820 ULONG BaseOffset;
821
822 ASSERT(Bcb);
823
824 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
825 {
826 DPRINT1("Bad fileoffset %x should be multiple of %x",
827 FileOffset, Bcb->CacheSegmentSize);
828 KeBugCheck(CACHE_MANAGER);
829 }
830
831 return(CcRosGetCacheSegment(Bcb,
832 FileOffset,
833 &BaseOffset,
834 BaseAddress,
835 UptoDate,
836 CacheSeg));
837 }
#ifdef CACHE_BITMAP
#else
/*
 * Callback for MmFreeMemoryArea: return each page that backed the freed
 * cache view to the cache memory consumer. Cache views never carry swap
 * entries, hence the assert.
 */
static VOID
CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
                PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
#endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 *
 * Unmaps and frees the segment's view and returns the segment structure
 * to its lookaside list. The caller must already have unlinked the
 * segment from all lists.
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Let the next search start at the lowest known free bit. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* Tear down the memory area; CcFreeCachePage releases each page. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
905
/*
 * Unlink a cache segment from every global and per-BCB list (adjusting
 * the dirty page count if needed) and free it.
 */
NTSTATUS
NTAPI
CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
           Bcb, CacheSeg);

    /* Lock order: ViewLock before BcbLock. */
    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
    RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    if (CacheSeg->Dirty)
    {
        /* Keep the global dirty page accounting consistent. */
        RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
        DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;

    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    Status = CcRosInternalFreeCacheSegment(CacheSeg);
    return(Status);
}
935
/*
 * @implemented
 */
/*
 * Flush the dirty cache segments of a file back to disk. With a NULL
 * FileOffset the whole file (up to FileSize) is flushed. IoStatus, when
 * supplied, receives STATUS_SUCCESS or the first flush error, or
 * STATUS_INVALID_PARAMETER when the file has no shared cache map.
 */
VOID NTAPI
CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
             IN PLARGE_INTEGER FileOffset OPTIONAL,
             IN ULONG Length,
             OUT PIO_STATUS_BLOCK IoStatus)
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No offset given: flush the entire file. */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one cache segment at a time. */
        while (Length > 0)
        {
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    /* Record the first failure but keep flushing. */
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                /* Release the lock and reference taken by the lookup. */
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                ExReleasePushLock(&current->Lock);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
            }

            Offset.QuadPart += Bcb->CacheSegmentSize;
            /* Guard against unsigned underflow on the final segment. */
            if (Length > Bcb->CacheSegmentSize)
            {
                Length -= Bcb->CacheSegmentSize;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1012
1013 NTSTATUS
1014 NTAPI
1015 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1016 /*
1017 * FUNCTION: Releases the BCB associated with a file object
1018 */
1019 {
1020 PLIST_ENTRY current_entry;
1021 PCACHE_SEGMENT current;
1022 LIST_ENTRY FreeList;
1023 KIRQL oldIrql;
1024
1025 ASSERT(Bcb);
1026
1027 Bcb->RefCount++;
1028 KeReleaseGuardedMutex(&ViewLock);
1029
1030 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1031
1032 KeAcquireGuardedMutex(&ViewLock);
1033 Bcb->RefCount--;
1034 if (Bcb->RefCount == 0)
1035 {
1036 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1037 {
1038 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1039 Bcb->BcbRemoveListEntry.Flink = NULL;
1040 }
1041
1042 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1043
1044 /*
1045 * Release all cache segments.
1046 */
1047 InitializeListHead(&FreeList);
1048 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1049 current_entry = Bcb->BcbSegmentListHead.Flink;
1050 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1051 {
1052 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1053 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1054 RemoveEntryList(&current->CacheSegmentListEntry);
1055 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1056 if (current->Dirty)
1057 {
1058 RemoveEntryList(&current->DirtySegmentListEntry);
1059 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1060 DPRINT1("Freeing dirty segment\n");
1061 }
1062 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1063 }
1064 #if DBG
1065 Bcb->Trace = FALSE;
1066 #endif
1067 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1068
1069 KeReleaseGuardedMutex(&ViewLock);
1070 ObDereferenceObject (Bcb->FileObject);
1071
1072 while (!IsListEmpty(&FreeList))
1073 {
1074 current_entry = RemoveTailList(&FreeList);
1075 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1076 CcRosInternalFreeCacheSegment(current);
1077 }
1078 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1079 KeAcquireGuardedMutex(&ViewLock);
1080 }
1081 return(STATUS_SUCCESS);
1082 }
1083
1084 VOID
1085 NTAPI
1086 CcRosReferenceCache(PFILE_OBJECT FileObject)
1087 {
1088 PBCB Bcb;
1089 KeAcquireGuardedMutex(&ViewLock);
1090 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1091 ASSERT(Bcb);
1092 if (Bcb->RefCount == 0)
1093 {
1094 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1095 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1096 Bcb->BcbRemoveListEntry.Flink = NULL;
1097
1098 }
1099 else
1100 {
1101 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1102 }
1103 Bcb->RefCount++;
1104 KeReleaseGuardedMutex(&ViewLock);
1105 }
1106
1107 VOID
1108 NTAPI
1109 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1110 {
1111 PBCB Bcb;
1112 DPRINT("CcRosSetRemoveOnClose()\n");
1113 KeAcquireGuardedMutex(&ViewLock);
1114 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1115 if (Bcb)
1116 {
1117 Bcb->RemoveOnClose = TRUE;
1118 if (Bcb->RefCount == 0)
1119 {
1120 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1121 }
1122 }
1123 KeReleaseGuardedMutex(&ViewLock);
1124 }
1125
1126
1127 VOID
1128 NTAPI
1129 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1130 {
1131 PBCB Bcb;
1132 KeAcquireGuardedMutex(&ViewLock);
1133 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1134 ASSERT(Bcb);
1135 if (Bcb->RefCount > 0)
1136 {
1137 Bcb->RefCount--;
1138 if (Bcb->RefCount == 0)
1139 {
1140 MmFreeSectionSegments(Bcb->FileObject);
1141 CcRosDeleteFileCache(FileObject, Bcb);
1142 }
1143 }
1144 KeReleaseGuardedMutex(&ViewLock);
1145 }
1146
1147 NTSTATUS NTAPI
1148 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1149 /*
1150 * FUNCTION: Called by the file system when a handle to a file object
1151 * has been closed.
1152 */
1153 {
1154 PBCB Bcb;
1155
1156 KeAcquireGuardedMutex(&ViewLock);
1157
1158 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1159 {
1160 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1161 if (FileObject->PrivateCacheMap != NULL)
1162 {
1163 FileObject->PrivateCacheMap = NULL;
1164 if (Bcb->RefCount > 0)
1165 {
1166 Bcb->RefCount--;
1167 if (Bcb->RefCount == 0)
1168 {
1169 MmFreeSectionSegments(Bcb->FileObject);
1170 CcRosDeleteFileCache(FileObject, Bcb);
1171 }
1172 }
1173 }
1174 }
1175 KeReleaseGuardedMutex(&ViewLock);
1176 return(STATUS_SUCCESS);
1177 }
1178
1179 NTSTATUS
1180 NTAPI
1181 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1182 {
1183 PBCB Bcb;
1184 NTSTATUS Status;
1185
1186 KeAcquireGuardedMutex(&ViewLock);
1187
1188 ASSERT(FileObject->SectionObjectPointer);
1189 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1190 if (Bcb == NULL)
1191 {
1192 Status = STATUS_UNSUCCESSFUL;
1193 }
1194 else
1195 {
1196 if (FileObject->PrivateCacheMap == NULL)
1197 {
1198 FileObject->PrivateCacheMap = Bcb;
1199 Bcb->RefCount++;
1200 }
1201 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1202 {
1203 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1204 Bcb->BcbRemoveListEntry.Flink = NULL;
1205 }
1206 Status = STATUS_SUCCESS;
1207 }
1208 KeReleaseGuardedMutex(&ViewLock);
1209
1210 return Status;
1211 }
1212
1213
1214 NTSTATUS NTAPI
1215 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1216 ULONG CacheSegmentSize,
1217 PCACHE_MANAGER_CALLBACKS CallBacks,
1218 PVOID LazyWriterContext)
1219 /*
1220 * FUNCTION: Initializes a BCB for a file object
1221 */
1222 {
1223 PBCB Bcb;
1224
1225 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1226 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1227 FileObject, Bcb, CacheSegmentSize);
1228
1229 KeAcquireGuardedMutex(&ViewLock);
1230 if (Bcb == NULL)
1231 {
1232 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1233 if (Bcb == NULL)
1234 {
1235 KeReleaseGuardedMutex(&ViewLock);
1236 return(STATUS_UNSUCCESSFUL);
1237 }
1238 memset(Bcb, 0, sizeof(BCB));
1239 ObReferenceObjectByPointer(FileObject,
1240 FILE_ALL_ACCESS,
1241 NULL,
1242 KernelMode);
1243 Bcb->FileObject = FileObject;
1244 Bcb->CacheSegmentSize = CacheSegmentSize;
1245 Bcb->Callbacks = CallBacks;
1246 Bcb->LazyWriteContext = LazyWriterContext;
1247 if (FileObject->FsContext)
1248 {
1249 Bcb->AllocationSize =
1250 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1251 Bcb->FileSize =
1252 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1253 }
1254 KeInitializeSpinLock(&Bcb->BcbLock);
1255 InitializeListHead(&Bcb->BcbSegmentListHead);
1256 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1257 }
1258 if (FileObject->PrivateCacheMap == NULL)
1259 {
1260 FileObject->PrivateCacheMap = Bcb;
1261 Bcb->RefCount++;
1262 }
1263 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1264 {
1265 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1266 Bcb->BcbRemoveListEntry.Flink = NULL;
1267 }
1268 KeReleaseGuardedMutex(&ViewLock);
1269
1270 return(STATUS_SUCCESS);
1271 }
1272
1273 /*
1274 * @implemented
1275 */
1276 PFILE_OBJECT NTAPI
1277 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1278 {
1279 PBCB Bcb;
1280 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1281 {
1282 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1283 ASSERT(Bcb);
1284 return Bcb->FileObject;
1285 }
1286 return NULL;
1287 }
1288
1289 VOID
1290 INIT_FUNCTION
1291 NTAPI
1292 CcInitView(VOID)
1293 {
1294 #ifdef CACHE_BITMAP
1295 PMEMORY_AREA marea;
1296 PVOID Buffer;
1297 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1298 #endif
1299
1300 DPRINT("CcInitView()\n");
1301 #ifdef CACHE_BITMAP
1302 BoundaryAddressMultiple.QuadPart = 0;
1303 CiCacheSegMappingRegionHint = 0;
1304 CiCacheSegMappingRegionBase = NULL;
1305
1306 MmLockAddressSpace(MmGetKernelAddressSpace());
1307
1308 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1309 MEMORY_AREA_CACHE_SEGMENT,
1310 &CiCacheSegMappingRegionBase,
1311 CI_CACHESEG_MAPPING_REGION_SIZE,
1312 PAGE_READWRITE,
1313 &marea,
1314 FALSE,
1315 0,
1316 BoundaryAddressMultiple);
1317 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1318 if (!NT_SUCCESS(Status))
1319 {
1320 KeBugCheck(CACHE_MANAGER);
1321 }
1322
1323 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1324 if (!Buffer)
1325 {
1326 KeBugCheck(CACHE_MANAGER);
1327 }
1328
1329 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1330 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1331
1332 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1333 #endif
1334 InitializeListHead(&CacheSegmentListHead);
1335 InitializeListHead(&DirtySegmentListHead);
1336 InitializeListHead(&CacheSegmentLRUListHead);
1337 InitializeListHead(&ClosedListHead);
1338 KeInitializeGuardedMutex(&ViewLock);
1339 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1340 NULL,
1341 NULL,
1342 0,
1343 sizeof(INTERNAL_BCB),
1344 TAG_IBCB,
1345 20);
1346 ExInitializeNPagedLookasideList (&BcbLookasideList,
1347 NULL,
1348 NULL,
1349 0,
1350 sizeof(BCB),
1351 TAG_BCB,
1352 20);
1353 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1354 NULL,
1355 NULL,
1356 0,
1357 sizeof(CACHE_SEGMENT),
1358 TAG_CSEG,
1359 20);
1360
1361 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1362
1363 CcInitCacheZeroPage();
1364
1365 }
1366
1367 /* EOF */
1368
1369
1370
1371
1372
1373
1374
1375