1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If the page is either being read or not being completely overwritten,
25 * and it is not up to date, then read its data from the underlying medium. If
26 * the read fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
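/*
 * A minimal sketch of the read-dispatch pattern described above, using the
 * CcRos* segment interfaces implemented in this file rather than the
 * CcInitializeFileCache/CcRequestCachePage names used in the notes.
 * Illustrative only (hence excluded from the build); it assumes caching has
 * already been initialized for the Bcb, that the request does not cross a
 * cache segment boundary, and that ReadCacheSegment (cc/copy.c) is available.
 */
#if 0
static NTSTATUS
ExampleCopyReadFromCache(PBCB Bcb, ULONG FileOffset, ULONG Length, PVOID Buffer)
{
    PCACHE_SEGMENT CacheSeg;
    PVOID BaseAddress;
    BOOLEAN UptoDate;
    ULONG SegOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
    NTSTATUS Status;

    /* (2) Obtain the cache segment covering the requested offset. */
    Status = CcRosRequestCacheSegment(Bcb, SegOffset, &BaseAddress, &UptoDate, &CacheSeg);
    if (!NT_SUCCESS(Status))
        return Status;

    /* (3) The data is being read, so bring the segment up to date if needed. */
    if (!UptoDate)
    {
        Status = ReadCacheSegment(CacheSeg);
        if (!NT_SUCCESS(Status))
        {
            /* Release with Valid == FALSE and return the error. */
            CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
            return Status;
        }
    }

    /* (4) Copy the data out of the view. */
    RtlCopyMemory(Buffer, (PUCHAR)BaseAddress + (FileOffset - SegOffset), Length);

    /* (5) Release the cache segment: valid, not dirtied, not mapped. */
    return CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
}
#endif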
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocates/deallocates space from this block
47 * via a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
73 #if DBG
74 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
75 {
76 ++cs->ReferenceCount;
77 if ( cs->Bcb->Trace )
78 {
79 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
80 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
81 }
82 }
83 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
84 {
85 --cs->ReferenceCount;
86 if ( cs->Bcb->Trace )
87 {
88 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
89 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
90 }
91 }
92 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
93 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
94 #else
95 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
96 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
97 #endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
105 VOID
106 NTAPI
107 CcRosTraceCacheMap (
108 PBCB Bcb,
109 BOOLEAN Trace )
110 {
111 #if DBG
112 KIRQL oldirql;
113 PLIST_ENTRY current_entry;
114 PCACHE_SEGMENT current;
115
116 if ( !Bcb )
117 return;
118
119 Bcb->Trace = Trace;
120
121 if ( Trace )
122 {
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
124
125 KeAcquireGuardedMutex(&ViewLock);
126 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
127
128 current_entry = Bcb->BcbSegmentListHead.Flink;
129 while (current_entry != &Bcb->BcbSegmentListHead)
130 {
131 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
132 current_entry = current_entry->Flink;
133
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
135 current, current->ReferenceCount, current->Dirty, current->PageOut );
136 }
137 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
138 KeReleaseGuardedMutex(&ViewLock);
139 }
140 else
141 {
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
143 }
144
145 #else
146 UNREFERENCED_PARAMETER(Bcb);
147 UNREFERENCED_PARAMETER(Trace);
148 #endif
149 }
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
154 {
155 NTSTATUS Status;
156 KIRQL oldIrql;
157
158 Status = WriteCacheSegment(CacheSegment);
159 if (NT_SUCCESS(Status))
160 {
161 KeAcquireGuardedMutex(&ViewLock);
162 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
163
164 CacheSegment->Dirty = FALSE;
165 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
166 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
167 CcRosCacheSegmentDecRefCount ( CacheSegment );
168
169 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
170 KeReleaseGuardedMutex(&ViewLock);
171 }
172
173 return(Status);
174 }
175
176 NTSTATUS
177 NTAPI
178 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
179 {
180 PLIST_ENTRY current_entry;
181 PCACHE_SEGMENT current;
182 ULONG PagesPerSegment;
183 BOOLEAN Locked;
184 NTSTATUS Status;
185 static ULONG WriteCount[4] = {0, 0, 0, 0};
186 ULONG NewTarget;
187
188 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
189
190 (*Count) = 0;
191
192 KeEnterCriticalRegion();
193 KeAcquireGuardedMutex(&ViewLock);
194
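/*
 * Age the per-pass write history and derive this pass's lazy-write quota:
 * whatever is dirty beyond the amount already scheduled is spread, a quarter
 * at a time, over the next four passes.
 */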
195 WriteCount[0] = WriteCount[1];
196 WriteCount[1] = WriteCount[2];
197 WriteCount[2] = WriteCount[3];
198 WriteCount[3] = 0;
199
200 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
201
202 if (NewTarget < DirtyPageCount)
203 {
204 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
205 WriteCount[0] += NewTarget;
206 WriteCount[1] += NewTarget;
207 WriteCount[2] += NewTarget;
208 WriteCount[3] += NewTarget;
209 }
210
211 NewTarget = WriteCount[0];
212
213 Target = max(NewTarget, Target);
214
215 current_entry = DirtySegmentListHead.Flink;
216 if (current_entry == &DirtySegmentListHead)
217 {
218 DPRINT("No Dirty pages\n");
219 }
220
221 while (current_entry != &DirtySegmentListHead && Target > 0)
222 {
223 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
224 DirtySegmentListEntry);
225 current_entry = current_entry->Flink;
226
227 Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
228 current->Bcb->LazyWriteContext, FALSE);
229 if (!Locked)
230 {
231 continue;
232 }
233
234 Locked = ExTryToAcquirePushLockExclusive(&current->Lock);
235 if (!Locked)
236 {
237 current->Bcb->Callbacks->ReleaseFromLazyWrite(
238 current->Bcb->LazyWriteContext);
239
240 continue;
241 }
242
243 ASSERT(current->Dirty);
244 if (current->ReferenceCount > 1)
245 {
246 ExReleasePushLock(&current->Lock);
247 current->Bcb->Callbacks->ReleaseFromLazyWrite(
248 current->Bcb->LazyWriteContext);
249 continue;
250 }
251
252 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
253
254 KeReleaseGuardedMutex(&ViewLock);
255
256 Status = CcRosFlushCacheSegment(current);
257
258 ExReleasePushLock(&current->Lock);
259 current->Bcb->Callbacks->ReleaseFromLazyWrite(
260 current->Bcb->LazyWriteContext);
261
262 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
263 {
264 DPRINT1("CC: Failed to flush cache segment.\n");
265 }
266 else
267 {
268 (*Count) += PagesPerSegment;
269 Target -= PagesPerSegment;
270 }
271
272 KeAcquireGuardedMutex(&ViewLock);
273 current_entry = DirtySegmentListHead.Flink;
274 }
275
276 if (*Count < NewTarget)
277 {
278 WriteCount[1] += (NewTarget - *Count);
279 }
280
281 KeReleaseGuardedMutex(&ViewLock);
282 KeLeaveCriticalRegion();
283
284 DPRINT("CcRosFlushDirtyPages() finished\n");
285 return(STATUS_SUCCESS);
286 }
287
288 NTSTATUS
289 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
290 /*
291 * FUNCTION: Try to free some memory from the file cache.
292 * ARGUMENTS:
293 * Target - The number of pages to be freed.
294 * Priority - The priority of the free operation (currently unused).
295 * NrFreed - Points to a variable where the number of pages
296 * actually freed is returned.
297 */
298 {
299 PLIST_ENTRY current_entry;
300 PCACHE_SEGMENT current;
301 ULONG PagesPerSegment;
302 ULONG PagesFreed;
303 KIRQL oldIrql;
304 LIST_ENTRY FreeList;
305 PFN_NUMBER Page;
306 ULONG i;
307
308 DPRINT("CcRosTrimCache(Target %d)\n", Target);
309
310 InitializeListHead(&FreeList);
311
312 /* Flush dirty pages to disk */
313 CcRosFlushDirtyPages(Target, NrFreed);
314
315 if ((*NrFreed) != 0) DPRINT1("Flushed %d dirty cache pages to disk\n", (*NrFreed));
316
317 *NrFreed = 0;
318
319 KeAcquireGuardedMutex(&ViewLock);
320
321 current_entry = CacheSegmentLRUListHead.Flink;
322 while (current_entry != &CacheSegmentLRUListHead)
323 {
324 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
325 CacheSegmentLRUListEntry);
326 current_entry = current_entry->Flink;
327
328 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
329
330 /* Reference the cache segment */
331 CcRosCacheSegmentIncRefCount(current);
332
333 /* Check if it's mapped and not dirty */
334 if (current->MappedCount > 0 && !current->Dirty)
335 {
336 /* We have to break these locks because Cc sucks */
337 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
338 KeReleaseGuardedMutex(&ViewLock);
339
340 /* Page out the segment */
341 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
342 {
343 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
344
345 MmPageOutPhysicalAddress(Page);
346 }
347
348 /* Reacquire the locks */
349 KeAcquireGuardedMutex(&ViewLock);
350 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
351 }
352
353 /* Dereference the cache segment */
354 CcRosCacheSegmentDecRefCount(current);
355
356 /* Check if we can free this entry now */
357 if (current->ReferenceCount == 0)
358 {
359 ASSERT(!current->Dirty);
360 ASSERT(!current->MappedCount);
361
362 RemoveEntryList(&current->BcbSegmentListEntry);
363 RemoveEntryList(&current->CacheSegmentListEntry);
364 RemoveEntryList(&current->CacheSegmentLRUListEntry);
365 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
366
367 /* Calculate how many pages we freed for Mm */
368 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
369 PagesFreed = min(PagesPerSegment, Target);
370 Target -= PagesFreed;
371 (*NrFreed) += PagesFreed;
372 }
373
374 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
375 }
376
377 KeReleaseGuardedMutex(&ViewLock);
378
379 while (!IsListEmpty(&FreeList))
380 {
381 current_entry = RemoveHeadList(&FreeList);
382 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
383 BcbSegmentListEntry);
384 CcRosInternalFreeCacheSegment(current);
385 }
386
387 DPRINT1("Evicted %d cache pages\n", (*NrFreed));
388
389 return(STATUS_SUCCESS);
390 }
391
392 NTSTATUS
393 NTAPI
394 CcRosReleaseCacheSegment(PBCB Bcb,
395 PCACHE_SEGMENT CacheSeg,
396 BOOLEAN Valid,
397 BOOLEAN Dirty,
398 BOOLEAN Mapped)
399 {
400 BOOLEAN WasDirty = CacheSeg->Dirty;
401 KIRQL oldIrql;
402
403 ASSERT(Bcb);
404
405 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
406 Bcb, CacheSeg, Valid);
407
408 CacheSeg->Valid = Valid;
409 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
410
411 KeAcquireGuardedMutex(&ViewLock);
412 if (!WasDirty && CacheSeg->Dirty)
413 {
414 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
415 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
416 }
417 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
418 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
419
420 if (Mapped)
421 {
422 CacheSeg->MappedCount++;
423 }
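/*
 * Drop the caller's reference under the BCB lock; the first mapping and a
 * newly dirtied segment each take an extra reference of their own so the
 * segment cannot be freed while it is mapped or awaiting a flush.
 */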
424 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
425 CcRosCacheSegmentDecRefCount(CacheSeg);
426 if (Mapped && CacheSeg->MappedCount == 1)
427 {
428 CcRosCacheSegmentIncRefCount(CacheSeg);
429 }
430 if (!WasDirty && CacheSeg->Dirty)
431 {
432 CcRosCacheSegmentIncRefCount(CacheSeg);
433 }
434 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
435 KeReleaseGuardedMutex(&ViewLock);
436 ExReleasePushLock(&CacheSeg->Lock);
437
438 return(STATUS_SUCCESS);
439 }
440
441 /* Returns with the cache segment referenced and its push lock held exclusively! */
442 PCACHE_SEGMENT
443 NTAPI
444 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
445 {
446 PLIST_ENTRY current_entry;
447 PCACHE_SEGMENT current;
448 KIRQL oldIrql;
449
450 ASSERT(Bcb);
451
452 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
453
454 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
455 current_entry = Bcb->BcbSegmentListHead.Flink;
456 while (current_entry != &Bcb->BcbSegmentListHead)
457 {
458 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
459 BcbSegmentListEntry);
460 if (current->FileOffset <= FileOffset &&
461 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
462 {
463 CcRosCacheSegmentIncRefCount(current);
464 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
465 ExAcquirePushLockExclusive(&current->Lock);
466 return(current);
467 }
468 current_entry = current_entry->Flink;
469 }
470 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
471 return(NULL);
472 }
473
474 NTSTATUS
475 NTAPI
476 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
477 {
478 PCACHE_SEGMENT CacheSeg;
479 KIRQL oldIrql;
480
481 ASSERT(Bcb);
482
483 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
484
485 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
486 if (CacheSeg == NULL)
487 {
488 KeBugCheck(CACHE_MANAGER);
489 }
490 if (!CacheSeg->Dirty)
491 {
492 KeAcquireGuardedMutex(&ViewLock);
493 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
494 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
495 KeReleaseGuardedMutex(&ViewLock);
496 }
497 else
498 {
499 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
500 CcRosCacheSegmentDecRefCount(CacheSeg);
501 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
502 }
503
504
505 CacheSeg->Dirty = TRUE;
506 ExReleasePushLock(&CacheSeg->Lock);
507
508 return(STATUS_SUCCESS);
509 }
510
511 NTSTATUS
512 NTAPI
513 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
514 {
515 PCACHE_SEGMENT CacheSeg;
516 BOOLEAN WasDirty;
517 KIRQL oldIrql;
518
519 ASSERT(Bcb);
520
521 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
522 Bcb, FileOffset, NowDirty);
523
524 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
525 if (CacheSeg == NULL)
526 {
527 return(STATUS_UNSUCCESSFUL);
528 }
529
530 WasDirty = CacheSeg->Dirty;
531 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
532
533 CacheSeg->MappedCount--;
534
535 if (!WasDirty && NowDirty)
536 {
537 KeAcquireGuardedMutex(&ViewLock);
538 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
539 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
540 KeReleaseGuardedMutex(&ViewLock);
541 }
542
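/*
 * Balance the reference taken by CcRosLookupCacheSegment above, pin the
 * segment again if it just became dirty, and drop the mapping reference once
 * the last mapping is gone.
 */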
543 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
544 CcRosCacheSegmentDecRefCount(CacheSeg);
545 if (!WasDirty && NowDirty)
546 {
547 CcRosCacheSegmentIncRefCount(CacheSeg);
548 }
549 if (CacheSeg->MappedCount == 0)
550 {
551 CcRosCacheSegmentDecRefCount(CacheSeg);
552 }
553 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
554
555 ExReleasePushLock(&CacheSeg->Lock);
556 return(STATUS_SUCCESS);
557 }
558
559 static
560 NTSTATUS
561 CcRosCreateCacheSegment(PBCB Bcb,
562 ULONG FileOffset,
563 PCACHE_SEGMENT* CacheSeg)
564 {
565 PCACHE_SEGMENT current;
566 PCACHE_SEGMENT previous;
567 PLIST_ENTRY current_entry;
568 NTSTATUS Status;
569 KIRQL oldIrql;
570 #ifdef CACHE_BITMAP
571 ULONG StartingOffset;
572 #endif
573 PHYSICAL_ADDRESS BoundaryAddressMultiple;
574
575 ASSERT(Bcb);
576
577 DPRINT("CcRosCreateCacheSegment()\n");
578
579 BoundaryAddressMultiple.QuadPart = 0;
580 if (FileOffset >= Bcb->FileSize.u.LowPart)
581 {
582 *CacheSeg = NULL;
583 return STATUS_INVALID_PARAMETER;
584 }
585
586 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
587 current->Valid = FALSE;
588 current->Dirty = FALSE;
589 current->PageOut = FALSE;
590 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
591 current->Bcb = Bcb;
592 #if DBG
593 if ( Bcb->Trace )
594 {
595 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
596 }
597 #endif
598 current->MappedCount = 0;
599 current->DirtySegmentListEntry.Flink = NULL;
600 current->DirtySegmentListEntry.Blink = NULL;
601 current->ReferenceCount = 1;
602 ExInitializePushLock(&current->Lock);
603 ExAcquirePushLockExclusive(&current->Lock);
604 KeAcquireGuardedMutex(&ViewLock);
605
606 *CacheSeg = current;
607 /* There is a window between the call to CcRosLookupCacheSegment
608 * and CcRosCreateCacheSegment. We must check whether a segment for
609 * this file offset already exists. If it does, we release our
610 * newly created segment and return the existing one.
611 */
612 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
613 current_entry = Bcb->BcbSegmentListHead.Flink;
614 previous = NULL;
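/*
 * Walk the per-BCB segment list (kept ordered by file offset), remembering
 * the last segment that starts before the requested offset so the new
 * segment can be linked in after it.
 */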
615 while (current_entry != &Bcb->BcbSegmentListHead)
616 {
617 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
618 BcbSegmentListEntry);
619 if (current->FileOffset <= FileOffset &&
620 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
621 {
622 CcRosCacheSegmentIncRefCount(current);
623 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
624 #if DBG
625 if ( Bcb->Trace )
626 {
627 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
628 Bcb,
629 (*CacheSeg),
630 current );
631 }
632 #endif
633 ExReleasePushLock(&(*CacheSeg)->Lock);
634 KeReleaseGuardedMutex(&ViewLock);
635 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
636 *CacheSeg = current;
637 ExAcquirePushLockExclusive(&current->Lock);
638 return STATUS_SUCCESS;
639 }
640 if (current->FileOffset < FileOffset)
641 {
642 if (previous == NULL)
643 {
644 previous = current;
645 }
646 else
647 {
648 if (previous->FileOffset < current->FileOffset)
649 {
650 previous = current;
651 }
652 }
653 }
654 current_entry = current_entry->Flink;
655 }
656 /* There was no existing segment. */
657 current = *CacheSeg;
658 if (previous)
659 {
660 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
661 }
662 else
663 {
664 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
665 }
666 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
667 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
668 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
669 KeReleaseGuardedMutex(&ViewLock);
670 #ifdef CACHE_BITMAP
671 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
672
673 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
674
675 if (StartingOffset == 0xffffffff)
676 {
677 DPRINT1("Out of CacheSeg mapping space\n");
678 KeBugCheck(CACHE_MANAGER);
679 }
680
681 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
682
683 if (CiCacheSegMappingRegionHint == StartingOffset)
684 {
685 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
686 }
687
688 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
689 #else
690 MmLockAddressSpace(MmGetKernelAddressSpace());
691 current->BaseAddress = NULL;
692 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
693 0, // nothing checks for cache_segment mareas, so set to 0
694 &current->BaseAddress,
695 Bcb->CacheSegmentSize,
696 PAGE_READWRITE,
697 (PMEMORY_AREA*)&current->MemoryArea,
698 FALSE,
699 0,
700 BoundaryAddressMultiple);
701 MmUnlockAddressSpace(MmGetKernelAddressSpace());
702 if (!NT_SUCCESS(Status))
703 {
704 KeBugCheck(CACHE_MANAGER);
705 }
706 #endif
707
708 /* Create a virtual mapping for this memory area */
709 MI_SET_USAGE(MI_USAGE_CACHE);
710 #if MI_TRACE_PFNS
711 PWCHAR pos = NULL;
712 ULONG len = 0;
713 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
714 {
715 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
716 len = pos ? wcslen(pos) * sizeof(WCHAR) : 0;
717 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
718 }
719 #endif
720
721 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
722 MC_CACHE, PAGE_READWRITE);
723
724 return(STATUS_SUCCESS);
725 }
726
727 NTSTATUS
728 NTAPI
729 CcRosGetCacheSegmentChain(PBCB Bcb,
730 ULONG FileOffset,
731 ULONG Length,
732 PCACHE_SEGMENT* CacheSeg)
733 {
734 PCACHE_SEGMENT current;
735 ULONG i;
736 PCACHE_SEGMENT* CacheSegList;
737 PCACHE_SEGMENT Previous = NULL;
738
739 ASSERT(Bcb);
740
741 DPRINT("CcRosGetCacheSegmentChain()\n");
742
743 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
744
745 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
746 (Length / Bcb->CacheSegmentSize));
747
748 /*
749 * Look for a cache segment already mapping the same data.
750 */
751 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
752 {
753 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
754 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
755 if (current != NULL)
756 {
757 CacheSegList[i] = current;
758 }
759 else
760 {
761 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
762 CacheSegList[i] = current;
763 }
764 }
765
766 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
767 {
768 if (i == 0)
769 {
770 *CacheSeg = CacheSegList[i];
771 Previous = CacheSegList[i];
772 }
773 else
774 {
775 Previous->NextInChain = CacheSegList[i];
776 Previous = CacheSegList[i];
777 }
778 }
779 ASSERT(Previous);
780 Previous->NextInChain = NULL;
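/*
 * Every segment in the chain comes back referenced and with its push lock
 * held (from CcRosLookupCacheSegment/CcRosCreateCacheSegment), linked via
 * NextInChain and NULL-terminated; the caller must release each one.
 */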
781
782 return(STATUS_SUCCESS);
783 }
784
785 NTSTATUS
786 NTAPI
787 CcRosGetCacheSegment(PBCB Bcb,
788 ULONG FileOffset,
789 PULONG BaseOffset,
790 PVOID* BaseAddress,
791 PBOOLEAN UptoDate,
792 PCACHE_SEGMENT* CacheSeg)
793 {
794 PCACHE_SEGMENT current;
795 NTSTATUS Status;
796
797 ASSERT(Bcb);
798
799 DPRINT("CcRosGetCacheSegment()\n");
800
801 /*
802 * Look for a cache segment already mapping the same data.
803 */
804 current = CcRosLookupCacheSegment(Bcb, FileOffset);
805 if (current == NULL)
806 {
807 /*
808 * Otherwise create a new segment.
809 */
810 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
811 if (!NT_SUCCESS(Status))
812 {
813 return Status;
814 }
815 }
816 /*
817 * Return information about the segment to the caller.
818 */
819 *UptoDate = current->Valid;
820 *BaseAddress = current->BaseAddress;
821 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
822 *CacheSeg = current;
823 *BaseOffset = current->FileOffset;
824 return(STATUS_SUCCESS);
825 }
826
827 NTSTATUS NTAPI
828 CcRosRequestCacheSegment(PBCB Bcb,
829 ULONG FileOffset,
830 PVOID* BaseAddress,
831 PBOOLEAN UptoDate,
832 PCACHE_SEGMENT* CacheSeg)
833 /*
834 * FUNCTION: Request a page mapping for a BCB
835 */
836 {
837 ULONG BaseOffset;
838
839 ASSERT(Bcb);
840
841 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
842 {
843 DPRINT1("Bad fileoffset %x should be multiple of %x",
844 FileOffset, Bcb->CacheSegmentSize);
845 KeBugCheck(CACHE_MANAGER);
846 }
847
848 return(CcRosGetCacheSegment(Bcb,
849 FileOffset,
850 &BaseOffset,
851 BaseAddress,
852 UptoDate,
853 CacheSeg));
854 }
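/*
 * The complementary write-dispatch step from the NOTES at the top of this
 * file, sketched against CcRosRequestCacheSegment above: a partial overwrite
 * of a segment that is not yet valid must read it in first, then copy the
 * new data and release the segment dirty so the lazy writer flushes it later.
 * Illustrative only and excluded from the build; ReadCacheSegment is assumed
 * from cc/copy.c and the write is assumed not to cross a segment boundary.
 */
#if 0
static NTSTATUS
ExampleCopyWriteToCache(PBCB Bcb, ULONG FileOffset, ULONG Length, PVOID Buffer)
{
    PCACHE_SEGMENT CacheSeg;
    PVOID BaseAddress;
    BOOLEAN UptoDate;
    ULONG SegOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
    NTSTATUS Status;

    Status = CcRosRequestCacheSegment(Bcb, SegOffset, &BaseAddress, &UptoDate, &CacheSeg);
    if (!NT_SUCCESS(Status))
        return Status;

    /* Not valid and not completely overwritten: read the old contents first. */
    if (!UptoDate && (FileOffset != SegOffset || Length < Bcb->CacheSegmentSize))
    {
        Status = ReadCacheSegment(CacheSeg);
        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
            return Status;
        }
    }

    RtlCopyMemory((PUCHAR)BaseAddress + (FileOffset - SegOffset), Buffer, Length);

    /* Release valid and dirty; CcRosFlushDirtyPages will write it back. */
    return CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, TRUE, FALSE);
}
#endif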
855 #ifdef CACHE_BITMAP
856 #else
857 static VOID
858 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
859 PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
860 {
861 ASSERT(SwapEntry == 0);
862 if (Page != 0)
863 {
864 MmReleasePageMemoryConsumer(MC_CACHE, Page);
865 }
866 }
867 #endif
868 NTSTATUS
869 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
870 /*
871 * FUNCTION: Frees a cache segment and releases the pages backing it
872 */
873 {
874 #ifdef CACHE_BITMAP
875 ULONG i;
876 ULONG RegionSize;
877 ULONG Base;
878 PFN_NUMBER Page;
879 KIRQL oldIrql;
880 #endif
881 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
882 #if DBG
883 if ( CacheSeg->Bcb->Trace )
884 {
885 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
886 }
887 #endif
888 #ifdef CACHE_BITMAP
889 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
890
891 /* Unmap all the pages. */
892 for (i = 0; i < RegionSize; i++)
893 {
894 MmDeleteVirtualMapping(NULL,
895 CacheSeg->BaseAddress + (i * PAGE_SIZE),
896 FALSE,
897 NULL,
898 &Page);
899 MmReleasePageMemoryConsumer(MC_CACHE, Page);
900 }
901
902 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
903 /* Deallocate all the pages used. */
904 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
905
906 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
907
908 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
909
910 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
911 #else
912 MmLockAddressSpace(MmGetKernelAddressSpace());
913 MmFreeMemoryArea(MmGetKernelAddressSpace(),
914 CacheSeg->MemoryArea,
915 CcFreeCachePage,
916 NULL);
917 MmUnlockAddressSpace(MmGetKernelAddressSpace());
918 #endif
919 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
920 return(STATUS_SUCCESS);
921 }
922
923 NTSTATUS
924 NTAPI
925 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
926 {
927 NTSTATUS Status;
928 KIRQL oldIrql;
929
930 ASSERT(Bcb);
931
932 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
933 Bcb, CacheSeg);
934
935 KeAcquireGuardedMutex(&ViewLock);
936 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
937 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
938 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
939 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
940 if (CacheSeg->Dirty)
941 {
942 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
943 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
944
945 }
946 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
947 KeReleaseGuardedMutex(&ViewLock);
948
949 Status = CcRosInternalFreeCacheSegment(CacheSeg);
950 return(Status);
951 }
952
953 /*
954 * @implemented
955 */
956 VOID NTAPI
957 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
958 IN PLARGE_INTEGER FileOffset OPTIONAL,
959 IN ULONG Length,
960 OUT PIO_STATUS_BLOCK IoStatus)
961 {
962 PBCB Bcb;
963 LARGE_INTEGER Offset;
964 PCACHE_SEGMENT current;
965 NTSTATUS Status;
966 KIRQL oldIrql;
967
968 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
969 SectionObjectPointers, FileOffset, Length, IoStatus);
970
971 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
972 {
973 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
974 ASSERT(Bcb);
975 if (FileOffset)
976 {
977 Offset = *FileOffset;
978 }
979 else
980 {
981 Offset.QuadPart = (LONGLONG)0;
982 Length = Bcb->FileSize.u.LowPart;
983 }
984
985 if (IoStatus)
986 {
987 IoStatus->Status = STATUS_SUCCESS;
988 IoStatus->Information = 0;
989 }
990
991 while (Length > 0)
992 {
993 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
994 if (current != NULL)
995 {
996 if (current->Dirty)
997 {
998 Status = CcRosFlushCacheSegment(current);
999 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1000 {
1001 IoStatus->Status = Status;
1002 }
1003 }
1004 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1005 ExReleasePushLock(&current->Lock);
1006 CcRosCacheSegmentDecRefCount(current);
1007 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1008 }
1009
1010 Offset.QuadPart += Bcb->CacheSegmentSize;
1011 if (Length > Bcb->CacheSegmentSize)
1012 {
1013 Length -= Bcb->CacheSegmentSize;
1014 }
1015 else
1016 {
1017 Length = 0;
1018 }
1019 }
1020 }
1021 else
1022 {
1023 if (IoStatus)
1024 {
1025 IoStatus->Status = STATUS_INVALID_PARAMETER;
1026 }
1027 }
1028 }
1029
1030 NTSTATUS
1031 NTAPI
1032 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1033 /*
1034 * FUNCTION: Releases the BCB associated with a file object. Called with ViewLock held.
1035 */
1036 {
1037 PLIST_ENTRY current_entry;
1038 PCACHE_SEGMENT current;
1039 LIST_ENTRY FreeList;
1040 KIRQL oldIrql;
1041
1042 ASSERT(Bcb);
1043
1044 Bcb->RefCount++;
1045 KeReleaseGuardedMutex(&ViewLock);
1046
1047 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1048
1049 KeAcquireGuardedMutex(&ViewLock);
1050 Bcb->RefCount--;
1051 if (Bcb->RefCount == 0)
1052 {
1053 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1054 {
1055 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1056 Bcb->BcbRemoveListEntry.Flink = NULL;
1057 }
1058
1059 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1060
1061 /*
1062 * Release all cache segments.
1063 */
1064 InitializeListHead(&FreeList);
1065 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1066 current_entry = Bcb->BcbSegmentListHead.Flink;
1067 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1068 {
1069 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1070 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1071 RemoveEntryList(&current->CacheSegmentListEntry);
1072 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1073 if (current->Dirty)
1074 {
1075 RemoveEntryList(&current->DirtySegmentListEntry);
1076 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1077 DPRINT1("Freeing dirty segment\n");
1078 }
1079 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1080 }
1081 #if DBG
1082 Bcb->Trace = FALSE;
1083 #endif
1084 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1085
1086 KeReleaseGuardedMutex(&ViewLock);
1087 ObDereferenceObject (Bcb->FileObject);
1088
1089 while (!IsListEmpty(&FreeList))
1090 {
1091 current_entry = RemoveTailList(&FreeList);
1092 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1093 CcRosInternalFreeCacheSegment(current);
1094 }
1095 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1096 KeAcquireGuardedMutex(&ViewLock);
1097 }
1098 return(STATUS_SUCCESS);
1099 }
1100
1101 VOID
1102 NTAPI
1103 CcRosReferenceCache(PFILE_OBJECT FileObject)
1104 {
1105 PBCB Bcb;
1106 KeAcquireGuardedMutex(&ViewLock);
1107 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1108 ASSERT(Bcb);
1109 if (Bcb->RefCount == 0)
1110 {
1111 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1112 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1113 Bcb->BcbRemoveListEntry.Flink = NULL;
1114
1115 }
1116 else
1117 {
1118 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1119 }
1120 Bcb->RefCount++;
1121 KeReleaseGuardedMutex(&ViewLock);
1122 }
1123
1124 VOID
1125 NTAPI
1126 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1127 {
1128 PBCB Bcb;
1129 DPRINT("CcRosSetRemoveOnClose()\n");
1130 KeAcquireGuardedMutex(&ViewLock);
1131 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1132 if (Bcb)
1133 {
1134 Bcb->RemoveOnClose = TRUE;
1135 if (Bcb->RefCount == 0)
1136 {
1137 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1138 }
1139 }
1140 KeReleaseGuardedMutex(&ViewLock);
1141 }
1142
1143
1144 VOID
1145 NTAPI
1146 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1147 {
1148 PBCB Bcb;
1149 KeAcquireGuardedMutex(&ViewLock);
1150 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1151 ASSERT(Bcb);
1152 if (Bcb->RefCount > 0)
1153 {
1154 Bcb->RefCount--;
1155 if (Bcb->RefCount == 0)
1156 {
1157 MmFreeSectionSegments(Bcb->FileObject);
1158 CcRosDeleteFileCache(FileObject, Bcb);
1159 }
1160 }
1161 KeReleaseGuardedMutex(&ViewLock);
1162 }
1163
1164 NTSTATUS NTAPI
1165 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1166 /*
1167 * FUNCTION: Called by the file system when a handle to a file object
1168 * has been closed.
1169 */
1170 {
1171 PBCB Bcb;
1172
1173 KeAcquireGuardedMutex(&ViewLock);
1174
1175 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1176 {
1177 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1178 if (FileObject->PrivateCacheMap != NULL)
1179 {
1180 FileObject->PrivateCacheMap = NULL;
1181 if (Bcb->RefCount > 0)
1182 {
1183 Bcb->RefCount--;
1184 if (Bcb->RefCount == 0)
1185 {
1186 MmFreeSectionSegments(Bcb->FileObject);
1187 CcRosDeleteFileCache(FileObject, Bcb);
1188 }
1189 }
1190 }
1191 }
1192 KeReleaseGuardedMutex(&ViewLock);
1193 return(STATUS_SUCCESS);
1194 }
1195
1196 NTSTATUS
1197 NTAPI
1198 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1199 {
1200 PBCB Bcb;
1201 NTSTATUS Status;
1202
1203 KeAcquireGuardedMutex(&ViewLock);
1204
1205 ASSERT(FileObject->SectionObjectPointer);
1206 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1207 if (Bcb == NULL)
1208 {
1209 Status = STATUS_UNSUCCESSFUL;
1210 }
1211 else
1212 {
1213 if (FileObject->PrivateCacheMap == NULL)
1214 {
1215 FileObject->PrivateCacheMap = Bcb;
1216 Bcb->RefCount++;
1217 }
1218 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1219 {
1220 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1221 Bcb->BcbRemoveListEntry.Flink = NULL;
1222 }
1223 Status = STATUS_SUCCESS;
1224 }
1225 KeReleaseGuardedMutex(&ViewLock);
1226
1227 return Status;
1228 }
1229
1230
1231 NTSTATUS NTAPI
1232 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1233 ULONG CacheSegmentSize,
1234 PCACHE_MANAGER_CALLBACKS CallBacks,
1235 PVOID LazyWriterContext)
1236 /*
1237 * FUNCTION: Initializes a BCB for a file object
1238 */
1239 {
1240 PBCB Bcb;
1241
1242 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1243 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1244 FileObject, Bcb, CacheSegmentSize);
1245
1246 KeAcquireGuardedMutex(&ViewLock);
1247 if (Bcb == NULL)
1248 {
1249 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1250 if (Bcb == NULL)
1251 {
1252 KeReleaseGuardedMutex(&ViewLock);
1253 return(STATUS_UNSUCCESSFUL);
1254 }
1255 memset(Bcb, 0, sizeof(BCB));
1256 ObReferenceObjectByPointer(FileObject,
1257 FILE_ALL_ACCESS,
1258 NULL,
1259 KernelMode);
1260 Bcb->FileObject = FileObject;
1261 Bcb->CacheSegmentSize = CacheSegmentSize;
1262 Bcb->Callbacks = CallBacks;
1263 Bcb->LazyWriteContext = LazyWriterContext;
1264 if (FileObject->FsContext)
1265 {
1266 Bcb->AllocationSize =
1267 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1268 Bcb->FileSize =
1269 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1270 }
1271 KeInitializeSpinLock(&Bcb->BcbLock);
1272 InitializeListHead(&Bcb->BcbSegmentListHead);
1273 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1274 }
1275 if (FileObject->PrivateCacheMap == NULL)
1276 {
1277 FileObject->PrivateCacheMap = Bcb;
1278 Bcb->RefCount++;
1279 }
1280 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1281 {
1282 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1283 Bcb->BcbRemoveListEntry.Flink = NULL;
1284 }
1285 KeReleaseGuardedMutex(&ViewLock);
1286
1287 return(STATUS_SUCCESS);
1288 }
1289
1290 /*
1291 * @implemented
1292 */
1293 PFILE_OBJECT NTAPI
1294 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1295 {
1296 PBCB Bcb;
1297 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1298 {
1299 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1300 ASSERT(Bcb);
1301 return Bcb->FileObject;
1302 }
1303 return NULL;
1304 }
1305
1306 VOID
1307 INIT_FUNCTION
1308 NTAPI
1309 CcInitView(VOID)
1310 {
1311 #ifdef CACHE_BITMAP
1312 PMEMORY_AREA marea;
1313 PVOID Buffer;
1314 PHYSICAL_ADDRESS BoundaryAddressMultiple; NTSTATUS Status;
1315 #endif
1316
1317 DPRINT("CcInitView()\n");
1318 #ifdef CACHE_BITMAP
1319 BoundaryAddressMultiple.QuadPart = 0;
1320 CiCacheSegMappingRegionHint = 0;
1321 CiCacheSegMappingRegionBase = NULL;
1322
1323 MmLockAddressSpace(MmGetKernelAddressSpace());
1324
1325 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1326 MEMORY_AREA_CACHE_SEGMENT,
1327 &CiCacheSegMappingRegionBase,
1328 CI_CACHESEG_MAPPING_REGION_SIZE,
1329 PAGE_READWRITE,
1330 &marea,
1331 FALSE,
1332 0,
1333 BoundaryAddressMultiple);
1334 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1335 if (!NT_SUCCESS(Status))
1336 {
1337 KeBugCheck(CACHE_MANAGER);
1338 }
1339
1340 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1341 if (!Buffer)
1342 {
1343 KeBugCheck(CACHE_MANAGER);
1344 }
1345
1346 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1347 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1348
1349 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1350 #endif
1351 InitializeListHead(&CacheSegmentListHead);
1352 InitializeListHead(&DirtySegmentListHead);
1353 InitializeListHead(&CacheSegmentLRUListHead);
1354 InitializeListHead(&ClosedListHead);
1355 KeInitializeGuardedMutex(&ViewLock);
1356 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1357 NULL,
1358 NULL,
1359 0,
1360 sizeof(INTERNAL_BCB),
1361 TAG_IBCB,
1362 20);
1363 ExInitializeNPagedLookasideList (&BcbLookasideList,
1364 NULL,
1365 NULL,
1366 0,
1367 sizeof(BCB),
1368 TAG_BCB,
1369 20);
1370 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1371 NULL,
1372 NULL,
1373 0,
1374 sizeof(CACHE_SEGMENT),
1375 TAG_CSEG,
1376 20);
1377
1378 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1379
1380 CcInitCacheZeroPage();
1381
1382 }
1383
1384 /* EOF */