1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If the page is being read or only partially written, and it is not up
25 * to date, then read its data from the underlying medium. If the read fails
26 * then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
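
/*
 * Illustrative sketch only (not compiled): a minimal cached-read fragment
 * following the steps above, written against the Ros-internal helpers in
 * this file (CcRosRequestCacheSegment / CcRosReleaseCacheSegment). Bcb,
 * FileOffset, OffsetInSegment, Length and Buffer are assumed to be supplied
 * by the caller, and ReadCacheSegment() is assumed to fill a segment from
 * the underlying medium as the helper in cc/copy.c does.
 *
 *   NTSTATUS Status;
 *   PVOID BaseAddress;
 *   BOOLEAN UptoDate;
 *   PCACHE_SEGMENT CacheSeg;
 *
 *   // (2) Obtain the cache segment covering this segment-aligned offset.
 *   Status = CcRosRequestCacheSegment(Bcb, FileOffset, &BaseAddress,
 *                                     &UptoDate, &CacheSeg);
 *   if (!NT_SUCCESS(Status)) return Status;
 *
 *   // (3) If the segment is not up to date, read it from the medium first.
 *   if (!UptoDate)
 *   {
 *       Status = ReadCacheSegment(CacheSeg);
 *       if (!NT_SUCCESS(Status))
 *       {
 *           // Release with Valid == FALSE and fail the request.
 *           CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
 *           return Status;
 *       }
 *   }
 *
 *   // (4) Copy the data out of the view.
 *   RtlCopyMemory(Buffer, (PUCHAR)BaseAddress + OffsetInSegment, Length);
 *
 *   // (5) Release the segment: now valid, not dirtied, not mapped.
 *   CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
 */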
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocates/deallocates space from this
47 * block via a bitmap. If CACHE_BITMAP is used, the size of the MDL mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
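
/*
 * A minimal sketch of the bitmap scheme described above, mirroring the
 * CACHE_BITMAP paths in CcRosCreateCacheSegment and
 * CcRosInternalFreeCacheSegment below; Pages, Base and View are
 * illustrative locals only.
 *
 *   // Allocate: find and set 'Pages' consecutive clear bits, one per page.
 *   ULONG Base = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap,
 *                                       Pages, CiCacheSegMappingRegionHint);
 *   if (Base == 0xffffffff)
 *       KeBugCheck(CACHE_MANAGER);   // mapping region exhausted
 *   PVOID View = (PUCHAR)CiCacheSegMappingRegionBase + Base * PAGE_SIZE;
 *
 *   // Free: clear the same run of bits so the space can be reused.
 *   RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, Pages);
 */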
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
73 #if DBG
74 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
75 {
76 ++cs->ReferenceCount;
77 if ( cs->Bcb->Trace )
78 {
79 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
80 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
81 }
82 }
83 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
84 {
85 --cs->ReferenceCount;
86 if ( cs->Bcb->Trace )
87 {
88 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
89 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
90 }
91 }
92 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
93 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
94 #else
95 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
96 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
97 #endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
105 VOID
106 NTAPI
107 CcRosTraceCacheMap (
108 PBCB Bcb,
109 BOOLEAN Trace )
110 {
111 #if DBG
112 KIRQL oldirql;
113 PLIST_ENTRY current_entry;
114 PCACHE_SEGMENT current;
115
116 if ( !Bcb )
117 return;
118
119 Bcb->Trace = Trace;
120
121 if ( Trace )
122 {
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
124
125 KeAcquireGuardedMutex(&ViewLock);
126 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
127
128 current_entry = Bcb->BcbSegmentListHead.Flink;
129 while (current_entry != &Bcb->BcbSegmentListHead)
130 {
131 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
132 current_entry = current_entry->Flink;
133
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
135 current, current->ReferenceCount, current->Dirty, current->PageOut );
136 }
137 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
138 KeReleaseGuardedMutex(&ViewLock);
139 }
140 else
141 {
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
143 }
144
145 #else
146 UNREFERENCED_PARAMETER(Bcb);
147 UNREFERENCED_PARAMETER(Trace);
148 #endif
149 }
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
154 {
155 NTSTATUS Status;
156 KIRQL oldIrql;
157
158 Status = WriteCacheSegment(CacheSegment);
159 if (NT_SUCCESS(Status))
160 {
161 KeAcquireGuardedMutex(&ViewLock);
162 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
163
164 CacheSegment->Dirty = FALSE;
165 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
166 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
167 CcRosCacheSegmentDecRefCount ( CacheSegment );
168
169 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
170 KeReleaseGuardedMutex(&ViewLock);
171 }
172
173 return(Status);
174 }
175
176 NTSTATUS
177 NTAPI
178 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
179 {
180 PLIST_ENTRY current_entry;
181 PCACHE_SEGMENT current;
182 ULONG PagesPerSegment;
183 BOOLEAN Locked;
184 NTSTATUS Status;
185 static ULONG WriteCount[4] = {0, 0, 0, 0};
186 ULONG NewTarget;
187
188 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
189
190 (*Count) = 0;
191
192 KeEnterCriticalRegion();
193 KeAcquireGuardedMutex(&ViewLock);
194
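/*
 * WriteCount[] holds the flush quota for this pass and the next three
 * passes. Shift the quotas forward, spread any dirty pages not yet covered
 * by them evenly over the next four passes, and use the quota for the
 * current pass as a lower bound for Target. Any shortfall is carried over
 * to the next pass via the WriteCount[1] adjustment at the end.
 */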
195 WriteCount[0] = WriteCount[1];
196 WriteCount[1] = WriteCount[2];
197 WriteCount[2] = WriteCount[3];
198 WriteCount[3] = 0;
199
200 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
201
202 if (NewTarget < DirtyPageCount)
203 {
204 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
205 WriteCount[0] += NewTarget;
206 WriteCount[1] += NewTarget;
207 WriteCount[2] += NewTarget;
208 WriteCount[3] += NewTarget;
209 }
210
211 NewTarget = WriteCount[0];
212
213 Target = max(NewTarget, Target);
214
215 current_entry = DirtySegmentListHead.Flink;
216 if (current_entry == &DirtySegmentListHead)
217 {
218 DPRINT("No Dirty pages\n");
219 }
220
221 while (current_entry != &DirtySegmentListHead && Target > 0)
222 {
223 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
224 DirtySegmentListEntry);
225 current_entry = current_entry->Flink;
226
227 Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
228 current->Bcb->LazyWriteContext, FALSE);
229 if (!Locked)
230 {
231 continue;
232 }
233
234 Locked = ExTryToAcquirePushLockExclusive(&current->Lock);
235 if (!Locked)
236 {
237 current->Bcb->Callbacks->ReleaseFromLazyWrite(
238 current->Bcb->LazyWriteContext);
239
240 continue;
241 }
242
243 ASSERT(current->Dirty);
244 if (current->ReferenceCount > 1)
245 {
246 ExReleasePushLock(&current->Lock);
247 current->Bcb->Callbacks->ReleaseFromLazyWrite(
248 current->Bcb->LazyWriteContext);
249 continue;
250 }
251
252 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
253
254 KeReleaseGuardedMutex(&ViewLock);
255
256 Status = CcRosFlushCacheSegment(current);
257
258 ExReleasePushLock(&current->Lock);
259 current->Bcb->Callbacks->ReleaseFromLazyWrite(
260 current->Bcb->LazyWriteContext);
261
262 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
263 {
264 DPRINT1("CC: Failed to flush cache segment.\n");
265 }
266 else
267 {
268 (*Count) += PagesPerSegment;
269 Target -= PagesPerSegment;
270 }
271
272 KeAcquireGuardedMutex(&ViewLock);
273 current_entry = DirtySegmentListHead.Flink;
274 }
275
276 if (*Count < NewTarget)
277 {
278 WriteCount[1] += (NewTarget - *Count);
279 }
280
281 KeReleaseGuardedMutex(&ViewLock);
282 KeLeaveCriticalRegion();
283
284 DPRINT("CcRosFlushDirtyPages() finished\n");
285 return(STATUS_SUCCESS);
286 }
287
288 NTSTATUS
289 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
290 /*
291 * FUNCTION: Try to free some memory from the file cache.
292 * ARGUMENTS:
293 * Target - The number of pages to be freed.
294 * Priority - The priority of free (currently unused).
295 * NrFreed - Points to a variable where the number of pages
296 * actually freed is returned.
297 */
298 {
299 PLIST_ENTRY current_entry;
300 PCACHE_SEGMENT current;
301 ULONG PagesPerSegment;
302 ULONG PagesFreed;
303 KIRQL oldIrql;
304 LIST_ENTRY FreeList;
305
306 DPRINT("CcRosTrimCache(Target %d)\n", Target);
307
308 *NrFreed = 0;
309
310 InitializeListHead(&FreeList);
311
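/*
 * First pass: for every mapped, clean segment, take a temporary reference,
 * page its physical pages out of the working sets, and account the pages of
 * any segment that ends up unreferenced against Target. A second pass below
 * unlinks the unreferenced segments and frees them once the locks have been
 * dropped.
 */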
312 KeAcquireGuardedMutex(&ViewLock);
313 current_entry = CacheSegmentLRUListHead.Flink;
314 while (current_entry != &CacheSegmentLRUListHead && Target > 0)
315 {
316 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
317 CacheSegmentLRUListEntry);
318 current_entry = current_entry->Flink;
319
320 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
321
322 if (current->MappedCount > 0 && !current->Dirty && !current->PageOut)
323 {
324 ULONG i;
325
326 CcRosCacheSegmentIncRefCount(current);
327 current->PageOut = TRUE;
328 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
329 KeReleaseGuardedMutex(&ViewLock);
330 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
331 {
332 PFN_NUMBER Page;
333 Page = (PFN_NUMBER)(MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT);
334 MmPageOutPhysicalAddress(Page);
335 }
336 KeAcquireGuardedMutex(&ViewLock);
337 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
338 CcRosCacheSegmentDecRefCount(current);
339 }
340
341 if (current->ReferenceCount == 0)
342 {
343 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
344 // PagesFreed = PagesPerSegment;
345 PagesFreed = min(PagesPerSegment, Target);
346 Target -= PagesFreed;
347 (*NrFreed) += PagesFreed;
348 }
349
350 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
351 }
352
353 current_entry = CacheSegmentLRUListHead.Flink;
354 while (current_entry != &CacheSegmentLRUListHead)
355 {
356 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
357 CacheSegmentLRUListEntry);
358 current->PageOut = FALSE;
359 current_entry = current_entry->Flink;
360
361 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
362 if (current->ReferenceCount == 0)
363 {
364 RemoveEntryList(&current->BcbSegmentListEntry);
365 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
366 RemoveEntryList(&current->CacheSegmentListEntry);
367 RemoveEntryList(&current->CacheSegmentLRUListEntry);
368 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
369 }
370 else
371 {
372 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
373 }
374 }
375
376 KeReleaseGuardedMutex(&ViewLock);
377
378 while (!IsListEmpty(&FreeList))
379 {
380 current_entry = RemoveHeadList(&FreeList);
381 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
382 BcbSegmentListEntry);
383 CcRosInternalFreeCacheSegment(current);
384 }
385
386 return(STATUS_SUCCESS);
387 }
388
389 NTSTATUS
390 NTAPI
391 CcRosReleaseCacheSegment(PBCB Bcb,
392 PCACHE_SEGMENT CacheSeg,
393 BOOLEAN Valid,
394 BOOLEAN Dirty,
395 BOOLEAN Mapped)
396 {
397 BOOLEAN WasDirty = CacheSeg->Dirty;
398 KIRQL oldIrql;
399
400 ASSERT(Bcb);
401
402 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
403 Bcb, CacheSeg, Valid);
404
405 CacheSeg->Valid = Valid;
406 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
407
408 KeAcquireGuardedMutex(&ViewLock);
409 if (!WasDirty && CacheSeg->Dirty)
410 {
411 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
412 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
413 }
414 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
415 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
416
417 if (Mapped)
418 {
419 CacheSeg->MappedCount++;
420 }
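/*
 * Reference accounting: drop the reference the caller obtained from
 * CcRosLookupCacheSegment/CcRosCreateCacheSegment, but keep an extra
 * reference while the segment is mapped (taken on the first mapping) and
 * another while it sits on the dirty list (dropped again by
 * CcRosFlushCacheSegment).
 */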
421 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
422 CcRosCacheSegmentDecRefCount(CacheSeg);
423 if (Mapped && CacheSeg->MappedCount == 1)
424 {
425 CcRosCacheSegmentIncRefCount(CacheSeg);
426 }
427 if (!WasDirty && CacheSeg->Dirty)
428 {
429 CcRosCacheSegmentIncRefCount(CacheSeg);
430 }
431 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
432 KeReleaseGuardedMutex(&ViewLock);
433 ExReleasePushLock(&CacheSeg->Lock);
434
435 return(STATUS_SUCCESS);
436 }
437
438 /* Returns with Cache Segment Lock Held! */
439 PCACHE_SEGMENT
440 NTAPI
441 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
442 {
443 PLIST_ENTRY current_entry;
444 PCACHE_SEGMENT current;
445 KIRQL oldIrql;
446
447 ASSERT(Bcb);
448
449 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
450
451 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
452 current_entry = Bcb->BcbSegmentListHead.Flink;
453 while (current_entry != &Bcb->BcbSegmentListHead)
454 {
455 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
456 BcbSegmentListEntry);
457 if (current->FileOffset <= FileOffset &&
458 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
459 {
460 CcRosCacheSegmentIncRefCount(current);
461 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
462 ExAcquirePushLockExclusive(&current->Lock);
463 return(current);
464 }
465 current_entry = current_entry->Flink;
466 }
467 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
468 return(NULL);
469 }
470
471 NTSTATUS
472 NTAPI
473 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
474 {
475 PCACHE_SEGMENT CacheSeg;
476 KIRQL oldIrql;
477
478 ASSERT(Bcb);
479
480 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
481
482 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
483 if (CacheSeg == NULL)
484 {
485 KeBugCheck(CACHE_MANAGER);
486 }
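/*
 * The lookup above took a reference. If the segment was not dirty yet, that
 * reference is kept on behalf of the dirty list; otherwise the dirty list
 * already holds one, so drop the lookup reference again.
 */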
487 if (!CacheSeg->Dirty)
488 {
489 KeAcquireGuardedMutex(&ViewLock);
490 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
491 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
492 KeReleaseGuardedMutex(&ViewLock);
493 }
494 else
495 {
496 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
497 CcRosCacheSegmentDecRefCount(CacheSeg);
498 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
499 }
500
501
502 CacheSeg->Dirty = TRUE;
503 ExReleasePushLock(&CacheSeg->Lock);
504
505 return(STATUS_SUCCESS);
506 }
507
508 NTSTATUS
509 NTAPI
510 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
511 {
512 PCACHE_SEGMENT CacheSeg;
513 BOOLEAN WasDirty;
514 KIRQL oldIrql;
515
516 ASSERT(Bcb);
517
518 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
519 Bcb, FileOffset, NowDirty);
520
521 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
522 if (CacheSeg == NULL)
523 {
524 return(STATUS_UNSUCCESSFUL);
525 }
526
527 WasDirty = CacheSeg->Dirty;
528 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
529
530 CacheSeg->MappedCount--;
531
532 if (!WasDirty && NowDirty)
533 {
534 KeAcquireGuardedMutex(&ViewLock);
535 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
536 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
537 KeReleaseGuardedMutex(&ViewLock);
538 }
539
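/*
 * Drop the reference taken by the lookup, take one for the dirty list if
 * the segment has just become dirty, and release the mapping reference once
 * the last mapping is gone.
 */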
540 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
541 CcRosCacheSegmentDecRefCount(CacheSeg);
542 if (!WasDirty && NowDirty)
543 {
544 CcRosCacheSegmentIncRefCount(CacheSeg);
545 }
546 if (CacheSeg->MappedCount == 0)
547 {
548 CcRosCacheSegmentDecRefCount(CacheSeg);
549 }
550 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
551
552 ExReleasePushLock(&CacheSeg->Lock);
553 return(STATUS_SUCCESS);
554 }
555
556 static
557 NTSTATUS
558 CcRosCreateCacheSegment(PBCB Bcb,
559 ULONG FileOffset,
560 PCACHE_SEGMENT* CacheSeg)
561 {
562 PCACHE_SEGMENT current;
563 PCACHE_SEGMENT previous;
564 PLIST_ENTRY current_entry;
565 NTSTATUS Status;
566 KIRQL oldIrql;
567 #ifdef CACHE_BITMAP
568 ULONG StartingOffset;
569 #endif
570 PHYSICAL_ADDRESS BoundaryAddressMultiple;
571
572 ASSERT(Bcb);
573
574 DPRINT("CcRosCreateCacheSegment()\n");
575
576 BoundaryAddressMultiple.QuadPart = 0;
577 if (FileOffset >= Bcb->FileSize.u.LowPart)
578 {
579 *CacheSeg = NULL;
580 return STATUS_INVALID_PARAMETER;
581 }
582
583 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
584 current->Valid = FALSE;
585 current->Dirty = FALSE;
586 current->PageOut = FALSE;
587 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
588 current->Bcb = Bcb;
589 #if DBG
590 if ( Bcb->Trace )
591 {
592 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
593 }
594 #endif
595 current->MappedCount = 0;
596 current->DirtySegmentListEntry.Flink = NULL;
597 current->DirtySegmentListEntry.Blink = NULL;
598 current->ReferenceCount = 1;
599 ExInitializePushLock(&current->Lock);
600 ExAcquirePushLockExclusive(&current->Lock);
601 KeAcquireGuardedMutex(&ViewLock);
602
603 *CacheSeg = current;
604 /* There is a window between the call to CcRosLookupCacheSegment and
605 * CcRosCreateCacheSegment. We must check whether a segment for this file
606 * offset already exists. If one does, we release our newly created segment
607 * and return the existing one.
608 */
609 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
610 current_entry = Bcb->BcbSegmentListHead.Flink;
611 previous = NULL;
612 while (current_entry != &Bcb->BcbSegmentListHead)
613 {
614 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
615 BcbSegmentListEntry);
616 if (current->FileOffset <= FileOffset &&
617 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
618 {
619 CcRosCacheSegmentIncRefCount(current);
620 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
621 #if DBG
622 if ( Bcb->Trace )
623 {
624 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
625 Bcb,
626 (*CacheSeg),
627 current );
628 }
629 #endif
630 ExReleasePushLock(&(*CacheSeg)->Lock);
631 KeReleaseGuardedMutex(&ViewLock);
632 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
633 *CacheSeg = current;
634 ExAcquirePushLockExclusive(&current->Lock);
635 return STATUS_SUCCESS;
636 }
637 if (current->FileOffset < FileOffset)
638 {
639 if (previous == NULL)
640 {
641 previous = current;
642 }
643 else
644 {
645 if (previous->FileOffset < current->FileOffset)
646 {
647 previous = current;
648 }
649 }
650 }
651 current_entry = current_entry->Flink;
652 }
653 /* There was no existing segment. */
654 current = *CacheSeg;
655 if (previous)
656 {
657 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
658 }
659 else
660 {
661 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
662 }
663 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
664 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
665 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
666 KeReleaseGuardedMutex(&ViewLock);
667 #ifdef CACHE_BITMAP
668 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
669
670 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
671
672 if (StartingOffset == 0xffffffff)
673 {
674 DPRINT1("Out of CacheSeg mapping space\n");
675 KeBugCheck(CACHE_MANAGER);
676 }
677
678 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
679
680 if (CiCacheSegMappingRegionHint == StartingOffset)
681 {
682 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
683 }
684
685 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
686 #else
687 MmLockAddressSpace(MmGetKernelAddressSpace());
688 current->BaseAddress = NULL;
689 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
690 0, // nothing checks for cache_segment mareas, so set to 0
691 &current->BaseAddress,
692 Bcb->CacheSegmentSize,
693 PAGE_READWRITE,
694 (PMEMORY_AREA*)&current->MemoryArea,
695 FALSE,
696 0,
697 BoundaryAddressMultiple);
698 MmUnlockAddressSpace(MmGetKernelAddressSpace());
699 if (!NT_SUCCESS(Status))
700 {
701 KeBugCheck(CACHE_MANAGER);
702 }
703 #endif
704
705 /* Create a virtual mapping for this memory area */
706 MI_SET_USAGE(MI_USAGE_CACHE);
707 #if MI_TRACE_PFNS
708 PWCHAR pos = NULL;
709 ULONG len = 0;
710 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
711 {
712 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
713 len = pos ? wcslen(pos) * sizeof(WCHAR) : 0;
714 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
715 }
716 #endif
717
718 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
719 MC_CACHE, PAGE_READWRITE);
720
721 return(STATUS_SUCCESS);
722 }
723
724 NTSTATUS
725 NTAPI
726 CcRosGetCacheSegmentChain(PBCB Bcb,
727 ULONG FileOffset,
728 ULONG Length,
729 PCACHE_SEGMENT* CacheSeg)
730 {
731 PCACHE_SEGMENT current;
732 ULONG i;
733 PCACHE_SEGMENT* CacheSegList;
734 PCACHE_SEGMENT Previous = NULL;
735
736 ASSERT(Bcb);
737
738 DPRINT("CcRosGetCacheSegmentChain()\n");
739
740 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
741
742 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
743 (Length / Bcb->CacheSegmentSize));
744
745 /*
746 * Look for a cache segment already mapping the same data.
747 */
748 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
749 {
750 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
751 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
752 if (current != NULL)
753 {
754 CacheSegList[i] = current;
755 }
756 else
757 {
758 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
759 CacheSegList[i] = current;
760 }
761 }
762
763 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
764 {
765 if (i == 0)
766 {
767 *CacheSeg = CacheSegList[i];
768 Previous = CacheSegList[i];
769 }
770 else
771 {
772 Previous->NextInChain = CacheSegList[i];
773 Previous = CacheSegList[i];
774 }
775 }
776 ASSERT(Previous);
777 Previous->NextInChain = NULL;
778
779 return(STATUS_SUCCESS);
780 }
781
782 NTSTATUS
783 NTAPI
784 CcRosGetCacheSegment(PBCB Bcb,
785 ULONG FileOffset,
786 PULONG BaseOffset,
787 PVOID* BaseAddress,
788 PBOOLEAN UptoDate,
789 PCACHE_SEGMENT* CacheSeg)
790 {
791 PCACHE_SEGMENT current;
792 NTSTATUS Status;
793
794 ASSERT(Bcb);
795
796 DPRINT("CcRosGetCacheSegment()\n");
797
798 /*
799 * Look for a cache segment already mapping the same data.
800 */
801 current = CcRosLookupCacheSegment(Bcb, FileOffset);
802 if (current == NULL)
803 {
804 /*
805 * Otherwise create a new segment.
806 */
807 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
808 if (!NT_SUCCESS(Status))
809 {
810 return Status;
811 }
812 }
813 /*
814 * Return information about the segment to the caller.
815 */
816 *UptoDate = current->Valid;
817 *BaseAddress = current->BaseAddress;
818 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
819 *CacheSeg = current;
820 *BaseOffset = current->FileOffset;
821 return(STATUS_SUCCESS);
822 }
823
824 NTSTATUS NTAPI
825 CcRosRequestCacheSegment(PBCB Bcb,
826 ULONG FileOffset,
827 PVOID* BaseAddress,
828 PBOOLEAN UptoDate,
829 PCACHE_SEGMENT* CacheSeg)
830 /*
831 * FUNCTION: Request a page mapping for a BCB
832 */
833 {
834 ULONG BaseOffset;
835
836 ASSERT(Bcb);
837
838 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
839 {
840 DPRINT1("Bad fileoffset %x should be multiple of %x",
841 FileOffset, Bcb->CacheSegmentSize);
842 KeBugCheck(CACHE_MANAGER);
843 }
844
845 return(CcRosGetCacheSegment(Bcb,
846 FileOffset,
847 &BaseOffset,
848 BaseAddress,
849 UptoDate,
850 CacheSeg));
851 }
852 #ifdef CACHE_BITMAP
853 #else
854 static VOID
855 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
856 PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
857 {
858 ASSERT(SwapEntry == 0);
859 if (Page != 0)
860 {
861 MmReleasePageMemoryConsumer(MC_CACHE, Page);
862 }
863 }
864 #endif
865 NTSTATUS
866 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
867 /*
868 * FUNCTION: Releases a cache segment associated with a BCB
869 */
870 {
871 #ifdef CACHE_BITMAP
872 ULONG i;
873 ULONG RegionSize;
874 ULONG Base;
875 PFN_NUMBER Page;
876 KIRQL oldIrql;
877 #endif
878 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
879 #if DBG
880 if ( CacheSeg->Bcb->Trace )
881 {
882 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
883 }
884 #endif
885 #ifdef CACHE_BITMAP
886 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
887
888 /* Unmap all the pages. */
889 for (i = 0; i < RegionSize; i++)
890 {
891 MmDeleteVirtualMapping(NULL,
892 CacheSeg->BaseAddress + (i * PAGE_SIZE),
893 FALSE,
894 NULL,
895 &Page);
896 MmReleasePageMemoryConsumer(MC_CACHE, Page);
897 }
898
899 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
900 /* Deallocate all the pages used. */
901 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
902
903 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
904
905 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
906
907 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
908 #else
909 MmLockAddressSpace(MmGetKernelAddressSpace());
910 MmFreeMemoryArea(MmGetKernelAddressSpace(),
911 CacheSeg->MemoryArea,
912 CcFreeCachePage,
913 NULL);
914 MmUnlockAddressSpace(MmGetKernelAddressSpace());
915 #endif
916 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
917 return(STATUS_SUCCESS);
918 }
919
920 NTSTATUS
921 NTAPI
922 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
923 {
924 NTSTATUS Status;
925 KIRQL oldIrql;
926
927 ASSERT(Bcb);
928
929 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
930 Bcb, CacheSeg);
931
932 KeAcquireGuardedMutex(&ViewLock);
933 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
934 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
935 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
936 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
937 if (CacheSeg->Dirty)
938 {
939 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
940 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
941
942 }
943 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
944 KeReleaseGuardedMutex(&ViewLock);
945
946 Status = CcRosInternalFreeCacheSegment(CacheSeg);
947 return(Status);
948 }
949
950 /*
951 * @implemented
952 */
953 VOID NTAPI
954 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
955 IN PLARGE_INTEGER FileOffset OPTIONAL,
956 IN ULONG Length,
957 OUT PIO_STATUS_BLOCK IoStatus)
958 {
959 PBCB Bcb;
960 LARGE_INTEGER Offset;
961 PCACHE_SEGMENT current;
962 NTSTATUS Status;
963 KIRQL oldIrql;
964
965 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
966 SectionObjectPointers, FileOffset, Length, IoStatus);
967
968 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
969 {
970 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
971 ASSERT(Bcb);
972 if (FileOffset)
973 {
974 Offset = *FileOffset;
975 }
976 else
977 {
978 Offset.QuadPart = (LONGLONG)0;
979 Length = Bcb->FileSize.u.LowPart;
980 }
981
982 if (IoStatus)
983 {
984 IoStatus->Status = STATUS_SUCCESS;
985 IoStatus->Information = 0;
986 }
987
988 while (Length > 0)
989 {
990 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
991 if (current != NULL)
992 {
993 if (current->Dirty)
994 {
995 Status = CcRosFlushCacheSegment(current);
996 if (!NT_SUCCESS(Status) && IoStatus != NULL)
997 {
998 IoStatus->Status = Status;
999 }
1000 }
1001 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1002 ExReleasePushLock(&current->Lock);
1003 CcRosCacheSegmentDecRefCount(current);
1004 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1005 }
1006
1007 Offset.QuadPart += Bcb->CacheSegmentSize;
1008 if (Length > Bcb->CacheSegmentSize)
1009 {
1010 Length -= Bcb->CacheSegmentSize;
1011 }
1012 else
1013 {
1014 Length = 0;
1015 }
1016 }
1017 }
1018 else
1019 {
1020 if (IoStatus)
1021 {
1022 IoStatus->Status = STATUS_INVALID_PARAMETER;
1023 }
1024 }
1025 }
1026
1027 NTSTATUS
1028 NTAPI
1029 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1030 /*
1031 * FUNCTION: Releases the BCB associated with a file object
1032 */
1033 {
1034 PLIST_ENTRY current_entry;
1035 PCACHE_SEGMENT current;
1036 LIST_ENTRY FreeList;
1037 KIRQL oldIrql;
1038
1039 ASSERT(Bcb);
1040
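/*
 * The caller holds ViewLock. Take an extra reference on the Bcb and drop
 * the lock so that CcFlushCache (which acquires ViewLock while flushing
 * dirty segments) cannot deadlock, then reacquire it before tearing the
 * cache map down.
 */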
1041 Bcb->RefCount++;
1042 KeReleaseGuardedMutex(&ViewLock);
1043
1044 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1045
1046 KeAcquireGuardedMutex(&ViewLock);
1047 Bcb->RefCount--;
1048 if (Bcb->RefCount == 0)
1049 {
1050 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1051 {
1052 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1053 Bcb->BcbRemoveListEntry.Flink = NULL;
1054 }
1055
1056 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1057
1058 /*
1059 * Release all cache segments.
1060 */
1061 InitializeListHead(&FreeList);
1062 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1063 current_entry = Bcb->BcbSegmentListHead.Flink;
1064 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1065 {
1066 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1067 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1068 RemoveEntryList(&current->CacheSegmentListEntry);
1069 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1070 if (current->Dirty)
1071 {
1072 RemoveEntryList(&current->DirtySegmentListEntry);
1073 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1074 DPRINT1("Freeing dirty segment\n");
1075 }
1076 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1077 }
1078 #if DBG
1079 Bcb->Trace = FALSE;
1080 #endif
1081 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1082
1083 KeReleaseGuardedMutex(&ViewLock);
1084 ObDereferenceObject (Bcb->FileObject);
1085
1086 while (!IsListEmpty(&FreeList))
1087 {
1088 current_entry = RemoveTailList(&FreeList);
1089 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1090 CcRosInternalFreeCacheSegment(current);
1091 }
1092 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1093 KeAcquireGuardedMutex(&ViewLock);
1094 }
1095 return(STATUS_SUCCESS);
1096 }
1097
1098 VOID
1099 NTAPI
1100 CcRosReferenceCache(PFILE_OBJECT FileObject)
1101 {
1102 PBCB Bcb;
1103 KeAcquireGuardedMutex(&ViewLock);
1104 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1105 ASSERT(Bcb);
1106 if (Bcb->RefCount == 0)
1107 {
1108 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1109 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1110 Bcb->BcbRemoveListEntry.Flink = NULL;
1111
1112 }
1113 else
1114 {
1115 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1116 }
1117 Bcb->RefCount++;
1118 KeReleaseGuardedMutex(&ViewLock);
1119 }
1120
1121 VOID
1122 NTAPI
1123 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1124 {
1125 PBCB Bcb;
1126 DPRINT("CcRosSetRemoveOnClose()\n");
1127 KeAcquireGuardedMutex(&ViewLock);
1128 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1129 if (Bcb)
1130 {
1131 Bcb->RemoveOnClose = TRUE;
1132 if (Bcb->RefCount == 0)
1133 {
1134 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1135 }
1136 }
1137 KeReleaseGuardedMutex(&ViewLock);
1138 }
1139
1140
1141 VOID
1142 NTAPI
1143 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1144 {
1145 PBCB Bcb;
1146 KeAcquireGuardedMutex(&ViewLock);
1147 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1148 ASSERT(Bcb);
1149 if (Bcb->RefCount > 0)
1150 {
1151 Bcb->RefCount--;
1152 if (Bcb->RefCount == 0)
1153 {
1154 MmFreeSectionSegments(Bcb->FileObject);
1155 CcRosDeleteFileCache(FileObject, Bcb);
1156 }
1157 }
1158 KeReleaseGuardedMutex(&ViewLock);
1159 }
1160
1161 NTSTATUS NTAPI
1162 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1163 /*
1164 * FUNCTION: Called by the file system when a handle to a file object
1165 * has been closed.
1166 */
1167 {
1168 PBCB Bcb;
1169
1170 KeAcquireGuardedMutex(&ViewLock);
1171
1172 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1173 {
1174 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1175 if (FileObject->PrivateCacheMap != NULL)
1176 {
1177 FileObject->PrivateCacheMap = NULL;
1178 if (Bcb->RefCount > 0)
1179 {
1180 Bcb->RefCount--;
1181 if (Bcb->RefCount == 0)
1182 {
1183 MmFreeSectionSegments(Bcb->FileObject);
1184 CcRosDeleteFileCache(FileObject, Bcb);
1185 }
1186 }
1187 }
1188 }
1189 KeReleaseGuardedMutex(&ViewLock);
1190 return(STATUS_SUCCESS);
1191 }
1192
1193 NTSTATUS
1194 NTAPI
1195 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1196 {
1197 PBCB Bcb;
1198 NTSTATUS Status;
1199
1200 KeAcquireGuardedMutex(&ViewLock);
1201
1202 ASSERT(FileObject->SectionObjectPointer);
1203 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1204 if (Bcb == NULL)
1205 {
1206 Status = STATUS_UNSUCCESSFUL;
1207 }
1208 else
1209 {
1210 if (FileObject->PrivateCacheMap == NULL)
1211 {
1212 FileObject->PrivateCacheMap = Bcb;
1213 Bcb->RefCount++;
1214 }
1215 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1216 {
1217 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1218 Bcb->BcbRemoveListEntry.Flink = NULL;
1219 }
1220 Status = STATUS_SUCCESS;
1221 }
1222 KeReleaseGuardedMutex(&ViewLock);
1223
1224 return Status;
1225 }
1226
1227
1228 NTSTATUS NTAPI
1229 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1230 ULONG CacheSegmentSize,
1231 PCACHE_MANAGER_CALLBACKS CallBacks,
1232 PVOID LazyWriterContext)
1233 /*
1234 * FUNCTION: Initializes a BCB for a file object
1235 */
1236 {
1237 PBCB Bcb;
1238
1239 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1240 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1241 FileObject, Bcb, CacheSegmentSize);
1242
1243 KeAcquireGuardedMutex(&ViewLock);
1244 if (Bcb == NULL)
1245 {
1246 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1247 if (Bcb == NULL)
1248 {
1249 KeReleaseGuardedMutex(&ViewLock);
1250 return(STATUS_UNSUCCESSFUL);
1251 }
1252 memset(Bcb, 0, sizeof(BCB));
1253 ObReferenceObjectByPointer(FileObject,
1254 FILE_ALL_ACCESS,
1255 NULL,
1256 KernelMode);
1257 Bcb->FileObject = FileObject;
1258 Bcb->CacheSegmentSize = CacheSegmentSize;
1259 Bcb->Callbacks = CallBacks;
1260 Bcb->LazyWriteContext = LazyWriterContext;
1261 if (FileObject->FsContext)
1262 {
1263 Bcb->AllocationSize =
1264 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1265 Bcb->FileSize =
1266 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1267 }
1268 KeInitializeSpinLock(&Bcb->BcbLock);
1269 InitializeListHead(&Bcb->BcbSegmentListHead);
1270 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1271 }
1272 if (FileObject->PrivateCacheMap == NULL)
1273 {
1274 FileObject->PrivateCacheMap = Bcb;
1275 Bcb->RefCount++;
1276 }
1277 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1278 {
1279 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1280 Bcb->BcbRemoveListEntry.Flink = NULL;
1281 }
1282 KeReleaseGuardedMutex(&ViewLock);
1283
1284 return(STATUS_SUCCESS);
1285 }
1286
1287 /*
1288 * @implemented
1289 */
1290 PFILE_OBJECT NTAPI
1291 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1292 {
1293 PBCB Bcb;
1294 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1295 {
1296 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1297 ASSERT(Bcb);
1298 return Bcb->FileObject;
1299 }
1300 return NULL;
1301 }
1302
1303 VOID
1304 INIT_FUNCTION
1305 NTAPI
1306 CcInitView(VOID)
1307 {
1308 #ifdef CACHE_BITMAP
1309 PMEMORY_AREA marea;
1310 PVOID Buffer;
1311 PHYSICAL_ADDRESS BoundaryAddressMultiple;
NTSTATUS Status;
1312 #endif
1313
1314 DPRINT("CcInitView()\n");
1315 #ifdef CACHE_BITMAP
1316 BoundaryAddressMultiple.QuadPart = 0;
1317 CiCacheSegMappingRegionHint = 0;
1318 CiCacheSegMappingRegionBase = NULL;
1319
1320 MmLockAddressSpace(MmGetKernelAddressSpace());
1321
1322 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1323 MEMORY_AREA_CACHE_SEGMENT,
1324 &CiCacheSegMappingRegionBase,
1325 CI_CACHESEG_MAPPING_REGION_SIZE,
1326 PAGE_READWRITE,
1327 &marea,
1328 FALSE,
1329 0,
1330 BoundaryAddressMultiple);
1331 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1332 if (!NT_SUCCESS(Status))
1333 {
1334 KeBugCheck(CACHE_MANAGER);
1335 }
1336
1337 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1338 if (!Buffer)
1339 {
1340 KeBugCheck(CACHE_MANAGER);
1341 }
1342
1343 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1344 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1345
1346 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1347 #endif
1348 InitializeListHead(&CacheSegmentListHead);
1349 InitializeListHead(&DirtySegmentListHead);
1350 InitializeListHead(&CacheSegmentLRUListHead);
1351 InitializeListHead(&ClosedListHead);
1352 KeInitializeGuardedMutex(&ViewLock);
1353 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1354 NULL,
1355 NULL,
1356 0,
1357 sizeof(INTERNAL_BCB),
1358 TAG_IBCB,
1359 20);
1360 ExInitializeNPagedLookasideList (&BcbLookasideList,
1361 NULL,
1362 NULL,
1363 0,
1364 sizeof(BCB),
1365 TAG_BCB,
1366 20);
1367 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1368 NULL,
1369 NULL,
1370 0,
1371 sizeof(CACHE_SEGMENT),
1372 TAG_CSEG,
1373 20);
1374
1375 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1376
1377 CcInitCacheZeroPage();
1378
1379 }
1380
1381 /* EOF */