bd52296509ca48ffa9e7ff5b3a2099a44cd4919c
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
73 #if DBG
74 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
75 {
76 ++cs->ReferenceCount;
77 if ( cs->Bcb->Trace )
78 {
79 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
80 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
81 }
82 }
83 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
84 {
85 --cs->ReferenceCount;
86 if ( cs->Bcb->Trace )
87 {
88 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
89 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
90 }
91 }
92 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
93 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
94 #else
95 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
96 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
97 #endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
/*
 * Enable or disable per-cache-map debug tracing (DBG builds only).
 * When enabling, dumps the current state of every cache segment attached
 * to the BCB. On free builds both parameters are deliberately unused.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if ( !Bcb )
        return;

    Bcb->Trace = Trace;

    if ( Trace )
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );

        /* Lock order used throughout this file: ViewLock first, then the
         * per-BCB spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
    }

#else
    /* Silence unused-parameter warnings on free builds. */
    Bcb = Bcb;
    Trace = Trace;
#endif
}
150
/*
 * Write a dirty cache segment back to its backing medium. On success the
 * segment is removed from the dirty list, the global dirty-page counter is
 * updated, and the reference the dirty state held on the segment is dropped.
 * On failure the segment stays dirty and referenced.
 */
NTSTATUS
NTAPI
CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = WriteCacheSegment(CacheSegment);
    if (NT_SUCCESS(Status))
    {
        /* Lock order: ViewLock, then the owning BCB's spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);

        CacheSegment->Dirty = FALSE;
        RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
        DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
        /* Drop the reference that was taken when the segment became dirty. */
        CcRosCacheSegmentDecRefCount ( CacheSegment );

        KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return(Status);
}
175
/*
 * Walk the global dirty-segment list and write segments back until roughly
 * Target pages have been flushed. *Count receives the number of pages
 * actually written. The static WriteCount[] array implements a simple
 * write-behind pacing scheme: unflushed quota from previous invocations is
 * carried forward so dirty data eventually gets written even with small
 * targets. Always returns STATUS_SUCCESS; per-segment write failures are
 * only logged.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages(ULONG Target, PULONG Count)
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    /* Rolling flush quota carried across calls (see pacing logic below). */
    static ULONG WriteCount[4] = {0, 0, 0, 0};
    ULONG NewTarget;

    DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    /* Age the quota buckets: slot 0 is the amount now due for flushing. */
    WriteCount[0] = WriteCount[1];
    WriteCount[1] = WriteCount[2];
    WriteCount[2] = WriteCount[3];
    WriteCount[3] = 0;

    NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];

    if (NewTarget < DirtyPageCount)
    {
        /* Spread the not-yet-scheduled dirty pages over the next 4 rounds. */
        NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
        WriteCount[0] += NewTarget;
        WriteCount[1] += NewTarget;
        WriteCount[2] += NewTarget;
        WriteCount[3] += NewTarget;
    }

    NewTarget = WriteCount[0];

    /* Flush at least the pacing quota, even if the caller asked for less. */
    Target = max(NewTarget, Target);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (current_entry != &DirtySegmentListHead && Target > 0)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Non-blocking acquires only: skip segments we cannot lock now. */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, FALSE);
        if (!Locked)
        {
            continue;
        }

        Locked = ExTryToAcquirePushLockExclusive(&current->Lock);
        if (!Locked)
        {
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);

            continue;
        }

        ASSERT(current->Dirty);
        /* RefCount > 1 means someone besides the dirty state holds the
         * segment; leave it for a later pass. */
        if (current->ReferenceCount > 1)
        {
            ExReleasePushLock(&current->Lock);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            continue;
        }

        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;

        /* Drop ViewLock across the actual I/O; CcRosFlushCacheSegment
         * reacquires it internally. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        ExReleasePushLock(&current->Lock);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* The list may have changed while unlocked: restart from the head. */
        KeAcquireGuardedMutex(&ViewLock);
        current_entry = DirtySegmentListHead.Flink;
    }

    /* Carry any unflushed quota forward to the next round. */
    if (*Count < NewTarget)
    {
        WriteCount[1] += (NewTarget - *Count);
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
287
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 *
 * Walks the LRU list, pages out mapped-but-clean segments, and frees
 * segments whose reference count has dropped to zero. If the target is not
 * met on the first pass, dirty pages are flushed once and the scan retried.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %d)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the segment */
            for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from all global lists; the BcbSegmentListEntry is
             * reused to queue the segment on the local free list. */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if (Target > 0 && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Actually release the segments collected above, outside all locks. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %d cache pages\n", (*NrFreed));

    return(STATUS_SUCCESS);
}
407
/*
 * Release a cache segment previously obtained via CcRosGetCacheSegment /
 * CcRosLookupCacheSegment. Updates the Valid flag, accumulates the Dirty
 * flag, and adjusts the reference count: the caller's reference is dropped,
 * while a first mapping or a clean->dirty transition each pin the segment
 * with an extra reference. Finally drops the segment's push lock.
 */
NTSTATUS
NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
                         PCACHE_SEGMENT CacheSeg,
                         BOOLEAN Valid,
                         BOOLEAN Dirty,
                         BOOLEAN Mapped)
{
    BOOLEAN WasDirty = CacheSeg->Dirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
           Bcb, CacheSeg, Valid);

    CacheSeg->Valid = Valid;
    /* Dirty is sticky: once set it is only cleared by a successful flush. */
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    KeAcquireGuardedMutex(&ViewLock);
    if (!WasDirty && CacheSeg->Dirty)
    {
        /* Clean -> dirty transition: enter the global dirty list. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the caller's reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but keep the segment pinned while it is mapped for the first time
     * or while it is dirty. */
    if (Mapped && CacheSeg->MappedCount == 1)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    ExReleasePushLock(&CacheSeg->Lock);

    return(STATUS_SUCCESS);
}
454
/* Returns with Cache Segment Lock Held! */
/*
 * Find the cache segment of Bcb that covers FileOffset. On a hit the
 * segment is returned referenced AND with its push lock held exclusively;
 * the caller must release both. Returns NULL when no segment covers the
 * offset. Note the push lock is acquired after the BCB spinlock is
 * released, so this call may block.
 */
PCACHE_SEGMENT
NTAPI
CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);

    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    current_entry = Bcb->BcbSegmentListHead.Flink;
    while (current_entry != &Bcb->BcbSegmentListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        /* Segment covers [FileOffset, FileOffset + CacheSegmentSize). */
        if (current->FileOffset <= FileOffset &&
                (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
        {
            /* Reference under the spinlock so the segment cannot vanish
             * before we take its push lock. */
            CcRosCacheSegmentIncRefCount(current);
            KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
            ExAcquirePushLockExclusive(&current->Lock);
            return(current);
        }
        current_entry = current_entry->Flink;
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    return(NULL);
}
487
/*
 * Mark the cache segment covering FileOffset as dirty. Bugchecks if no
 * segment covers the offset. A segment that was clean keeps the reference
 * acquired by the lookup (pinning it while dirty); an already-dirty segment
 * has that lookup reference dropped. The segment is also moved to the tail
 * of the LRU list.
 */
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);

    /* Returns the segment referenced and with its push lock held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }
    if (!CacheSeg->Dirty)
    {
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        /* Already dirty: drop the extra reference taken by the lookup. */
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        CcRosCacheSegmentDecRefCount(CacheSeg);
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /* NOTE(review): Dirty is set only here, after the segment was already
     * inserted into the dirty list above — presumably safe because the
     * push lock is still held, but worth confirming against concurrent
     * readers of the Dirty flag. */
    CacheSeg->Dirty = TRUE;
    ExReleasePushLock(&CacheSeg->Lock);

    return(STATUS_SUCCESS);
}
531
/*
 * Drop one mapping of the cache segment covering FileOffset, optionally
 * marking it dirty. The lookup reference is released; an extra reference
 * is kept if the segment just became dirty, and the mapping-pin reference
 * is dropped when the last mapping goes away. Returns STATUS_UNSUCCESSFUL
 * if no segment covers the offset.
 */
NTSTATUS
NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
           Bcb, FileOffset, NowDirty);

    /* Returns the segment referenced and with its push lock held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* Clean -> dirty transition: enter the global dirty list. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }

    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the lookup reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...keep a pin while dirty... */
    if (!WasDirty && NowDirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and drop the mapping pin when the last mapping disappears. */
    if (CacheSeg->MappedCount == 0)
    {
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

    ExReleasePushLock(&CacheSeg->Lock);
    return(STATUS_SUCCESS);
}
579
580 static
581 NTSTATUS
582 CcRosCreateCacheSegment(PBCB Bcb,
583 ULONG FileOffset,
584 PCACHE_SEGMENT* CacheSeg)
585 {
586 PCACHE_SEGMENT current;
587 PCACHE_SEGMENT previous;
588 PLIST_ENTRY current_entry;
589 NTSTATUS Status;
590 KIRQL oldIrql;
591 #ifdef CACHE_BITMAP
592 ULONG StartingOffset;
593 #endif
594 PHYSICAL_ADDRESS BoundaryAddressMultiple;
595
596 ASSERT(Bcb);
597
598 DPRINT("CcRosCreateCacheSegment()\n");
599
600 BoundaryAddressMultiple.QuadPart = 0;
601 if (FileOffset >= Bcb->FileSize.u.LowPart)
602 {
603 CacheSeg = NULL;
604 return STATUS_INVALID_PARAMETER;
605 }
606
607 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
608 current->Valid = FALSE;
609 current->Dirty = FALSE;
610 current->PageOut = FALSE;
611 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
612 current->Bcb = Bcb;
613 #if DBG
614 if ( Bcb->Trace )
615 {
616 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
617 }
618 #endif
619 current->MappedCount = 0;
620 current->DirtySegmentListEntry.Flink = NULL;
621 current->DirtySegmentListEntry.Blink = NULL;
622 current->ReferenceCount = 1;
623 ExInitializePushLock(&current->Lock);
624 ExAcquirePushLockExclusive(&current->Lock);
625 KeAcquireGuardedMutex(&ViewLock);
626
627 *CacheSeg = current;
628 /* There is window between the call to CcRosLookupCacheSegment
629 * and CcRosCreateCacheSegment. We must check if a segment on
630 * the fileoffset exist. If there exist a segment, we release
631 * our new created segment and return the existing one.
632 */
633 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
634 current_entry = Bcb->BcbSegmentListHead.Flink;
635 previous = NULL;
636 while (current_entry != &Bcb->BcbSegmentListHead)
637 {
638 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
639 BcbSegmentListEntry);
640 if (current->FileOffset <= FileOffset &&
641 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
642 {
643 CcRosCacheSegmentIncRefCount(current);
644 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
645 #if DBG
646 if ( Bcb->Trace )
647 {
648 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
649 Bcb,
650 (*CacheSeg),
651 current );
652 }
653 #endif
654 ExReleasePushLock(&(*CacheSeg)->Lock);
655 KeReleaseGuardedMutex(&ViewLock);
656 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
657 *CacheSeg = current;
658 ExAcquirePushLockExclusive(&current->Lock);
659 return STATUS_SUCCESS;
660 }
661 if (current->FileOffset < FileOffset)
662 {
663 if (previous == NULL)
664 {
665 previous = current;
666 }
667 else
668 {
669 if (previous->FileOffset < current->FileOffset)
670 {
671 previous = current;
672 }
673 }
674 }
675 current_entry = current_entry->Flink;
676 }
677 /* There was no existing segment. */
678 current = *CacheSeg;
679 if (previous)
680 {
681 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
682 }
683 else
684 {
685 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
686 }
687 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
688 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
689 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
690 KeReleaseGuardedMutex(&ViewLock);
691 #ifdef CACHE_BITMAP
692 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
693
694 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
695
696 if (StartingOffset == 0xffffffff)
697 {
698 DPRINT1("Out of CacheSeg mapping space\n");
699 KeBugCheck(CACHE_MANAGER);
700 }
701
702 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
703
704 if (CiCacheSegMappingRegionHint == StartingOffset)
705 {
706 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
707 }
708
709 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
710 #else
711 MmLockAddressSpace(MmGetKernelAddressSpace());
712 current->BaseAddress = NULL;
713 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
714 0, // nothing checks for cache_segment mareas, so set to 0
715 &current->BaseAddress,
716 Bcb->CacheSegmentSize,
717 PAGE_READWRITE,
718 (PMEMORY_AREA*)&current->MemoryArea,
719 FALSE,
720 0,
721 BoundaryAddressMultiple);
722 MmUnlockAddressSpace(MmGetKernelAddressSpace());
723 if (!NT_SUCCESS(Status))
724 {
725 KeBugCheck(CACHE_MANAGER);
726 }
727 #endif
728
729 /* Create a virtual mapping for this memory area */
730 MI_SET_USAGE(MI_USAGE_CACHE);
731 #if MI_TRACE_PFNS
732 PWCHAR pos = NULL;
733 ULONG len = 0;
734 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
735 {
736 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
737 len = wcslen(pos) * sizeof(WCHAR);
738 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
739 }
740 #endif
741
742 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
743 MC_CACHE, PAGE_READWRITE);
744
745 return(STATUS_SUCCESS);
746 }
747
/*
 * Obtain a linked chain of cache segments covering [FileOffset,
 * FileOffset + Length). Each segment is looked up (and moved to the LRU
 * tail) or freshly created; the segments are then linked through their
 * NextInChain fields, with *CacheSeg receiving the head.
 *
 * NOTE(review): the return status of CcRosCreateCacheSegment is ignored;
 * on failure 'current' could presumably be invalid — verify callers only
 * pass in-range offsets.
 */
NTSTATUS
NTAPI
CcRosGetCacheSegmentChain(PBCB Bcb,
                          ULONG FileOffset,
                          ULONG Length,
                          PCACHE_SEGMENT* CacheSeg)
{
    PCACHE_SEGMENT current;
    ULONG i;
    PCACHE_SEGMENT* CacheSegList;
    PCACHE_SEGMENT Previous = NULL;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegmentChain()\n");

    Length = ROUND_UP(Length, Bcb->CacheSegmentSize);

    /* Temporary pointer array on the stack; size is Length/CacheSegmentSize
     * entries, so callers must keep Length modest. */
    CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
                           (Length / Bcb->CacheSegmentSize));

    /*
     * Look for a cache segment already mapping the same data.
     */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
        current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            CacheSegList[i] = current;
        }
        else
        {
            CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
            CacheSegList[i] = current;
        }
    }

    /* Thread the collected segments into a NULL-terminated chain. */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        if (i == 0)
        {
            *CacheSeg = CacheSegList[i];
            Previous = CacheSegList[i];
        }
        else
        {
            Previous->NextInChain = CacheSegList[i];
            Previous = CacheSegList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return(STATUS_SUCCESS);
}
813
814 NTSTATUS
815 NTAPI
816 CcRosGetCacheSegment(PBCB Bcb,
817 ULONG FileOffset,
818 PULONG BaseOffset,
819 PVOID* BaseAddress,
820 PBOOLEAN UptoDate,
821 PCACHE_SEGMENT* CacheSeg)
822 {
823 PCACHE_SEGMENT current;
824 NTSTATUS Status;
825
826 ASSERT(Bcb);
827
828 DPRINT("CcRosGetCacheSegment()\n");
829
830 /*
831 * Look for a cache segment already mapping the same data.
832 */
833 current = CcRosLookupCacheSegment(Bcb, FileOffset);
834 if (current == NULL)
835 {
836 /*
837 * Otherwise create a new segment.
838 */
839 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
840 if (!NT_SUCCESS(Status))
841 {
842 return Status;
843 }
844 }
845
846 KeAcquireGuardedMutex(&ViewLock);
847
848 /* Move to the tail of the LRU list */
849 RemoveEntryList(&current->CacheSegmentLRUListEntry);
850 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
851
852 KeReleaseGuardedMutex(&ViewLock);
853
854 /*
855 * Return information about the segment to the caller.
856 */
857 *UptoDate = current->Valid;
858 *BaseAddress = current->BaseAddress;
859 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
860 *CacheSeg = current;
861 *BaseOffset = current->FileOffset;
862 return(STATUS_SUCCESS);
863 }
864
865 NTSTATUS NTAPI
866 CcRosRequestCacheSegment(PBCB Bcb,
867 ULONG FileOffset,
868 PVOID* BaseAddress,
869 PBOOLEAN UptoDate,
870 PCACHE_SEGMENT* CacheSeg)
871 /*
872 * FUNCTION: Request a page mapping for a BCB
873 */
874 {
875 ULONG BaseOffset;
876
877 ASSERT(Bcb);
878
879 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
880 {
881 DPRINT1("Bad fileoffset %x should be multiple of %x",
882 FileOffset, Bcb->CacheSegmentSize);
883 KeBugCheck(CACHE_MANAGER);
884 }
885
886 return(CcRosGetCacheSegment(Bcb,
887 FileOffset,
888 &BaseOffset,
889 BaseAddress,
890 UptoDate,
891 CacheSeg));
892 }
893 #ifdef CACHE_BITMAP
894 #else
895 static VOID
896 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
897 PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
898 {
899 ASSERT(SwapEntry == 0);
900 if (Page != 0)
901 {
902 ASSERT(MmGetReferenceCountPage(Page) == 1);
903 MmReleasePageMemoryConsumer(MC_CACHE, Page);
904 }
905 }
906 #endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 *
 * Frees the segment's address-space mapping (bitmap region or memory area,
 * depending on CACHE_BITMAP), releases its pages, and returns the segment
 * structure to the lookaside list. The segment must already be unlinked
 * from all lists.
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Let the next allocation start scanning at the lowest free slot. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* CcFreeCachePage releases each physical page back to MC_CACHE. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
961
/*
 * Unlink a cache segment from every list it belongs to (BCB list, global
 * list, LRU list, and — if dirty — the dirty list, adjusting the dirty
 * page counter) and then release its memory via
 * CcRosInternalFreeCacheSegment.
 */
NTSTATUS
NTAPI
CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
           Bcb, CacheSeg);

    /* Lock order: ViewLock first, then the BCB spinlock. */
    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
    RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    if (CacheSeg->Dirty)
    {
        RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
        DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;

    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Free the actual mapping and structure outside the locks. */
    Status = CcRosInternalFreeCacheSegment(CacheSeg);
    return(Status);
}
991
/*
 * @implemented
 */
/*
 * Flush the dirty cache segments of a cached file back to disk. With a
 * NULL FileOffset the whole file (up to the cached FileSize) is flushed.
 * IoStatus, when supplied, receives STATUS_SUCCESS, the last flush error,
 * or STATUS_INVALID_PARAMETER when the file has no shared cache map.
 * Note: IoStatus->Information is never updated beyond 0 here.
 */
VOID NTAPI
CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
             IN PLARGE_INTEGER FileOffset OPTIONAL,
             IN ULONG Length,
             OUT PIO_STATUS_BLOCK IoStatus)
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No offset given: flush the whole cached file. */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one cache segment at a time. */
        while (Length > 0)
        {
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                /* Release the push lock and the lookup reference. */
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                ExReleasePushLock(&current->Lock);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
            }

            Offset.QuadPart += Bcb->CacheSegmentSize;
            if (Length > Bcb->CacheSegmentSize)
            {
                Length -= Bcb->CacheSegmentSize;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1068
NTSTATUS
NTAPI
CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
/*
 * FUNCTION: Releases the BCB associated with a file object
 *
 * NOTE(review): callers appear to hold ViewLock on entry — this routine
 * releases it around the flush and returns with it held again; verify at
 * each call site. When the reference count drops to zero, all cache
 * segments are torn down and the BCB itself is freed.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(Bcb);

    /* Keep the BCB alive while ViewLock is dropped for the flush. */
    Bcb->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all cache segments.
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
        {
            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            if (current->Dirty)
            {
                RemoveEntryList(&current->DirtySegmentListEntry);
                DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
                DPRINT1("Freeing dirty segment\n");
            }
            /* BcbSegmentListEntry is reused to queue onto the local list. */
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
#if DBG
        Bcb->Trace = FALSE;
#endif
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

        /* Free the segments and the BCB itself outside ViewLock. */
        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject (Bcb->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            CcRosInternalFreeCacheSegment(current);
        }
        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return(STATUS_SUCCESS);
}
1139
1140 VOID
1141 NTAPI
1142 CcRosReferenceCache(PFILE_OBJECT FileObject)
1143 {
1144 PBCB Bcb;
1145 KeAcquireGuardedMutex(&ViewLock);
1146 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1147 ASSERT(Bcb);
1148 if (Bcb->RefCount == 0)
1149 {
1150 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1151 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1152 Bcb->BcbRemoveListEntry.Flink = NULL;
1153
1154 }
1155 else
1156 {
1157 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1158 }
1159 Bcb->RefCount++;
1160 KeReleaseGuardedMutex(&ViewLock);
1161 }
1162
1163 VOID
1164 NTAPI
1165 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1166 {
1167 PBCB Bcb;
1168 DPRINT("CcRosSetRemoveOnClose()\n");
1169 KeAcquireGuardedMutex(&ViewLock);
1170 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1171 if (Bcb)
1172 {
1173 Bcb->RemoveOnClose = TRUE;
1174 if (Bcb->RefCount == 0)
1175 {
1176 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1177 }
1178 }
1179 KeReleaseGuardedMutex(&ViewLock);
1180 }
1181
1182
1183 VOID
1184 NTAPI
1185 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1186 {
1187 PBCB Bcb;
1188 KeAcquireGuardedMutex(&ViewLock);
1189 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1190 ASSERT(Bcb);
1191 if (Bcb->RefCount > 0)
1192 {
1193 Bcb->RefCount--;
1194 if (Bcb->RefCount == 0)
1195 {
1196 MmFreeSectionSegments(Bcb->FileObject);
1197 CcRosDeleteFileCache(FileObject, Bcb);
1198 }
1199 }
1200 KeReleaseGuardedMutex(&ViewLock);
1201 }
1202
1203 NTSTATUS NTAPI
1204 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1205 /*
1206 * FUNCTION: Called by the file system when a handle to a file object
1207 * has been closed.
1208 */
1209 {
1210 PBCB Bcb;
1211
1212 KeAcquireGuardedMutex(&ViewLock);
1213
1214 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1215 {
1216 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1217 if (FileObject->PrivateCacheMap != NULL)
1218 {
1219 FileObject->PrivateCacheMap = NULL;
1220 if (Bcb->RefCount > 0)
1221 {
1222 Bcb->RefCount--;
1223 if (Bcb->RefCount == 0)
1224 {
1225 MmFreeSectionSegments(Bcb->FileObject);
1226 CcRosDeleteFileCache(FileObject, Bcb);
1227 }
1228 }
1229 }
1230 }
1231 KeReleaseGuardedMutex(&ViewLock);
1232 return(STATUS_SUCCESS);
1233 }
1234
1235 NTSTATUS
1236 NTAPI
1237 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1238 {
1239 PBCB Bcb;
1240 NTSTATUS Status;
1241
1242 KeAcquireGuardedMutex(&ViewLock);
1243
1244 ASSERT(FileObject->SectionObjectPointer);
1245 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1246 if (Bcb == NULL)
1247 {
1248 Status = STATUS_UNSUCCESSFUL;
1249 }
1250 else
1251 {
1252 if (FileObject->PrivateCacheMap == NULL)
1253 {
1254 FileObject->PrivateCacheMap = Bcb;
1255 Bcb->RefCount++;
1256 }
1257 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1258 {
1259 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1260 Bcb->BcbRemoveListEntry.Flink = NULL;
1261 }
1262 Status = STATUS_SUCCESS;
1263 }
1264 KeReleaseGuardedMutex(&ViewLock);
1265
1266 return Status;
1267 }
1268
1269
1270 NTSTATUS NTAPI
1271 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1272 ULONG CacheSegmentSize,
1273 PCACHE_MANAGER_CALLBACKS CallBacks,
1274 PVOID LazyWriterContext)
1275 /*
1276 * FUNCTION: Initializes a BCB for a file object
1277 */
1278 {
1279 PBCB Bcb;
1280
1281 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1282 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1283 FileObject, Bcb, CacheSegmentSize);
1284
1285 KeAcquireGuardedMutex(&ViewLock);
1286 if (Bcb == NULL)
1287 {
1288 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1289 if (Bcb == NULL)
1290 {
1291 KeReleaseGuardedMutex(&ViewLock);
1292 return(STATUS_UNSUCCESSFUL);
1293 }
1294 memset(Bcb, 0, sizeof(BCB));
1295 ObReferenceObjectByPointer(FileObject,
1296 FILE_ALL_ACCESS,
1297 NULL,
1298 KernelMode);
1299 Bcb->FileObject = FileObject;
1300 Bcb->CacheSegmentSize = CacheSegmentSize;
1301 Bcb->Callbacks = CallBacks;
1302 Bcb->LazyWriteContext = LazyWriterContext;
1303 if (FileObject->FsContext)
1304 {
1305 Bcb->AllocationSize =
1306 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1307 Bcb->FileSize =
1308 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1309 }
1310 KeInitializeSpinLock(&Bcb->BcbLock);
1311 InitializeListHead(&Bcb->BcbSegmentListHead);
1312 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1313 }
1314 if (FileObject->PrivateCacheMap == NULL)
1315 {
1316 FileObject->PrivateCacheMap = Bcb;
1317 Bcb->RefCount++;
1318 }
1319 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1320 {
1321 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1322 Bcb->BcbRemoveListEntry.Flink = NULL;
1323 }
1324 KeReleaseGuardedMutex(&ViewLock);
1325
1326 return(STATUS_SUCCESS);
1327 }
1328
1329 /*
1330 * @implemented
1331 */
1332 PFILE_OBJECT NTAPI
1333 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1334 {
1335 PBCB Bcb;
1336 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1337 {
1338 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1339 ASSERT(Bcb);
1340 return Bcb->FileObject;
1341 }
1342 return NULL;
1343 }
1344
1345 VOID
1346 INIT_FUNCTION
1347 NTAPI
1348 CcInitView(VOID)
1349 {
1350 #ifdef CACHE_BITMAP
1351 PMEMORY_AREA marea;
1352 PVOID Buffer;
1353 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1354 #endif
1355
1356 DPRINT("CcInitView()\n");
1357 #ifdef CACHE_BITMAP
1358 BoundaryAddressMultiple.QuadPart = 0;
1359 CiCacheSegMappingRegionHint = 0;
1360 CiCacheSegMappingRegionBase = NULL;
1361
1362 MmLockAddressSpace(MmGetKernelAddressSpace());
1363
1364 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1365 MEMORY_AREA_CACHE_SEGMENT,
1366 &CiCacheSegMappingRegionBase,
1367 CI_CACHESEG_MAPPING_REGION_SIZE,
1368 PAGE_READWRITE,
1369 &marea,
1370 FALSE,
1371 0,
1372 BoundaryAddressMultiple);
1373 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1374 if (!NT_SUCCESS(Status))
1375 {
1376 KeBugCheck(CACHE_MANAGER);
1377 }
1378
1379 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1380 if (!Buffer)
1381 {
1382 KeBugCheck(CACHE_MANAGER);
1383 }
1384
1385 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1386 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1387
1388 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1389 #endif
1390 InitializeListHead(&CacheSegmentListHead);
1391 InitializeListHead(&DirtySegmentListHead);
1392 InitializeListHead(&CacheSegmentLRUListHead);
1393 InitializeListHead(&ClosedListHead);
1394 KeInitializeGuardedMutex(&ViewLock);
1395 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1396 NULL,
1397 NULL,
1398 0,
1399 sizeof(INTERNAL_BCB),
1400 TAG_IBCB,
1401 20);
1402 ExInitializeNPagedLookasideList (&BcbLookasideList,
1403 NULL,
1404 NULL,
1405 0,
1406 sizeof(BCB),
1407 TAG_BCB,
1408 20);
1409 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1410 NULL,
1411 NULL,
1412 0,
1413 sizeof(CACHE_SEGMENT),
1414 TAG_CSEG,
1415 20);
1416
1417 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1418
1419 CcInitCacheZeroPage();
1420
1421 }
1422
1423 /* EOF */
1424
1425
1426
1427
1428
1429
1430
1431