89167bfde3f8d3f805563a684f8bcb910be8f719
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
#if DBG
/*
 * Debug builds: reference-count helpers that additionally log each change
 * (with the caller's file/line) when per-BCB tracing is enabled via
 * CcRosTraceCacheMap. The macros below forward __FILE__/__LINE__.
 */
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    ++cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    --cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Release builds: plain unsynchronized increment/decrement. Callers are
 * expected to hold the owning BCB's BcbLock around these. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
VOID
NTAPI
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
/*
 * FUNCTION: Enable or disable debug tracing for one cache map (BCB).
 * On enable (debug builds only) it also dumps the current state of every
 * cache segment attached to the BCB. No-op in release builds.
 */
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if ( !Bcb )
        return;

    Bcb->Trace = Trace;

    if ( Trace )
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );

        /* Lock order: ViewLock first, then the per-BCB spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
    }

#else
    /* Silence unused-parameter warnings in release builds. */
    Bcb = Bcb;
    Trace = Trace;
#endif
}
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
154 {
155 NTSTATUS Status;
156 KIRQL oldIrql;
157
158 Status = WriteCacheSegment(CacheSegment);
159 if (NT_SUCCESS(Status))
160 {
161 KeAcquireGuardedMutex(&ViewLock);
162 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
163
164 CacheSegment->Dirty = FALSE;
165 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
166 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
167 CcRosCacheSegmentDecRefCount ( CacheSegment );
168
169 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
170 KeReleaseGuardedMutex(&ViewLock);
171 }
172
173 return(Status);
174 }
175
NTSTATUS
NTAPI
CcRosFlushDirtyPages(ULONG Target, PULONG Count)
/*
 * FUNCTION: Walk the global dirty-segment list and write segments back
 * to disk until roughly Target pages have been flushed.
 * ARGUMENTS:
 *     Target - Number of dirty pages the caller wants flushed.
 *     Count - Receives the number of pages actually flushed.
 * NOTE: The static WriteCount[] array is a four-slot quota history that
 * spreads outstanding dirty pages over about four successive calls so a
 * single call does not have to write everything at once.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    static ULONG WriteCount[4] = {0, 0, 0, 0};
    ULONG NewTarget;

    DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    /* Age the quota history by one slot. */
    WriteCount[0] = WriteCount[1];
    WriteCount[1] = WriteCount[2];
    WriteCount[2] = WriteCount[3];
    WriteCount[3] = 0;

    NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];

    if (NewTarget < DirtyPageCount)
    {
        /* Distribute not-yet-scheduled dirty pages over the next four
         * calls, rounding up. */
        NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
        WriteCount[0] += NewTarget;
        WriteCount[1] += NewTarget;
        WriteCount[2] += NewTarget;
        WriteCount[3] += NewTarget;
    }

    /* This call's quota; never flush less than the caller asked for. */
    NewTarget = WriteCount[0];

    Target = max(NewTarget, Target);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (current_entry != &DirtySegmentListHead && Target > 0)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Ask the FS for lazy-write permission; skip the segment if it
         * refuses (non-blocking: Wait == FALSE). */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, FALSE);
        if (!Locked)
        {
            continue;
        }

        /* Non-blocking try on the segment's push lock; skip on contention. */
        Locked = ExTryToAcquirePushLockExclusive(&current->Lock);
        if (!Locked)
        {
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);

            continue;
        }

        ASSERT(current->Dirty);
        /* The dirty state itself holds one reference; more than one means
         * somebody else is using the segment right now — skip it. */
        if (current->ReferenceCount > 1)
        {
            ExReleasePushLock(&current->Lock);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            continue;
        }

        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;

        /* Drop ViewLock across the write: it may block on disk I/O. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        ExReleasePushLock(&current->Lock);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        KeAcquireGuardedMutex(&ViewLock);
        /* The dirty list may have changed while unlocked: restart from
         * the head instead of trusting the saved iterator. */
        current_entry = DirtySegmentListHead.Flink;
    }

    if (*Count < NewTarget)
    {
        /* Carry the unmet part of this call's quota forward. */
        WriteCount[1] += (NewTarget - *Count);
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
287
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %d)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list front-to-back (least recently used first). */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the segment */
            /* NOTE(review): while both locks are dropped here, the saved
             * current_entry could be unlinked by a concurrent free —
             * iterator validity after this window should be verified. */
            for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from every global/per-BCB list and park the segment
             * on a private list; the actual teardown happens below after
             * all locks are released. */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if (Target > 0 && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now free the segments collected above, outside all locks. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %d cache pages\n", (*NrFreed));

    return(STATUS_SUCCESS);
}
407
NTSTATUS
NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
                         PCACHE_SEGMENT CacheSeg,
                         BOOLEAN Valid,
                         BOOLEAN Dirty,
                         BOOLEAN Mapped)
/*
 * FUNCTION: Release a cache segment previously obtained (with its push
 * lock held) from CcRosLookupCacheSegment/CcRosGetCacheSegment.
 * ARGUMENTS:
 *     Valid - New validity state for the segment's data.
 *     Dirty - TRUE if the caller modified the segment's data.
 *     Mapped - TRUE if the caller created a user mapping of the segment.
 * NOTE: Newly-dirty and newly-mapped states each add one reference, so
 * such segments survive with nonzero ReferenceCount after the caller's
 * own reference is dropped here.
 */
{
    BOOLEAN WasDirty = CacheSeg->Dirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
           Bcb, CacheSeg, Valid);

    CacheSeg->Valid = Valid;
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    KeAcquireGuardedMutex(&ViewLock);
    if (!WasDirty && CacheSeg->Dirty)
    {
        /* Segment just became dirty: account for it globally. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the caller's reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but re-add one for the first mapping... */
    if (Mapped && CacheSeg->MappedCount == 1)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and one for the dirty state, if it was set just now. */
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    /* Matches the push-lock acquire + critical region entered at lookup. */
    ExReleasePushLock(&CacheSeg->Lock);
    KeLeaveCriticalRegion();

    return(STATUS_SUCCESS);
}
455
456 /* Returns with Cache Segment Lock Held! */
457 PCACHE_SEGMENT
458 NTAPI
459 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
460 {
461 PLIST_ENTRY current_entry;
462 PCACHE_SEGMENT current;
463 KIRQL oldIrql;
464
465 ASSERT(Bcb);
466
467 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
468
469 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
470 current_entry = Bcb->BcbSegmentListHead.Flink;
471 while (current_entry != &Bcb->BcbSegmentListHead)
472 {
473 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
474 BcbSegmentListEntry);
475 if (current->FileOffset <= FileOffset &&
476 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
477 {
478 CcRosCacheSegmentIncRefCount(current);
479 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
480 KeEnterCriticalRegion();
481 ExAcquirePushLockExclusive(&current->Lock);
482 return(current);
483 }
484 current_entry = current_entry->Flink;
485 }
486 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
487 return(NULL);
488 }
489
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
/*
 * FUNCTION: Mark the cache segment covering FileOffset as dirty and move
 * it to the tail of the LRU list (most recently used).
 * NOTE: Bugchecks if no segment covers the offset. If the segment was
 * already dirty, the reference taken by the lookup is dropped again;
 * otherwise it is kept, since the dirty state itself holds a reference.
 */
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);

    /* Lookup returns the segment referenced, push lock held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }
    if (!CacheSeg->Dirty)
    {
        /* Newly dirty: put it on the global dirty list and account it. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        /* Already dirty: drop the extra reference from the lookup. */
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        CcRosCacheSegmentDecRefCount(CacheSeg);
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    CacheSeg->Dirty = TRUE;
    /* Matches the push-lock acquire + critical region from the lookup. */
    ExReleasePushLock(&CacheSeg->Lock);
    KeLeaveCriticalRegion();

    return(STATUS_SUCCESS);
}
534
NTSTATUS
NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
/*
 * FUNCTION: Undo one user mapping of the cache segment covering
 * FileOffset, optionally marking it dirty.
 * RETURNS: STATUS_UNSUCCESSFUL if no segment covers the offset.
 * NOTE: Reference bookkeeping mirrors CcRosReleaseCacheSegment: the
 * lookup reference is dropped, a reference is added if the segment just
 * became dirty, and the mapping reference is dropped when the last
 * mapping goes away.
 */
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
           Bcb, FileOffset, NowDirty);

    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* Newly dirty: account it on the global dirty list. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }

    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the lookup reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...add one for the new dirty state... */
    if (!WasDirty && NowDirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and drop the mapping reference when the last mapping is gone. */
    if (CacheSeg->MappedCount == 0)
    {
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

    /* Matches the push-lock acquire + critical region from the lookup. */
    ExReleasePushLock(&CacheSeg->Lock);
    KeLeaveCriticalRegion();

    return(STATUS_SUCCESS);
}
584
585 static
586 NTSTATUS
587 CcRosCreateCacheSegment(PBCB Bcb,
588 ULONG FileOffset,
589 PCACHE_SEGMENT* CacheSeg)
590 {
591 PCACHE_SEGMENT current;
592 PCACHE_SEGMENT previous;
593 PLIST_ENTRY current_entry;
594 NTSTATUS Status;
595 KIRQL oldIrql;
596 #ifdef CACHE_BITMAP
597 ULONG StartingOffset;
598 #endif
599 PHYSICAL_ADDRESS BoundaryAddressMultiple;
600
601 ASSERT(Bcb);
602
603 DPRINT("CcRosCreateCacheSegment()\n");
604
605 BoundaryAddressMultiple.QuadPart = 0;
606 if (FileOffset >= Bcb->FileSize.u.LowPart)
607 {
608 CacheSeg = NULL;
609 return STATUS_INVALID_PARAMETER;
610 }
611
612 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
613 current->Valid = FALSE;
614 current->Dirty = FALSE;
615 current->PageOut = FALSE;
616 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
617 current->Bcb = Bcb;
618 #if DBG
619 if ( Bcb->Trace )
620 {
621 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
622 }
623 #endif
624 current->MappedCount = 0;
625 current->DirtySegmentListEntry.Flink = NULL;
626 current->DirtySegmentListEntry.Blink = NULL;
627 current->ReferenceCount = 1;
628 ExInitializePushLock(&current->Lock);
629 KeEnterCriticalRegion();
630 ExAcquirePushLockExclusive(&current->Lock);
631 KeAcquireGuardedMutex(&ViewLock);
632
633 *CacheSeg = current;
634 /* There is window between the call to CcRosLookupCacheSegment
635 * and CcRosCreateCacheSegment. We must check if a segment on
636 * the fileoffset exist. If there exist a segment, we release
637 * our new created segment and return the existing one.
638 */
639 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
640 current_entry = Bcb->BcbSegmentListHead.Flink;
641 previous = NULL;
642 while (current_entry != &Bcb->BcbSegmentListHead)
643 {
644 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
645 BcbSegmentListEntry);
646 if (current->FileOffset <= FileOffset &&
647 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
648 {
649 CcRosCacheSegmentIncRefCount(current);
650 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
651 #if DBG
652 if ( Bcb->Trace )
653 {
654 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
655 Bcb,
656 (*CacheSeg),
657 current );
658 }
659 #endif
660 ExReleasePushLock(&(*CacheSeg)->Lock);
661 KeReleaseGuardedMutex(&ViewLock);
662 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
663 *CacheSeg = current;
664 /* We're still in the critical region from above */
665 ExAcquirePushLockExclusive(&current->Lock);
666 return STATUS_SUCCESS;
667 }
668 if (current->FileOffset < FileOffset)
669 {
670 if (previous == NULL)
671 {
672 previous = current;
673 }
674 else
675 {
676 if (previous->FileOffset < current->FileOffset)
677 {
678 previous = current;
679 }
680 }
681 }
682 current_entry = current_entry->Flink;
683 }
684 /* There was no existing segment. */
685 current = *CacheSeg;
686 if (previous)
687 {
688 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
689 }
690 else
691 {
692 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
693 }
694 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
695 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
696 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
697 KeReleaseGuardedMutex(&ViewLock);
698 #ifdef CACHE_BITMAP
699 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
700
701 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
702
703 if (StartingOffset == 0xffffffff)
704 {
705 DPRINT1("Out of CacheSeg mapping space\n");
706 KeBugCheck(CACHE_MANAGER);
707 }
708
709 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
710
711 if (CiCacheSegMappingRegionHint == StartingOffset)
712 {
713 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
714 }
715
716 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
717 #else
718 MmLockAddressSpace(MmGetKernelAddressSpace());
719 current->BaseAddress = NULL;
720 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
721 0, // nothing checks for cache_segment mareas, so set to 0
722 &current->BaseAddress,
723 Bcb->CacheSegmentSize,
724 PAGE_READWRITE,
725 (PMEMORY_AREA*)&current->MemoryArea,
726 FALSE,
727 0,
728 BoundaryAddressMultiple);
729 MmUnlockAddressSpace(MmGetKernelAddressSpace());
730 if (!NT_SUCCESS(Status))
731 {
732 KeBugCheck(CACHE_MANAGER);
733 }
734 #endif
735
736 /* Create a virtual mapping for this memory area */
737 MI_SET_USAGE(MI_USAGE_CACHE);
738 #if MI_TRACE_PFNS
739 PWCHAR pos = NULL;
740 ULONG len = 0;
741 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
742 {
743 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
744 len = wcslen(pos) * sizeof(WCHAR);
745 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
746 }
747 #endif
748
749 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
750 MC_CACHE, PAGE_READWRITE);
751
752 return(STATUS_SUCCESS);
753 }
754
NTSTATUS
NTAPI
CcRosGetCacheSegmentChain(PBCB Bcb,
                          ULONG FileOffset,
                          ULONG Length,
                          PCACHE_SEGMENT* CacheSeg)
/*
 * FUNCTION: Obtain the chain of cache segments covering
 * [FileOffset, FileOffset + Length), looking up existing segments and
 * creating missing ones, then link them via NextInChain.
 * RETURNS: STATUS_SUCCESS; *CacheSeg receives the head of the chain.
 * NOTE(review): the status of CcRosCreateCacheSegment is ignored here —
 * on its failure path the segment pointer may be unusable; verify
 * callers never pass offsets beyond the file size.
 */
{
    PCACHE_SEGMENT current;
    ULONG i;
    PCACHE_SEGMENT* CacheSegList;
    PCACHE_SEGMENT Previous = NULL;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegmentChain()\n");

    Length = ROUND_UP(Length, Bcb->CacheSegmentSize);

    /* Temporary pointer array on the stack; Length is bounded by the
     * caller's request size. */
    CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
                           (Length / Bcb->CacheSegmentSize));

    /*
     * Look for a cache segment already mapping the same data.
     */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
        current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            CacheSegList[i] = current;
        }
        else
        {
            CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
            CacheSegList[i] = current;
        }
    }

    /* Thread the collected segments into a singly linked chain. */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        if (i == 0)
        {
            *CacheSeg = CacheSegList[i];
            Previous = CacheSegList[i];
        }
        else
        {
            Previous->NextInChain = CacheSegList[i];
            Previous = CacheSegList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return(STATUS_SUCCESS);
}
820
NTSTATUS
NTAPI
CcRosGetCacheSegment(PBCB Bcb,
                     ULONG FileOffset,
                     PULONG BaseOffset,
                     PVOID* BaseAddress,
                     PBOOLEAN UptoDate,
                     PCACHE_SEGMENT* CacheSeg)
/*
 * FUNCTION: Return the cache segment covering FileOffset, creating it if
 * necessary, and refresh its LRU position.
 * ARGUMENTS:
 *     BaseOffset - Receives the segment's (rounded-down) file offset.
 *     BaseAddress - Receives the segment's kernel mapping address.
 *     UptoDate - Receives whether the segment's data is valid.
 *     CacheSeg - Receives the segment (referenced, push lock held).
 */
{
    PCACHE_SEGMENT current;
    NTSTATUS Status;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegment()\n");

    /*
     * Look for a cache segment already mapping the same data.
     */
    current = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new segment.
         */
        Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the segment to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
    *CacheSeg = current;
    *BaseOffset = current->FileOffset;
    return(STATUS_SUCCESS);
}
871
872 NTSTATUS NTAPI
873 CcRosRequestCacheSegment(PBCB Bcb,
874 ULONG FileOffset,
875 PVOID* BaseAddress,
876 PBOOLEAN UptoDate,
877 PCACHE_SEGMENT* CacheSeg)
878 /*
879 * FUNCTION: Request a page mapping for a BCB
880 */
881 {
882 ULONG BaseOffset;
883
884 ASSERT(Bcb);
885
886 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
887 {
888 DPRINT1("Bad fileoffset %x should be multiple of %x",
889 FileOffset, Bcb->CacheSegmentSize);
890 KeBugCheck(CACHE_MANAGER);
891 }
892
893 return(CcRosGetCacheSegment(Bcb,
894 FileOffset,
895 &BaseOffset,
896 BaseAddress,
897 UptoDate,
898 CacheSeg));
899 }
900 #ifdef CACHE_BITMAP
901 #else
static VOID
CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
                PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
/*
 * FUNCTION: Per-page callback for MmFreeMemoryArea: returns each cache
 * page to the MC_CACHE memory consumer. Cache pages are never swapped,
 * and exactly one reference is expected at teardown.
 */
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
913 #endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 * NOTE: The segment must already be unlinked from all lists. Frees the
 * mapping (bitmap region or memory area depending on CACHE_BITMAP),
 * returns its pages to the MC_CACHE consumer, then frees the structure.
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Keep the allocation hint at the lowest known free slot. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* Memory-area mode: CcFreeCachePage releases each backing page. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
968
NTSTATUS
NTAPI
CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Unlink a cache segment from every list (including the dirty
 * list, adjusting DirtyPageCount) under the proper locks, then free it.
 */
{
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
           Bcb, CacheSeg);

    /* Lock order: ViewLock first, then the per-BCB spinlock. */
    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
    RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    if (CacheSeg->Dirty)
    {
        RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
        DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;

    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Actual teardown happens with no locks held. */
    Status = CcRosInternalFreeCacheSegment(CacheSeg);
    return(Status);
}
998
999 /*
1000 * @implemented
1001 */
VOID NTAPI
CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
             IN PLARGE_INTEGER FileOffset OPTIONAL,
             IN ULONG Length,
             OUT PIO_STATUS_BLOCK IoStatus)
/*
 * FUNCTION: Flush the cached data of a file range back to disk.
 * ARGUMENTS:
 *     FileOffset - Start of the range; if NULL, the whole file (up to
 *                  FileSize.LowPart) is flushed from offset 0.
 *     Length - Number of bytes to flush (segment-granular).
 *     IoStatus - Optional; receives the last flush failure status, if any.
 * NOTE: IoStatus->Information is left at 0; byte counts are not reported.
 */
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No offset given: flush the entire file. */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one cache segment at a time. */
        while (Length > 0)
        {
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                /* Undo the lookup: release push lock, leave the critical
                 * region, and drop the lookup reference. */
                ExReleasePushLock(&current->Lock);
                KeLeaveCriticalRegion();
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
            }

            Offset.QuadPart += Bcb->CacheSegmentSize;
            if (Length > Bcb->CacheSegmentSize)
            {
                Length -= Bcb->CacheSegmentSize;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        /* Nothing is cached for this file. */
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1076
NTSTATUS
NTAPI
CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
/*
 * FUNCTION: Releases the BCB associated with a file object
 * NOTE: Called with ViewLock held (it is temporarily released around the
 * flush and re-acquired before returning). Temporarily bumps RefCount so
 * the BCB cannot vanish while the lock is dropped; if the count then
 * returns to zero, the whole cache map and all its segments are torn down.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(Bcb);

    /* Pin the BCB while ViewLock is dropped for the flush below. */
    Bcb->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write back everything dirty before tearing the map down. */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all cache segments.
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        /* NOTE(review): dead store — the loop below re-reads the list via
         * RemoveTailList on each iteration. */
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
        {
            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            if (current->Dirty)
            {
                RemoveEntryList(&current->DirtySegmentListEntry);
                DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
                DPRINT1("Freeing dirty segment\n");
            }
            /* Park on a private list; freed below with no locks held. */
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
#if DBG
        Bcb->Trace = FALSE;
#endif
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject (Bcb->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            CcRosInternalFreeCacheSegment(current);
        }
        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
        /* Restore the ViewLock-held invariant expected by the caller. */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return(STATUS_SUCCESS);
}
1147
1148 VOID
1149 NTAPI
1150 CcRosReferenceCache(PFILE_OBJECT FileObject)
1151 {
1152 PBCB Bcb;
1153 KeAcquireGuardedMutex(&ViewLock);
1154 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1155 ASSERT(Bcb);
1156 if (Bcb->RefCount == 0)
1157 {
1158 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1159 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1160 Bcb->BcbRemoveListEntry.Flink = NULL;
1161
1162 }
1163 else
1164 {
1165 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1166 }
1167 Bcb->RefCount++;
1168 KeReleaseGuardedMutex(&ViewLock);
1169 }
1170
1171 VOID
1172 NTAPI
1173 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1174 {
1175 PBCB Bcb;
1176 DPRINT("CcRosSetRemoveOnClose()\n");
1177 KeAcquireGuardedMutex(&ViewLock);
1178 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1179 if (Bcb)
1180 {
1181 Bcb->RemoveOnClose = TRUE;
1182 if (Bcb->RefCount == 0)
1183 {
1184 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1185 }
1186 }
1187 KeReleaseGuardedMutex(&ViewLock);
1188 }
1189
1190
1191 VOID
1192 NTAPI
1193 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1194 {
1195 PBCB Bcb;
1196 KeAcquireGuardedMutex(&ViewLock);
1197 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1198 ASSERT(Bcb);
1199 if (Bcb->RefCount > 0)
1200 {
1201 Bcb->RefCount--;
1202 if (Bcb->RefCount == 0)
1203 {
1204 MmFreeSectionSegments(Bcb->FileObject);
1205 CcRosDeleteFileCache(FileObject, Bcb);
1206 }
1207 }
1208 KeReleaseGuardedMutex(&ViewLock);
1209 }
1210
1211 NTSTATUS NTAPI
1212 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1213 /*
1214 * FUNCTION: Called by the file system when a handle to a file object
1215 * has been closed.
1216 */
1217 {
1218 PBCB Bcb;
1219
1220 KeAcquireGuardedMutex(&ViewLock);
1221
1222 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1223 {
1224 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1225 if (FileObject->PrivateCacheMap != NULL)
1226 {
1227 FileObject->PrivateCacheMap = NULL;
1228 if (Bcb->RefCount > 0)
1229 {
1230 Bcb->RefCount--;
1231 if (Bcb->RefCount == 0)
1232 {
1233 MmFreeSectionSegments(Bcb->FileObject);
1234 CcRosDeleteFileCache(FileObject, Bcb);
1235 }
1236 }
1237 }
1238 }
1239 KeReleaseGuardedMutex(&ViewLock);
1240 return(STATUS_SUCCESS);
1241 }
1242
1243 NTSTATUS
1244 NTAPI
1245 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1246 {
1247 PBCB Bcb;
1248 NTSTATUS Status;
1249
1250 KeAcquireGuardedMutex(&ViewLock);
1251
1252 ASSERT(FileObject->SectionObjectPointer);
1253 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1254 if (Bcb == NULL)
1255 {
1256 Status = STATUS_UNSUCCESSFUL;
1257 }
1258 else
1259 {
1260 if (FileObject->PrivateCacheMap == NULL)
1261 {
1262 FileObject->PrivateCacheMap = Bcb;
1263 Bcb->RefCount++;
1264 }
1265 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1266 {
1267 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1268 Bcb->BcbRemoveListEntry.Flink = NULL;
1269 }
1270 Status = STATUS_SUCCESS;
1271 }
1272 KeReleaseGuardedMutex(&ViewLock);
1273
1274 return Status;
1275 }
1276
1277
1278 NTSTATUS NTAPI
1279 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1280 ULONG CacheSegmentSize,
1281 PCACHE_MANAGER_CALLBACKS CallBacks,
1282 PVOID LazyWriterContext)
1283 /*
1284 * FUNCTION: Initializes a BCB for a file object
1285 */
1286 {
1287 PBCB Bcb;
1288
1289 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1290 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1291 FileObject, Bcb, CacheSegmentSize);
1292
1293 KeAcquireGuardedMutex(&ViewLock);
1294 if (Bcb == NULL)
1295 {
1296 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1297 if (Bcb == NULL)
1298 {
1299 KeReleaseGuardedMutex(&ViewLock);
1300 return(STATUS_UNSUCCESSFUL);
1301 }
1302 memset(Bcb, 0, sizeof(BCB));
1303 ObReferenceObjectByPointer(FileObject,
1304 FILE_ALL_ACCESS,
1305 NULL,
1306 KernelMode);
1307 Bcb->FileObject = FileObject;
1308 Bcb->CacheSegmentSize = CacheSegmentSize;
1309 Bcb->Callbacks = CallBacks;
1310 Bcb->LazyWriteContext = LazyWriterContext;
1311 if (FileObject->FsContext)
1312 {
1313 Bcb->AllocationSize =
1314 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1315 Bcb->FileSize =
1316 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1317 }
1318 KeInitializeSpinLock(&Bcb->BcbLock);
1319 InitializeListHead(&Bcb->BcbSegmentListHead);
1320 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1321 }
1322 if (FileObject->PrivateCacheMap == NULL)
1323 {
1324 FileObject->PrivateCacheMap = Bcb;
1325 Bcb->RefCount++;
1326 }
1327 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1328 {
1329 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1330 Bcb->BcbRemoveListEntry.Flink = NULL;
1331 }
1332 KeReleaseGuardedMutex(&ViewLock);
1333
1334 return(STATUS_SUCCESS);
1335 }
1336
1337 /*
1338 * @implemented
1339 */
1340 PFILE_OBJECT NTAPI
1341 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1342 {
1343 PBCB Bcb;
1344 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1345 {
1346 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1347 ASSERT(Bcb);
1348 return Bcb->FileObject;
1349 }
1350 return NULL;
1351 }
1352
1353 VOID
1354 INIT_FUNCTION
1355 NTAPI
1356 CcInitView(VOID)
1357 {
1358 #ifdef CACHE_BITMAP
1359 PMEMORY_AREA marea;
1360 PVOID Buffer;
1361 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1362 #endif
1363
1364 DPRINT("CcInitView()\n");
1365 #ifdef CACHE_BITMAP
1366 BoundaryAddressMultiple.QuadPart = 0;
1367 CiCacheSegMappingRegionHint = 0;
1368 CiCacheSegMappingRegionBase = NULL;
1369
1370 MmLockAddressSpace(MmGetKernelAddressSpace());
1371
1372 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1373 MEMORY_AREA_CACHE_SEGMENT,
1374 &CiCacheSegMappingRegionBase,
1375 CI_CACHESEG_MAPPING_REGION_SIZE,
1376 PAGE_READWRITE,
1377 &marea,
1378 FALSE,
1379 0,
1380 BoundaryAddressMultiple);
1381 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1382 if (!NT_SUCCESS(Status))
1383 {
1384 KeBugCheck(CACHE_MANAGER);
1385 }
1386
1387 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1388 if (!Buffer)
1389 {
1390 KeBugCheck(CACHE_MANAGER);
1391 }
1392
1393 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1394 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1395
1396 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1397 #endif
1398 InitializeListHead(&CacheSegmentListHead);
1399 InitializeListHead(&DirtySegmentListHead);
1400 InitializeListHead(&CacheSegmentLRUListHead);
1401 InitializeListHead(&ClosedListHead);
1402 KeInitializeGuardedMutex(&ViewLock);
1403 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1404 NULL,
1405 NULL,
1406 0,
1407 sizeof(INTERNAL_BCB),
1408 TAG_IBCB,
1409 20);
1410 ExInitializeNPagedLookasideList (&BcbLookasideList,
1411 NULL,
1412 NULL,
1413 0,
1414 sizeof(BCB),
1415 TAG_BCB,
1416 20);
1417 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1418 NULL,
1419 NULL,
1420 0,
1421 sizeof(CACHE_SEGMENT),
1422 TAG_CSEG,
1423 20);
1424
1425 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1426
1427 CcInitCacheZeroPage();
1428
1429 }
1430
1431 /* EOF */
1432
1433
1434
1435
1436
1437
1438
1439