Synchronize up to trunk's revision r57784.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
#if DBG
/*
 * Debug builds route reference counting through these helpers so that any
 * cache map with tracing enabled (Bcb->Trace) logs every count change
 * together with its call site.
 */
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    cs->ReferenceCount++;
    if (!cs->Bcb->Trace)
        return;
    DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
             file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
}
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    cs->ReferenceCount--;
    if (!cs->Bcb->Trace)
        return;
    DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
             file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
}
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Free builds: bare increment/decrement, no tracing overhead. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
105 VOID
106 NTAPI
107 CcRosTraceCacheMap (
108 PBCB Bcb,
109 BOOLEAN Trace )
110 {
111 #if DBG
112 KIRQL oldirql;
113 PLIST_ENTRY current_entry;
114 PCACHE_SEGMENT current;
115
116 if ( !Bcb )
117 return;
118
119 Bcb->Trace = Trace;
120
121 if ( Trace )
122 {
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
124
125 KeAcquireGuardedMutex(&ViewLock);
126 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
127
128 current_entry = Bcb->BcbSegmentListHead.Flink;
129 while (current_entry != &Bcb->BcbSegmentListHead)
130 {
131 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
132 current_entry = current_entry->Flink;
133
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
135 current, current->ReferenceCount, current->Dirty, current->PageOut );
136 }
137 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
138 KeReleaseGuardedMutex(&ViewLock);
139 }
140 else
141 {
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
143 }
144
145 #else
146 Bcb = Bcb;
147 Trace = Trace;
148 #endif
149 }
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
154 {
155 NTSTATUS Status;
156 KIRQL oldIrql;
157
158 Status = WriteCacheSegment(CacheSegment);
159 if (NT_SUCCESS(Status))
160 {
161 KeAcquireGuardedMutex(&ViewLock);
162 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
163
164 CacheSegment->Dirty = FALSE;
165 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
166 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
167 CcRosCacheSegmentDecRefCount ( CacheSegment );
168
169 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
170 KeReleaseGuardedMutex(&ViewLock);
171 }
172
173 return(Status);
174 }
175
NTSTATUS
NTAPI
CcRosFlushDirtyPages(ULONG Target, PULONG Count, BOOLEAN Wait)
/*
 * FUNCTION: Walk the global dirty-segment list, writing segments back to
 * disk until roughly Target pages have been flushed.
 * ARGUMENTS:
 *   Target - Number of dirty pages to try to flush.
 *   Count  - Receives the number of pages actually flushed.
 *   Wait   - TRUE to block on the lazy-write lock and segment mutex,
 *            FALSE to skip segments that are not immediately available.
 * RETURNS: STATUS_SUCCESS (per-segment flush failures are only logged).
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Stay in a critical region so normal kernel APCs cannot interrupt us
     * while we juggle the cache locks below. */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (current_entry != &DirtySegmentListHead && Target > 0)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Keep the segment alive while the locks are dropped below. */
        CcRosCacheSegmentIncRefCount(current);

        /* Ask the FSD for permission to lazy-write this file. */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        /* Zero timeout when !Wait: poll the mutex instead of blocking. */
        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above; the dirty state holds another
         * (see CcRosReleaseCacheSegment), so a count above 2 means the
         * segment is still in use elsewhere — skip it. */
        if (current->ReferenceCount > 2)
        {
            KeReleaseMutex(&current->Mutex, 0);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;

        /* Drop ViewLock for the duration of the actual I/O. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        KeReleaseMutex(&current->Mutex, 0);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosCacheSegmentDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* The list may have changed while ViewLock was released, so
         * restart from the head rather than trusting the saved link. */
        current_entry = DirtySegmentListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
274
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 * Target - The number of pages to be freed.
 * Priority - The priority of free (currently unused).
 * NrFreed - Points to a variable where the number of pages
 * actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %d)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan from the least-recently-used end of the cache. */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* NOTE(review): current_entry still points into the LRU list
             * while ViewLock is released here; the list can change under
             * us before the locks are reacquired — confirm iteration
             * safety. */

            /* Page out the segment */
            for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink it everywhere but keep it reachable through
             * FreeList; the actual teardown happens below, lock-free. */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if (Target > 0 && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Tear down the segments collected above, outside of any lock. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %d cache pages\n", (*NrFreed));

    return(STATUS_SUCCESS);
}
394
NTSTATUS
NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
                         PCACHE_SEGMENT CacheSeg,
                         BOOLEAN Valid,
                         BOOLEAN Dirty,
                         BOOLEAN Mapped)
/*
 * FUNCTION: Release a cache segment previously obtained via lookup or
 * creation, updating its valid/dirty state and dropping the caller's
 * reference.  The segment mutex (held since lookup/creation) is released
 * on exit.
 */
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
           Bcb, CacheSeg, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    CacheSeg->Valid = Valid;

    /* Dirty is sticky: once set it stays set until the segment is
     * flushed (CcRosFlushCacheSegment clears it). */
    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    if (!WasDirty && CacheSeg->Dirty)
    {
        /* First transition to dirty: queue for the lazy writer. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    /* Drop the caller's lookup/creation reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but retain one reference for the first mapping (dropped again in
     * CcRosUnmapCacheSegment when the last mapping goes away)... */
    if (Mapped && CacheSeg->MappedCount == 1)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and one for the new dirty state (dropped again in
     * CcRosFlushCacheSegment). */
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
445
446 /* Returns with Cache Segment Lock Held! */
447 PCACHE_SEGMENT
448 NTAPI
449 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
450 {
451 PLIST_ENTRY current_entry;
452 PCACHE_SEGMENT current;
453 KIRQL oldIrql;
454
455 ASSERT(Bcb);
456
457 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
458
459 KeAcquireGuardedMutex(&ViewLock);
460 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
461
462 current_entry = Bcb->BcbSegmentListHead.Flink;
463 while (current_entry != &Bcb->BcbSegmentListHead)
464 {
465 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
466 BcbSegmentListEntry);
467 if (current->FileOffset <= FileOffset &&
468 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
469 {
470 CcRosCacheSegmentIncRefCount(current);
471 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
472 KeReleaseGuardedMutex(&ViewLock);
473 KeWaitForSingleObject(&current->Mutex,
474 Executive,
475 KernelMode,
476 FALSE,
477 NULL);
478 return(current);
479 }
480 current_entry = current_entry->Flink;
481 }
482
483 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
484 KeReleaseGuardedMutex(&ViewLock);
485
486 return(NULL);
487 }
488
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
/*
 * FUNCTION: Mark the cache segment containing FileOffset as dirty and put
 * it on the dirty list.  Bugchecks if no segment maps the offset.
 */
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);

    /* Returns with the segment referenced and its mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    if (!CacheSeg->Dirty)
    {
        /* Newly dirty: queue it for the lazy writer.  The lookup
         * reference is kept and becomes the dirty-state reference,
         * dropped later by CcRosFlushCacheSegment. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: a dirty reference exists, so the lookup
         * reference can be dropped right away. */
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    CacheSeg->Dirty = TRUE;

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
531
NTSTATUS
NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
/*
 * FUNCTION: Drop one mapping from the segment containing FileOffset,
 * optionally marking the segment dirty.
 * RETURNS: STATUS_UNSUCCESSFUL if no segment maps the offset.
 */
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
           Bcb, FileOffset, NowDirty);

    /* Returns with the segment referenced and its mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    /* Dirty is sticky until the segment is flushed. */
    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* First transition to dirty: queue for the lazy writer. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    /* Drop the lookup reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...retain one for the new dirty state (dropped again by
     * CcRosFlushCacheSegment)... */
    if (!WasDirty && NowDirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and drop the mapping reference once the last mapping is gone. */
    if (CacheSeg->MappedCount == 0)
    {
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
581
582 static
583 NTSTATUS
584 CcRosCreateCacheSegment(PBCB Bcb,
585 ULONG FileOffset,
586 PCACHE_SEGMENT* CacheSeg)
587 {
588 PCACHE_SEGMENT current;
589 PCACHE_SEGMENT previous;
590 PLIST_ENTRY current_entry;
591 NTSTATUS Status;
592 KIRQL oldIrql;
593 #ifdef CACHE_BITMAP
594 ULONG StartingOffset;
595 #endif
596 PHYSICAL_ADDRESS BoundaryAddressMultiple;
597
598 ASSERT(Bcb);
599
600 DPRINT("CcRosCreateCacheSegment()\n");
601
602 BoundaryAddressMultiple.QuadPart = 0;
603 if (FileOffset >= Bcb->FileSize.u.LowPart)
604 {
605 CacheSeg = NULL;
606 return STATUS_INVALID_PARAMETER;
607 }
608
609 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
610 current->Valid = FALSE;
611 current->Dirty = FALSE;
612 current->PageOut = FALSE;
613 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
614 current->Bcb = Bcb;
615 #if DBG
616 if ( Bcb->Trace )
617 {
618 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
619 }
620 #endif
621 current->MappedCount = 0;
622 current->DirtySegmentListEntry.Flink = NULL;
623 current->DirtySegmentListEntry.Blink = NULL;
624 current->ReferenceCount = 1;
625 KeInitializeMutex(&current->Mutex, 0);
626 KeWaitForSingleObject(&current->Mutex,
627 Executive,
628 KernelMode,
629 FALSE,
630 NULL);
631 KeAcquireGuardedMutex(&ViewLock);
632
633 *CacheSeg = current;
634 /* There is window between the call to CcRosLookupCacheSegment
635 * and CcRosCreateCacheSegment. We must check if a segment on
636 * the fileoffset exist. If there exist a segment, we release
637 * our new created segment and return the existing one.
638 */
639 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
640 current_entry = Bcb->BcbSegmentListHead.Flink;
641 previous = NULL;
642 while (current_entry != &Bcb->BcbSegmentListHead)
643 {
644 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
645 BcbSegmentListEntry);
646 if (current->FileOffset <= FileOffset &&
647 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
648 {
649 CcRosCacheSegmentIncRefCount(current);
650 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
651 #if DBG
652 if ( Bcb->Trace )
653 {
654 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
655 Bcb,
656 (*CacheSeg),
657 current );
658 }
659 #endif
660 KeReleaseMutex(&(*CacheSeg)->Mutex, 0);
661 KeReleaseGuardedMutex(&ViewLock);
662 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
663 *CacheSeg = current;
664 KeWaitForSingleObject(&current->Mutex,
665 Executive,
666 KernelMode,
667 FALSE,
668 NULL);
669 return STATUS_SUCCESS;
670 }
671 if (current->FileOffset < FileOffset)
672 {
673 if (previous == NULL)
674 {
675 previous = current;
676 }
677 else
678 {
679 if (previous->FileOffset < current->FileOffset)
680 {
681 previous = current;
682 }
683 }
684 }
685 current_entry = current_entry->Flink;
686 }
687 /* There was no existing segment. */
688 current = *CacheSeg;
689 if (previous)
690 {
691 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
692 }
693 else
694 {
695 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
696 }
697 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
698 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
699 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
700 KeReleaseGuardedMutex(&ViewLock);
701 #ifdef CACHE_BITMAP
702 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
703
704 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
705
706 if (StartingOffset == 0xffffffff)
707 {
708 DPRINT1("Out of CacheSeg mapping space\n");
709 KeBugCheck(CACHE_MANAGER);
710 }
711
712 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
713
714 if (CiCacheSegMappingRegionHint == StartingOffset)
715 {
716 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
717 }
718
719 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
720 #else
721 MmLockAddressSpace(MmGetKernelAddressSpace());
722 current->BaseAddress = NULL;
723 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
724 0, // nothing checks for cache_segment mareas, so set to 0
725 &current->BaseAddress,
726 Bcb->CacheSegmentSize,
727 PAGE_READWRITE,
728 (PMEMORY_AREA*)&current->MemoryArea,
729 FALSE,
730 0,
731 BoundaryAddressMultiple);
732 MmUnlockAddressSpace(MmGetKernelAddressSpace());
733 if (!NT_SUCCESS(Status))
734 {
735 KeBugCheck(CACHE_MANAGER);
736 }
737 #endif
738
739 /* Create a virtual mapping for this memory area */
740 MI_SET_USAGE(MI_USAGE_CACHE);
741 #if MI_TRACE_PFNS
742 PWCHAR pos = NULL;
743 ULONG len = 0;
744 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
745 {
746 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
747 len = wcslen(pos) * sizeof(WCHAR);
748 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
749 }
750 #endif
751
752 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
753 MC_CACHE, PAGE_READWRITE);
754
755 return(STATUS_SUCCESS);
756 }
757
NTSTATUS
NTAPI
CcRosGetCacheSegmentChain(PBCB Bcb,
                          ULONG FileOffset,
                          ULONG Length,
                          PCACHE_SEGMENT* CacheSeg)
/*
 * FUNCTION: Build a NextInChain-linked list of cache segments covering
 * [FileOffset, FileOffset + Length), looking segments up or creating them
 * as needed; *CacheSeg receives the head of the chain.
 */
{
    PCACHE_SEGMENT current;
    ULONG i;
    PCACHE_SEGMENT* CacheSegList;
    PCACHE_SEGMENT Previous = NULL;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegmentChain()\n");

    Length = ROUND_UP(Length, Bcb->CacheSegmentSize);

    /* Stack allocation: one pointer per segment in the requested range. */
    CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
                           (Length / Bcb->CacheSegmentSize));

    /*
     * Look for a cache segment already mapping the same data.
     */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
        current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            CacheSegList[i] = current;
        }
        else
        {
            /* NOTE(review): the returned status is ignored; if creation
             * fails (offset beyond file size), 'current' may be left
             * unset and a stale pointer gets chained — confirm callers
             * guarantee the whole range lies within the file. */
            CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
            CacheSegList[i] = current;
        }
    }

    /* Link the collected segments via NextInChain; the first one becomes
     * the chain head handed back to the caller.  Note the ASSERT below
     * implies Length must be non-zero. */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        if (i == 0)
        {
            *CacheSeg = CacheSegList[i];
            Previous = CacheSegList[i];
        }
        else
        {
            Previous->NextInChain = CacheSegList[i];
            Previous = CacheSegList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return(STATUS_SUCCESS);
}
823
824 NTSTATUS
825 NTAPI
826 CcRosGetCacheSegment(PBCB Bcb,
827 ULONG FileOffset,
828 PULONG BaseOffset,
829 PVOID* BaseAddress,
830 PBOOLEAN UptoDate,
831 PCACHE_SEGMENT* CacheSeg)
832 {
833 PCACHE_SEGMENT current;
834 NTSTATUS Status;
835
836 ASSERT(Bcb);
837
838 DPRINT("CcRosGetCacheSegment()\n");
839
840 /*
841 * Look for a cache segment already mapping the same data.
842 */
843 current = CcRosLookupCacheSegment(Bcb, FileOffset);
844 if (current == NULL)
845 {
846 /*
847 * Otherwise create a new segment.
848 */
849 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
850 if (!NT_SUCCESS(Status))
851 {
852 return Status;
853 }
854 }
855
856 KeAcquireGuardedMutex(&ViewLock);
857
858 /* Move to the tail of the LRU list */
859 RemoveEntryList(&current->CacheSegmentLRUListEntry);
860 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
861
862 KeReleaseGuardedMutex(&ViewLock);
863
864 /*
865 * Return information about the segment to the caller.
866 */
867 *UptoDate = current->Valid;
868 *BaseAddress = current->BaseAddress;
869 DPRINT("*BaseAddress %p\n", *BaseAddress);
870 *CacheSeg = current;
871 *BaseOffset = current->FileOffset;
872 return(STATUS_SUCCESS);
873 }
874
875 NTSTATUS NTAPI
876 CcRosRequestCacheSegment(PBCB Bcb,
877 ULONG FileOffset,
878 PVOID* BaseAddress,
879 PBOOLEAN UptoDate,
880 PCACHE_SEGMENT* CacheSeg)
881 /*
882 * FUNCTION: Request a page mapping for a BCB
883 */
884 {
885 ULONG BaseOffset;
886
887 ASSERT(Bcb);
888
889 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
890 {
891 DPRINT1("Bad fileoffset %x should be multiple of %x",
892 FileOffset, Bcb->CacheSegmentSize);
893 KeBugCheck(CACHE_MANAGER);
894 }
895
896 return(CcRosGetCacheSegment(Bcb,
897 FileOffset,
898 &BaseOffset,
899 BaseAddress,
900 UptoDate,
901 CacheSeg));
902 }
903 #ifdef CACHE_BITMAP
904 #else
905 static VOID
906 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
907 PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
908 {
909 ASSERT(SwapEntry == 0);
910 if (Page != 0)
911 {
912 ASSERT(MmGetReferenceCountPage(Page) == 1);
913 MmReleasePageMemoryConsumer(MC_CACHE, Page);
914 }
915 }
916 #endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 * Callers in this file unlink the segment from every list before calling
 * this; it only tears down the mapping and returns the memory.
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Let the next allocation search start at the lowest free slot. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* Memory-area path: CcFreeCachePage returns each page to the
     * MC_CACHE consumer as the area is destroyed. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
971
NTSTATUS
NTAPI
CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Unlink a cache segment from every global and per-BCB list,
 * then free it.
 */
{
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
           Bcb, CacheSeg);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
    RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    if (CacheSeg->Dirty)
    {
        /* Dirty segments also sit on the dirty list and count toward the
         * global dirty-page total. */
        RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
        DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;

    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Actual teardown happens without the locks held. */
    Status = CcRosInternalFreeCacheSegment(CacheSeg);
    return(Status);
}
1001
1002 /*
1003 * @implemented
1004 */
VOID NTAPI
CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
             IN PLARGE_INTEGER FileOffset OPTIONAL,
             IN ULONG Length,
             OUT PIO_STATUS_BLOCK IoStatus)
/*
 * FUNCTION: Write the dirty cache segments covering the given byte range
 * (the whole file when FileOffset is NULL) back to the medium.
 * IoStatus, when supplied, ends with STATUS_SUCCESS or the status of the
 * last failing flush; Information is always left at 0.
 */
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No explicit range: flush the entire file. */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one cache segment at a time. */
        while (Length > 0)
        {
            /* On a hit, returns with the segment referenced and its
             * mutex held. */
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                KeReleaseMutex(&current->Mutex, 0);

                /* Drop the lookup reference under the usual lock pair. */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += Bcb->CacheSegmentSize;
            if (Length > Bcb->CacheSegmentSize)
            {
                Length -= Bcb->CacheSegmentSize;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        /* Not a cached file. */
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1081
NTSTATUS
NTAPI
CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
/*
 * FUNCTION: Releases the BCB associated with a file object
 * NOTE: Entered and exited with ViewLock held; the lock is dropped
 * around the flush and around the final teardown.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(Bcb);

    /* Pin the Bcb while ViewLock is released for the flush below. */
    Bcb->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
        /* Still parked on the deferred-close list? Take it off. */
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all cache segments.
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
        {
            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            if (current->Dirty)
            {
                /* Should be rare after the flush above; logged loudly. */
                RemoveEntryList(&current->DirtySegmentListEntry);
                DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
                DPRINT1("Freeing dirty segment\n");
            }
            /* Collect the segments; they are freed below, lock-free. */
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
#if DBG
        Bcb->Trace = FALSE;
#endif
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject (Bcb->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            CcRosInternalFreeCacheSegment(current);
        }
        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return(STATUS_SUCCESS);
}
1152
1153 VOID
1154 NTAPI
1155 CcRosReferenceCache(PFILE_OBJECT FileObject)
1156 {
1157 PBCB Bcb;
1158 KeAcquireGuardedMutex(&ViewLock);
1159 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1160 ASSERT(Bcb);
1161 if (Bcb->RefCount == 0)
1162 {
1163 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1164 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1165 Bcb->BcbRemoveListEntry.Flink = NULL;
1166
1167 }
1168 else
1169 {
1170 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1171 }
1172 Bcb->RefCount++;
1173 KeReleaseGuardedMutex(&ViewLock);
1174 }
1175
1176 VOID
1177 NTAPI
1178 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1179 {
1180 PBCB Bcb;
1181 DPRINT("CcRosSetRemoveOnClose()\n");
1182 KeAcquireGuardedMutex(&ViewLock);
1183 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1184 if (Bcb)
1185 {
1186 Bcb->RemoveOnClose = TRUE;
1187 if (Bcb->RefCount == 0)
1188 {
1189 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1190 }
1191 }
1192 KeReleaseGuardedMutex(&ViewLock);
1193 }
1194
1195
1196 VOID
1197 NTAPI
1198 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1199 {
1200 PBCB Bcb;
1201 KeAcquireGuardedMutex(&ViewLock);
1202 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1203 ASSERT(Bcb);
1204 if (Bcb->RefCount > 0)
1205 {
1206 Bcb->RefCount--;
1207 if (Bcb->RefCount == 0)
1208 {
1209 MmFreeSectionSegments(Bcb->FileObject);
1210 CcRosDeleteFileCache(FileObject, Bcb);
1211 }
1212 }
1213 KeReleaseGuardedMutex(&ViewLock);
1214 }
1215
1216 NTSTATUS NTAPI
1217 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1218 /*
1219 * FUNCTION: Called by the file system when a handle to a file object
1220 * has been closed.
1221 */
1222 {
1223 PBCB Bcb;
1224
1225 KeAcquireGuardedMutex(&ViewLock);
1226
1227 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1228 {
1229 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1230 if (FileObject->PrivateCacheMap != NULL)
1231 {
1232 FileObject->PrivateCacheMap = NULL;
1233 if (Bcb->RefCount > 0)
1234 {
1235 Bcb->RefCount--;
1236 if (Bcb->RefCount == 0)
1237 {
1238 MmFreeSectionSegments(Bcb->FileObject);
1239 CcRosDeleteFileCache(FileObject, Bcb);
1240 }
1241 }
1242 }
1243 }
1244 KeReleaseGuardedMutex(&ViewLock);
1245 return(STATUS_SUCCESS);
1246 }
1247
1248 NTSTATUS
1249 NTAPI
1250 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1251 {
1252 PBCB Bcb;
1253 NTSTATUS Status;
1254
1255 KeAcquireGuardedMutex(&ViewLock);
1256
1257 ASSERT(FileObject->SectionObjectPointer);
1258 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1259 if (Bcb == NULL)
1260 {
1261 Status = STATUS_UNSUCCESSFUL;
1262 }
1263 else
1264 {
1265 if (FileObject->PrivateCacheMap == NULL)
1266 {
1267 FileObject->PrivateCacheMap = Bcb;
1268 Bcb->RefCount++;
1269 }
1270 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1271 {
1272 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1273 Bcb->BcbRemoveListEntry.Flink = NULL;
1274 }
1275 Status = STATUS_SUCCESS;
1276 }
1277 KeReleaseGuardedMutex(&ViewLock);
1278
1279 return Status;
1280 }
1281
1282
1283 NTSTATUS NTAPI
1284 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1285 ULONG CacheSegmentSize,
1286 PCACHE_MANAGER_CALLBACKS CallBacks,
1287 PVOID LazyWriterContext)
1288 /*
1289 * FUNCTION: Initializes a BCB for a file object
1290 */
1291 {
1292 PBCB Bcb;
1293
1294 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1295 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1296 FileObject, Bcb, CacheSegmentSize);
1297
1298 KeAcquireGuardedMutex(&ViewLock);
1299 if (Bcb == NULL)
1300 {
1301 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1302 if (Bcb == NULL)
1303 {
1304 KeReleaseGuardedMutex(&ViewLock);
1305 return(STATUS_UNSUCCESSFUL);
1306 }
1307 memset(Bcb, 0, sizeof(BCB));
1308 ObReferenceObjectByPointer(FileObject,
1309 FILE_ALL_ACCESS,
1310 NULL,
1311 KernelMode);
1312 Bcb->FileObject = FileObject;
1313 Bcb->CacheSegmentSize = CacheSegmentSize;
1314 Bcb->Callbacks = CallBacks;
1315 Bcb->LazyWriteContext = LazyWriterContext;
1316 if (FileObject->FsContext)
1317 {
1318 Bcb->AllocationSize =
1319 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1320 Bcb->FileSize =
1321 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1322 }
1323 KeInitializeSpinLock(&Bcb->BcbLock);
1324 InitializeListHead(&Bcb->BcbSegmentListHead);
1325 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1326 }
1327 if (FileObject->PrivateCacheMap == NULL)
1328 {
1329 FileObject->PrivateCacheMap = Bcb;
1330 Bcb->RefCount++;
1331 }
1332 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1333 {
1334 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1335 Bcb->BcbRemoveListEntry.Flink = NULL;
1336 }
1337 KeReleaseGuardedMutex(&ViewLock);
1338
1339 return(STATUS_SUCCESS);
1340 }
1341
1342 /*
1343 * @implemented
1344 */
1345 PFILE_OBJECT NTAPI
1346 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1347 {
1348 PBCB Bcb;
1349 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1350 {
1351 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1352 ASSERT(Bcb);
1353 return Bcb->FileObject;
1354 }
1355 return NULL;
1356 }
1357
1358 VOID
1359 INIT_FUNCTION
1360 NTAPI
1361 CcInitView(VOID)
1362 {
1363 #ifdef CACHE_BITMAP
1364 PMEMORY_AREA marea;
1365 PVOID Buffer;
1366 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1367 #endif
1368
1369 DPRINT("CcInitView()\n");
1370 #ifdef CACHE_BITMAP
1371 BoundaryAddressMultiple.QuadPart = 0;
1372 CiCacheSegMappingRegionHint = 0;
1373 CiCacheSegMappingRegionBase = NULL;
1374
1375 MmLockAddressSpace(MmGetKernelAddressSpace());
1376
1377 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1378 MEMORY_AREA_CACHE_SEGMENT,
1379 &CiCacheSegMappingRegionBase,
1380 CI_CACHESEG_MAPPING_REGION_SIZE,
1381 PAGE_READWRITE,
1382 &marea,
1383 FALSE,
1384 0,
1385 BoundaryAddressMultiple);
1386 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1387 if (!NT_SUCCESS(Status))
1388 {
1389 KeBugCheck(CACHE_MANAGER);
1390 }
1391
1392 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1393 if (!Buffer)
1394 {
1395 KeBugCheck(CACHE_MANAGER);
1396 }
1397
1398 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1399 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1400
1401 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1402 #endif
1403 InitializeListHead(&CacheSegmentListHead);
1404 InitializeListHead(&DirtySegmentListHead);
1405 InitializeListHead(&CacheSegmentLRUListHead);
1406 InitializeListHead(&ClosedListHead);
1407 KeInitializeGuardedMutex(&ViewLock);
1408 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1409 NULL,
1410 NULL,
1411 0,
1412 sizeof(INTERNAL_BCB),
1413 TAG_IBCB,
1414 20);
1415 ExInitializeNPagedLookasideList (&BcbLookasideList,
1416 NULL,
1417 NULL,
1418 0,
1419 sizeof(BCB),
1420 TAG_BCB,
1421 20);
1422 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1423 NULL,
1424 NULL,
1425 0,
1426 sizeof(CACHE_SEGMENT),
1427 TAG_CSEG,
1428 20);
1429
1430 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1431
1432 CcInitCacheZeroPage();
1433
1434 }
1435
1436 /* EOF */
1437
1438
1439
1440
1441
1442
1443
1444