dc12c7fd134ce3780978670a0b8162a2579eb928
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
/*
 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
 * within the kernel address space and allocates/deallocates space from this
 * block via a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping
 * region must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
 */
50 //#define CACHE_BITMAP
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
73 #if DBG
74 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
75 {
76 ++cs->ReferenceCount;
77 if ( cs->Bcb->Trace )
78 {
79 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
80 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
81 }
82 }
83 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
84 {
85 --cs->ReferenceCount;
86 if ( cs->Bcb->Trace )
87 {
88 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
89 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
90 }
91 }
92 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
93 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
94 #else
95 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
96 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
97 #endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
105 VOID
106 NTAPI
107 CcRosTraceCacheMap (
108 PBCB Bcb,
109 BOOLEAN Trace )
110 {
111 #if DBG
112 KIRQL oldirql;
113 PLIST_ENTRY current_entry;
114 PCACHE_SEGMENT current;
115
116 if ( !Bcb )
117 return;
118
119 Bcb->Trace = Trace;
120
121 if ( Trace )
122 {
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
124
125 KeAcquireGuardedMutex(&ViewLock);
126 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
127
128 current_entry = Bcb->BcbSegmentListHead.Flink;
129 while (current_entry != &Bcb->BcbSegmentListHead)
130 {
131 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
132 current_entry = current_entry->Flink;
133
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
135 current, current->ReferenceCount, current->Dirty, current->PageOut );
136 }
137 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
138 KeReleaseGuardedMutex(&ViewLock);
139 }
140 else
141 {
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
143 }
144
145 #else
146 Bcb = Bcb;
147 Trace = Trace;
148 #endif
149 }
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment (
154 PCACHE_SEGMENT CacheSegment)
155 {
156 NTSTATUS Status;
157 KIRQL oldIrql;
158
159 Status = WriteCacheSegment(CacheSegment);
160 if (NT_SUCCESS(Status))
161 {
162 KeAcquireGuardedMutex(&ViewLock);
163 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
164
165 CacheSegment->Dirty = FALSE;
166 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
167 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
168 CcRosCacheSegmentDecRefCount(CacheSegment);
169
170 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
171 KeReleaseGuardedMutex(&ViewLock);
172 }
173
174 return(Status);
175 }
176
177 NTSTATUS
178 NTAPI
179 CcRosFlushDirtyPages (
180 ULONG Target,
181 PULONG Count,
182 BOOLEAN Wait)
183 {
184 PLIST_ENTRY current_entry;
185 PCACHE_SEGMENT current;
186 ULONG PagesPerSegment;
187 BOOLEAN Locked;
188 NTSTATUS Status;
189 LARGE_INTEGER ZeroTimeout;
190
191 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
192
193 (*Count) = 0;
194 ZeroTimeout.QuadPart = 0;
195
196 KeEnterCriticalRegion();
197 KeAcquireGuardedMutex(&ViewLock);
198
199 current_entry = DirtySegmentListHead.Flink;
200 if (current_entry == &DirtySegmentListHead)
201 {
202 DPRINT("No Dirty pages\n");
203 }
204
205 while ((current_entry != &DirtySegmentListHead) && (Target > 0))
206 {
207 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
208 DirtySegmentListEntry);
209 current_entry = current_entry->Flink;
210
211 CcRosCacheSegmentIncRefCount(current);
212
213 Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
214 current->Bcb->LazyWriteContext, Wait);
215 if (!Locked)
216 {
217 CcRosCacheSegmentDecRefCount(current);
218 continue;
219 }
220
221 Status = KeWaitForSingleObject(&current->Mutex,
222 Executive,
223 KernelMode,
224 FALSE,
225 Wait ? NULL : &ZeroTimeout);
226 if (Status != STATUS_SUCCESS)
227 {
228 current->Bcb->Callbacks->ReleaseFromLazyWrite(
229 current->Bcb->LazyWriteContext);
230 CcRosCacheSegmentDecRefCount(current);
231 continue;
232 }
233
234 ASSERT(current->Dirty);
235
236 /* One reference is added above */
237 if (current->ReferenceCount > 2)
238 {
239 KeReleaseMutex(&current->Mutex, 0);
240 current->Bcb->Callbacks->ReleaseFromLazyWrite(
241 current->Bcb->LazyWriteContext);
242 CcRosCacheSegmentDecRefCount(current);
243 continue;
244 }
245
246 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
247
248 KeReleaseGuardedMutex(&ViewLock);
249
250 Status = CcRosFlushCacheSegment(current);
251
252 KeReleaseMutex(&current->Mutex, 0);
253 current->Bcb->Callbacks->ReleaseFromLazyWrite(
254 current->Bcb->LazyWriteContext);
255
256 KeAcquireGuardedMutex(&ViewLock);
257 CcRosCacheSegmentDecRefCount(current);
258
259 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
260 {
261 DPRINT1("CC: Failed to flush cache segment.\n");
262 }
263 else
264 {
265 (*Count) += PagesPerSegment;
266 Target -= PagesPerSegment;
267 }
268
269 current_entry = DirtySegmentListHead.Flink;
270 }
271
272 KeReleaseGuardedMutex(&ViewLock);
273 KeLeaveCriticalRegion();
274
275 DPRINT("CcRosFlushDirtyPages() finished\n");
276 return(STATUS_SUCCESS);
277 }
278
279 NTSTATUS
280 CcRosTrimCache (
281 ULONG Target,
282 ULONG Priority,
283 PULONG NrFreed)
284 /*
285 * FUNCTION: Try to free some memory from the file cache.
286 * ARGUMENTS:
287 * Target - The number of pages to be freed.
288 * Priority - The priority of free (currently unused).
289 * NrFreed - Points to a variable where the number of pages
290 * actually freed is returned.
291 */
292 {
293 PLIST_ENTRY current_entry;
294 PCACHE_SEGMENT current;
295 ULONG PagesPerSegment;
296 ULONG PagesFreed;
297 KIRQL oldIrql;
298 LIST_ENTRY FreeList;
299 PFN_NUMBER Page;
300 ULONG i;
301 BOOLEAN FlushedPages = FALSE;
302
303 DPRINT("CcRosTrimCache(Target %d)\n", Target);
304
305 InitializeListHead(&FreeList);
306
307 *NrFreed = 0;
308
309 retry:
310 KeAcquireGuardedMutex(&ViewLock);
311
312 current_entry = CacheSegmentLRUListHead.Flink;
313 while (current_entry != &CacheSegmentLRUListHead)
314 {
315 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
316 CacheSegmentLRUListEntry);
317 current_entry = current_entry->Flink;
318
319 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
320
321 /* Reference the cache segment */
322 CcRosCacheSegmentIncRefCount(current);
323
324 /* Check if it's mapped and not dirty */
325 if (current->MappedCount > 0 && !current->Dirty)
326 {
327 /* We have to break these locks because Cc sucks */
328 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
329 KeReleaseGuardedMutex(&ViewLock);
330
331 /* Page out the segment */
332 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
333 {
334 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
335
336 MmPageOutPhysicalAddress(Page);
337 }
338
339 /* Reacquire the locks */
340 KeAcquireGuardedMutex(&ViewLock);
341 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
342 }
343
344 /* Dereference the cache segment */
345 CcRosCacheSegmentDecRefCount(current);
346
347 /* Check if we can free this entry now */
348 if (current->ReferenceCount == 0)
349 {
350 ASSERT(!current->Dirty);
351 ASSERT(!current->MappedCount);
352
353 RemoveEntryList(&current->BcbSegmentListEntry);
354 RemoveEntryList(&current->CacheSegmentListEntry);
355 RemoveEntryList(&current->CacheSegmentLRUListEntry);
356 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
357
358 /* Calculate how many pages we freed for Mm */
359 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
360 PagesFreed = min(PagesPerSegment, Target);
361 Target -= PagesFreed;
362 (*NrFreed) += PagesFreed;
363 }
364
365 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
366 }
367
368 KeReleaseGuardedMutex(&ViewLock);
369
370 /* Try flushing pages if we haven't met our target */
371 if ((Target > 0) && !FlushedPages)
372 {
373 /* Flush dirty pages to disk */
374 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
375 FlushedPages = TRUE;
376
377 /* We can only swap as many pages as we flushed */
378 if (PagesFreed < Target) Target = PagesFreed;
379
380 /* Check if we flushed anything */
381 if (PagesFreed != 0)
382 {
383 /* Try again after flushing dirty pages */
384 DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed);
385 goto retry;
386 }
387 }
388
389 while (!IsListEmpty(&FreeList))
390 {
391 current_entry = RemoveHeadList(&FreeList);
392 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
393 BcbSegmentListEntry);
394 CcRosInternalFreeCacheSegment(current);
395 }
396
397 DPRINT("Evicted %d cache pages\n", (*NrFreed));
398
399 return(STATUS_SUCCESS);
400 }
401
402 NTSTATUS
403 NTAPI
404 CcRosReleaseCacheSegment (
405 PBCB Bcb,
406 PCACHE_SEGMENT CacheSeg,
407 BOOLEAN Valid,
408 BOOLEAN Dirty,
409 BOOLEAN Mapped)
410 {
411 BOOLEAN WasDirty;
412 KIRQL oldIrql;
413
414 ASSERT(Bcb);
415
416 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
417 Bcb, CacheSeg, Valid);
418
419 KeAcquireGuardedMutex(&ViewLock);
420 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
421
422 CacheSeg->Valid = Valid;
423
424 WasDirty = CacheSeg->Dirty;
425 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
426
427 if (!WasDirty && CacheSeg->Dirty)
428 {
429 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
430 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
431 }
432
433 if (Mapped)
434 {
435 CacheSeg->MappedCount++;
436 }
437 CcRosCacheSegmentDecRefCount(CacheSeg);
438 if (Mapped && (CacheSeg->MappedCount == 1))
439 {
440 CcRosCacheSegmentIncRefCount(CacheSeg);
441 }
442 if (!WasDirty && CacheSeg->Dirty)
443 {
444 CcRosCacheSegmentIncRefCount(CacheSeg);
445 }
446
447 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
448 KeReleaseGuardedMutex(&ViewLock);
449 KeReleaseMutex(&CacheSeg->Mutex, 0);
450
451 return(STATUS_SUCCESS);
452 }
453
454 /* Returns with Cache Segment Lock Held! */
455 PCACHE_SEGMENT
456 NTAPI
457 CcRosLookupCacheSegment (
458 PBCB Bcb,
459 ULONG FileOffset)
460 {
461 PLIST_ENTRY current_entry;
462 PCACHE_SEGMENT current;
463 KIRQL oldIrql;
464
465 ASSERT(Bcb);
466
467 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
468
469 KeAcquireGuardedMutex(&ViewLock);
470 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
471
472 current_entry = Bcb->BcbSegmentListHead.Flink;
473 while (current_entry != &Bcb->BcbSegmentListHead)
474 {
475 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
476 BcbSegmentListEntry);
477 if ((current->FileOffset <= FileOffset) &&
478 ((current->FileOffset + Bcb->CacheSegmentSize) > FileOffset))
479 {
480 CcRosCacheSegmentIncRefCount(current);
481 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
482 KeReleaseGuardedMutex(&ViewLock);
483 KeWaitForSingleObject(&current->Mutex,
484 Executive,
485 KernelMode,
486 FALSE,
487 NULL);
488 return(current);
489 }
490 current_entry = current_entry->Flink;
491 }
492
493 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
494 KeReleaseGuardedMutex(&ViewLock);
495
496 return(NULL);
497 }
498
499 NTSTATUS
500 NTAPI
501 CcRosMarkDirtyCacheSegment (
502 PBCB Bcb,
503 ULONG FileOffset)
504 {
505 PCACHE_SEGMENT CacheSeg;
506 KIRQL oldIrql;
507
508 ASSERT(Bcb);
509
510 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
511
512 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
513 if (CacheSeg == NULL)
514 {
515 KeBugCheck(CACHE_MANAGER);
516 }
517
518 KeAcquireGuardedMutex(&ViewLock);
519 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
520
521 if (!CacheSeg->Dirty)
522 {
523 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
524 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
525 }
526 else
527 {
528 CcRosCacheSegmentDecRefCount(CacheSeg);
529 }
530
531 /* Move to the tail of the LRU list */
532 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
533 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
534
535 CacheSeg->Dirty = TRUE;
536
537 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
538 KeReleaseGuardedMutex(&ViewLock);
539 KeReleaseMutex(&CacheSeg->Mutex, 0);
540
541 return(STATUS_SUCCESS);
542 }
543
544 NTSTATUS
545 NTAPI
546 CcRosUnmapCacheSegment (
547 PBCB Bcb,
548 ULONG FileOffset,
549 BOOLEAN NowDirty)
550 {
551 PCACHE_SEGMENT CacheSeg;
552 BOOLEAN WasDirty;
553 KIRQL oldIrql;
554
555 ASSERT(Bcb);
556
557 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
558 Bcb, FileOffset, NowDirty);
559
560 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
561 if (CacheSeg == NULL)
562 {
563 return(STATUS_UNSUCCESSFUL);
564 }
565
566 KeAcquireGuardedMutex(&ViewLock);
567 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
568
569 WasDirty = CacheSeg->Dirty;
570 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
571
572 CacheSeg->MappedCount--;
573
574 if (!WasDirty && NowDirty)
575 {
576 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
577 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
578 }
579
580 CcRosCacheSegmentDecRefCount(CacheSeg);
581 if (!WasDirty && NowDirty)
582 {
583 CcRosCacheSegmentIncRefCount(CacheSeg);
584 }
585 if (CacheSeg->MappedCount == 0)
586 {
587 CcRosCacheSegmentDecRefCount(CacheSeg);
588 }
589
590 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
591 KeReleaseGuardedMutex(&ViewLock);
592 KeReleaseMutex(&CacheSeg->Mutex, 0);
593
594 return(STATUS_SUCCESS);
595 }
596
597 static
598 NTSTATUS
599 CcRosCreateCacheSegment (
600 PBCB Bcb,
601 ULONG FileOffset,
602 PCACHE_SEGMENT* CacheSeg)
603 {
604 PCACHE_SEGMENT current;
605 PCACHE_SEGMENT previous;
606 PLIST_ENTRY current_entry;
607 NTSTATUS Status;
608 KIRQL oldIrql;
609 #ifdef CACHE_BITMAP
610 ULONG StartingOffset;
611 #endif
612 PHYSICAL_ADDRESS BoundaryAddressMultiple;
613
614 ASSERT(Bcb);
615
616 DPRINT("CcRosCreateCacheSegment()\n");
617
618 BoundaryAddressMultiple.QuadPart = 0;
619 if (FileOffset >= Bcb->FileSize.u.LowPart)
620 {
621 CacheSeg = NULL;
622 return STATUS_INVALID_PARAMETER;
623 }
624
625 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
626 current->Valid = FALSE;
627 current->Dirty = FALSE;
628 current->PageOut = FALSE;
629 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
630 current->Bcb = Bcb;
631 #if DBG
632 if ( Bcb->Trace )
633 {
634 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
635 }
636 #endif
637 current->MappedCount = 0;
638 current->DirtySegmentListEntry.Flink = NULL;
639 current->DirtySegmentListEntry.Blink = NULL;
640 current->ReferenceCount = 1;
641 KeInitializeMutex(&current->Mutex, 0);
642 KeWaitForSingleObject(&current->Mutex,
643 Executive,
644 KernelMode,
645 FALSE,
646 NULL);
647 KeAcquireGuardedMutex(&ViewLock);
648
649 *CacheSeg = current;
650 /* There is window between the call to CcRosLookupCacheSegment
651 * and CcRosCreateCacheSegment. We must check if a segment on
652 * the fileoffset exist. If there exist a segment, we release
653 * our new created segment and return the existing one.
654 */
655 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
656 current_entry = Bcb->BcbSegmentListHead.Flink;
657 previous = NULL;
658 while (current_entry != &Bcb->BcbSegmentListHead)
659 {
660 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
661 BcbSegmentListEntry);
662 if (current->FileOffset <= FileOffset &&
663 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
664 {
665 CcRosCacheSegmentIncRefCount(current);
666 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
667 #if DBG
668 if ( Bcb->Trace )
669 {
670 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
671 Bcb,
672 (*CacheSeg),
673 current );
674 }
675 #endif
676 KeReleaseMutex(&(*CacheSeg)->Mutex, 0);
677 KeReleaseGuardedMutex(&ViewLock);
678 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
679 *CacheSeg = current;
680 KeWaitForSingleObject(&current->Mutex,
681 Executive,
682 KernelMode,
683 FALSE,
684 NULL);
685 return STATUS_SUCCESS;
686 }
687 if (current->FileOffset < FileOffset)
688 {
689 if (previous == NULL)
690 {
691 previous = current;
692 }
693 else
694 {
695 if (previous->FileOffset < current->FileOffset)
696 {
697 previous = current;
698 }
699 }
700 }
701 current_entry = current_entry->Flink;
702 }
703 /* There was no existing segment. */
704 current = *CacheSeg;
705 if (previous)
706 {
707 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
708 }
709 else
710 {
711 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
712 }
713 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
714 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
715 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
716 KeReleaseGuardedMutex(&ViewLock);
717 #ifdef CACHE_BITMAP
718 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
719
720 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
721
722 if (StartingOffset == 0xffffffff)
723 {
724 DPRINT1("Out of CacheSeg mapping space\n");
725 KeBugCheck(CACHE_MANAGER);
726 }
727
728 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
729
730 if (CiCacheSegMappingRegionHint == StartingOffset)
731 {
732 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
733 }
734
735 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
736 #else
737 MmLockAddressSpace(MmGetKernelAddressSpace());
738 current->BaseAddress = NULL;
739 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
740 0, // nothing checks for cache_segment mareas, so set to 0
741 &current->BaseAddress,
742 Bcb->CacheSegmentSize,
743 PAGE_READWRITE,
744 (PMEMORY_AREA*)&current->MemoryArea,
745 FALSE,
746 0,
747 BoundaryAddressMultiple);
748 MmUnlockAddressSpace(MmGetKernelAddressSpace());
749 if (!NT_SUCCESS(Status))
750 {
751 KeBugCheck(CACHE_MANAGER);
752 }
753 #endif
754
755 /* Create a virtual mapping for this memory area */
756 MI_SET_USAGE(MI_USAGE_CACHE);
757 #if MI_TRACE_PFNS
758 PWCHAR pos = NULL;
759 ULONG len = 0;
760 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
761 {
762 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
763 len = wcslen(pos) * sizeof(WCHAR);
764 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
765 }
766 #endif
767
768 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
769 MC_CACHE, PAGE_READWRITE);
770
771 return(STATUS_SUCCESS);
772 }
773
774 NTSTATUS
775 NTAPI
776 CcRosGetCacheSegmentChain (
777 PBCB Bcb,
778 ULONG FileOffset,
779 ULONG Length,
780 PCACHE_SEGMENT* CacheSeg)
781 {
782 PCACHE_SEGMENT current;
783 ULONG i;
784 PCACHE_SEGMENT* CacheSegList;
785 PCACHE_SEGMENT Previous = NULL;
786
787 ASSERT(Bcb);
788
789 DPRINT("CcRosGetCacheSegmentChain()\n");
790
791 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
792
793 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
794 (Length / Bcb->CacheSegmentSize));
795
796 /*
797 * Look for a cache segment already mapping the same data.
798 */
799 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
800 {
801 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
802 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
803 if (current != NULL)
804 {
805 KeAcquireGuardedMutex(&ViewLock);
806
807 /* Move to tail of LRU list */
808 RemoveEntryList(&current->CacheSegmentLRUListEntry);
809 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
810
811 KeReleaseGuardedMutex(&ViewLock);
812
813 CacheSegList[i] = current;
814 }
815 else
816 {
817 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
818 CacheSegList[i] = current;
819 }
820 }
821
822 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
823 {
824 if (i == 0)
825 {
826 *CacheSeg = CacheSegList[i];
827 Previous = CacheSegList[i];
828 }
829 else
830 {
831 Previous->NextInChain = CacheSegList[i];
832 Previous = CacheSegList[i];
833 }
834 }
835 ASSERT(Previous);
836 Previous->NextInChain = NULL;
837
838 return(STATUS_SUCCESS);
839 }
840
841 NTSTATUS
842 NTAPI
843 CcRosGetCacheSegment (
844 PBCB Bcb,
845 ULONG FileOffset,
846 PULONG BaseOffset,
847 PVOID* BaseAddress,
848 PBOOLEAN UptoDate,
849 PCACHE_SEGMENT* CacheSeg)
850 {
851 PCACHE_SEGMENT current;
852 NTSTATUS Status;
853
854 ASSERT(Bcb);
855
856 DPRINT("CcRosGetCacheSegment()\n");
857
858 /*
859 * Look for a cache segment already mapping the same data.
860 */
861 current = CcRosLookupCacheSegment(Bcb, FileOffset);
862 if (current == NULL)
863 {
864 /*
865 * Otherwise create a new segment.
866 */
867 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
868 if (!NT_SUCCESS(Status))
869 {
870 return Status;
871 }
872 }
873
874 KeAcquireGuardedMutex(&ViewLock);
875
876 /* Move to the tail of the LRU list */
877 RemoveEntryList(&current->CacheSegmentLRUListEntry);
878 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
879
880 KeReleaseGuardedMutex(&ViewLock);
881
882 /*
883 * Return information about the segment to the caller.
884 */
885 *UptoDate = current->Valid;
886 *BaseAddress = current->BaseAddress;
887 DPRINT("*BaseAddress %p\n", *BaseAddress);
888 *CacheSeg = current;
889 *BaseOffset = current->FileOffset;
890 return(STATUS_SUCCESS);
891 }
892
893 NTSTATUS
894 NTAPI
895 CcRosRequestCacheSegment (
896 PBCB Bcb,
897 ULONG FileOffset,
898 PVOID* BaseAddress,
899 PBOOLEAN UptoDate,
900 PCACHE_SEGMENT* CacheSeg)
901 /*
902 * FUNCTION: Request a page mapping for a BCB
903 */
904 {
905 ULONG BaseOffset;
906
907 ASSERT(Bcb);
908
909 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
910 {
911 DPRINT1("Bad fileoffset %x should be multiple of %x",
912 FileOffset, Bcb->CacheSegmentSize);
913 KeBugCheck(CACHE_MANAGER);
914 }
915
916 return(CcRosGetCacheSegment(Bcb,
917 FileOffset,
918 &BaseOffset,
919 BaseAddress,
920 UptoDate,
921 CacheSeg));
922 }
923 #ifdef CACHE_BITMAP
924 #else
925 static
926 VOID
927 CcFreeCachePage (
928 PVOID Context,
929 MEMORY_AREA* MemoryArea,
930 PVOID Address,
931 PFN_NUMBER Page,
932 SWAPENTRY SwapEntry,
933 BOOLEAN Dirty)
934 {
935 ASSERT(SwapEntry == 0);
936 if (Page != 0)
937 {
938 ASSERT(MmGetReferenceCountPage(Page) == 1);
939 MmReleasePageMemoryConsumer(MC_CACHE, Page);
940 }
941 }
942 #endif
943 NTSTATUS
944 CcRosInternalFreeCacheSegment (
945 PCACHE_SEGMENT CacheSeg)
946 /*
947 * FUNCTION: Releases a cache segment associated with a BCB
948 */
949 {
950 #ifdef CACHE_BITMAP
951 ULONG i;
952 ULONG RegionSize;
953 ULONG Base;
954 PFN_NUMBER Page;
955 KIRQL oldIrql;
956 #endif
957 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
958 #if DBG
959 if ( CacheSeg->Bcb->Trace )
960 {
961 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
962 }
963 #endif
964 #ifdef CACHE_BITMAP
965 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
966
967 /* Unmap all the pages. */
968 for (i = 0; i < RegionSize; i++)
969 {
970 MmDeleteVirtualMapping(NULL,
971 CacheSeg->BaseAddress + (i * PAGE_SIZE),
972 FALSE,
973 NULL,
974 &Page);
975 MmReleasePageMemoryConsumer(MC_CACHE, Page);
976 }
977
978 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
979 /* Deallocate all the pages used. */
980 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
981
982 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
983
984 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
985
986 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
987 #else
988 MmLockAddressSpace(MmGetKernelAddressSpace());
989 MmFreeMemoryArea(MmGetKernelAddressSpace(),
990 CacheSeg->MemoryArea,
991 CcFreeCachePage,
992 NULL);
993 MmUnlockAddressSpace(MmGetKernelAddressSpace());
994 #endif
995 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
996 return(STATUS_SUCCESS);
997 }
998
999 NTSTATUS
1000 NTAPI
1001 CcRosFreeCacheSegment (
1002 PBCB Bcb,
1003 PCACHE_SEGMENT CacheSeg)
1004 {
1005 NTSTATUS Status;
1006 KIRQL oldIrql;
1007
1008 ASSERT(Bcb);
1009
1010 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
1011 Bcb, CacheSeg);
1012
1013 KeAcquireGuardedMutex(&ViewLock);
1014 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1015 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
1016 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
1017 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
1018 if (CacheSeg->Dirty)
1019 {
1020 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
1021 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1022
1023 }
1024 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1025 KeReleaseGuardedMutex(&ViewLock);
1026
1027 Status = CcRosInternalFreeCacheSegment(CacheSeg);
1028 return(Status);
1029 }
1030
1031 /*
1032 * @implemented
1033 */
1034 VOID
1035 NTAPI
1036 CcFlushCache (
1037 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1038 IN PLARGE_INTEGER FileOffset OPTIONAL,
1039 IN ULONG Length,
1040 OUT PIO_STATUS_BLOCK IoStatus)
1041 {
1042 PBCB Bcb;
1043 LARGE_INTEGER Offset;
1044 PCACHE_SEGMENT current;
1045 NTSTATUS Status;
1046 KIRQL oldIrql;
1047
1048 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
1049 SectionObjectPointers, FileOffset, Length, IoStatus);
1050
1051 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1052 {
1053 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1054 ASSERT(Bcb);
1055 if (FileOffset)
1056 {
1057 Offset = *FileOffset;
1058 }
1059 else
1060 {
1061 Offset.QuadPart = (LONGLONG)0;
1062 Length = Bcb->FileSize.u.LowPart;
1063 }
1064
1065 if (IoStatus)
1066 {
1067 IoStatus->Status = STATUS_SUCCESS;
1068 IoStatus->Information = 0;
1069 }
1070
1071 while (Length > 0)
1072 {
1073 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
1074 if (current != NULL)
1075 {
1076 if (current->Dirty)
1077 {
1078 Status = CcRosFlushCacheSegment(current);
1079 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1080 {
1081 IoStatus->Status = Status;
1082 }
1083 }
1084 KeReleaseMutex(&current->Mutex, 0);
1085
1086 KeAcquireGuardedMutex(&ViewLock);
1087 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1088 CcRosCacheSegmentDecRefCount(current);
1089 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1090 KeReleaseGuardedMutex(&ViewLock);
1091 }
1092
1093 Offset.QuadPart += Bcb->CacheSegmentSize;
1094 if (Length > Bcb->CacheSegmentSize)
1095 {
1096 Length -= Bcb->CacheSegmentSize;
1097 }
1098 else
1099 {
1100 Length = 0;
1101 }
1102 }
1103 }
1104 else
1105 {
1106 if (IoStatus)
1107 {
1108 IoStatus->Status = STATUS_INVALID_PARAMETER;
1109 }
1110 }
1111 }
1112
1113 NTSTATUS
1114 NTAPI
1115 CcRosDeleteFileCache (
1116 PFILE_OBJECT FileObject,
1117 PBCB Bcb)
1118 /*
1119 * FUNCTION: Releases the BCB associated with a file object
1120 */
1121 {
1122 PLIST_ENTRY current_entry;
1123 PCACHE_SEGMENT current;
1124 LIST_ENTRY FreeList;
1125 KIRQL oldIrql;
1126
1127 ASSERT(Bcb);
1128
1129 Bcb->RefCount++;
1130 KeReleaseGuardedMutex(&ViewLock);
1131
1132 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1133
1134 KeAcquireGuardedMutex(&ViewLock);
1135 Bcb->RefCount--;
1136 if (Bcb->RefCount == 0)
1137 {
1138 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1139 {
1140 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1141 Bcb->BcbRemoveListEntry.Flink = NULL;
1142 }
1143
1144 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1145
1146 /*
1147 * Release all cache segments.
1148 */
1149 InitializeListHead(&FreeList);
1150 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1151 current_entry = Bcb->BcbSegmentListHead.Flink;
1152 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1153 {
1154 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1155 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1156 RemoveEntryList(&current->CacheSegmentListEntry);
1157 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1158 if (current->Dirty)
1159 {
1160 RemoveEntryList(&current->DirtySegmentListEntry);
1161 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1162 DPRINT1("Freeing dirty segment\n");
1163 }
1164 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1165 }
1166 #if DBG
1167 Bcb->Trace = FALSE;
1168 #endif
1169 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1170
1171 KeReleaseGuardedMutex(&ViewLock);
1172 ObDereferenceObject (Bcb->FileObject);
1173
1174 while (!IsListEmpty(&FreeList))
1175 {
1176 current_entry = RemoveTailList(&FreeList);
1177 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1178 CcRosInternalFreeCacheSegment(current);
1179 }
1180 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1181 KeAcquireGuardedMutex(&ViewLock);
1182 }
1183 return(STATUS_SUCCESS);
1184 }
1185
1186 VOID
1187 NTAPI
1188 CcRosReferenceCache (
1189 PFILE_OBJECT FileObject)
1190 {
1191 PBCB Bcb;
1192 KeAcquireGuardedMutex(&ViewLock);
1193 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1194 ASSERT(Bcb);
1195 if (Bcb->RefCount == 0)
1196 {
1197 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1198 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1199 Bcb->BcbRemoveListEntry.Flink = NULL;
1200
1201 }
1202 else
1203 {
1204 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1205 }
1206 Bcb->RefCount++;
1207 KeReleaseGuardedMutex(&ViewLock);
1208 }
1209
1210 VOID
1211 NTAPI
1212 CcRosSetRemoveOnClose (
1213 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1214 {
1215 PBCB Bcb;
1216 DPRINT("CcRosSetRemoveOnClose()\n");
1217 KeAcquireGuardedMutex(&ViewLock);
1218 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1219 if (Bcb)
1220 {
1221 Bcb->RemoveOnClose = TRUE;
1222 if (Bcb->RefCount == 0)
1223 {
1224 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1225 }
1226 }
1227 KeReleaseGuardedMutex(&ViewLock);
1228 }
1229
1230
1231 VOID
1232 NTAPI
1233 CcRosDereferenceCache (
1234 PFILE_OBJECT FileObject)
1235 {
1236 PBCB Bcb;
1237 KeAcquireGuardedMutex(&ViewLock);
1238 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1239 ASSERT(Bcb);
1240 if (Bcb->RefCount > 0)
1241 {
1242 Bcb->RefCount--;
1243 if (Bcb->RefCount == 0)
1244 {
1245 MmFreeSectionSegments(Bcb->FileObject);
1246 CcRosDeleteFileCache(FileObject, Bcb);
1247 }
1248 }
1249 KeReleaseGuardedMutex(&ViewLock);
1250 }
1251
1252 NTSTATUS
1253 NTAPI
1254 CcRosReleaseFileCache (
1255 PFILE_OBJECT FileObject)
1256 /*
1257 * FUNCTION: Called by the file system when a handle to a file object
1258 * has been closed.
1259 */
1260 {
1261 PBCB Bcb;
1262
1263 KeAcquireGuardedMutex(&ViewLock);
1264
1265 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1266 {
1267 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1268 if (FileObject->PrivateCacheMap != NULL)
1269 {
1270 FileObject->PrivateCacheMap = NULL;
1271 if (Bcb->RefCount > 0)
1272 {
1273 Bcb->RefCount--;
1274 if (Bcb->RefCount == 0)
1275 {
1276 MmFreeSectionSegments(Bcb->FileObject);
1277 CcRosDeleteFileCache(FileObject, Bcb);
1278 }
1279 }
1280 }
1281 }
1282 KeReleaseGuardedMutex(&ViewLock);
1283 return(STATUS_SUCCESS);
1284 }
1285
1286 NTSTATUS
1287 NTAPI
1288 CcTryToInitializeFileCache (
1289 PFILE_OBJECT FileObject)
1290 {
1291 PBCB Bcb;
1292 NTSTATUS Status;
1293
1294 KeAcquireGuardedMutex(&ViewLock);
1295
1296 ASSERT(FileObject->SectionObjectPointer);
1297 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1298 if (Bcb == NULL)
1299 {
1300 Status = STATUS_UNSUCCESSFUL;
1301 }
1302 else
1303 {
1304 if (FileObject->PrivateCacheMap == NULL)
1305 {
1306 FileObject->PrivateCacheMap = Bcb;
1307 Bcb->RefCount++;
1308 }
1309 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1310 {
1311 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1312 Bcb->BcbRemoveListEntry.Flink = NULL;
1313 }
1314 Status = STATUS_SUCCESS;
1315 }
1316 KeReleaseGuardedMutex(&ViewLock);
1317
1318 return Status;
1319 }
1320
1321
1322 NTSTATUS
1323 NTAPI
1324 CcRosInitializeFileCache (
1325 PFILE_OBJECT FileObject,
1326 ULONG CacheSegmentSize,
1327 PCACHE_MANAGER_CALLBACKS CallBacks,
1328 PVOID LazyWriterContext)
1329 /*
1330 * FUNCTION: Initializes a BCB for a file object
1331 */
1332 {
1333 PBCB Bcb;
1334
1335 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1336 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1337 FileObject, Bcb, CacheSegmentSize);
1338
1339 KeAcquireGuardedMutex(&ViewLock);
1340 if (Bcb == NULL)
1341 {
1342 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1343 if (Bcb == NULL)
1344 {
1345 KeReleaseGuardedMutex(&ViewLock);
1346 return(STATUS_UNSUCCESSFUL);
1347 }
1348 memset(Bcb, 0, sizeof(BCB));
1349 ObReferenceObjectByPointer(FileObject,
1350 FILE_ALL_ACCESS,
1351 NULL,
1352 KernelMode);
1353 Bcb->FileObject = FileObject;
1354 Bcb->CacheSegmentSize = CacheSegmentSize;
1355 Bcb->Callbacks = CallBacks;
1356 Bcb->LazyWriteContext = LazyWriterContext;
1357 if (FileObject->FsContext)
1358 {
1359 Bcb->AllocationSize =
1360 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1361 Bcb->FileSize =
1362 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1363 }
1364 KeInitializeSpinLock(&Bcb->BcbLock);
1365 InitializeListHead(&Bcb->BcbSegmentListHead);
1366 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1367 }
1368 if (FileObject->PrivateCacheMap == NULL)
1369 {
1370 FileObject->PrivateCacheMap = Bcb;
1371 Bcb->RefCount++;
1372 }
1373 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1374 {
1375 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1376 Bcb->BcbRemoveListEntry.Flink = NULL;
1377 }
1378 KeReleaseGuardedMutex(&ViewLock);
1379
1380 return(STATUS_SUCCESS);
1381 }
1382
1383 /*
1384 * @implemented
1385 */
1386 PFILE_OBJECT
1387 NTAPI
1388 CcGetFileObjectFromSectionPtrs (
1389 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1390 {
1391 PBCB Bcb;
1392 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1393 {
1394 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1395 ASSERT(Bcb);
1396 return Bcb->FileObject;
1397 }
1398 return NULL;
1399 }
1400
1401 VOID
1402 INIT_FUNCTION
1403 NTAPI
1404 CcInitView (
1405 VOID)
1406 {
1407 #ifdef CACHE_BITMAP
1408 PMEMORY_AREA marea;
1409 PVOID Buffer;
1410 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1411 #endif
1412
1413 DPRINT("CcInitView()\n");
1414 #ifdef CACHE_BITMAP
1415 BoundaryAddressMultiple.QuadPart = 0;
1416 CiCacheSegMappingRegionHint = 0;
1417 CiCacheSegMappingRegionBase = NULL;
1418
1419 MmLockAddressSpace(MmGetKernelAddressSpace());
1420
1421 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1422 MEMORY_AREA_CACHE_SEGMENT,
1423 &CiCacheSegMappingRegionBase,
1424 CI_CACHESEG_MAPPING_REGION_SIZE,
1425 PAGE_READWRITE,
1426 &marea,
1427 FALSE,
1428 0,
1429 BoundaryAddressMultiple);
1430 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1431 if (!NT_SUCCESS(Status))
1432 {
1433 KeBugCheck(CACHE_MANAGER);
1434 }
1435
1436 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1437 if (!Buffer)
1438 {
1439 KeBugCheck(CACHE_MANAGER);
1440 }
1441
1442 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1443 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1444
1445 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1446 #endif
1447 InitializeListHead(&CacheSegmentListHead);
1448 InitializeListHead(&DirtySegmentListHead);
1449 InitializeListHead(&CacheSegmentLRUListHead);
1450 InitializeListHead(&ClosedListHead);
1451 KeInitializeGuardedMutex(&ViewLock);
1452 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1453 NULL,
1454 NULL,
1455 0,
1456 sizeof(INTERNAL_BCB),
1457 TAG_IBCB,
1458 20);
1459 ExInitializeNPagedLookasideList (&BcbLookasideList,
1460 NULL,
1461 NULL,
1462 0,
1463 sizeof(BCB),
1464 TAG_BCB,
1465 20);
1466 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1467 NULL,
1468 NULL,
1469 0,
1470 sizeof(CACHE_SEGMENT),
1471 TAG_CSEG,
1472 20);
1473
1474 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1475
1476 CcInitCacheZeroPage();
1477
1478 }
1479
1480 /* EOF */
1481
1482
1483
1484
1485
1486
1487
1488