/* reactos/ntoskrnl/cc/view.c (NTOSKRNL, reactos.git) */
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then so do by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
73 #if DBG
74 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
75 {
76 ++cs->ReferenceCount;
77 if ( cs->Bcb->Trace )
78 {
79 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
80 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
81 }
82 }
83 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
84 {
85 --cs->ReferenceCount;
86 if ( cs->Bcb->Trace )
87 {
88 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
89 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
90 }
91 }
92 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
93 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
94 #else
95 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
96 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
97 #endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
/*
 * Enable or disable debug tracing for one shared cache map (BCB).
 * On enable, dumps every cache segment currently attached to the BCB.
 * Compiled to a no-op outside DBG builds.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if ( !Bcb )
        return;

    Bcb->Trace = Trace;

    if ( Trace )
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );

        /* Lock order used throughout this file: ViewLock first, then the
         * per-BCB spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        /* Walk the BCB's segment list and report each entry's state. */
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
    }

#else
    /* Reference the parameters to silence unused-parameter warnings. */
    Bcb = Bcb;
    Trace = Trace;
#endif
}
150
/*
 * Write one dirty cache segment back to its backing medium.
 * On success the segment is marked clean, removed from the global dirty
 * list, the dirty page accounting is adjusted, and the reference that was
 * held on behalf of the dirty list is released.
 * Caller must hold the segment referenced; returns the write status.
 */
NTSTATUS
NTAPI
CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = WriteCacheSegment(CacheSegment);
    if (NT_SUCCESS(Status))
    {
        /* Lock order: ViewLock before the per-BCB spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);

        CacheSegment->Dirty = FALSE;
        RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
        DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
        /* Drop the reference that the dirty list held on this segment. */
        CcRosCacheSegmentDecRefCount ( CacheSegment );

        KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return(Status);
}
175
/*
 * Flush up to Target pages' worth of dirty cache segments to disk.
 * Count receives the number of pages actually written. When Wait is FALSE
 * both the lazy-write lock acquisition and the segment mutex wait are
 * non-blocking (zero timeout); segments that cannot be locked immediately
 * are skipped. Returns STATUS_SUCCESS unconditionally.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages(ULONG Target, PULONG Count, BOOLEAN Wait)
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Stay non-alertable while juggling the locks below. */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (current_entry != &DirtySegmentListHead && Target > 0)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Pin the segment so it cannot be freed while we work on it. */
        CcRosCacheSegmentIncRefCount(current);

        /* Let the owning filesystem veto/serialize the lazy write. */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        /* Non-blocking when !Wait: a zero timeout just polls the mutex. */
        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);
        /* >1 means someone else also references the segment (our own
         * reference plus at least one more) — leave it for later. */
        if (current->ReferenceCount > 1)
        {
            KeReleaseMutex(&current->Mutex, 0);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;

        /* Drop ViewLock across the actual I/O; the segment mutex still
         * protects the segment itself. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        KeReleaseMutex(&current->Mutex, 0);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosCacheSegmentDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* The list may have changed while ViewLock was dropped, so
         * restart the walk from the head. */
        current_entry = DirtySegmentListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
272
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %d)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list front-to-back (least recently used first). */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            /* NOTE(review): while both locks are dropped other threads may
             * mutate the LRU list; current_entry was captured before the
             * drop and could in principle go stale — TODO confirm. */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the segment */
            for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unhook from every list; park on FreeList via the (now
             * unused) BcbSegmentListEntry for deferred teardown. */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if (Target > 0 && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now that no locks are held, actually tear down the parked segments. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %d cache pages\n", (*NrFreed));

    return(STATUS_SUCCESS);
}
392
/*
 * Release a cache segment previously obtained via CcRosGetCacheSegment /
 * CcRosRequestCacheSegment. Updates the segment's Valid flag, moves it
 * onto the dirty list on a clean->dirty transition, adjusts the mapped
 * count, rebalances the reference count accordingly, and finally drops
 * the segment mutex that the lookup/create path left held.
 */
NTSTATUS
NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
                         PCACHE_SEGMENT CacheSeg,
                         BOOLEAN Valid,
                         BOOLEAN Dirty,
                         BOOLEAN Mapped)
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
           Bcb, CacheSeg, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    CacheSeg->Valid = Valid;

    /* Dirty is sticky: once set it stays set until flushed. */
    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    if (!WasDirty && CacheSeg->Dirty)
    {
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    /* Drop the caller's reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but retain one on behalf of the first mapping... */
    if (Mapped && CacheSeg->MappedCount == 1)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and one on behalf of the dirty list on a clean->dirty edge. */
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    /* Release the per-segment mutex held since lookup/creation. */
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
443
444 /* Returns with Cache Segment Lock Held! */
445 PCACHE_SEGMENT
446 NTAPI
447 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
448 {
449 PLIST_ENTRY current_entry;
450 PCACHE_SEGMENT current;
451 KIRQL oldIrql;
452
453 ASSERT(Bcb);
454
455 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
456
457 KeAcquireGuardedMutex(&ViewLock);
458 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
459
460 current_entry = Bcb->BcbSegmentListHead.Flink;
461 while (current_entry != &Bcb->BcbSegmentListHead)
462 {
463 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
464 BcbSegmentListEntry);
465 if (current->FileOffset <= FileOffset &&
466 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
467 {
468 CcRosCacheSegmentIncRefCount(current);
469 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
470 KeReleaseGuardedMutex(&ViewLock);
471 KeWaitForSingleObject(&current->Mutex,
472 Executive,
473 KernelMode,
474 FALSE,
475 NULL);
476 return(current);
477 }
478 current_entry = current_entry->Flink;
479 }
480
481 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
482 KeReleaseGuardedMutex(&ViewLock);
483
484 return(NULL);
485 }
486
/*
 * Mark the cache segment covering FileOffset as dirty and refresh its
 * LRU position. Bugchecks if no segment covers the offset (callers are
 * expected to have a segment there). The lookup returns the segment
 * referenced with its mutex held; the reference is either kept on behalf
 * of the dirty list (clean->dirty transition) or dropped (already dirty).
 */
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);

    /* Returns referenced and with the segment mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    if (!CacheSeg->Dirty)
    {
        /* Clean->dirty: enqueue and let the dirty list keep the
         * reference taken by the lookup above. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: the dirty list holds its own reference, so
         * drop the one from the lookup. */
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    CacheSeg->Dirty = TRUE;

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
529
/*
 * Drop one mapping of the cache segment covering FileOffset, optionally
 * marking it dirty. Releases the mapping reference when MappedCount
 * reaches zero; on a clean->dirty transition the lookup's reference is
 * retained for the dirty list. Returns STATUS_UNSUCCESSFUL if no segment
 * covers the offset.
 */
NTSTATUS
NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
           Bcb, FileOffset, NowDirty);

    /* Returns referenced and with the segment mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    /* Drop the lookup reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but keep one for the dirty list on a clean->dirty edge... */
    if (!WasDirty && NowDirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and release the mapping's reference once nothing maps it. */
    if (CacheSeg->MappedCount == 0)
    {
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
579
580 static
581 NTSTATUS
582 CcRosCreateCacheSegment(PBCB Bcb,
583 ULONG FileOffset,
584 PCACHE_SEGMENT* CacheSeg)
585 {
586 PCACHE_SEGMENT current;
587 PCACHE_SEGMENT previous;
588 PLIST_ENTRY current_entry;
589 NTSTATUS Status;
590 KIRQL oldIrql;
591 #ifdef CACHE_BITMAP
592 ULONG StartingOffset;
593 #endif
594 PHYSICAL_ADDRESS BoundaryAddressMultiple;
595
596 ASSERT(Bcb);
597
598 DPRINT("CcRosCreateCacheSegment()\n");
599
600 BoundaryAddressMultiple.QuadPart = 0;
601 if (FileOffset >= Bcb->FileSize.u.LowPart)
602 {
603 CacheSeg = NULL;
604 return STATUS_INVALID_PARAMETER;
605 }
606
607 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
608 current->Valid = FALSE;
609 current->Dirty = FALSE;
610 current->PageOut = FALSE;
611 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
612 current->Bcb = Bcb;
613 #if DBG
614 if ( Bcb->Trace )
615 {
616 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
617 }
618 #endif
619 current->MappedCount = 0;
620 current->DirtySegmentListEntry.Flink = NULL;
621 current->DirtySegmentListEntry.Blink = NULL;
622 current->ReferenceCount = 1;
623 KeInitializeMutex(&current->Mutex, 0);
624 KeWaitForSingleObject(&current->Mutex,
625 Executive,
626 KernelMode,
627 FALSE,
628 NULL);
629 KeAcquireGuardedMutex(&ViewLock);
630
631 *CacheSeg = current;
632 /* There is window between the call to CcRosLookupCacheSegment
633 * and CcRosCreateCacheSegment. We must check if a segment on
634 * the fileoffset exist. If there exist a segment, we release
635 * our new created segment and return the existing one.
636 */
637 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
638 current_entry = Bcb->BcbSegmentListHead.Flink;
639 previous = NULL;
640 while (current_entry != &Bcb->BcbSegmentListHead)
641 {
642 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
643 BcbSegmentListEntry);
644 if (current->FileOffset <= FileOffset &&
645 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
646 {
647 CcRosCacheSegmentIncRefCount(current);
648 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
649 #if DBG
650 if ( Bcb->Trace )
651 {
652 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
653 Bcb,
654 (*CacheSeg),
655 current );
656 }
657 #endif
658 KeReleaseMutex(&(*CacheSeg)->Mutex, 0);
659 KeReleaseGuardedMutex(&ViewLock);
660 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
661 *CacheSeg = current;
662 KeWaitForSingleObject(&current->Mutex,
663 Executive,
664 KernelMode,
665 FALSE,
666 NULL);
667 return STATUS_SUCCESS;
668 }
669 if (current->FileOffset < FileOffset)
670 {
671 if (previous == NULL)
672 {
673 previous = current;
674 }
675 else
676 {
677 if (previous->FileOffset < current->FileOffset)
678 {
679 previous = current;
680 }
681 }
682 }
683 current_entry = current_entry->Flink;
684 }
685 /* There was no existing segment. */
686 current = *CacheSeg;
687 if (previous)
688 {
689 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
690 }
691 else
692 {
693 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
694 }
695 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
696 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
697 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
698 KeReleaseGuardedMutex(&ViewLock);
699 #ifdef CACHE_BITMAP
700 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
701
702 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
703
704 if (StartingOffset == 0xffffffff)
705 {
706 DPRINT1("Out of CacheSeg mapping space\n");
707 KeBugCheck(CACHE_MANAGER);
708 }
709
710 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
711
712 if (CiCacheSegMappingRegionHint == StartingOffset)
713 {
714 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
715 }
716
717 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
718 #else
719 MmLockAddressSpace(MmGetKernelAddressSpace());
720 current->BaseAddress = NULL;
721 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
722 0, // nothing checks for cache_segment mareas, so set to 0
723 &current->BaseAddress,
724 Bcb->CacheSegmentSize,
725 PAGE_READWRITE,
726 (PMEMORY_AREA*)&current->MemoryArea,
727 FALSE,
728 0,
729 BoundaryAddressMultiple);
730 MmUnlockAddressSpace(MmGetKernelAddressSpace());
731 if (!NT_SUCCESS(Status))
732 {
733 KeBugCheck(CACHE_MANAGER);
734 }
735 #endif
736
737 /* Create a virtual mapping for this memory area */
738 MI_SET_USAGE(MI_USAGE_CACHE);
739 #if MI_TRACE_PFNS
740 PWCHAR pos = NULL;
741 ULONG len = 0;
742 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
743 {
744 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
745 len = wcslen(pos) * sizeof(WCHAR);
746 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
747 }
748 #endif
749
750 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
751 MC_CACHE, PAGE_READWRITE);
752
753 return(STATUS_SUCCESS);
754 }
755
/*
 * Build a NextInChain-linked list of cache segments covering
 * [FileOffset, FileOffset + Length), creating any that do not yet exist.
 * Each segment in the chain is returned referenced and with its mutex
 * held (via lookup or creation). *CacheSeg receives the head of the chain.
 */
NTSTATUS
NTAPI
CcRosGetCacheSegmentChain(PBCB Bcb,
                          ULONG FileOffset,
                          ULONG Length,
                          PCACHE_SEGMENT* CacheSeg)
{
    PCACHE_SEGMENT current;
    ULONG i;
    PCACHE_SEGMENT* CacheSegList;
    PCACHE_SEGMENT Previous = NULL;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegmentChain()\n");

    Length = ROUND_UP(Length, Bcb->CacheSegmentSize);

    /* Stack-allocated scratch array of segment pointers; sized by the
     * (rounded) request length. NOTE(review): a large Length could
     * overflow the kernel stack here — callers presumably bound it. */
    CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
                           (Length / Bcb->CacheSegmentSize));

    /*
     * Look for a cache segment already mapping the same data.
     */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
        current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            CacheSegList[i] = current;
        }
        else
        {
            /* NOTE(review): the create status is not checked; on failure
             * 'current' may be invalid — TODO confirm intended behavior. */
            CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
            CacheSegList[i] = current;
        }
    }

    /* Thread the collected segments together through NextInChain. */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        if (i == 0)
        {
            *CacheSeg = CacheSegList[i];
            Previous = CacheSegList[i];
        }
        else
        {
            Previous->NextInChain = CacheSegList[i];
            Previous = CacheSegList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return(STATUS_SUCCESS);
}
821
822 NTSTATUS
823 NTAPI
824 CcRosGetCacheSegment(PBCB Bcb,
825 ULONG FileOffset,
826 PULONG BaseOffset,
827 PVOID* BaseAddress,
828 PBOOLEAN UptoDate,
829 PCACHE_SEGMENT* CacheSeg)
830 {
831 PCACHE_SEGMENT current;
832 NTSTATUS Status;
833
834 ASSERT(Bcb);
835
836 DPRINT("CcRosGetCacheSegment()\n");
837
838 /*
839 * Look for a cache segment already mapping the same data.
840 */
841 current = CcRosLookupCacheSegment(Bcb, FileOffset);
842 if (current == NULL)
843 {
844 /*
845 * Otherwise create a new segment.
846 */
847 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
848 if (!NT_SUCCESS(Status))
849 {
850 return Status;
851 }
852 }
853
854 KeAcquireGuardedMutex(&ViewLock);
855
856 /* Move to the tail of the LRU list */
857 RemoveEntryList(&current->CacheSegmentLRUListEntry);
858 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
859
860 KeReleaseGuardedMutex(&ViewLock);
861
862 /*
863 * Return information about the segment to the caller.
864 */
865 *UptoDate = current->Valid;
866 *BaseAddress = current->BaseAddress;
867 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
868 *CacheSeg = current;
869 *BaseOffset = current->FileOffset;
870 return(STATUS_SUCCESS);
871 }
872
873 NTSTATUS NTAPI
874 CcRosRequestCacheSegment(PBCB Bcb,
875 ULONG FileOffset,
876 PVOID* BaseAddress,
877 PBOOLEAN UptoDate,
878 PCACHE_SEGMENT* CacheSeg)
879 /*
880 * FUNCTION: Request a page mapping for a BCB
881 */
882 {
883 ULONG BaseOffset;
884
885 ASSERT(Bcb);
886
887 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
888 {
889 DPRINT1("Bad fileoffset %x should be multiple of %x",
890 FileOffset, Bcb->CacheSegmentSize);
891 KeBugCheck(CACHE_MANAGER);
892 }
893
894 return(CcRosGetCacheSegment(Bcb,
895 FileOffset,
896 &BaseOffset,
897 BaseAddress,
898 UptoDate,
899 CacheSeg));
900 }
901 #ifdef CACHE_BITMAP
902 #else
/*
 * MmFreeMemoryArea callback: release one physical page of a cache view
 * back to the MC_CACHE memory consumer. Cache pages are never swapped
 * (SwapEntry must be 0) and must be singly-referenced at teardown.
 */
static VOID
CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
                PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
914 #endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 *
 * Unmaps and frees the segment's pages (bitmap-region or memory-area
 * flavour, depending on CACHE_BITMAP) and returns the descriptor to the
 * lookaside list. Caller must already have unlinked the segment from all
 * lists; no locks are taken here except the bitmap-region lock.
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Lower the allocation hint so the freed range is reconsidered. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* Tear down the memory area; CcFreeCachePage releases each page. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
969
970 NTSTATUS
971 NTAPI
972 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
973 {
974 NTSTATUS Status;
975 KIRQL oldIrql;
976
977 ASSERT(Bcb);
978
979 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
980 Bcb, CacheSeg);
981
982 KeAcquireGuardedMutex(&ViewLock);
983 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
984 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
985 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
986 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
987 if (CacheSeg->Dirty)
988 {
989 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
990 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
991
992 }
993 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
994 KeReleaseGuardedMutex(&ViewLock);
995
996 Status = CcRosInternalFreeCacheSegment(CacheSeg);
997 return(Status);
998 }
999
1000 /*
1001 * @implemented
1002 */
/*
 * Flush dirty cached data for a file to disk.
 * With FileOffset == NULL the whole cached file (per Bcb->FileSize) is
 * flushed; otherwise [FileOffset, FileOffset + Length). IoStatus, when
 * supplied, receives STATUS_SUCCESS or the last failing flush status
 * (Information is left at 0). A missing shared cache map yields
 * STATUS_INVALID_PARAMETER.
 */
VOID NTAPI
CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
             IN PLARGE_INTEGER FileOffset OPTIONAL,
             IN ULONG Length,
             OUT PIO_STATUS_BLOCK IoStatus)
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No offset given: flush the entire cached file. */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one cache segment at a time. */
        while (Length > 0)
        {
            /* Returns the segment referenced with its mutex held. */
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                KeReleaseMutex(&current->Mutex, 0);

                /* Drop the lookup reference under the usual lock pair. */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += Bcb->CacheSegmentSize;
            if (Length > Bcb->CacheSegmentSize)
            {
                Length -= Bcb->CacheSegmentSize;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1079
NTSTATUS
NTAPI
CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
/*
 * FUNCTION: Releases the BCB associated with a file object
 *
 * Must be entered with ViewLock held; it is released and reacquired
 * around the flush, and the function returns with ViewLock held again.
 * The temporary RefCount bump keeps the BCB alive across the flush.
 * If the map is still unreferenced afterwards, every cache segment is
 * unhooked (under BcbLock), freed with no locks held, and the BCB itself
 * is returned to its lookaside list.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(Bcb);

    /* Pin the BCB so the flush below cannot race its destruction. */
    Bcb->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write back everything dirty before tearing the map down. */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
        /* Pull the BCB off the deferred-close list if it is queued. */
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all cache segments.
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
        {
            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            if (current->Dirty)
            {
                RemoveEntryList(&current->DirtySegmentListEntry);
                DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
                /* Should not normally happen after the flush above. */
                DPRINT1("Freeing dirty segment\n");
            }
            /* Park on FreeList for teardown outside the locks. */
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
#if DBG
        Bcb->Trace = FALSE;
#endif
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject (Bcb->FileObject);

        /* Free the parked segments with no locks held. */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            CcRosInternalFreeCacheSegment(current);
        }
        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
        /* Re-enter with ViewLock held, as the caller expects. */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return(STATUS_SUCCESS);
}
1150
1151 VOID
1152 NTAPI
1153 CcRosReferenceCache(PFILE_OBJECT FileObject)
1154 {
1155 PBCB Bcb;
1156 KeAcquireGuardedMutex(&ViewLock);
1157 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1158 ASSERT(Bcb);
1159 if (Bcb->RefCount == 0)
1160 {
1161 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1162 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1163 Bcb->BcbRemoveListEntry.Flink = NULL;
1164
1165 }
1166 else
1167 {
1168 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1169 }
1170 Bcb->RefCount++;
1171 KeReleaseGuardedMutex(&ViewLock);
1172 }
1173
1174 VOID
1175 NTAPI
1176 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1177 {
1178 PBCB Bcb;
1179 DPRINT("CcRosSetRemoveOnClose()\n");
1180 KeAcquireGuardedMutex(&ViewLock);
1181 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1182 if (Bcb)
1183 {
1184 Bcb->RemoveOnClose = TRUE;
1185 if (Bcb->RefCount == 0)
1186 {
1187 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1188 }
1189 }
1190 KeReleaseGuardedMutex(&ViewLock);
1191 }
1192
1193
1194 VOID
1195 NTAPI
1196 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1197 {
1198 PBCB Bcb;
1199 KeAcquireGuardedMutex(&ViewLock);
1200 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1201 ASSERT(Bcb);
1202 if (Bcb->RefCount > 0)
1203 {
1204 Bcb->RefCount--;
1205 if (Bcb->RefCount == 0)
1206 {
1207 MmFreeSectionSegments(Bcb->FileObject);
1208 CcRosDeleteFileCache(FileObject, Bcb);
1209 }
1210 }
1211 KeReleaseGuardedMutex(&ViewLock);
1212 }
1213
1214 NTSTATUS NTAPI
1215 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1216 /*
1217 * FUNCTION: Called by the file system when a handle to a file object
1218 * has been closed.
1219 */
1220 {
1221 PBCB Bcb;
1222
1223 KeAcquireGuardedMutex(&ViewLock);
1224
1225 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1226 {
1227 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1228 if (FileObject->PrivateCacheMap != NULL)
1229 {
1230 FileObject->PrivateCacheMap = NULL;
1231 if (Bcb->RefCount > 0)
1232 {
1233 Bcb->RefCount--;
1234 if (Bcb->RefCount == 0)
1235 {
1236 MmFreeSectionSegments(Bcb->FileObject);
1237 CcRosDeleteFileCache(FileObject, Bcb);
1238 }
1239 }
1240 }
1241 }
1242 KeReleaseGuardedMutex(&ViewLock);
1243 return(STATUS_SUCCESS);
1244 }
1245
1246 NTSTATUS
1247 NTAPI
1248 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1249 {
1250 PBCB Bcb;
1251 NTSTATUS Status;
1252
1253 KeAcquireGuardedMutex(&ViewLock);
1254
1255 ASSERT(FileObject->SectionObjectPointer);
1256 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1257 if (Bcb == NULL)
1258 {
1259 Status = STATUS_UNSUCCESSFUL;
1260 }
1261 else
1262 {
1263 if (FileObject->PrivateCacheMap == NULL)
1264 {
1265 FileObject->PrivateCacheMap = Bcb;
1266 Bcb->RefCount++;
1267 }
1268 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1269 {
1270 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1271 Bcb->BcbRemoveListEntry.Flink = NULL;
1272 }
1273 Status = STATUS_SUCCESS;
1274 }
1275 KeReleaseGuardedMutex(&ViewLock);
1276
1277 return Status;
1278 }
1279
1280
1281 NTSTATUS NTAPI
1282 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1283 ULONG CacheSegmentSize,
1284 PCACHE_MANAGER_CALLBACKS CallBacks,
1285 PVOID LazyWriterContext)
1286 /*
1287 * FUNCTION: Initializes a BCB for a file object
1288 */
1289 {
1290 PBCB Bcb;
1291
1292 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1293 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1294 FileObject, Bcb, CacheSegmentSize);
1295
1296 KeAcquireGuardedMutex(&ViewLock);
1297 if (Bcb == NULL)
1298 {
1299 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1300 if (Bcb == NULL)
1301 {
1302 KeReleaseGuardedMutex(&ViewLock);
1303 return(STATUS_UNSUCCESSFUL);
1304 }
1305 memset(Bcb, 0, sizeof(BCB));
1306 ObReferenceObjectByPointer(FileObject,
1307 FILE_ALL_ACCESS,
1308 NULL,
1309 KernelMode);
1310 Bcb->FileObject = FileObject;
1311 Bcb->CacheSegmentSize = CacheSegmentSize;
1312 Bcb->Callbacks = CallBacks;
1313 Bcb->LazyWriteContext = LazyWriterContext;
1314 if (FileObject->FsContext)
1315 {
1316 Bcb->AllocationSize =
1317 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1318 Bcb->FileSize =
1319 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1320 }
1321 KeInitializeSpinLock(&Bcb->BcbLock);
1322 InitializeListHead(&Bcb->BcbSegmentListHead);
1323 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1324 }
1325 if (FileObject->PrivateCacheMap == NULL)
1326 {
1327 FileObject->PrivateCacheMap = Bcb;
1328 Bcb->RefCount++;
1329 }
1330 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1331 {
1332 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1333 Bcb->BcbRemoveListEntry.Flink = NULL;
1334 }
1335 KeReleaseGuardedMutex(&ViewLock);
1336
1337 return(STATUS_SUCCESS);
1338 }
1339
1340 /*
1341 * @implemented
1342 */
1343 PFILE_OBJECT NTAPI
1344 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1345 {
1346 PBCB Bcb;
1347 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1348 {
1349 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1350 ASSERT(Bcb);
1351 return Bcb->FileObject;
1352 }
1353 return NULL;
1354 }
1355
1356 VOID
1357 INIT_FUNCTION
1358 NTAPI
1359 CcInitView(VOID)
1360 {
1361 #ifdef CACHE_BITMAP
1362 PMEMORY_AREA marea;
1363 PVOID Buffer;
1364 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1365 #endif
1366
1367 DPRINT("CcInitView()\n");
1368 #ifdef CACHE_BITMAP
1369 BoundaryAddressMultiple.QuadPart = 0;
1370 CiCacheSegMappingRegionHint = 0;
1371 CiCacheSegMappingRegionBase = NULL;
1372
1373 MmLockAddressSpace(MmGetKernelAddressSpace());
1374
1375 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1376 MEMORY_AREA_CACHE_SEGMENT,
1377 &CiCacheSegMappingRegionBase,
1378 CI_CACHESEG_MAPPING_REGION_SIZE,
1379 PAGE_READWRITE,
1380 &marea,
1381 FALSE,
1382 0,
1383 BoundaryAddressMultiple);
1384 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1385 if (!NT_SUCCESS(Status))
1386 {
1387 KeBugCheck(CACHE_MANAGER);
1388 }
1389
1390 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1391 if (!Buffer)
1392 {
1393 KeBugCheck(CACHE_MANAGER);
1394 }
1395
1396 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1397 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1398
1399 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1400 #endif
1401 InitializeListHead(&CacheSegmentListHead);
1402 InitializeListHead(&DirtySegmentListHead);
1403 InitializeListHead(&CacheSegmentLRUListHead);
1404 InitializeListHead(&ClosedListHead);
1405 KeInitializeGuardedMutex(&ViewLock);
1406 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1407 NULL,
1408 NULL,
1409 0,
1410 sizeof(INTERNAL_BCB),
1411 TAG_IBCB,
1412 20);
1413 ExInitializeNPagedLookasideList (&BcbLookasideList,
1414 NULL,
1415 NULL,
1416 0,
1417 sizeof(BCB),
1418 TAG_BCB,
1419 20);
1420 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1421 NULL,
1422 NULL,
1423 0,
1424 sizeof(CACHE_SEGMENT),
1425 TAG_CSEG,
1426 20);
1427
1428 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1429
1430 CcInitCacheZeroPage();
1431
1432 }
1433
1434 /* EOF */
1435
1436
1437
1438
1439
1440
1441
1442