[SHELL32] - Try to fix MSVC build
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then so do by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
#if DBG
/* Debug builds route all segment reference counting through these helpers
 * so that, when per-BCB tracing is switched on, every count change is
 * logged together with the call site. */
static void CcRosCacheSegmentIncRefCount_(PCACHE_SEGMENT cs, const char* file, int line)
{
    cs->ReferenceCount++;
    if (cs->Bcb->Trace)
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
static void CcRosCacheSegmentDecRefCount_(PCACHE_SEGMENT cs, const char* file, int line)
{
    cs->ReferenceCount--;
    if (cs->Bcb->Trace)
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Free builds: plain increments/decrements, no tracing overhead. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
105 VOID
106 NTAPI
107 CcRosTraceCacheMap (
108 PBCB Bcb,
109 BOOLEAN Trace )
110 {
111 #if DBG
112 KIRQL oldirql;
113 PLIST_ENTRY current_entry;
114 PCACHE_SEGMENT current;
115
116 if ( !Bcb )
117 return;
118
119 Bcb->Trace = Trace;
120
121 if ( Trace )
122 {
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
124
125 KeAcquireGuardedMutex(&ViewLock);
126 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
127
128 current_entry = Bcb->BcbSegmentListHead.Flink;
129 while (current_entry != &Bcb->BcbSegmentListHead)
130 {
131 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
132 current_entry = current_entry->Flink;
133
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
135 current, current->ReferenceCount, current->Dirty, current->PageOut );
136 }
137 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
138 KeReleaseGuardedMutex(&ViewLock);
139 }
140 else
141 {
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
143 }
144
145 #else
146 Bcb = Bcb;
147 Trace = Trace;
148 #endif
149 }
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
154 {
155 NTSTATUS Status;
156 KIRQL oldIrql;
157
158 Status = WriteCacheSegment(CacheSegment);
159 if (NT_SUCCESS(Status))
160 {
161 KeAcquireGuardedMutex(&ViewLock);
162 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
163
164 CacheSegment->Dirty = FALSE;
165 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
166 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
167 CcRosCacheSegmentDecRefCount ( CacheSegment );
168
169 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
170 KeReleaseGuardedMutex(&ViewLock);
171 }
172
173 return(Status);
174 }
175
/*
 * CcRosFlushDirtyPages
 * FUNCTION: Walk the global dirty-segment list and write segments back
 *           until roughly Target pages have been flushed.
 * ARGUMENTS:
 *      Target - Number of dirty pages the caller would like written.
 *      Count  - Receives the number of pages actually flushed.
 * RETURNS: STATUS_SUCCESS (per-segment write failures are only logged).
 * NOTE: WriteCount[] is a 4-slot sliding window used to smooth write-back
 *       across successive calls: each call shifts the window and spreads a
 *       quarter of the outstanding dirty pages into each slot.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages(ULONG Target, PULONG Count)
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    static ULONG WriteCount[4] = {0, 0, 0, 0};
    ULONG NewTarget;

    DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    /* Shift the sliding window by one slot. */
    WriteCount[0] = WriteCount[1];
    WriteCount[1] = WriteCount[2];
    WriteCount[2] = WriteCount[3];
    WriteCount[3] = 0;

    NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];

    if (NewTarget < DirtyPageCount)
    {
        /* Distribute the not-yet-scheduled dirty pages over the 4 slots. */
        NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
        WriteCount[0] += NewTarget;
        WriteCount[1] += NewTarget;
        WriteCount[2] += NewTarget;
        WriteCount[3] += NewTarget;
    }

    NewTarget = WriteCount[0];

    /* Flush at least what the window says is due, or more if asked. */
    Target = max(NewTarget, Target);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (current_entry != &DirtySegmentListHead && Target > 0)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Let the FS synchronize with its own lazy-write locking; skip the
         * segment if the FS refuses right now. */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, TRUE);
        if (!Locked)
        {
            continue;
        }

        ExAcquirePushLockExclusive(&current->Lock);

        ASSERT(current->Dirty);
        /* RefCount > 1 means someone besides the dirty list holds the
         * segment (it is in active use) - do not flush it now. */
        if (current->ReferenceCount > 1)
        {
            ExReleasePushLock(&current->Lock);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            continue;
        }

        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;

        /* Drop ViewLock across the actual I/O; the push lock still pins
         * this segment. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        ExReleasePushLock(&current->Lock);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* The list may have changed while ViewLock was dropped, so restart
         * from the head rather than trusting the saved iterator. */
        KeAcquireGuardedMutex(&ViewLock);
        current_entry = DirtySegmentListHead.Flink;
    }

    /* Carry any shortfall forward into the next window slot. */
    if (*Count < NewTarget)
    {
        WriteCount[1] += (NewTarget - *Count);
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
280
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;   /* only attempt the flush fallback once */

    DPRINT("CcRosTrimCache(Target %d)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan segments in LRU order so the least recently used go first. */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            /* NOTE(review): current_entry was captured before the locks are
             * dropped here; another thread could free that neighbour while
             * the locks are released - iterator may be stale on resume.
             * TODO confirm. */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the segment */
            for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from all lists; BcbSegmentListEntry is reused to queue
             * the segment on the local FreeList. */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if (Target > 0 && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Actually release the memory of the segments collected above. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %d cache pages\n", (*NrFreed));

    return(STATUS_SUCCESS);
}
400
/*
 * CcRosReleaseCacheSegment
 * FUNCTION: Release a segment previously obtained via lookup/create/get.
 *           Updates validity, dirty-list membership and mapping count, then
 *           drops the push lock and leaves the critical region that the
 *           acquiring routine entered.
 * ARGUMENTS:
 *      Bcb      - Cache map owning the segment.
 *      CacheSeg - Segment being released (push lock currently held).
 *      Valid    - New value for the segment's Valid flag.
 *      Dirty    - TRUE if the caller wrote into the segment.
 *      Mapped   - TRUE if the caller created a user mapping of the segment.
 */
NTSTATUS
NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
                         PCACHE_SEGMENT CacheSeg,
                         BOOLEAN Valid,
                         BOOLEAN Dirty,
                         BOOLEAN Mapped)
{
    BOOLEAN WasDirty = CacheSeg->Dirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
           Bcb, CacheSeg, Valid);

    CacheSeg->Valid = Valid;
    /* Dirty is sticky: once dirty, stays dirty until flushed. */
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    KeAcquireGuardedMutex(&ViewLock);
    if (!WasDirty && CacheSeg->Dirty)
    {
        /* First transition to dirty: enqueue and account its pages. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the caller's usage reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but add one reference for the first mapping... */
    if (Mapped && CacheSeg->MappedCount == 1)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and one for the dirty list on the clean->dirty transition. */
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    /* Pairs with the acquire in CcRosLookupCacheSegment/CreateCacheSegment. */
    ExReleasePushLock(&CacheSeg->Lock);
    KeLeaveCriticalRegion();

    return(STATUS_SUCCESS);
}
448
449 /* Returns with Cache Segment Lock Held! */
450 PCACHE_SEGMENT
451 NTAPI
452 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
453 {
454 PLIST_ENTRY current_entry;
455 PCACHE_SEGMENT current;
456 KIRQL oldIrql;
457
458 ASSERT(Bcb);
459
460 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
461
462 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
463 current_entry = Bcb->BcbSegmentListHead.Flink;
464 while (current_entry != &Bcb->BcbSegmentListHead)
465 {
466 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
467 BcbSegmentListEntry);
468 if (current->FileOffset <= FileOffset &&
469 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
470 {
471 CcRosCacheSegmentIncRefCount(current);
472 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
473 KeEnterCriticalRegion();
474 ExAcquirePushLockExclusive(&current->Lock);
475 return(current);
476 }
477 current_entry = current_entry->Flink;
478 }
479 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
480 return(NULL);
481 }
482
/*
 * CcRosMarkDirtyCacheSegment
 * FUNCTION: Mark the segment covering FileOffset as dirty without going
 *           through a release. Bugchecks if no segment covers the offset.
 * NOTE: CcRosLookupCacheSegment returns the segment referenced and with its
 *       push lock held; if the segment was already dirty the extra reference
 *       is dropped, otherwise it is kept on behalf of the dirty list.
 */
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);

    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        /* The caller must only mark offsets that are actually cached. */
        KeBugCheck(CACHE_MANAGER);
    }
    if (!CacheSeg->Dirty)
    {
        /* Clean -> dirty: enqueue and account; the lookup reference is kept
         * as the dirty list's reference. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        /* Already dirty: the dirty list already holds a reference, so drop
         * the one taken by the lookup. */
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        CcRosCacheSegmentDecRefCount(CacheSeg);
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /* NOTE(review): Dirty is set only after the segment was inserted into
     * the dirty list above - looks intentional since the push lock is still
     * held, but verify against concurrent flushers. */
    CacheSeg->Dirty = TRUE;
    ExReleasePushLock(&CacheSeg->Lock);
    KeLeaveCriticalRegion();

    return(STATUS_SUCCESS);
}
527
/*
 * CcRosUnmapCacheSegment
 * FUNCTION: Drop one user mapping of the segment covering FileOffset,
 *           optionally marking it dirty.
 * RETURNS: STATUS_UNSUCCESSFUL if no segment covers the offset.
 * NOTE: Reference accounting mirrors CcRosReleaseCacheSegment: the lookup
 *       reference is dropped; +1 if the segment just became dirty (for the
 *       dirty list); -1 when the last mapping goes away.
 */
NTSTATUS
NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
           Bcb, FileOffset, NowDirty);

    /* Returns with a reference taken and the push lock held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* First transition to dirty: enqueue and account its pages. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }

    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the lookup reference. */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    if (!WasDirty && NowDirty)
    {
        /* Dirty list now holds a reference. */
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    if (CacheSeg->MappedCount == 0)
    {
        /* Last mapping gone: drop the mapping reference taken when
         * MappedCount first became 1. */
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

    ExReleasePushLock(&CacheSeg->Lock);
    KeLeaveCriticalRegion();

    return(STATUS_SUCCESS);
}
577
578 static
579 NTSTATUS
580 CcRosCreateCacheSegment(PBCB Bcb,
581 ULONG FileOffset,
582 PCACHE_SEGMENT* CacheSeg)
583 {
584 PCACHE_SEGMENT current;
585 PCACHE_SEGMENT previous;
586 PLIST_ENTRY current_entry;
587 NTSTATUS Status;
588 KIRQL oldIrql;
589 #ifdef CACHE_BITMAP
590 ULONG StartingOffset;
591 #endif
592 PHYSICAL_ADDRESS BoundaryAddressMultiple;
593
594 ASSERT(Bcb);
595
596 DPRINT("CcRosCreateCacheSegment()\n");
597
598 BoundaryAddressMultiple.QuadPart = 0;
599 if (FileOffset >= Bcb->FileSize.u.LowPart)
600 {
601 CacheSeg = NULL;
602 return STATUS_INVALID_PARAMETER;
603 }
604
605 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
606 current->Valid = FALSE;
607 current->Dirty = FALSE;
608 current->PageOut = FALSE;
609 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
610 current->Bcb = Bcb;
611 #if DBG
612 if ( Bcb->Trace )
613 {
614 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
615 }
616 #endif
617 current->MappedCount = 0;
618 current->DirtySegmentListEntry.Flink = NULL;
619 current->DirtySegmentListEntry.Blink = NULL;
620 current->ReferenceCount = 1;
621 ExInitializePushLock(&current->Lock);
622 KeEnterCriticalRegion();
623 ExAcquirePushLockExclusive(&current->Lock);
624 KeAcquireGuardedMutex(&ViewLock);
625
626 *CacheSeg = current;
627 /* There is window between the call to CcRosLookupCacheSegment
628 * and CcRosCreateCacheSegment. We must check if a segment on
629 * the fileoffset exist. If there exist a segment, we release
630 * our new created segment and return the existing one.
631 */
632 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
633 current_entry = Bcb->BcbSegmentListHead.Flink;
634 previous = NULL;
635 while (current_entry != &Bcb->BcbSegmentListHead)
636 {
637 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
638 BcbSegmentListEntry);
639 if (current->FileOffset <= FileOffset &&
640 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
641 {
642 CcRosCacheSegmentIncRefCount(current);
643 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
644 #if DBG
645 if ( Bcb->Trace )
646 {
647 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
648 Bcb,
649 (*CacheSeg),
650 current );
651 }
652 #endif
653 ExReleasePushLock(&(*CacheSeg)->Lock);
654 KeReleaseGuardedMutex(&ViewLock);
655 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
656 *CacheSeg = current;
657 /* We're still in the critical region from above */
658 ExAcquirePushLockExclusive(&current->Lock);
659 return STATUS_SUCCESS;
660 }
661 if (current->FileOffset < FileOffset)
662 {
663 if (previous == NULL)
664 {
665 previous = current;
666 }
667 else
668 {
669 if (previous->FileOffset < current->FileOffset)
670 {
671 previous = current;
672 }
673 }
674 }
675 current_entry = current_entry->Flink;
676 }
677 /* There was no existing segment. */
678 current = *CacheSeg;
679 if (previous)
680 {
681 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
682 }
683 else
684 {
685 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
686 }
687 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
688 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
689 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
690 KeReleaseGuardedMutex(&ViewLock);
691 #ifdef CACHE_BITMAP
692 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
693
694 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
695
696 if (StartingOffset == 0xffffffff)
697 {
698 DPRINT1("Out of CacheSeg mapping space\n");
699 KeBugCheck(CACHE_MANAGER);
700 }
701
702 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
703
704 if (CiCacheSegMappingRegionHint == StartingOffset)
705 {
706 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
707 }
708
709 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
710 #else
711 MmLockAddressSpace(MmGetKernelAddressSpace());
712 current->BaseAddress = NULL;
713 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
714 0, // nothing checks for cache_segment mareas, so set to 0
715 &current->BaseAddress,
716 Bcb->CacheSegmentSize,
717 PAGE_READWRITE,
718 (PMEMORY_AREA*)&current->MemoryArea,
719 FALSE,
720 0,
721 BoundaryAddressMultiple);
722 MmUnlockAddressSpace(MmGetKernelAddressSpace());
723 if (!NT_SUCCESS(Status))
724 {
725 KeBugCheck(CACHE_MANAGER);
726 }
727 #endif
728
729 /* Create a virtual mapping for this memory area */
730 MI_SET_USAGE(MI_USAGE_CACHE);
731 #if MI_TRACE_PFNS
732 PWCHAR pos = NULL;
733 ULONG len = 0;
734 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
735 {
736 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
737 len = wcslen(pos) * sizeof(WCHAR);
738 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
739 }
740 #endif
741
742 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
743 MC_CACHE, PAGE_READWRITE);
744
745 return(STATUS_SUCCESS);
746 }
747
748 NTSTATUS
749 NTAPI
750 CcRosGetCacheSegmentChain(PBCB Bcb,
751 ULONG FileOffset,
752 ULONG Length,
753 PCACHE_SEGMENT* CacheSeg)
754 {
755 PCACHE_SEGMENT current;
756 ULONG i;
757 PCACHE_SEGMENT* CacheSegList;
758 PCACHE_SEGMENT Previous = NULL;
759
760 ASSERT(Bcb);
761
762 DPRINT("CcRosGetCacheSegmentChain()\n");
763
764 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
765
766 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
767 (Length / Bcb->CacheSegmentSize));
768
769 /*
770 * Look for a cache segment already mapping the same data.
771 */
772 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
773 {
774 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
775 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
776 if (current != NULL)
777 {
778 KeAcquireGuardedMutex(&ViewLock);
779
780 /* Move to tail of LRU list */
781 RemoveEntryList(&current->CacheSegmentLRUListEntry);
782 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
783
784 KeReleaseGuardedMutex(&ViewLock);
785
786 CacheSegList[i] = current;
787 }
788 else
789 {
790 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
791 CacheSegList[i] = current;
792 }
793 }
794
795 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
796 {
797 if (i == 0)
798 {
799 *CacheSeg = CacheSegList[i];
800 Previous = CacheSegList[i];
801 }
802 else
803 {
804 Previous->NextInChain = CacheSegList[i];
805 Previous = CacheSegList[i];
806 }
807 }
808 ASSERT(Previous);
809 Previous->NextInChain = NULL;
810
811 return(STATUS_SUCCESS);
812 }
813
814 NTSTATUS
815 NTAPI
816 CcRosGetCacheSegment(PBCB Bcb,
817 ULONG FileOffset,
818 PULONG BaseOffset,
819 PVOID* BaseAddress,
820 PBOOLEAN UptoDate,
821 PCACHE_SEGMENT* CacheSeg)
822 {
823 PCACHE_SEGMENT current;
824 NTSTATUS Status;
825
826 ASSERT(Bcb);
827
828 DPRINT("CcRosGetCacheSegment()\n");
829
830 /*
831 * Look for a cache segment already mapping the same data.
832 */
833 current = CcRosLookupCacheSegment(Bcb, FileOffset);
834 if (current == NULL)
835 {
836 /*
837 * Otherwise create a new segment.
838 */
839 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
840 if (!NT_SUCCESS(Status))
841 {
842 return Status;
843 }
844 }
845
846 KeAcquireGuardedMutex(&ViewLock);
847
848 /* Move to the tail of the LRU list */
849 RemoveEntryList(&current->CacheSegmentLRUListEntry);
850 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
851
852 KeReleaseGuardedMutex(&ViewLock);
853
854 /*
855 * Return information about the segment to the caller.
856 */
857 *UptoDate = current->Valid;
858 *BaseAddress = current->BaseAddress;
859 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
860 *CacheSeg = current;
861 *BaseOffset = current->FileOffset;
862 return(STATUS_SUCCESS);
863 }
864
865 NTSTATUS NTAPI
866 CcRosRequestCacheSegment(PBCB Bcb,
867 ULONG FileOffset,
868 PVOID* BaseAddress,
869 PBOOLEAN UptoDate,
870 PCACHE_SEGMENT* CacheSeg)
871 /*
872 * FUNCTION: Request a page mapping for a BCB
873 */
874 {
875 ULONG BaseOffset;
876
877 ASSERT(Bcb);
878
879 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
880 {
881 DPRINT1("Bad fileoffset %x should be multiple of %x",
882 FileOffset, Bcb->CacheSegmentSize);
883 KeBugCheck(CACHE_MANAGER);
884 }
885
886 return(CcRosGetCacheSegment(Bcb,
887 FileOffset,
888 &BaseOffset,
889 BaseAddress,
890 UptoDate,
891 CacheSeg));
892 }
#ifdef CACHE_BITMAP
#else
/*
 * Callback passed to MmFreeMemoryArea: releases each physical page that was
 * backing a cache-segment view. Only used in the memory-area (non-bitmap)
 * build.
 */
static VOID
CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
                PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
    /* Cache pages are never swapped out through this path. */
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        /* The cache must hold the last remaining reference at this point. */
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
#endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 * NOTE: The segment must already be unlinked from all lists; this only
 *       tears down its mapping/pages and returns it to the lookaside list.
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Let the next allocation scan start no later than the freed slot. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* Memory-area build: tear down the area; CcFreeCachePage releases each
     * backing page. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
961
962 NTSTATUS
963 NTAPI
964 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
965 {
966 NTSTATUS Status;
967 KIRQL oldIrql;
968
969 ASSERT(Bcb);
970
971 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
972 Bcb, CacheSeg);
973
974 KeAcquireGuardedMutex(&ViewLock);
975 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
976 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
977 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
978 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
979 if (CacheSeg->Dirty)
980 {
981 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
982 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
983
984 }
985 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
986 KeReleaseGuardedMutex(&ViewLock);
987
988 Status = CcRosInternalFreeCacheSegment(CacheSeg);
989 return(Status);
990 }
991
992 /*
993 * @implemented
994 */
995 VOID NTAPI
996 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
997 IN PLARGE_INTEGER FileOffset OPTIONAL,
998 IN ULONG Length,
999 OUT PIO_STATUS_BLOCK IoStatus)
1000 {
1001 PBCB Bcb;
1002 LARGE_INTEGER Offset;
1003 PCACHE_SEGMENT current;
1004 NTSTATUS Status;
1005 KIRQL oldIrql;
1006
1007 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
1008 SectionObjectPointers, FileOffset, Length, IoStatus);
1009
1010 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1011 {
1012 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1013 ASSERT(Bcb);
1014 if (FileOffset)
1015 {
1016 Offset = *FileOffset;
1017 }
1018 else
1019 {
1020 Offset.QuadPart = (LONGLONG)0;
1021 Length = Bcb->FileSize.u.LowPart;
1022 }
1023
1024 if (IoStatus)
1025 {
1026 IoStatus->Status = STATUS_SUCCESS;
1027 IoStatus->Information = 0;
1028 }
1029
1030 while (Length > 0)
1031 {
1032 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
1033 if (current != NULL)
1034 {
1035 if (current->Dirty)
1036 {
1037 Status = CcRosFlushCacheSegment(current);
1038 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1039 {
1040 IoStatus->Status = Status;
1041 }
1042 }
1043 ExReleasePushLock(&current->Lock);
1044 KeLeaveCriticalRegion();
1045 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1046 CcRosCacheSegmentDecRefCount(current);
1047 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1048 }
1049
1050 Offset.QuadPart += Bcb->CacheSegmentSize;
1051 if (Length > Bcb->CacheSegmentSize)
1052 {
1053 Length -= Bcb->CacheSegmentSize;
1054 }
1055 else
1056 {
1057 Length = 0;
1058 }
1059 }
1060 }
1061 else
1062 {
1063 if (IoStatus)
1064 {
1065 IoStatus->Status = STATUS_INVALID_PARAMETER;
1066 }
1067 }
1068 }
1069
1070 NTSTATUS
1071 NTAPI
1072 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1073 /*
1074 * FUNCTION: Releases the BCB associated with a file object
1075 */
1076 {
1077 PLIST_ENTRY current_entry;
1078 PCACHE_SEGMENT current;
1079 LIST_ENTRY FreeList;
1080 KIRQL oldIrql;
1081
1082 ASSERT(Bcb);
1083
1084 Bcb->RefCount++;
1085 KeReleaseGuardedMutex(&ViewLock);
1086
1087 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1088
1089 KeAcquireGuardedMutex(&ViewLock);
1090 Bcb->RefCount--;
1091 if (Bcb->RefCount == 0)
1092 {
1093 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1094 {
1095 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1096 Bcb->BcbRemoveListEntry.Flink = NULL;
1097 }
1098
1099 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1100
1101 /*
1102 * Release all cache segments.
1103 */
1104 InitializeListHead(&FreeList);
1105 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1106 current_entry = Bcb->BcbSegmentListHead.Flink;
1107 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1108 {
1109 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1110 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1111 RemoveEntryList(&current->CacheSegmentListEntry);
1112 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1113 if (current->Dirty)
1114 {
1115 RemoveEntryList(&current->DirtySegmentListEntry);
1116 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1117 DPRINT1("Freeing dirty segment\n");
1118 }
1119 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1120 }
1121 #if DBG
1122 Bcb->Trace = FALSE;
1123 #endif
1124 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1125
1126 KeReleaseGuardedMutex(&ViewLock);
1127 ObDereferenceObject (Bcb->FileObject);
1128
1129 while (!IsListEmpty(&FreeList))
1130 {
1131 current_entry = RemoveTailList(&FreeList);
1132 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1133 CcRosInternalFreeCacheSegment(current);
1134 }
1135 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1136 KeAcquireGuardedMutex(&ViewLock);
1137 }
1138 return(STATUS_SUCCESS);
1139 }
1140
1141 VOID
1142 NTAPI
1143 CcRosReferenceCache(PFILE_OBJECT FileObject)
1144 {
1145 PBCB Bcb;
1146 KeAcquireGuardedMutex(&ViewLock);
1147 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1148 ASSERT(Bcb);
1149 if (Bcb->RefCount == 0)
1150 {
1151 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1152 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1153 Bcb->BcbRemoveListEntry.Flink = NULL;
1154
1155 }
1156 else
1157 {
1158 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1159 }
1160 Bcb->RefCount++;
1161 KeReleaseGuardedMutex(&ViewLock);
1162 }
1163
1164 VOID
1165 NTAPI
1166 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1167 {
1168 PBCB Bcb;
1169 DPRINT("CcRosSetRemoveOnClose()\n");
1170 KeAcquireGuardedMutex(&ViewLock);
1171 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1172 if (Bcb)
1173 {
1174 Bcb->RemoveOnClose = TRUE;
1175 if (Bcb->RefCount == 0)
1176 {
1177 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1178 }
1179 }
1180 KeReleaseGuardedMutex(&ViewLock);
1181 }
1182
1183
1184 VOID
1185 NTAPI
1186 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1187 {
1188 PBCB Bcb;
1189 KeAcquireGuardedMutex(&ViewLock);
1190 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1191 ASSERT(Bcb);
1192 if (Bcb->RefCount > 0)
1193 {
1194 Bcb->RefCount--;
1195 if (Bcb->RefCount == 0)
1196 {
1197 MmFreeSectionSegments(Bcb->FileObject);
1198 CcRosDeleteFileCache(FileObject, Bcb);
1199 }
1200 }
1201 KeReleaseGuardedMutex(&ViewLock);
1202 }
1203
1204 NTSTATUS NTAPI
1205 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1206 /*
1207 * FUNCTION: Called by the file system when a handle to a file object
1208 * has been closed.
1209 */
1210 {
1211 PBCB Bcb;
1212
1213 KeAcquireGuardedMutex(&ViewLock);
1214
1215 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1216 {
1217 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1218 if (FileObject->PrivateCacheMap != NULL)
1219 {
1220 FileObject->PrivateCacheMap = NULL;
1221 if (Bcb->RefCount > 0)
1222 {
1223 Bcb->RefCount--;
1224 if (Bcb->RefCount == 0)
1225 {
1226 MmFreeSectionSegments(Bcb->FileObject);
1227 CcRosDeleteFileCache(FileObject, Bcb);
1228 }
1229 }
1230 }
1231 }
1232 KeReleaseGuardedMutex(&ViewLock);
1233 return(STATUS_SUCCESS);
1234 }
1235
1236 NTSTATUS
1237 NTAPI
1238 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1239 {
1240 PBCB Bcb;
1241 NTSTATUS Status;
1242
1243 KeAcquireGuardedMutex(&ViewLock);
1244
1245 ASSERT(FileObject->SectionObjectPointer);
1246 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1247 if (Bcb == NULL)
1248 {
1249 Status = STATUS_UNSUCCESSFUL;
1250 }
1251 else
1252 {
1253 if (FileObject->PrivateCacheMap == NULL)
1254 {
1255 FileObject->PrivateCacheMap = Bcb;
1256 Bcb->RefCount++;
1257 }
1258 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1259 {
1260 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1261 Bcb->BcbRemoveListEntry.Flink = NULL;
1262 }
1263 Status = STATUS_SUCCESS;
1264 }
1265 KeReleaseGuardedMutex(&ViewLock);
1266
1267 return Status;
1268 }
1269
1270
1271 NTSTATUS NTAPI
1272 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1273 ULONG CacheSegmentSize,
1274 PCACHE_MANAGER_CALLBACKS CallBacks,
1275 PVOID LazyWriterContext)
1276 /*
1277 * FUNCTION: Initializes a BCB for a file object
1278 */
1279 {
1280 PBCB Bcb;
1281
1282 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1283 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1284 FileObject, Bcb, CacheSegmentSize);
1285
1286 KeAcquireGuardedMutex(&ViewLock);
1287 if (Bcb == NULL)
1288 {
1289 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1290 if (Bcb == NULL)
1291 {
1292 KeReleaseGuardedMutex(&ViewLock);
1293 return(STATUS_UNSUCCESSFUL);
1294 }
1295 memset(Bcb, 0, sizeof(BCB));
1296 ObReferenceObjectByPointer(FileObject,
1297 FILE_ALL_ACCESS,
1298 NULL,
1299 KernelMode);
1300 Bcb->FileObject = FileObject;
1301 Bcb->CacheSegmentSize = CacheSegmentSize;
1302 Bcb->Callbacks = CallBacks;
1303 Bcb->LazyWriteContext = LazyWriterContext;
1304 if (FileObject->FsContext)
1305 {
1306 Bcb->AllocationSize =
1307 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1308 Bcb->FileSize =
1309 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1310 }
1311 KeInitializeSpinLock(&Bcb->BcbLock);
1312 InitializeListHead(&Bcb->BcbSegmentListHead);
1313 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1314 }
1315 if (FileObject->PrivateCacheMap == NULL)
1316 {
1317 FileObject->PrivateCacheMap = Bcb;
1318 Bcb->RefCount++;
1319 }
1320 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1321 {
1322 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1323 Bcb->BcbRemoveListEntry.Flink = NULL;
1324 }
1325 KeReleaseGuardedMutex(&ViewLock);
1326
1327 return(STATUS_SUCCESS);
1328 }
1329
1330 /*
1331 * @implemented
1332 */
1333 PFILE_OBJECT NTAPI
1334 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1335 {
1336 PBCB Bcb;
1337 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1338 {
1339 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1340 ASSERT(Bcb);
1341 return Bcb->FileObject;
1342 }
1343 return NULL;
1344 }
1345
1346 VOID
1347 INIT_FUNCTION
1348 NTAPI
1349 CcInitView(VOID)
1350 {
1351 #ifdef CACHE_BITMAP
1352 PMEMORY_AREA marea;
1353 PVOID Buffer;
1354 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1355 #endif
1356
1357 DPRINT("CcInitView()\n");
1358 #ifdef CACHE_BITMAP
1359 BoundaryAddressMultiple.QuadPart = 0;
1360 CiCacheSegMappingRegionHint = 0;
1361 CiCacheSegMappingRegionBase = NULL;
1362
1363 MmLockAddressSpace(MmGetKernelAddressSpace());
1364
1365 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1366 MEMORY_AREA_CACHE_SEGMENT,
1367 &CiCacheSegMappingRegionBase,
1368 CI_CACHESEG_MAPPING_REGION_SIZE,
1369 PAGE_READWRITE,
1370 &marea,
1371 FALSE,
1372 0,
1373 BoundaryAddressMultiple);
1374 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1375 if (!NT_SUCCESS(Status))
1376 {
1377 KeBugCheck(CACHE_MANAGER);
1378 }
1379
1380 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1381 if (!Buffer)
1382 {
1383 KeBugCheck(CACHE_MANAGER);
1384 }
1385
1386 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1387 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1388
1389 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1390 #endif
1391 InitializeListHead(&CacheSegmentListHead);
1392 InitializeListHead(&DirtySegmentListHead);
1393 InitializeListHead(&CacheSegmentLRUListHead);
1394 InitializeListHead(&ClosedListHead);
1395 KeInitializeGuardedMutex(&ViewLock);
1396 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1397 NULL,
1398 NULL,
1399 0,
1400 sizeof(INTERNAL_BCB),
1401 TAG_IBCB,
1402 20);
1403 ExInitializeNPagedLookasideList (&BcbLookasideList,
1404 NULL,
1405 NULL,
1406 0,
1407 sizeof(BCB),
1408 TAG_BCB,
1409 20);
1410 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1411 NULL,
1412 NULL,
1413 0,
1414 sizeof(CACHE_SEGMENT),
1415 TAG_CSEG,
1416 20);
1417
1418 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1419
1420 CcInitCacheZeroPage();
1421
1422 }
1423
1424 /* EOF */
1425
1426
1427
1428
1429
1430
1431
1432