1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
 18 * (1) If caching for the FCB hasn't been initiated then do so by calling
 19 * CcInitializeFileCache.
20 *
 21 * (2) For each 4k region being read or written, obtain a cache page
 22 * by calling CcRequestCachePage.
23 *
 24 * (3) If the page is being read, or is not being completely written, and it
 25 * is not up to date, then read its data from the underlying medium. If the
 26 * read fails, call CcReleaseCachePage with VALID set to FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
 30 * (5) Release the cache page (an illustrative sketch of these steps follows below).
31 */
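/*
 * Illustrative sketch: roughly how a filesystem read path could follow steps
 * (2)-(5) above using the ReactOS-internal routines implemented in this file;
 * it assumes caching has already been initialized for the file (step (1)).
 * The helper ReadFromDisk() and the Fcb, FileObject, FileOffset, Buffer,
 * OffsetInSegment and BytesToCopy variables are hypothetical placeholders;
 * real dispatch code in the filesystem drivers differs in detail.
 *
 *   NTSTATUS Status;
 *   PVOID BaseAddress;
 *   BOOLEAN UptoDate;
 *   PCACHE_SEGMENT CacheSeg;
 *   PBCB Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
 *
 *   // (2) obtain the cache segment covering this segment-aligned offset
 *   Status = CcRosRequestCacheSegment(Bcb, FileOffset, &BaseAddress,
 *                                     &UptoDate, &CacheSeg);
 *   if (!NT_SUCCESS(Status))
 *       return Status;
 *
 *   if (!UptoDate)
 *   {
 *       // (3) the view is not valid yet: read the backing data from the medium
 *       Status = ReadFromDisk(Fcb, CacheSeg->FileOffset, BaseAddress,
 *                             Bcb->CacheSegmentSize);
 *       if (!NT_SUCCESS(Status))
 *       {
 *           // release with Valid == FALSE and fail the request
 *           CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
 *           return Status;
 *       }
 *   }
 *
 *   // (4) copy the cached data out to the caller's buffer
 *   RtlCopyMemory(Buffer, (PUCHAR)BaseAddress + OffsetInSegment, BytesToCopy);
 *
 *   // (5) release the cache segment: now valid, not dirtied, not newly mapped
 *   CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
 */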
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
 45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
 46 * within the kernel address space and allocates/deallocates space from this
 47 * block via a bitmap. If CACHE_BITMAP is used, the size of the MDL mapping
 48 * region must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
73 static ULONG CcTimeStamp;
74 static KEVENT LazyCloseThreadEvent;
75 static HANDLE LazyCloseThreadHandle;
76 static CLIENT_ID LazyCloseThreadId;
77 static volatile BOOLEAN LazyCloseThreadShouldTerminate;
78
79 #if defined(__GNUC__)
80 /* void * alloca(size_t size); */
81 #elif defined(_MSC_VER)
82 void* _alloca(size_t size);
83 #else
84 #error Unknown compiler for alloca intrinsic stack allocation "function"
85 #endif
86
87 #if defined(DBG) || defined(KDBG)
88 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
89 {
90 ++cs->ReferenceCount;
91 if ( cs->Bcb->Trace )
92 {
93 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
94 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
95 }
96 }
97 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
98 {
99 --cs->ReferenceCount;
100 if ( cs->Bcb->Trace )
101 {
102 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
103 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
104 }
105 }
106 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
107 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
108 #else
109 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
110 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
111 #endif
112
113 NTSTATUS
114 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
115
116
117 /* FUNCTIONS *****************************************************************/
118
119 VOID
120 STDCALL
121 CcRosTraceCacheMap (
122 PBCB Bcb,
123 BOOLEAN Trace )
124 {
125 #if defined(DBG) || defined(KDBG)
126 KIRQL oldirql;
127 PLIST_ENTRY current_entry;
128 PCACHE_SEGMENT current;
129
130 if ( !Bcb )
131 return;
132
133 Bcb->Trace = Trace;
134
135 if ( Trace )
136 {
137 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
138
139 KeAcquireGuardedMutex(&ViewLock);
140 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
141
142 current_entry = Bcb->BcbSegmentListHead.Flink;
143 while (current_entry != &Bcb->BcbSegmentListHead)
144 {
145 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
146 current_entry = current_entry->Flink;
147
148 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
149 current, current->ReferenceCount, current->Dirty, current->PageOut );
150 }
151 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
152 KeReleaseGuardedMutex(&ViewLock);
153 }
154 else
155 {
156 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
157 }
158
159 #else
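/* Reference the parameters to silence unused-parameter warnings in
 * non-debug builds. */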
160 Bcb = Bcb;
161 Trace = Trace;
162 #endif
163 }
164
165 NTSTATUS
166 NTAPI
167 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
168 {
169 NTSTATUS Status;
170 KIRQL oldIrql;
171
172 Status = WriteCacheSegment(CacheSegment);
173 if (NT_SUCCESS(Status))
174 {
175 KeAcquireGuardedMutex(&ViewLock);
176 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
177
178 CacheSegment->Dirty = FALSE;
179 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
180 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
181 CcRosCacheSegmentDecRefCount ( CacheSegment );
182
183 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
184 KeReleaseGuardedMutex(&ViewLock);
185 }
186
187 return(Status);
188 }
189
190 NTSTATUS
191 NTAPI
192 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
193 {
194 PLIST_ENTRY current_entry;
195 PCACHE_SEGMENT current;
196 ULONG PagesPerSegment;
197 BOOLEAN Locked;
198 NTSTATUS Status;
199 static ULONG WriteCount[4] = {0, 0, 0, 0};
200 ULONG NewTarget;
201
202 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
203
204 (*Count) = 0;
205
206 KeAcquireGuardedMutex(&ViewLock);
207
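/* Age the rolling write quota: shift out the oldest interval and, if the
 * outstanding dirty pages exceed the carried-over quota, spread the excess
 * over the next four intervals. WriteCount[0] then gives the minimum number
 * of pages this pass should try to flush. */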
208 WriteCount[0] = WriteCount[1];
209 WriteCount[1] = WriteCount[2];
210 WriteCount[2] = WriteCount[3];
211 WriteCount[3] = 0;
212
213 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
214
215 if (NewTarget < DirtyPageCount)
216 {
217 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
218 WriteCount[0] += NewTarget;
219 WriteCount[1] += NewTarget;
220 WriteCount[2] += NewTarget;
221 WriteCount[3] += NewTarget;
222 }
223
224 NewTarget = WriteCount[0];
225
226 Target = max(NewTarget, Target);
227
228 current_entry = DirtySegmentListHead.Flink;
229 if (current_entry == &DirtySegmentListHead)
230 {
231 DPRINT("No Dirty pages\n");
232 }
233
234 while (current_entry != &DirtySegmentListHead && Target > 0)
235 {
236 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
237 DirtySegmentListEntry);
238 current_entry = current_entry->Flink;
239
240 Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
241 current->Bcb->LazyWriteContext, FALSE);
242 if (!Locked)
243 {
244 continue;
245 }
246
247 Locked = ExTryToAcquirePushLockExclusive(&current->Lock);
248 if (!Locked)
249 {
250 current->Bcb->Callbacks->ReleaseFromLazyWrite(
251 current->Bcb->LazyWriteContext);
252
253 continue;
254 }
255
256 ASSERT(current->Dirty);
257 if (current->ReferenceCount > 1)
258 {
259 ExReleasePushLock(&current->Lock);
260 current->Bcb->Callbacks->ReleaseFromLazyWrite(
261 current->Bcb->LazyWriteContext);
262 continue;
263 }
264
265 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
266
267 KeReleaseGuardedMutex(&ViewLock);
268
269 Status = CcRosFlushCacheSegment(current);
270
271 ExReleasePushLock(&current->Lock);
272 current->Bcb->Callbacks->ReleaseFromLazyWrite(
273 current->Bcb->LazyWriteContext);
274
275 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
276 {
277 DPRINT1("CC: Failed to flush cache segment.\n");
278 }
279 else
280 {
281 (*Count) += PagesPerSegment;
282 Target -= PagesPerSegment;
283 }
284
285 KeAcquireGuardedMutex(&ViewLock);
286 current_entry = DirtySegmentListHead.Flink;
287 }
288
289 if (*Count < NewTarget)
290 {
291 WriteCount[1] += (NewTarget - *Count);
292 }
293
294 KeReleaseGuardedMutex(&ViewLock);
295
296 DPRINT("CcRosFlushDirtyPages() finished\n");
297 return(STATUS_SUCCESS);
298 }
299
300 NTSTATUS
301 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
302 /*
303 * FUNCTION: Try to free some memory from the file cache.
304 * ARGUMENTS:
305 * Target - The number of pages to be freed.
 306 * Priority - The priority of the free operation (currently unused).
307 * NrFreed - Points to a variable where the number of pages
308 * actually freed is returned.
309 */
310 {
311 PLIST_ENTRY current_entry;
312 PCACHE_SEGMENT current, last = NULL;
313 ULONG PagesPerSegment;
314 ULONG PagesFreed;
315 KIRQL oldIrql;
316 LIST_ENTRY FreeList;
317
318 DPRINT("CcRosTrimCache(Target %d)\n", Target);
319
320 *NrFreed = 0;
321
322 InitializeListHead(&FreeList);
323
324 KeAcquireGuardedMutex(&ViewLock);
325 current_entry = CacheSegmentLRUListHead.Flink;
326 while (current_entry != &CacheSegmentLRUListHead && Target > 0)
327 {
328 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
329 CacheSegmentLRUListEntry);
330 current_entry = current_entry->Flink;
331
332 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
333 if (current->ReferenceCount == 0)
334 {
335 RemoveEntryList(&current->BcbSegmentListEntry);
336 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
337 RemoveEntryList(&current->CacheSegmentListEntry);
338 RemoveEntryList(&current->CacheSegmentLRUListEntry);
339 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
340 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
341 PagesFreed = min(PagesPerSegment, Target);
342 Target -= PagesFreed;
343 (*NrFreed) += PagesFreed;
344 }
345 else
346 {
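/* The segment is still referenced. If it is mapped, clean, not already
 * being paged out and was not the segment we just tried, attempt to page
 * out its physical pages so they can be reclaimed, then resume the scan
 * from this LRU entry. */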
347 if (last != current && current->MappedCount > 0 && !current->Dirty && !current->PageOut)
348 {
349 ULONG i;
350 NTSTATUS Status;
351
352 CcRosCacheSegmentIncRefCount(current);
353 last = current;
354 current->PageOut = TRUE;
355 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
356 KeReleaseGuardedMutex(&ViewLock);
357 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
358 {
359 PFN_TYPE Page;
360 Page = (PFN_TYPE)(MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT);
361 Status = MmPageOutPhysicalAddress(Page);
362 if (!NT_SUCCESS(Status))
363 {
364 break;
365 }
366 }
367 KeAcquireGuardedMutex(&ViewLock);
368 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
369 CcRosCacheSegmentDecRefCount(current);
370 current->PageOut = FALSE;
371 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
372 current_entry = &current->CacheSegmentLRUListEntry;
373 continue;
374 }
375 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
376 }
377 }
378 KeReleaseGuardedMutex(&ViewLock);
379
380 while (!IsListEmpty(&FreeList))
381 {
382 current_entry = RemoveHeadList(&FreeList);
383 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
384 BcbSegmentListEntry);
385 CcRosInternalFreeCacheSegment(current);
386 }
387
388 DPRINT("CcRosTrimCache() finished\n");
389 return(STATUS_SUCCESS);
390 }
391
392 NTSTATUS
393 NTAPI
394 CcRosReleaseCacheSegment(PBCB Bcb,
395 PCACHE_SEGMENT CacheSeg,
396 BOOLEAN Valid,
397 BOOLEAN Dirty,
398 BOOLEAN Mapped)
399 {
400 BOOLEAN WasDirty = CacheSeg->Dirty;
401 KIRQL oldIrql;
402
403 ASSERT(Bcb);
404
405 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
406 Bcb, CacheSeg, Valid);
407
408 CacheSeg->Valid = Valid;
409 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
410
411 KeAcquireGuardedMutex(&ViewLock);
412 if (!WasDirty && CacheSeg->Dirty)
413 {
414 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
415 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
416 }
417 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
418 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
419
420 if (Mapped)
421 {
422 CacheSeg->MappedCount++;
423 }
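/* Drop the reference taken when the segment was obtained; keep an extra
 * reference for the first mapping and another while the segment sits on
 * the dirty list. */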
424 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
425 CcRosCacheSegmentDecRefCount(CacheSeg);
426 if (Mapped && CacheSeg->MappedCount == 1)
427 {
428 CcRosCacheSegmentIncRefCount(CacheSeg);
429 }
430 if (!WasDirty && CacheSeg->Dirty)
431 {
432 CcRosCacheSegmentIncRefCount(CacheSeg);
433 }
434 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
435 KeReleaseGuardedMutex(&ViewLock);
436 ExReleasePushLock(&CacheSeg->Lock);
437
438 return(STATUS_SUCCESS);
439 }
440
441 /* Returns with Cache Segment Lock Held! */
442 PCACHE_SEGMENT
443 NTAPI
444 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
445 {
446 PLIST_ENTRY current_entry;
447 PCACHE_SEGMENT current;
448 KIRQL oldIrql;
449
450 ASSERT(Bcb);
451
452 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
453
454 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
455 current_entry = Bcb->BcbSegmentListHead.Flink;
456 while (current_entry != &Bcb->BcbSegmentListHead)
457 {
458 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
459 BcbSegmentListEntry);
460 if (current->FileOffset <= FileOffset &&
461 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
462 {
463 CcRosCacheSegmentIncRefCount(current);
464 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
465 ExAcquirePushLockExclusive(&current->Lock);
466 return(current);
467 }
468 current_entry = current_entry->Flink;
469 }
470 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
471 return(NULL);
472 }
473
474 NTSTATUS
475 NTAPI
476 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
477 {
478 PCACHE_SEGMENT CacheSeg;
479 KIRQL oldIrql;
480
481 ASSERT(Bcb);
482
483 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
484
485 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
486 if (CacheSeg == NULL)
487 {
488 KeBugCheck(CACHE_MANAGER);
489 }
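/* If the segment just became dirty, the reference taken by the lookup is
 * kept on behalf of the dirty list; otherwise drop it again. */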
490 if (!CacheSeg->Dirty)
491 {
492 KeAcquireGuardedMutex(&ViewLock);
493 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
494 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
495 KeReleaseGuardedMutex(&ViewLock);
496 }
497 else
498 {
499 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
500 CcRosCacheSegmentDecRefCount(CacheSeg);
501 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
502 }
503
504
505 CacheSeg->Dirty = TRUE;
506 ExReleasePushLock(&CacheSeg->Lock);
507
508 return(STATUS_SUCCESS);
509 }
510
511 NTSTATUS
512 NTAPI
513 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
514 {
515 PCACHE_SEGMENT CacheSeg;
516 BOOLEAN WasDirty;
517 KIRQL oldIrql;
518
519 ASSERT(Bcb);
520
521 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
522 Bcb, FileOffset, NowDirty);
523
524 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
525 if (CacheSeg == NULL)
526 {
527 return(STATUS_UNSUCCESSFUL);
528 }
529
530 WasDirty = CacheSeg->Dirty;
531 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
532
533 CacheSeg->MappedCount--;
534
535 if (!WasDirty && NowDirty)
536 {
537 KeAcquireGuardedMutex(&ViewLock);
538 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
539 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
540 KeReleaseGuardedMutex(&ViewLock);
541 }
542
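/* Balance the references: drop the lookup reference, add one if the segment
 * just became dirty, and drop the mapping reference once the last mapping
 * is gone. */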
543 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
544 CcRosCacheSegmentDecRefCount(CacheSeg);
545 if (!WasDirty && NowDirty)
546 {
547 CcRosCacheSegmentIncRefCount(CacheSeg);
548 }
549 if (CacheSeg->MappedCount == 0)
550 {
551 CcRosCacheSegmentDecRefCount(CacheSeg);
552 }
553 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
554
555 ExReleasePushLock(&CacheSeg->Lock);
556 return(STATUS_SUCCESS);
557 }
558
 559 static NTSTATUS
560 CcRosCreateCacheSegment(PBCB Bcb,
561 ULONG FileOffset,
562 PCACHE_SEGMENT* CacheSeg)
563 {
564 PCACHE_SEGMENT current;
565 PCACHE_SEGMENT previous;
566 PLIST_ENTRY current_entry;
567 NTSTATUS Status;
568 KIRQL oldIrql;
569 #ifdef CACHE_BITMAP
570 ULONG StartingOffset;
571 #endif
572 PHYSICAL_ADDRESS BoundaryAddressMultiple;
573
574 ASSERT(Bcb);
575
576 DPRINT("CcRosCreateCacheSegment()\n");
577
578 BoundaryAddressMultiple.QuadPart = 0;
579 if (FileOffset >= Bcb->FileSize.u.LowPart)
580 {
 581 *CacheSeg = NULL;
582 return STATUS_INVALID_PARAMETER;
583 }
584
585 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
586 current->Valid = FALSE;
587 current->Dirty = FALSE;
588 current->PageOut = FALSE;
589 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
590 current->Bcb = Bcb;
591 #if defined(DBG) || defined(KDBG)
592 if ( Bcb->Trace )
593 {
594 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
595 }
596 #endif
597 current->MappedCount = 0;
598 current->DirtySegmentListEntry.Flink = NULL;
599 current->DirtySegmentListEntry.Blink = NULL;
600 current->ReferenceCount = 1;
601 ExInitializePushLock((PULONG_PTR)&current->Lock);
602 ExAcquirePushLockExclusive(&current->Lock);
603 KeAcquireGuardedMutex(&ViewLock);
604
605 *CacheSeg = current;
 606 /* There is a window between the call to CcRosLookupCacheSegment
 607 * and CcRosCreateCacheSegment. We must check whether a segment for
 608 * this file offset already exists. If one does, we release our
 609 * newly created segment and return the existing one.
610 */
611 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
612 current_entry = Bcb->BcbSegmentListHead.Flink;
613 previous = NULL;
614 while (current_entry != &Bcb->BcbSegmentListHead)
615 {
616 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
617 BcbSegmentListEntry);
618 if (current->FileOffset <= FileOffset &&
619 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
620 {
621 CcRosCacheSegmentIncRefCount(current);
622 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
623 #if defined(DBG) || defined(KDBG)
624 if ( Bcb->Trace )
625 {
626 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
627 Bcb,
628 (*CacheSeg),
629 current );
630 }
631 #endif
632 ExReleasePushLock(&(*CacheSeg)->Lock);
633 KeReleaseGuardedMutex(&ViewLock);
634 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
635 *CacheSeg = current;
636 ExAcquirePushLockExclusive(&current->Lock);
637 return STATUS_SUCCESS;
638 }
639 if (current->FileOffset < FileOffset)
640 {
641 if (previous == NULL)
642 {
643 previous = current;
644 }
645 else
646 {
647 if (previous->FileOffset < current->FileOffset)
648 {
649 previous = current;
650 }
651 }
652 }
653 current_entry = current_entry->Flink;
654 }
655 /* There was no existing segment. */
656 current = *CacheSeg;
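/* Insert after 'previous' (the existing segment with the highest file offset
 * below the new one) so the per-BCB segment list stays sorted by offset. */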
657 if (previous)
658 {
659 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
660 }
661 else
662 {
663 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
664 }
665 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
666 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
667 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
668 KeReleaseGuardedMutex(&ViewLock);
669 #ifdef CACHE_BITMAP
670 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
671
672 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
673
674 if (StartingOffset == 0xffffffff)
675 {
676 DPRINT1("Out of CacheSeg mapping space\n");
677 KeBugCheck(CACHE_MANAGER);
678 }
679
680 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
681
682 if (CiCacheSegMappingRegionHint == StartingOffset)
683 {
684 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
685 }
686
687 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
688 #else
689 MmLockAddressSpace(MmGetKernelAddressSpace());
690 current->BaseAddress = NULL;
691 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
692 MEMORY_AREA_CACHE_SEGMENT,
693 &current->BaseAddress,
694 Bcb->CacheSegmentSize,
695 PAGE_READWRITE,
696 (PMEMORY_AREA*)&current->MemoryArea,
697 FALSE,
698 0,
699 BoundaryAddressMultiple);
700 MmUnlockAddressSpace(MmGetKernelAddressSpace());
701 if (!NT_SUCCESS(Status))
702 {
703 KeBugCheck(CACHE_MANAGER);
704 }
705 #endif
706
707 /* Create a virtual mapping for this memory area */
708 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
709 MC_CACHE, PAGE_READWRITE);
710
711 return(STATUS_SUCCESS);
712 }
713
714 NTSTATUS
715 NTAPI
716 CcRosGetCacheSegmentChain(PBCB Bcb,
717 ULONG FileOffset,
718 ULONG Length,
719 PCACHE_SEGMENT* CacheSeg)
720 {
721 PCACHE_SEGMENT current;
722 ULONG i;
723 PCACHE_SEGMENT* CacheSegList;
724 PCACHE_SEGMENT Previous = NULL;
725
726 ASSERT(Bcb);
727
728 DPRINT("CcRosGetCacheSegmentChain()\n");
729
730 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
731
732 #if defined(__GNUC__)
733 CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
734 (Length / Bcb->CacheSegmentSize));
735 #elif defined(_MSC_VER)
736 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
737 (Length / Bcb->CacheSegmentSize));
738 #else
739 #error Unknown compiler for alloca intrinsic stack allocation "function"
740 #endif
741
742 /*
743 * Look for a cache segment already mapping the same data.
744 */
745 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
746 {
747 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
748 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
749 if (current != NULL)
750 {
751 CacheSegList[i] = current;
752 }
753 else
754 {
755 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
756 CacheSegList[i] = current;
757 }
758 }
759
760 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
761 {
762 if (i == 0)
763 {
764 *CacheSeg = CacheSegList[i];
765 Previous = CacheSegList[i];
766 }
767 else
768 {
769 Previous->NextInChain = CacheSegList[i];
770 Previous = CacheSegList[i];
771 }
772 }
773 ASSERT(Previous);
774 Previous->NextInChain = NULL;
775
776 return(STATUS_SUCCESS);
777 }
778
779 NTSTATUS
780 NTAPI
781 CcRosGetCacheSegment(PBCB Bcb,
782 ULONG FileOffset,
783 PULONG BaseOffset,
784 PVOID* BaseAddress,
785 PBOOLEAN UptoDate,
786 PCACHE_SEGMENT* CacheSeg)
787 {
788 PCACHE_SEGMENT current;
789 NTSTATUS Status;
790
791 ASSERT(Bcb);
792
793 DPRINT("CcRosGetCacheSegment()\n");
794
795 /*
796 * Look for a cache segment already mapping the same data.
797 */
798 current = CcRosLookupCacheSegment(Bcb, FileOffset);
799 if (current == NULL)
800 {
801 /*
802 * Otherwise create a new segment.
803 */
804 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
805 if (!NT_SUCCESS(Status))
806 {
807 return Status;
808 }
809 }
810 /*
811 * Return information about the segment to the caller.
812 */
813 *UptoDate = current->Valid;
814 *BaseAddress = current->BaseAddress;
815 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
816 *CacheSeg = current;
817 *BaseOffset = current->FileOffset;
818 return(STATUS_SUCCESS);
819 }
820
821 NTSTATUS STDCALL
822 CcRosRequestCacheSegment(PBCB Bcb,
823 ULONG FileOffset,
824 PVOID* BaseAddress,
825 PBOOLEAN UptoDate,
826 PCACHE_SEGMENT* CacheSeg)
827 /*
828 * FUNCTION: Request a page mapping for a BCB
829 */
830 {
831 ULONG BaseOffset;
832
833 ASSERT(Bcb);
834
835 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
836 {
837 DPRINT1("Bad fileoffset %x should be multiple of %x",
838 FileOffset, Bcb->CacheSegmentSize);
839 KeBugCheck(CACHE_MANAGER);
840 }
841
842 return(CcRosGetCacheSegment(Bcb,
843 FileOffset,
844 &BaseOffset,
845 BaseAddress,
846 UptoDate,
847 CacheSeg));
848 }
849 #ifdef CACHE_BITMAP
850 #else
851 static VOID
852 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
853 PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
854 {
855 ASSERT(SwapEntry == 0);
856 if (Page != 0)
857 {
858 MmReleasePageMemoryConsumer(MC_CACHE, Page);
859 }
860 }
861 #endif
862 NTSTATUS
863 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
864 /*
865 * FUNCTION: Releases a cache segment associated with a BCB
866 */
867 {
868 #ifdef CACHE_BITMAP
869 ULONG i;
870 ULONG RegionSize;
871 ULONG Base;
872 PFN_TYPE Page;
873 KIRQL oldIrql;
874 #endif
875 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
876 #if defined(DBG) || defined(KDBG)
877 if ( CacheSeg->Bcb->Trace )
878 {
879 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
880 }
881 #endif
882 #ifdef CACHE_BITMAP
883 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
884
885 /* Unmap all the pages. */
886 for (i = 0; i < RegionSize; i++)
887 {
888 MmDeleteVirtualMapping(NULL,
889 CacheSeg->BaseAddress + (i * PAGE_SIZE),
890 FALSE,
891 NULL,
892 &Page);
893 MmReleasePageMemoryConsumer(MC_CACHE, Page);
894 }
895
896 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
897 /* Deallocate all the pages used. */
898 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
899
900 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
901
902 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
903
904 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
905 #else
906 MmLockAddressSpace(MmGetKernelAddressSpace());
907 MmFreeMemoryArea(MmGetKernelAddressSpace(),
908 CacheSeg->MemoryArea,
909 CcFreeCachePage,
910 NULL);
911 MmUnlockAddressSpace(MmGetKernelAddressSpace());
912 #endif
913 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
914 return(STATUS_SUCCESS);
915 }
916
917 NTSTATUS
918 NTAPI
919 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
920 {
921 NTSTATUS Status;
922 KIRQL oldIrql;
923
924 ASSERT(Bcb);
925
926 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
927 Bcb, CacheSeg);
928
929 KeAcquireGuardedMutex(&ViewLock);
930 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
931 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
932 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
933 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
934 if (CacheSeg->Dirty)
935 {
936 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
937 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
938
939 }
940 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
941 KeReleaseGuardedMutex(&ViewLock);
942
943 Status = CcRosInternalFreeCacheSegment(CacheSeg);
944 return(Status);
945 }
946
947 /*
948 * @implemented
949 */
950 VOID STDCALL
951 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
952 IN PLARGE_INTEGER FileOffset OPTIONAL,
953 IN ULONG Length,
954 OUT PIO_STATUS_BLOCK IoStatus)
955 {
956 PBCB Bcb;
957 LARGE_INTEGER Offset;
958 PCACHE_SEGMENT current;
959 NTSTATUS Status;
960 KIRQL oldIrql;
961
962 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
963 SectionObjectPointers, FileOffset, Length, IoStatus);
964
965 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
966 {
967 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
968 ASSERT(Bcb);
969 if (FileOffset)
970 {
971 Offset = *FileOffset;
972 }
973 else
974 {
975 Offset.QuadPart = (LONGLONG)0;
976 Length = Bcb->FileSize.u.LowPart;
977 }
978
979 if (IoStatus)
980 {
981 IoStatus->Status = STATUS_SUCCESS;
982 IoStatus->Information = 0;
983 }
984
985 while (Length > 0)
986 {
987 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
988 if (current != NULL)
989 {
990 if (current->Dirty)
991 {
992 Status = CcRosFlushCacheSegment(current);
993 if (!NT_SUCCESS(Status) && IoStatus != NULL)
994 {
995 IoStatus->Status = Status;
996 }
997 }
998 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
999 ExReleasePushLock(&current->Lock);
1000 CcRosCacheSegmentDecRefCount(current);
1001 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1002 }
1003
1004 Offset.QuadPart += Bcb->CacheSegmentSize;
1005 if (Length > Bcb->CacheSegmentSize)
1006 {
1007 Length -= Bcb->CacheSegmentSize;
1008 }
1009 else
1010 {
1011 Length = 0;
1012 }
1013 }
1014 }
1015 else
1016 {
1017 if (IoStatus)
1018 {
1019 IoStatus->Status = STATUS_INVALID_PARAMETER;
1020 }
1021 }
1022 }
1023
1024 NTSTATUS
1025 NTAPI
1026 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1027 /*
1028 * FUNCTION: Releases the BCB associated with a file object
1029 */
1030 {
1031 PLIST_ENTRY current_entry;
1032 PCACHE_SEGMENT current;
1033 NTSTATUS Status;
1034 LIST_ENTRY FreeList;
1035 KIRQL oldIrql;
1036
1037 ASSERT(Bcb);
1038
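/* Note: this routine is entered with ViewLock held; the lock is dropped
 * around the flush and the final cleanup and is held again on return. */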
1039 Bcb->RefCount++;
1040 KeReleaseGuardedMutex(&ViewLock);
1041
1042 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1043
1044 KeAcquireGuardedMutex(&ViewLock);
1045 Bcb->RefCount--;
1046 if (Bcb->RefCount == 0)
1047 {
1048 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1049 {
1050 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1051 Bcb->BcbRemoveListEntry.Flink = NULL;
1052 }
1053
1054 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1055
1056 /*
1057 * Release all cache segments.
1058 */
1059 InitializeListHead(&FreeList);
1060 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1061 current_entry = Bcb->BcbSegmentListHead.Flink;
1062 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1063 {
1064 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1065 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1066 RemoveEntryList(&current->CacheSegmentListEntry);
1067 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1068 if (current->Dirty)
1069 {
1070 RemoveEntryList(&current->DirtySegmentListEntry);
1071 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1072 DPRINT1("Freeing dirty segment\n");
1073 }
1074 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1075 }
1076 #if defined(DBG) || defined(KDBG)
1077 Bcb->Trace = FALSE;
1078 #endif
1079 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1080
1081 KeReleaseGuardedMutex(&ViewLock);
1082 ObDereferenceObject (Bcb->FileObject);
1083
1084 while (!IsListEmpty(&FreeList))
1085 {
1086 current_entry = RemoveTailList(&FreeList);
1087 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1088 Status = CcRosInternalFreeCacheSegment(current);
1089 }
1090 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1091 KeAcquireGuardedMutex(&ViewLock);
1092 }
1093 return(STATUS_SUCCESS);
1094 }
1095
1096 VOID
1097 NTAPI
1098 CcRosReferenceCache(PFILE_OBJECT FileObject)
1099 {
1100 PBCB Bcb;
1101 KeAcquireGuardedMutex(&ViewLock);
1102 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1103 ASSERT(Bcb);
1104 if (Bcb->RefCount == 0)
1105 {
1106 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1107 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1108 Bcb->BcbRemoveListEntry.Flink = NULL;
1109
1110 }
1111 else
1112 {
1113 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1114 }
1115 Bcb->RefCount++;
1116 KeReleaseGuardedMutex(&ViewLock);
1117 }
1118
1119 VOID
1120 NTAPI
1121 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1122 {
1123 PBCB Bcb;
1124 DPRINT("CcRosSetRemoveOnClose()\n");
1125 KeAcquireGuardedMutex(&ViewLock);
1126 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1127 if (Bcb)
1128 {
1129 Bcb->RemoveOnClose = TRUE;
1130 if (Bcb->RefCount == 0)
1131 {
1132 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1133 }
1134 }
1135 KeReleaseGuardedMutex(&ViewLock);
1136 }
1137
1138
1139 VOID
1140 NTAPI
1141 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1142 {
1143 PBCB Bcb;
1144 KeAcquireGuardedMutex(&ViewLock);
1145 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1146 ASSERT(Bcb);
1147 if (Bcb->RefCount > 0)
1148 {
1149 Bcb->RefCount--;
1150 if (Bcb->RefCount == 0)
1151 {
1152 MmFreeSectionSegments(Bcb->FileObject);
1153 if (Bcb->RemoveOnClose)
1154 {
1155 CcRosDeleteFileCache(FileObject, Bcb);
1156 }
1157 else
1158 {
1159 Bcb->TimeStamp = CcTimeStamp;
1160 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1161 }
1162 }
1163 }
1164 KeReleaseGuardedMutex(&ViewLock);
1165 }
1166
1167 NTSTATUS STDCALL
1168 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1169 /*
1170 * FUNCTION: Called by the file system when a handle to a file object
1171 * has been closed.
1172 */
1173 {
1174 PBCB Bcb;
1175
1176 KeAcquireGuardedMutex(&ViewLock);
1177
1178 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1179 {
1180 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1181 if (FileObject->PrivateCacheMap != NULL)
1182 {
1183 FileObject->PrivateCacheMap = NULL;
1184 if (Bcb->RefCount > 0)
1185 {
1186 Bcb->RefCount--;
1187 if (Bcb->RefCount == 0)
1188 {
1189 MmFreeSectionSegments(Bcb->FileObject);
1190 if (Bcb->RemoveOnClose)
1191 {
1192 CcRosDeleteFileCache(FileObject, Bcb);
1193 }
1194 else
1195 {
1196 Bcb->TimeStamp = CcTimeStamp;
1197 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1198 }
1199 }
1200 }
1201 }
1202 }
1203 KeReleaseGuardedMutex(&ViewLock);
1204 return(STATUS_SUCCESS);
1205 }
1206
1207 NTSTATUS
1208 NTAPI
1209 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1210 {
1211 PBCB Bcb;
1212 NTSTATUS Status;
1213
1214 KeAcquireGuardedMutex(&ViewLock);
1215
1216 ASSERT(FileObject->SectionObjectPointer);
1217 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1218 if (Bcb == NULL)
1219 {
1220 Status = STATUS_UNSUCCESSFUL;
1221 }
1222 else
1223 {
1224 if (FileObject->PrivateCacheMap == NULL)
1225 {
1226 FileObject->PrivateCacheMap = Bcb;
1227 Bcb->RefCount++;
1228 }
1229 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1230 {
1231 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1232 Bcb->BcbRemoveListEntry.Flink = NULL;
1233 }
1234 Status = STATUS_SUCCESS;
1235 }
1236 KeReleaseGuardedMutex(&ViewLock);
1237
1238 return Status;
1239 }
1240
1241
1242 NTSTATUS NTAPI
1243 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1244 ULONG CacheSegmentSize,
1245 PCACHE_MANAGER_CALLBACKS CallBacks,
1246 PVOID LazyWriterContext)
1247 /*
1248 * FUNCTION: Initializes a BCB for a file object
1249 */
1250 {
1251 PBCB Bcb;
1252
1253 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1254 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1255 FileObject, Bcb, CacheSegmentSize);
1256
1257 KeAcquireGuardedMutex(&ViewLock);
1258 if (Bcb == NULL)
1259 {
1260 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1261 if (Bcb == NULL)
1262 {
1263 KeReleaseGuardedMutex(&ViewLock);
1264 return(STATUS_UNSUCCESSFUL);
1265 }
1266 memset(Bcb, 0, sizeof(BCB));
1267 ObReferenceObjectByPointer(FileObject,
1268 FILE_ALL_ACCESS,
1269 NULL,
1270 KernelMode);
1271 Bcb->FileObject = FileObject;
1272 Bcb->CacheSegmentSize = CacheSegmentSize;
1273 Bcb->Callbacks = CallBacks;
1274 Bcb->LazyWriteContext = LazyWriterContext;
1275 if (FileObject->FsContext)
1276 {
1277 Bcb->AllocationSize =
1278 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1279 Bcb->FileSize =
1280 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1281 }
1282 KeInitializeSpinLock(&Bcb->BcbLock);
1283 InitializeListHead(&Bcb->BcbSegmentListHead);
1284 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1285 }
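/* Each file object caching the file references the BCB through its
 * PrivateCacheMap pointer and takes one reference on the BCB. */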
1286 if (FileObject->PrivateCacheMap == NULL)
1287 {
1288 FileObject->PrivateCacheMap = Bcb;
1289 Bcb->RefCount++;
1290 }
1291 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1292 {
1293 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1294 Bcb->BcbRemoveListEntry.Flink = NULL;
1295 }
1296 KeReleaseGuardedMutex(&ViewLock);
1297
1298 return(STATUS_SUCCESS);
1299 }
1300
1301 /*
1302 * @implemented
1303 */
1304 PFILE_OBJECT STDCALL
1305 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1306 {
1307 PBCB Bcb;
1308 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1309 {
1310 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1311 ASSERT(Bcb);
1312 return Bcb->FileObject;
1313 }
1314 return NULL;
1315 }
1316
1317 VOID STDCALL
1318 CmLazyCloseThreadMain(PVOID Ignored)
1319 {
1320 LARGE_INTEGER Timeout;
1321 PLIST_ENTRY current_entry;
1322 PBCB current;
1323 ULONG RemoveTimeStamp;
1324 NTSTATUS Status;
1325
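/* Wake up roughly every 10 seconds and advance CcTimeStamp; BCBs that have
 * sat on the closed list for about 30 ticks (~5 minutes) are torn down. */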
1326 KeQuerySystemTime (&Timeout);
1327
1328 while (1)
1329 {
1330 Timeout.QuadPart += (LONGLONG)100000000; // 10sec
1331 Status = KeWaitForSingleObject(&LazyCloseThreadEvent,
1332 0,
1333 KernelMode,
1334 FALSE,
1335 &Timeout);
1336
1337 DPRINT("LazyCloseThreadMain %d\n", CcTimeStamp);
1338
1339 if (!NT_SUCCESS(Status))
1340 {
1341 DbgPrint("LazyCloseThread: Wait failed\n");
1342 KeBugCheck(CACHE_MANAGER);
1343 break;
1344 }
1345 if (LazyCloseThreadShouldTerminate)
1346 {
1347 DbgPrint("LazyCloseThread: Terminating\n");
1348 break;
1349 }
1350
1351 KeAcquireGuardedMutex(&ViewLock);
1352 CcTimeStamp++;
1353 if (CcTimeStamp >= 30)
1354 {
1355 RemoveTimeStamp = CcTimeStamp - 30; /* 5min = 10sec * 30 */
1356 while (!IsListEmpty(&ClosedListHead))
1357 {
1358 current_entry = ClosedListHead.Blink;
1359 current = CONTAINING_RECORD(current_entry, BCB, BcbRemoveListEntry);
1360 if (current->TimeStamp >= RemoveTimeStamp)
1361 {
1362 break;
1363 }
1364 CcRosDeleteFileCache(current->FileObject, current);
1365 }
1366 }
1367 KeReleaseGuardedMutex(&ViewLock);
1368 }
1369 }
1370
1371 VOID
1372 INIT_FUNCTION
1373 NTAPI
1374 CcInitView(VOID)
1375 {
1376 #ifdef CACHE_BITMAP
1377 PMEMORY_AREA marea;
1378 PVOID Buffer;
1379 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1380 #endif
1381 NTSTATUS Status;
1382 KPRIORITY Priority;
1383
1384 DPRINT("CcInitView()\n");
1385 #ifdef CACHE_BITMAP
1386 BoundaryAddressMultiple.QuadPart = 0;
1387 CiCacheSegMappingRegionHint = 0;
1388 CiCacheSegMappingRegionBase = NULL;
1389
1390 MmLockAddressSpace(MmGetKernelAddressSpace());
1391
1392 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1393 MEMORY_AREA_CACHE_SEGMENT,
1394 &CiCacheSegMappingRegionBase,
1395 CI_CACHESEG_MAPPING_REGION_SIZE,
1396 PAGE_READWRITE,
1397 &marea,
1398 FALSE,
1399 0,
1400 BoundaryAddressMultiple);
1401 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1402 if (!NT_SUCCESS(Status))
1403 {
1404 KeBugCheck(CACHE_MANAGER);
1405 }
1406
1407 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1408
1409 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1410 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1411
1412 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1413 #endif
1414 InitializeListHead(&CacheSegmentListHead);
1415 InitializeListHead(&DirtySegmentListHead);
1416 InitializeListHead(&CacheSegmentLRUListHead);
1417 InitializeListHead(&ClosedListHead);
1418 KeInitializeGuardedMutex(&ViewLock);
1419 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1420 NULL,
1421 NULL,
1422 0,
1423 sizeof(INTERNAL_BCB),
1424 TAG_IBCB,
1425 20);
1426 ExInitializeNPagedLookasideList (&BcbLookasideList,
1427 NULL,
1428 NULL,
1429 0,
1430 sizeof(BCB),
1431 TAG_BCB,
1432 20);
1433 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1434 NULL,
1435 NULL,
1436 0,
1437 sizeof(CACHE_SEGMENT),
1438 TAG_CSEG,
1439 20);
1440
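/* Register CcRosTrimCache as the trim routine for the cache memory consumer,
 * so the memory balancer can reclaim cache pages when memory gets tight. */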
1441 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1442
1443 CcInitCacheZeroPage();
1444
1445 CcTimeStamp = 0;
1446 LazyCloseThreadShouldTerminate = FALSE;
1447 KeInitializeEvent (&LazyCloseThreadEvent, SynchronizationEvent, FALSE);
1448 Status = PsCreateSystemThread(&LazyCloseThreadHandle,
1449 THREAD_ALL_ACCESS,
1450 NULL,
1451 NULL,
1452 &LazyCloseThreadId,
1453 (PKSTART_ROUTINE)CmLazyCloseThreadMain,
1454 NULL);
1455 if (NT_SUCCESS(Status))
1456 {
1457 Priority = LOW_REALTIME_PRIORITY;
1458 NtSetInformationThread(LazyCloseThreadHandle,
1459 ThreadPriority,
1460 &Priority,
1461 sizeof(Priority));
1462 }
1463
1464 }
1465
1466 /* EOF */
1467
1468
1469
1470
1471
1472
1473
1474