Fixed CcTryToAcquireBrokenMutex.
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/cc/view.c
6 * PURPOSE: Cache manager
7 *
8 * PROGRAMMERS: David Welch (welch@mcmail.com)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initiated, then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written, obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If the page is being read, or is not being completely written, and it
26 * is not up to date, then read its data from the underlying medium. If the
27 * read fails, call CcReleaseCachePage with VALID set to FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
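/*
 * A minimal sketch of one iteration of steps (2)-(5) above, for a read that
 * fits in a single region, expressed with the CcRos* helpers implemented later
 * in this file. The Fcb->Bcb field, the ReadFromDisk() helper and the
 * Buffer/ReadOffset/Length variables are hypothetical and only illustrate the
 * call sequence:
 *
 *    PCACHE_SEGMENT CacheSeg;
 *    PVOID BaseAddress;
 *    BOOLEAN UptoDate;
 *    NTSTATUS Status;
 *
 *    Status = CcRosRequestCacheSegment(Fcb->Bcb,
 *                                      ROUND_DOWN(ReadOffset, Fcb->Bcb->CacheSegmentSize),
 *                                      &BaseAddress, &UptoDate, &CacheSeg);
 *    if (!NT_SUCCESS(Status))
 *    {
 *       return Status;
 *    }
 *    if (!UptoDate)
 *    {
 *       Status = ReadFromDisk(Fcb, CacheSeg->FileOffset, BaseAddress);
 *       if (!NT_SUCCESS(Status))
 *       {
 *          CcRosReleaseCacheSegment(Fcb->Bcb, CacheSeg, FALSE, FALSE, FALSE);
 *          return Status;
 *       }
 *    }
 *    RtlCopyMemory(Buffer,
 *                  (PUCHAR)BaseAddress + (ReadOffset % Fcb->Bcb->CacheSegmentSize),
 *                  Length);
 *    CcRosReleaseCacheSegment(Fcb->Bcb, CacheSeg, TRUE, FALSE, FALSE);
 */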
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <internal/debug.h>
38
39 /* GLOBALS *******************************************************************/
40
41 /*
42 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
43 * within the kernel address space and allocates/deallocates space from this block
44 * by means of a bitmap. If CACHE_BITMAP is used, the size of the MDL mapping region
45 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
46 */
47 //#define CACHE_BITMAP
48
49 static LIST_ENTRY DirtySegmentListHead;
50 static LIST_ENTRY CacheSegmentListHead;
51 static LIST_ENTRY CacheSegmentLRUListHead;
52 static LIST_ENTRY ClosedListHead;
53 ULONG DirtyPageCount=0;
54
55 FAST_MUTEX ViewLock;
56
57 #ifdef CACHE_BITMAP
58 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
59
60 static PVOID CiCacheSegMappingRegionBase = NULL;
61 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
62 static ULONG CiCacheSegMappingRegionHint;
63 static KSPIN_LOCK CiCacheSegMappingRegionLock;
64 #endif
65
66 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
67 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
68 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
69
70 static ULONG CcTimeStamp;
71 static KEVENT LazyCloseThreadEvent;
72 static HANDLE LazyCloseThreadHandle;
73 static CLIENT_ID LazyCloseThreadId;
74 static volatile BOOLEAN LazyCloseThreadShouldTerminate;
75
76 #if defined(__GNUC__)
77 /* void * alloca(size_t size); */
78 #elif defined(_MSC_VER)
79 void* _alloca(size_t size);
80 #else
81 #error Unknown compiler for alloca intrinsic stack allocation "function"
82 #endif
83
84 #if defined(DBG) || defined(KDBG)
85 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
86 {
87 ++cs->ReferenceCount;
88 if ( cs->Bcb->Trace )
89 {
90 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
91 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
92 }
93 }
94 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
95 {
96 --cs->ReferenceCount;
97 if ( cs->Bcb->Trace )
98 {
99 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
100 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
101 }
102 }
103 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
104 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
105 #else
106 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
107 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
108 #endif
109
110 NTSTATUS
111 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
112
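/*
 * Try-acquire for a fast mutex that never blocks: enter a critical region and
 * attempt to take the mutex by an interlocked exchange of Count from 1 to 0.
 * On success the current thread is recorded as the owner and TRUE is returned
 * with the critical region still held; on failure the critical region is left
 * again and FALSE is returned, so the caller holds nothing.
 */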
113 BOOLEAN
114 FASTCALL
115 CcTryToAcquireBrokenMutex(PFAST_MUTEX FastMutex)
116 {
117 KeEnterCriticalRegion();
118 if (InterlockedCompareExchange(&FastMutex->Count, 0, 1) == 1)
119 {
120 FastMutex->Owner = KeGetCurrentThread();
121 return(TRUE);
122 }
123 else
124 {
125 KeLeaveCriticalRegion();
126 return(FALSE);
127 }
128 }
129
130 /* FUNCTIONS *****************************************************************/
131
132 VOID
133 STDCALL
134 CcRosTraceCacheMap (
135 PBCB Bcb,
136 BOOLEAN Trace )
137 {
138 #if defined(DBG) || defined(KDBG)
139 KIRQL oldirql;
140 PLIST_ENTRY current_entry;
141 PCACHE_SEGMENT current;
142
143 if ( !Bcb )
144 return;
145
146 Bcb->Trace = Trace;
147
148 if ( Trace )
149 {
150 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
151
152 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
153 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
154
155 current_entry = Bcb->BcbSegmentListHead.Flink;
156 while (current_entry != &Bcb->BcbSegmentListHead)
157 {
158 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
159 current_entry = current_entry->Flink;
160
161 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
162 current, current->ReferenceCount, current->Dirty, current->PageOut );
163 }
164 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
165 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
166 }
167 else
168 {
169 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
170 }
171
172 #else
173 Bcb = Bcb;
174 Trace = Trace;
175 #endif
176 }
177
178 NTSTATUS
179 NTAPI
180 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
181 {
182 NTSTATUS Status;
183 KIRQL oldIrql;
184 Status = WriteCacheSegment(CacheSegment);
185 if (NT_SUCCESS(Status))
186 {
187 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
188 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
189 CacheSegment->Dirty = FALSE;
190 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
191 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
192 CcRosCacheSegmentDecRefCount ( CacheSegment );
193 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
194 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
195 }
196 return(Status);
197 }
198
199 NTSTATUS
200 NTAPI
201 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
202 {
203 PLIST_ENTRY current_entry;
204 PCACHE_SEGMENT current;
205 ULONG PagesPerSegment;
206 BOOLEAN Locked;
207 NTSTATUS Status;
208 static ULONG WriteCount[4] = {0, 0, 0, 0};
209 ULONG NewTarget;
210
211 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
212
213 (*Count) = 0;
214
215 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
216
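/*
 * Age the write quota history: each WriteCount slot holds the number of dirty
 * pages scheduled for one of the next passes. Shift the slots, spread any
 * dirty pages not yet accounted for evenly over all four slots, and use the
 * head slot as the minimum flush target for this pass.
 */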
217 WriteCount[0] = WriteCount[1];
218 WriteCount[1] = WriteCount[2];
219 WriteCount[2] = WriteCount[3];
220 WriteCount[3] = 0;
221
222 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
223
224 if (NewTarget < DirtyPageCount)
225 {
226 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
227 WriteCount[0] += NewTarget;
228 WriteCount[1] += NewTarget;
229 WriteCount[2] += NewTarget;
230 WriteCount[3] += NewTarget;
231 }
232
233 NewTarget = WriteCount[0];
234
235 Target = max(NewTarget, Target);
236
237 current_entry = DirtySegmentListHead.Flink;
238 if (current_entry == &DirtySegmentListHead)
239 {
240 DPRINT("No Dirty pages\n");
241 }
242 while (current_entry != &DirtySegmentListHead && Target > 0)
243 {
244 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
245 DirtySegmentListEntry);
246 current_entry = current_entry->Flink;
247 Locked = CcTryToAcquireBrokenMutex(&current->Lock);
248 if (!Locked)
249 {
250 continue;
251 }
252 ASSERT(current->Dirty);
253 if (current->ReferenceCount > 1)
254 {
255 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
256 continue;
257 }
258 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
259 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
260 Status = CcRosFlushCacheSegment(current);
261 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
262 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
263 {
264 DPRINT1("CC: Failed to flush cache segment.\n");
265 }
266 else
267 {
268 (*Count) += PagesPerSegment;
269 Target -= PagesPerSegment;
270 }
271 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
272 current_entry = DirtySegmentListHead.Flink;
273 }
274 if (*Count < NewTarget)
275 {
276 WriteCount[1] += (NewTarget - *Count);
277 }
278 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
279 DPRINT("CcRosFlushDirtyPages() finished\n");
280
281 return(STATUS_SUCCESS);
282 }
283
284 NTSTATUS
285 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
286 /*
287 * FUNCTION: Try to free some memory from the file cache.
288 * ARGUMENTS:
289 * Target - The number of pages to be freed.
290 * Priority - The priority of the free operation (currently unused).
291 * NrFreed - Points to a variable where the number of pages
292 * actually freed is returned.
293 */
294 {
295 PLIST_ENTRY current_entry;
296 PCACHE_SEGMENT current, last = NULL;
297 ULONG PagesPerSegment;
298 ULONG PagesFreed;
299 KIRQL oldIrql;
300 LIST_ENTRY FreeList;
301
302 DPRINT("CcRosTrimCache(Target %d)\n", Target);
303
304 *NrFreed = 0;
305
306 InitializeListHead(&FreeList);
307
308 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
309 current_entry = CacheSegmentLRUListHead.Flink;
310 while (current_entry != &CacheSegmentLRUListHead && Target > 0)
311 {
312 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
313 CacheSegmentLRUListEntry);
314 current_entry = current_entry->Flink;
315
316 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
317 if (current->ReferenceCount == 0)
318 {
319 RemoveEntryList(&current->BcbSegmentListEntry);
320 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
321 RemoveEntryList(&current->CacheSegmentListEntry);
322 RemoveEntryList(&current->CacheSegmentLRUListEntry);
323 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
324 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
325 PagesFreed = min(PagesPerSegment, Target);
326 Target -= PagesFreed;
327 (*NrFreed) += PagesFreed;
328 }
329 else
330 {
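/*
 * The segment is still referenced. If it is mapped somewhere, is clean and is
 * not already being paged out, try to page out its physical pages so the
 * memory can be reclaimed and the segment freed on a later pass.
 */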
331 if (last != current && current->MappedCount > 0 && !current->Dirty && !current->PageOut)
332 {
333 ULONG i;
334 NTSTATUS Status;
335
336 CcRosCacheSegmentIncRefCount(current);
337 last = current;
338 current->PageOut = TRUE;
339 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
340 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
341 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
342 {
343 PFN_TYPE Page;
344 Page = MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT;
345 Status = MmPageOutPhysicalAddress(Page);
346 if (!NT_SUCCESS(Status))
347 {
348 break;
349 }
350 }
351 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
352 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
353 CcRosCacheSegmentDecRefCount(current);
354 current->PageOut = FALSE;
355 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
356 current_entry = &current->CacheSegmentLRUListEntry;
357 continue;
358 }
359 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
360 }
361 }
362 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
363
364 while (!IsListEmpty(&FreeList))
365 {
366 current_entry = RemoveHeadList(&FreeList);
367 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
368 BcbSegmentListEntry);
369 CcRosInternalFreeCacheSegment(current);
370 }
371
372 DPRINT("CcRosTrimCache() finished\n");
373 return(STATUS_SUCCESS);
374 }
375
376 NTSTATUS
377 NTAPI
378 CcRosReleaseCacheSegment(PBCB Bcb,
379 PCACHE_SEGMENT CacheSeg,
380 BOOLEAN Valid,
381 BOOLEAN Dirty,
382 BOOLEAN Mapped)
383 {
384 BOOLEAN WasDirty = CacheSeg->Dirty;
385 KIRQL oldIrql;
386
387 ASSERT(Bcb);
388
389 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
390 Bcb, CacheSeg, Valid);
391
392 CacheSeg->Valid = Valid;
393 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
394
395 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
396 if (!WasDirty && CacheSeg->Dirty)
397 {
398 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
399 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
400 }
401 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
402 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
403
404 if (Mapped)
405 {
406 CacheSeg->MappedCount++;
407 }
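/*
 * Drop the caller's reference. A segment also holds one reference while it is
 * mapped and one while it is dirty, so take those references here if this
 * call created the first mapping or made the segment dirty.
 */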
408 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
409 CcRosCacheSegmentDecRefCount(CacheSeg);
410 if (Mapped && CacheSeg->MappedCount == 1)
411 {
412 CcRosCacheSegmentIncRefCount(CacheSeg);
413 }
414 if (!WasDirty && CacheSeg->Dirty)
415 {
416 CcRosCacheSegmentIncRefCount(CacheSeg);
417 }
418 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
419 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
420 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
421
422 return(STATUS_SUCCESS);
423 }
424
425 PCACHE_SEGMENT
426 NTAPI
427 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
428 {
429 PLIST_ENTRY current_entry;
430 PCACHE_SEGMENT current;
431 KIRQL oldIrql;
432
433 ASSERT(Bcb);
434
435 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
436
437 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
438 current_entry = Bcb->BcbSegmentListHead.Flink;
439 while (current_entry != &Bcb->BcbSegmentListHead)
440 {
441 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
442 BcbSegmentListEntry);
443 if (current->FileOffset <= FileOffset &&
444 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
445 {
446 CcRosCacheSegmentIncRefCount(current);
447 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
448 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
449 return(current);
450 }
451 current_entry = current_entry->Flink;
452 }
453 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
454 return(NULL);
455 }
456
457 NTSTATUS
458 NTAPI
459 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
460 {
461 PCACHE_SEGMENT CacheSeg;
462 KIRQL oldIrql;
463
464 ASSERT(Bcb);
465
466 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
467
468 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
469 if (CacheSeg == NULL)
470 {
471 KEBUGCHECKCC;
472 }
473 if (!CacheSeg->Dirty)
474 {
475 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
476 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
477 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
478 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
479 }
480 else
481 {
482 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
483 CcRosCacheSegmentDecRefCount(CacheSeg);
484 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
485 }
486
487
488 CacheSeg->Dirty = TRUE;
489 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
490
491 return(STATUS_SUCCESS);
492 }
493
494 NTSTATUS
495 NTAPI
496 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
497 {
498 PCACHE_SEGMENT CacheSeg;
499 BOOLEAN WasDirty;
500 KIRQL oldIrql;
501
502 ASSERT(Bcb);
503
504 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
505 Bcb, FileOffset, NowDirty);
506
507 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
508 if (CacheSeg == NULL)
509 {
510 return(STATUS_UNSUCCESSFUL);
511 }
512
513 WasDirty = CacheSeg->Dirty;
514 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
515
516 CacheSeg->MappedCount--;
517
518 if (!WasDirty && NowDirty)
519 {
520 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
521 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
522 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
523 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
524 }
525
526 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
527 CcRosCacheSegmentDecRefCount(CacheSeg);
528 if (!WasDirty && NowDirty)
529 {
530 CcRosCacheSegmentIncRefCount(CacheSeg);
531 }
532 if (CacheSeg->MappedCount == 0)
533 {
534 CcRosCacheSegmentDecRefCount(CacheSeg);
535 }
536 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
537
538 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
539 return(STATUS_SUCCESS);
540 }
541
542 NTSTATUS STATIC
543 CcRosCreateCacheSegment(PBCB Bcb,
544 ULONG FileOffset,
545 PCACHE_SEGMENT* CacheSeg)
546 {
547 ULONG i;
548 PCACHE_SEGMENT current;
549 PCACHE_SEGMENT previous;
550 PLIST_ENTRY current_entry;
551 NTSTATUS Status;
552 KIRQL oldIrql;
553 PPFN_TYPE Pfn;
554 #ifdef CACHE_BITMAP
555 ULONG StartingOffset;
556 #else
557 #endif
558 PHYSICAL_ADDRESS BoundaryAddressMultiple;
559
560 ASSERT(Bcb);
561
562 DPRINT("CcRosCreateCacheSegment()\n");
563
564 BoundaryAddressMultiple.QuadPart = 0;
565 if (FileOffset >= Bcb->FileSize.u.LowPart)
566 {
567 *CacheSeg = NULL;
568 return STATUS_INVALID_PARAMETER;
569 }
570
571 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
572 current->Valid = FALSE;
573 current->Dirty = FALSE;
574 current->PageOut = FALSE;
575 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
576 current->Bcb = Bcb;
577 #if defined(DBG) || defined(KDBG)
578 if ( Bcb->Trace )
579 {
580 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
581 }
582 #endif
583 current->MappedCount = 0;
584 current->DirtySegmentListEntry.Flink = NULL;
585 current->DirtySegmentListEntry.Blink = NULL;
586 current->ReferenceCount = 1;
587 ExInitializeFastMutex(&current->Lock);
588 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
589 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
590
591 *CacheSeg = current;
592 /* There is a window between the call to CcRosLookupCacheSegment
593 * and CcRosCreateCacheSegment. We must check whether a segment
594 * for this file offset already exists. If it does, we release
595 * our newly created segment and return the existing one.
596 */
597 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
598 current_entry = Bcb->BcbSegmentListHead.Flink;
599 previous = NULL;
600 while (current_entry != &Bcb->BcbSegmentListHead)
601 {
602 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
603 BcbSegmentListEntry);
604 if (current->FileOffset <= FileOffset &&
605 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
606 {
607 CcRosCacheSegmentIncRefCount(current);
608 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
609 #if defined(DBG) || defined(KDBG)
610 if ( Bcb->Trace )
611 {
612 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
613 Bcb,
614 (*CacheSeg),
615 current );
616 }
617 #endif
618 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&(*CacheSeg)->Lock);
619 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
620 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
621 *CacheSeg = current;
622 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
623 return STATUS_SUCCESS;
624 }
625 if (current->FileOffset < FileOffset)
626 {
627 if (previous == NULL)
628 {
629 previous = current;
630 }
631 else
632 {
633 if (previous->FileOffset < current->FileOffset)
634 {
635 previous = current;
636 }
637 }
638 }
639 current_entry = current_entry->Flink;
640 }
641 /* There was no existing segment. */
642 current = *CacheSeg;
643 if (previous)
644 {
645 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
646 }
647 else
648 {
649 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
650 }
651 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
652 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
653 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
654 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
655 #ifdef CACHE_BITMAP
656 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
657
658 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
659
660 if (StartingOffset == 0xffffffff)
661 {
662 DPRINT1("Out of CacheSeg mapping space\n");
663 KEBUGCHECKCC;
664 }
665
666 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
667
668 if (CiCacheSegMappingRegionHint == StartingOffset)
669 {
670 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
671 }
672
673 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
674 #else
675 MmLockAddressSpace(MmGetKernelAddressSpace());
676 current->BaseAddress = NULL;
677 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
678 MEMORY_AREA_CACHE_SEGMENT,
679 &current->BaseAddress,
680 Bcb->CacheSegmentSize,
681 PAGE_READWRITE,
682 (PMEMORY_AREA*)&current->MemoryArea,
683 FALSE,
684 0,
685 BoundaryAddressMultiple);
686 MmUnlockAddressSpace(MmGetKernelAddressSpace());
687 if (!NT_SUCCESS(Status))
688 {
689 KEBUGCHECKCC;
690 }
691 #endif
692 Pfn = alloca(sizeof(PFN_TYPE) * (Bcb->CacheSegmentSize / PAGE_SIZE));
693 for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++)
694 {
695 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Pfn[i]);
696 if (!NT_SUCCESS(Status))
697 {
698 KEBUGCHECKCC;
699 }
700 }
701 Status = MmCreateVirtualMapping(NULL,
702 current->BaseAddress,
703 PAGE_READWRITE,
704 Pfn,
705 Bcb->CacheSegmentSize / PAGE_SIZE);
706 if (!NT_SUCCESS(Status))
707 {
708 KEBUGCHECKCC;
709 }
710 return(STATUS_SUCCESS);
711 }
712
713 NTSTATUS
714 NTAPI
715 CcRosGetCacheSegmentChain(PBCB Bcb,
716 ULONG FileOffset,
717 ULONG Length,
718 PCACHE_SEGMENT* CacheSeg)
719 {
720 PCACHE_SEGMENT current;
721 ULONG i;
722 PCACHE_SEGMENT* CacheSegList;
723 PCACHE_SEGMENT Previous = NULL;
724
725 ASSERT(Bcb);
726
727 DPRINT("CcRosGetCacheSegmentChain()\n");
728
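/*
 * Round the requested length up to whole cache segments, look up or create a
 * segment for each segment-sized chunk, and finally link the segments
 * together through their NextInChain pointers.
 */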
729 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
730
731 #if defined(__GNUC__)
732 CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
733 (Length / Bcb->CacheSegmentSize));
734 #elif defined(_MSC_VER)
735 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
736 (Length / Bcb->CacheSegmentSize));
737 #else
738 #error Unknown compiler for alloca intrinsic stack allocation "function"
739 #endif
740
741 /*
742 * Look for a cache segment already mapping the same data.
743 */
744 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
745 {
746 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
747 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
748 if (current != NULL)
749 {
750 CacheSegList[i] = current;
751 }
752 else
753 {
754 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
755 CacheSegList[i] = current;
756 }
757 }
758
759 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
760 {
761 if (i == 0)
762 {
763 *CacheSeg = CacheSegList[i];
764 Previous = CacheSegList[i];
765 }
766 else
767 {
768 Previous->NextInChain = CacheSegList[i];
769 Previous = CacheSegList[i];
770 }
771 }
772 Previous->NextInChain = NULL;
773
774 return(STATUS_SUCCESS);
775 }
776
777 NTSTATUS
778 NTAPI
779 CcRosGetCacheSegment(PBCB Bcb,
780 ULONG FileOffset,
781 PULONG BaseOffset,
782 PVOID* BaseAddress,
783 PBOOLEAN UptoDate,
784 PCACHE_SEGMENT* CacheSeg)
785 {
786 PCACHE_SEGMENT current;
787 NTSTATUS Status;
788
789 ASSERT(Bcb);
790
791 DPRINT("CcRosGetCacheSegment()\n");
792
793 /*
794 * Look for a cache segment already mapping the same data.
795 */
796 current = CcRosLookupCacheSegment(Bcb, FileOffset);
797 if (current == NULL)
798 {
799 /*
800 * Otherwise create a new segment.
801 */
802 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
803 if (!NT_SUCCESS(Status))
804 {
805 return Status;
806 }
807 }
808 /*
809 * Return information about the segment to the caller.
810 */
811 *UptoDate = current->Valid;
812 *BaseAddress = current->BaseAddress;
813 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
814 *CacheSeg = current;
815 *BaseOffset = current->FileOffset;
816 return(STATUS_SUCCESS);
817 }
818
819 NTSTATUS STDCALL
820 CcRosRequestCacheSegment(PBCB Bcb,
821 ULONG FileOffset,
822 PVOID* BaseAddress,
823 PBOOLEAN UptoDate,
824 PCACHE_SEGMENT* CacheSeg)
825 /*
826 * FUNCTION: Request a page mapping for a BCB
827 */
828 {
829 ULONG BaseOffset;
830
831 ASSERT(Bcb);
832
833 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
834 {
835 CPRINT("Bad fileoffset %x should be multiple of %x",
836 FileOffset, Bcb->CacheSegmentSize);
837 KEBUGCHECKCC;
838 }
839
840 return(CcRosGetCacheSegment(Bcb,
841 FileOffset,
842 &BaseOffset,
843 BaseAddress,
844 UptoDate,
845 CacheSeg));
846 }
847 #ifdef CACHE_BITMAP
848 #else
849 STATIC VOID
850 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
851 PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
852 {
853 ASSERT(SwapEntry == 0);
854 if (Page != 0)
855 {
856 MmReleasePageMemoryConsumer(MC_CACHE, Page);
857 }
858 }
859 #endif
860 NTSTATUS
861 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
862 /*
863 * FUNCTION: Releases a cache segment associated with a BCB
864 */
865 {
866 #ifdef CACHE_BITMAP
867 ULONG i;
868 ULONG RegionSize;
869 ULONG Base;
870 PFN_TYPE Page;
871 KIRQL oldIrql;
872 #endif
873 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
874 #if defined(DBG) || defined(KDBG)
875 if ( CacheSeg->Bcb->Trace )
876 {
877 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
878 }
879 #endif
880 #ifdef CACHE_BITMAP
881 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
882
883 /* Unmap all the pages. */
884 for (i = 0; i < RegionSize; i++)
885 {
886 MmDeleteVirtualMapping(NULL,
887 CacheSeg->BaseAddress + (i * PAGE_SIZE),
888 FALSE,
889 NULL,
890 &Page);
891 MmReleasePageMemoryConsumer(MC_CACHE, Page);
892 }
893
894 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
895 /* Release the mapping-region space that was used. */
896 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
897
898 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
899
900 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
901
902 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
903 #else
904 MmLockAddressSpace(MmGetKernelAddressSpace());
905 MmFreeMemoryArea(MmGetKernelAddressSpace(),
906 CacheSeg->MemoryArea,
907 CcFreeCachePage,
908 NULL);
909 MmUnlockAddressSpace(MmGetKernelAddressSpace());
910 #endif
911 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
912 return(STATUS_SUCCESS);
913 }
914
915 NTSTATUS
916 NTAPI
917 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
918 {
919 NTSTATUS Status;
920 KIRQL oldIrql;
921
922 ASSERT(Bcb);
923
924 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
925 Bcb, CacheSeg);
926
927 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
928 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
929 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
930 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
931 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
932 if (CacheSeg->Dirty)
933 {
934 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
935 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
936
937 }
938 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
939 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
940
941 Status = CcRosInternalFreeCacheSegment(CacheSeg);
942 return(Status);
943 }
944
945 /*
946 * @implemented
947 */
948 VOID STDCALL
949 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
950 IN PLARGE_INTEGER FileOffset OPTIONAL,
951 IN ULONG Length,
952 OUT PIO_STATUS_BLOCK IoStatus)
953 {
954 PBCB Bcb;
955 LARGE_INTEGER Offset;
956 PCACHE_SEGMENT current;
957 NTSTATUS Status;
958 KIRQL oldIrql;
959
960 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
961 SectionObjectPointers, FileOffset, Length, IoStatus);
962
963 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
964 {
965 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
966 ASSERT(Bcb);
967 if (FileOffset)
968 {
969 Offset = *FileOffset;
970 }
971 else
972 {
973 Offset.QuadPart = (LONGLONG)0;
974 Length = Bcb->FileSize.u.LowPart;
975 }
976
977 if (IoStatus)
978 {
979 IoStatus->Status = STATUS_SUCCESS;
980 IoStatus->Information = 0;
981 }
982
983 while (Length > 0)
984 {
985 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
986 if (current != NULL)
987 {
988 if (current->Dirty)
989 {
990 Status = CcRosFlushCacheSegment(current);
991 if (!NT_SUCCESS(Status) && IoStatus != NULL)
992 {
993 IoStatus->Status = Status;
994 }
995 }
996 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
997 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
998 CcRosCacheSegmentDecRefCount(current);
999 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1000 }
1001
1002 Offset.QuadPart += Bcb->CacheSegmentSize;
1003 if (Length > Bcb->CacheSegmentSize)
1004 {
1005 Length -= Bcb->CacheSegmentSize;
1006 }
1007 else
1008 {
1009 Length = 0;
1010 }
1011 }
1012 }
1013 else
1014 {
1015 if (IoStatus)
1016 {
1017 IoStatus->Status = STATUS_INVALID_PARAMETER;
1018 }
1019 }
1020 }
1021
1022 NTSTATUS
1023 NTAPI
1024 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1025 /*
1026 * FUNCTION: Releases the BCB associated with a file object
1027 */
1028 {
1029 PLIST_ENTRY current_entry;
1030 PCACHE_SEGMENT current;
1031 NTSTATUS Status;
1032 LIST_ENTRY FreeList;
1033 KIRQL oldIrql;
1034
1035 ASSERT(Bcb);
1036
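/*
 * Take an extra reference so the cache map cannot be torn down again while
 * ViewLock is released for the flush below.
 */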
1037 Bcb->RefCount++;
1038 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1039
1040 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1041
1042 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1043 Bcb->RefCount--;
1044 if (Bcb->RefCount == 0)
1045 {
1046 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1047 {
1048 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1049 Bcb->BcbRemoveListEntry.Flink = NULL;
1050 }
1051
1052 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1053
1054 /*
1055 * Release all cache segments.
1056 */
1057 InitializeListHead(&FreeList);
1058 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1059 current_entry = Bcb->BcbSegmentListHead.Flink;
1060 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1061 {
1062 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1063 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1064 RemoveEntryList(&current->CacheSegmentListEntry);
1065 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1066 if (current->Dirty)
1067 {
1068 RemoveEntryList(&current->DirtySegmentListEntry);
1069 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1070 DPRINT1("Freeing dirty segment\n");
1071 }
1072 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1073 }
1074 #if defined(DBG) || defined(KDBG)
1075 Bcb->Trace = FALSE;
1076 #endif
1077 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1078
1079 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1080 ObDereferenceObject (Bcb->FileObject);
1081
1082 while (!IsListEmpty(&FreeList))
1083 {
1084 current_entry = RemoveTailList(&FreeList);
1085 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1086 Status = CcRosInternalFreeCacheSegment(current);
1087 }
1088 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1089 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1090 }
1091 return(STATUS_SUCCESS);
1092 }
1093
1094 VOID
1095 NTAPI
1096 CcRosReferenceCache(PFILE_OBJECT FileObject)
1097 {
1098 PBCB Bcb;
1099 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1100 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1101 ASSERT(Bcb);
1102 if (Bcb->RefCount == 0)
1103 {
1104 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1105 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1106 Bcb->BcbRemoveListEntry.Flink = NULL;
1107
1108 }
1109 else
1110 {
1111 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1112 }
1113 Bcb->RefCount++;
1114 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1115 }
1116
1117 VOID
1118 NTAPI
1119 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1120 {
1121 PBCB Bcb;
1122 DPRINT("CcRosSetRemoveOnClose()\n");
1123 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1124 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1125 if (Bcb)
1126 {
1127 Bcb->RemoveOnClose = TRUE;
1128 if (Bcb->RefCount == 0)
1129 {
1130 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1131 }
1132 }
1133 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1134 }
1135
1136
1137 VOID
1138 NTAPI
1139 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1140 {
1141 PBCB Bcb;
1142 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1143 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1144 ASSERT(Bcb);
1145 if (Bcb->RefCount > 0)
1146 {
1147 Bcb->RefCount--;
1148 if (Bcb->RefCount == 0)
1149 {
1150 MmFreeSectionSegments(Bcb->FileObject);
1151 if (Bcb->RemoveOnClose)
1152 {
1153 CcRosDeleteFileCache(FileObject, Bcb);
1154 }
1155 else
1156 {
1157 Bcb->TimeStamp = CcTimeStamp;
1158 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1159 }
1160 }
1161 }
1162 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1163 }
1164
1165 NTSTATUS STDCALL
1166 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1167 /*
1168 * FUNCTION: Called by the file system when a handle to a file object
1169 * has been closed.
1170 */
1171 {
1172 PBCB Bcb;
1173
1174 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1175
1176 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1177 {
1178 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1179 if (FileObject->PrivateCacheMap != NULL)
1180 {
1181 FileObject->PrivateCacheMap = NULL;
1182 if (Bcb->RefCount > 0)
1183 {
1184 Bcb->RefCount--;
1185 if (Bcb->RefCount == 0)
1186 {
1187 MmFreeSectionSegments(Bcb->FileObject);
1188 if (Bcb->RemoveOnClose)
1189 {
1190 CcRosDeleteFileCache(FileObject, Bcb);
1191 }
1192 else
1193 {
1194 Bcb->TimeStamp = CcTimeStamp;
1195 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1196 }
1197 }
1198 }
1199 }
1200 }
1201 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1202 return(STATUS_SUCCESS);
1203 }
1204
1205 NTSTATUS
1206 NTAPI
1207 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1208 {
1209 PBCB Bcb;
1210 NTSTATUS Status;
1211
1212 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1213
1214 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1215 if (Bcb == NULL)
1216 {
1217 Status = STATUS_UNSUCCESSFUL;
1218 }
1219 else
1220 {
1221 if (FileObject->PrivateCacheMap == NULL)
1222 {
1223 FileObject->PrivateCacheMap = Bcb;
1224 Bcb->RefCount++;
1225 }
1226 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1227 {
1228 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1229 Bcb->BcbRemoveListEntry.Flink = NULL;
1230 }
1231 Status = STATUS_SUCCESS;
1232 }
1233 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1234
1235 return Status;
1236 }
1237
1238
1239 NTSTATUS STDCALL
1240 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1241 ULONG CacheSegmentSize)
1242 /*
1243 * FUNCTION: Initializes a BCB for a file object
1244 */
1245 {
1246 PBCB Bcb;
1247
1248 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1249 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1250 FileObject, Bcb, CacheSegmentSize);
1251
1252 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1253 if (Bcb == NULL)
1254 {
1255 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1256 if (Bcb == NULL)
1257 {
1258 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1259 return(STATUS_UNSUCCESSFUL);
1260 }
1261 memset(Bcb, 0, sizeof(BCB));
1262 ObReferenceObjectByPointer(FileObject,
1263 FILE_ALL_ACCESS,
1264 NULL,
1265 KernelMode);
1266 Bcb->FileObject = FileObject;
1267 Bcb->CacheSegmentSize = CacheSegmentSize;
1268 if (FileObject->FsContext)
1269 {
1270 Bcb->AllocationSize =
1271 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1272 Bcb->FileSize =
1273 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1274 }
1275 KeInitializeSpinLock(&Bcb->BcbLock);
1276 InitializeListHead(&Bcb->BcbSegmentListHead);
1277 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1278 }
1279 if (FileObject->PrivateCacheMap == NULL)
1280 {
1281 FileObject->PrivateCacheMap = Bcb;
1282 Bcb->RefCount++;
1283 }
1284 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1285 {
1286 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1287 Bcb->BcbRemoveListEntry.Flink = NULL;
1288 }
1289 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1290
1291 return(STATUS_SUCCESS);
1292 }
1293
1294 /*
1295 * @implemented
1296 */
1297 PFILE_OBJECT STDCALL
1298 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1299 {
1300 PBCB Bcb;
1301 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1302 {
1303 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1304 ASSERT(Bcb);
1305 return Bcb->FileObject;
1306 }
1307 return NULL;
1308 }
1309
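/*
 * Lazy close worker: wakes up every ten seconds (or when LazyCloseThreadEvent
 * is signalled), advances CcTimeStamp, and deletes file caches that have been
 * sitting on the closed list for about 30 ticks, i.e. roughly five minutes.
 */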
1310 VOID STDCALL
1311 CmLazyCloseThreadMain(PVOID Ignored)
1312 {
1313 LARGE_INTEGER Timeout;
1314 PLIST_ENTRY current_entry;
1315 PBCB current;
1316 ULONG RemoveTimeStamp;
1317 NTSTATUS Status;
1318
1319 KeQuerySystemTime (&Timeout);
1320
1321 while (1)
1322 {
1323 Timeout.QuadPart += (LONGLONG)100000000; // 10sec
1324 Status = KeWaitForSingleObject(&LazyCloseThreadEvent,
1325 0,
1326 KernelMode,
1327 FALSE,
1328 &Timeout);
1329
1330 DPRINT("LazyCloseThreadMain %d\n", CcTimeStamp);
1331
1332 if (!NT_SUCCESS(Status))
1333 {
1334 DbgPrint("LazyCloseThread: Wait failed\n");
1335 KEBUGCHECKCC;
1336 break;
1337 }
1338 if (LazyCloseThreadShouldTerminate)
1339 {
1340 DbgPrint("LazyCloseThread: Terminating\n");
1341 break;
1342 }
1343
1344 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1345 CcTimeStamp++;
1346 if (CcTimeStamp >= 30)
1347 {
1348 RemoveTimeStamp = CcTimeStamp - 30; /* 5min = 10sec * 30 */
1349 while (!IsListEmpty(&ClosedListHead))
1350 {
1351 current_entry = ClosedListHead.Blink;
1352 current = CONTAINING_RECORD(current_entry, BCB, BcbRemoveListEntry);
1353 if (current->TimeStamp >= RemoveTimeStamp)
1354 {
1355 break;
1356 }
1357 CcRosDeleteFileCache(current->FileObject, current);
1358 }
1359 }
1360 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1361 }
1362 }
1363
1364 VOID
1365 INIT_FUNCTION
1366 NTAPI
1367 CcInitView(VOID)
1368 {
1369 #ifdef CACHE_BITMAP
1370 PMEMORY_AREA marea;
1371 PVOID Buffer;
1372 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1373 #endif
1374 NTSTATUS Status;
1375 KPRIORITY Priority;
1376
1377 DPRINT("CcInitView()\n");
1378 #ifdef CACHE_BITMAP
1379 BoundaryAddressMultiple.QuadPart = 0;
1380 CiCacheSegMappingRegionHint = 0;
1381 CiCacheSegMappingRegionBase = NULL;
1382
1383 MmLockAddressSpace(MmGetKernelAddressSpace());
1384
1385 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1386 MEMORY_AREA_CACHE_SEGMENT,
1387 &CiCacheSegMappingRegionBase,
1388 CI_CACHESEG_MAPPING_REGION_SIZE,
1389 PAGE_READWRITE,
1390 &marea,
1391 FALSE,
1392 0,
1393 BoundaryAddressMultiple);
1394 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1395 if (!NT_SUCCESS(Status))
1396 {
1397 KEBUGCHECKCC;
1398 }
1399
1400 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1401
1402 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1403 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1404
1405 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1406 #endif
1407 InitializeListHead(&CacheSegmentListHead);
1408 InitializeListHead(&DirtySegmentListHead);
1409 InitializeListHead(&CacheSegmentLRUListHead);
1410 InitializeListHead(&ClosedListHead);
1411 ExInitializeFastMutex(&ViewLock);
1412 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1413 NULL,
1414 NULL,
1415 0,
1416 sizeof(INTERNAL_BCB),
1417 TAG_IBCB,
1418 20);
1419 ExInitializeNPagedLookasideList (&BcbLookasideList,
1420 NULL,
1421 NULL,
1422 0,
1423 sizeof(BCB),
1424 TAG_BCB,
1425 20);
1426 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1427 NULL,
1428 NULL,
1429 0,
1430 sizeof(CACHE_SEGMENT),
1431 TAG_CSEG,
1432 20);
1433
1434 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1435
1436 CcInitCacheZeroPage();
1437
1438 CcTimeStamp = 0;
1439 LazyCloseThreadShouldTerminate = FALSE;
1440 KeInitializeEvent (&LazyCloseThreadEvent, SynchronizationEvent, FALSE);
1441 Status = PsCreateSystemThread(&LazyCloseThreadHandle,
1442 THREAD_ALL_ACCESS,
1443 NULL,
1444 NULL,
1445 &LazyCloseThreadId,
1446 (PKSTART_ROUTINE)CmLazyCloseThreadMain,
1447 NULL);
1448 if (NT_SUCCESS(Status))
1449 {
1450 Priority = LOW_REALTIME_PRIORITY;
1451 NtSetInformationThread(LazyCloseThreadHandle,
1452 ThreadPriority,
1453 &Priority,
1454 sizeof(Priority));
1455 }
1456
1457 }
1458
1459 /* EOF */