1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/cc/view.c
6 * PURPOSE: Cache manager
7 *
8 * PROGRAMMERS: David Welch (welch@mcmail.com)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initialized yet, do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written, obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If the page is being read, or is not being completely written, and it
26 * is not up to date, then read its data from the underlying medium. If the
27 * read fails, call CcReleaseCachePage with VALID set to FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page; see the illustrative sketch after this comment.
32 */
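/*
 * ILLUSTRATION ONLY (not compiled): a minimal sketch of the read path
 * described above, expressed with the CcRos* routines implemented in this
 * file. The FCB layout, the Buffer/ReadLength/FileOffset variables and the
 * ReadFromDisk() helper are hypothetical; only CcRosRequestCacheSegment and
 * CcRosReleaseCacheSegment correspond to real prototypes below.
 *
 *   PBCB Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
 *   PVOID BaseAddress;
 *   BOOLEAN UptoDate;
 *   PCACHE_SEGMENT CacheSeg;
 *   NTSTATUS Status;
 *
 *   Status = CcRosRequestCacheSegment(Bcb,
 *                                     ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize),
 *                                     &BaseAddress, &UptoDate, &CacheSeg);
 *   if (!NT_SUCCESS(Status))
 *      return Status;
 *   if (!UptoDate)
 *   {
 *      Status = ReadFromDisk(FileObject, CacheSeg->FileOffset,
 *                            BaseAddress, Bcb->CacheSegmentSize);
 *      if (!NT_SUCCESS(Status))
 *      {
 *         CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
 *         return Status;
 *      }
 *   }
 *   RtlCopyMemory(Buffer,
 *                 (PUCHAR)BaseAddress + (FileOffset % Bcb->CacheSegmentSize),
 *                 ReadLength);
 *   CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
 */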
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <internal/debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 /*
46 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
47 * within the kernel address space and allocates/deallocates space from this block
48 * using a bitmap. If CACHE_BITMAP is used, the size of the MDL mapping region
49 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
50 */
51 //#define CACHE_BITMAP
52
53 static LIST_ENTRY DirtySegmentListHead;
54 static LIST_ENTRY CacheSegmentListHead;
55 static LIST_ENTRY CacheSegmentLRUListHead;
56 static LIST_ENTRY ClosedListHead;
57 ULONG DirtyPageCount=0;
58
59 FAST_MUTEX ViewLock;
60
61 #ifdef CACHE_BITMAP
62 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
63
64 static PVOID CiCacheSegMappingRegionBase = NULL;
65 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
66 static ULONG CiCacheSegMappingRegionHint;
67 static KSPIN_LOCK CiCacheSegMappingRegionLock;
68 #endif
69
70 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
72 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
73
74 static ULONG CcTimeStamp;
75 static KEVENT LazyCloseThreadEvent;
76 static HANDLE LazyCloseThreadHandle;
77 static CLIENT_ID LazyCloseThreadId;
78 static volatile BOOLEAN LazyCloseThreadShouldTerminate;
79
80 #if defined(__GNUC__)
81 /* void * alloca(size_t size); */
82 #elif defined(_MSC_VER)
83 void* _alloca(size_t size);
84 #else
85 #error Unknown compiler for alloca intrinsic stack allocation "function"
86 #endif
87
88 #if defined(DBG) || defined(KDBG)
89 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
90 {
91 ++cs->ReferenceCount;
92 if ( cs->Bcb->Trace )
93 {
94 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
95 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
96 }
97 }
98 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
99 {
100 --cs->ReferenceCount;
101 if ( cs->Bcb->Trace )
102 {
103 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
104 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
105 }
106 }
107 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
108 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
109 #else
110 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
111 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
112 #endif
113
114 NTSTATUS
115 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
116
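/*
 * Non-blocking acquire for a fast mutex. Enters a critical region, tries to
 * take the mutex with an interlocked exchange on its Count field and, on
 * success, records the owner and returns TRUE with the critical region still
 * entered; on failure the critical region is left again and FALSE is returned.
 */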
117 BOOLEAN
118 FASTCALL
119 CcTryToAcquireBrokenMutex(PFAST_MUTEX FastMutex)
120 {
121 KeEnterCriticalRegion();
122 if (InterlockedCompareExchange(&FastMutex->Count, 0, 1) == 1)
123 {
124 FastMutex->Owner = KeGetCurrentThread();
125 return(TRUE);
126 }
127 else
128 {
129 KeLeaveCriticalRegion();
130 return(FALSE);
131 }
132 }
133
134 /* FUNCTIONS *****************************************************************/
135
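/*
 * Enables or disables reference-count tracing for all cache segments of the
 * given BCB, dumping the current segments when tracing is switched on. Only
 * active in DBG/KDBG builds; otherwise a no-op.
 */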
136 VOID
137 STDCALL
138 CcRosTraceCacheMap (
139 PBCB Bcb,
140 BOOLEAN Trace )
141 {
142 #if defined(DBG) || defined(KDBG)
143 KIRQL oldirql;
144 PLIST_ENTRY current_entry;
145 PCACHE_SEGMENT current;
146
147 if ( !Bcb )
148 return;
149
150 Bcb->Trace = Trace;
151
152 if ( Trace )
153 {
154 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
155
156 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
157 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
158
159 current_entry = Bcb->BcbSegmentListHead.Flink;
160 while (current_entry != &Bcb->BcbSegmentListHead)
161 {
162 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
163 current_entry = current_entry->Flink;
164
165 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
166 current, current->ReferenceCount, current->Dirty, current->PageOut );
167 }
168 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
169 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
170 }
171 else
172 {
173 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
174 }
175
176 #else
177 Bcb = Bcb;
178 Trace = Trace;
179 #endif
180 }
181
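/*
 * Writes a single cache segment back to its file. On success the segment is
 * marked clean, removed from the dirty list and its dirty reference is dropped.
 */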
182 NTSTATUS
183 NTAPI
184 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
185 {
186 NTSTATUS Status;
187 KIRQL oldIrql;
188 Status = WriteCacheSegment(CacheSegment);
189 if (NT_SUCCESS(Status))
190 {
191 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
192 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
193 CacheSegment->Dirty = FALSE;
194 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
195 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
196 CcRosCacheSegmentDecRefCount ( CacheSegment );
197 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
198 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
199 }
200 return(Status);
201 }
202
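/*
 * Walks the global dirty-segment list and flushes segments until roughly
 * Target pages have been written or the list is exhausted, skipping segments
 * that are still referenced or whose locks cannot be acquired. The static
 * WriteCount history spreads the write-behind work over several passes.
 * Returns the number of pages actually flushed in *Count.
 */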
203 NTSTATUS
204 NTAPI
205 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
206 {
207 PLIST_ENTRY current_entry;
208 PCACHE_SEGMENT current;
209 ULONG PagesPerSegment;
210 BOOLEAN Locked;
211 NTSTATUS Status;
212 static ULONG WriteCount[4] = {0, 0, 0, 0};
213 ULONG NewTarget;
214
215 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
216
217 (*Count) = 0;
218
219 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
220
221 WriteCount[0] = WriteCount[1];
222 WriteCount[1] = WriteCount[2];
223 WriteCount[2] = WriteCount[3];
224 WriteCount[3] = 0;
225
226 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
227
228 if (NewTarget < DirtyPageCount)
229 {
230 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
231 WriteCount[0] += NewTarget;
232 WriteCount[1] += NewTarget;
233 WriteCount[2] += NewTarget;
234 WriteCount[3] += NewTarget;
235 }
236
237 NewTarget = WriteCount[0];
238
239 Target = max(NewTarget, Target);
240
241 current_entry = DirtySegmentListHead.Flink;
242 if (current_entry == &DirtySegmentListHead)
243 {
244 DPRINT("No Dirty pages\n");
245 }
246 while (current_entry != &DirtySegmentListHead && Target > 0)
247 {
248 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
249 DirtySegmentListEntry);
250 current_entry = current_entry->Flink;
251
252 // Locked = current->Bcb->Callbacks.AcquireForLazyWrite(current->Bcb->Context, FALSE);
253 Locked = ExTryToAcquireResourceExclusiveLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
254 if (!Locked)
255 {
256 continue;
257 }
258 Locked = CcTryToAcquireBrokenMutex(&current->Lock);
259 if (!Locked)
260 {
261 // current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
262 ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
263 continue;
264 }
265 ASSERT(current->Dirty);
266 if (current->ReferenceCount > 1)
267 {
268 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
269 // current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
270 ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
271 continue;
272 }
273 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
274 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
275 Status = CcRosFlushCacheSegment(current);
276 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
277 // current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
278 ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
279 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
280 {
281 DPRINT1("CC: Failed to flush cache segment.\n");
282 }
283 else
284 {
285 (*Count) += PagesPerSegment;
286 Target -= PagesPerSegment;
287 }
288 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
289 current_entry = DirtySegmentListHead.Flink;
290 }
291 if (*Count < NewTarget)
292 {
293 WriteCount[1] += (NewTarget - *Count);
294 }
295 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
296 DPRINT("CcRosFlushDirtyPages() finished\n");
297
298 return(STATUS_SUCCESS);
299 }
300
301 NTSTATUS
302 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
303 /*
304 * FUNCTION: Try to free some memory from the file cache.
305 * ARGUMENTS:
306 * Target - The number of pages to be freed.
307 * Priority - The priority of free (currently unused).
308 * NrFreed - Points to a variable where the number of pages
309 * actually freed is returned.
310 */
311 {
312 PLIST_ENTRY current_entry;
313 PCACHE_SEGMENT current, last = NULL;
314 ULONG PagesPerSegment;
315 ULONG PagesFreed;
316 KIRQL oldIrql;
317 LIST_ENTRY FreeList;
318
319 DPRINT("CcRosTrimCache(Target %d)\n", Target);
320
321 *NrFreed = 0;
322
323 InitializeListHead(&FreeList);
324
325 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
326 current_entry = CacheSegmentLRUListHead.Flink;
327 while (current_entry != &CacheSegmentLRUListHead && Target > 0)
328 {
329 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
330 CacheSegmentLRUListEntry);
331 current_entry = current_entry->Flink;
332
333 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
334 if (current->ReferenceCount == 0)
335 {
336 RemoveEntryList(&current->BcbSegmentListEntry);
337 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
338 RemoveEntryList(&current->CacheSegmentListEntry);
339 RemoveEntryList(&current->CacheSegmentLRUListEntry);
340 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
341 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
342 PagesFreed = min(PagesPerSegment, Target);
343 Target -= PagesFreed;
344 (*NrFreed) += PagesFreed;
345 }
346 else
347 {
348 if (last != current && current->MappedCount > 0 && !current->Dirty && !current->PageOut)
349 {
350 ULONG i;
351 NTSTATUS Status;
352
353 CcRosCacheSegmentIncRefCount(current);
354 last = current;
355 current->PageOut = TRUE;
356 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
357 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
358 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
359 {
360 PFN_TYPE Page;
361 Page = (PFN_TYPE)(MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT);
362 Status = MmPageOutPhysicalAddress(Page);
363 if (!NT_SUCCESS(Status))
364 {
365 break;
366 }
367 }
368 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
369 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
370 CcRosCacheSegmentDecRefCount(current);
371 current->PageOut = FALSE;
372 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
373 current_entry = &current->CacheSegmentLRUListEntry;
374 continue;
375 }
376 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
377 }
378 }
379 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
380
381 while (!IsListEmpty(&FreeList))
382 {
383 current_entry = RemoveHeadList(&FreeList);
384 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
385 BcbSegmentListEntry);
386 CcRosInternalFreeCacheSegment(current);
387 }
388
389 DPRINT("CcRosTrimCache() finished\n");
390 return(STATUS_SUCCESS);
391 }
392
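/*
 * Releases a cache segment previously obtained through CcRosGetCacheSegment /
 * CcRosRequestCacheSegment. Updates the Valid/Dirty state, queues the segment
 * on the dirty list if it just became dirty, moves it to the tail of the LRU
 * list, adjusts the reference count for new mappings and the dirty state, and
 * finally drops the caller's reference and the segment lock.
 */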
393 NTSTATUS
394 NTAPI
395 CcRosReleaseCacheSegment(PBCB Bcb,
396 PCACHE_SEGMENT CacheSeg,
397 BOOLEAN Valid,
398 BOOLEAN Dirty,
399 BOOLEAN Mapped)
400 {
401 BOOLEAN WasDirty = CacheSeg->Dirty;
402 KIRQL oldIrql;
403
404 ASSERT(Bcb);
405
406 DPRINT("CcRosReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
407 Bcb, CacheSeg, Valid);
408
409 CacheSeg->Valid = Valid;
410 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
411
412 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
413 if (!WasDirty && CacheSeg->Dirty)
414 {
415 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
416 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
417 }
418 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
419 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
420
421 if (Mapped)
422 {
423 CacheSeg->MappedCount++;
424 }
425 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
426 CcRosCacheSegmentDecRefCount(CacheSeg);
427 if (Mapped && CacheSeg->MappedCount == 1)
428 {
429 CcRosCacheSegmentIncRefCount(CacheSeg);
430 }
431 if (!WasDirty && CacheSeg->Dirty)
432 {
433 CcRosCacheSegmentIncRefCount(CacheSeg);
434 }
435 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
436 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
437 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
438
439 return(STATUS_SUCCESS);
440 }
441
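/*
 * Searches the BCB's segment list for the segment covering FileOffset. On a
 * hit the segment is referenced and returned with its lock held; otherwise
 * NULL is returned.
 */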
442 PCACHE_SEGMENT
443 NTAPI
444 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
445 {
446 PLIST_ENTRY current_entry;
447 PCACHE_SEGMENT current;
448 KIRQL oldIrql;
449
450 ASSERT(Bcb);
451
452 DPRINT("CcRosLookupCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
453
454 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
455 current_entry = Bcb->BcbSegmentListHead.Flink;
456 while (current_entry != &Bcb->BcbSegmentListHead)
457 {
458 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
459 BcbSegmentListEntry);
460 if (current->FileOffset <= FileOffset &&
461 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
462 {
463 CcRosCacheSegmentIncRefCount(current);
464 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
465 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
466 return(current);
467 }
468 current_entry = current_entry->Flink;
469 }
470 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
471 return(NULL);
472 }
473
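/*
 * Marks the segment containing FileOffset as dirty. On the clean-to-dirty
 * transition the segment is put on the dirty list and the reference taken by
 * the lookup is kept as the dirty reference; if it was already dirty that
 * reference is dropped again. Bugchecks if no segment maps the offset.
 */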
474 NTSTATUS
475 NTAPI
476 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
477 {
478 PCACHE_SEGMENT CacheSeg;
479 KIRQL oldIrql;
480
481 ASSERT(Bcb);
482
483 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
484
485 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
486 if (CacheSeg == NULL)
487 {
488 KEBUGCHECKCC;
489 }
490 if (!CacheSeg->Dirty)
491 {
492 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
493 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
494 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
495 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
496 }
497 else
498 {
499 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
500 CcRosCacheSegmentDecRefCount(CacheSeg);
501 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
502 }
503
504
505 CacheSeg->Dirty = TRUE;
506 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
507
508 return(STATUS_SUCCESS);
509 }
510
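/*
 * Drops one mapping of the segment at FileOffset, optionally marking it dirty.
 * Adjusts the reference count for the lost mapping and, if the segment just
 * became dirty, for its new place on the dirty list.
 */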
511 NTSTATUS
512 NTAPI
513 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
514 {
515 PCACHE_SEGMENT CacheSeg;
516 BOOLEAN WasDirty;
517 KIRQL oldIrql;
518
519 ASSERT(Bcb);
520
521 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
522 Bcb, FileOffset, NowDirty);
523
524 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
525 if (CacheSeg == NULL)
526 {
527 return(STATUS_UNSUCCESSFUL);
528 }
529
530 WasDirty = CacheSeg->Dirty;
531 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
532
533 CacheSeg->MappedCount--;
534
535 if (!WasDirty && NowDirty)
536 {
537 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
538 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
539 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
540 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
541 }
542
543 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
544 CcRosCacheSegmentDecRefCount(CacheSeg);
545 if (!WasDirty && NowDirty)
546 {
547 CcRosCacheSegmentIncRefCount(CacheSeg);
548 }
549 if (CacheSeg->MappedCount == 0)
550 {
551 CcRosCacheSegmentDecRefCount(CacheSeg);
552 }
553 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
554
555 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
556 return(STATUS_SUCCESS);
557 }
558
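/*
 * Allocates, links and maps a new cache segment covering FileOffset. If
 * another thread created an overlapping segment in the meantime, the new one
 * is discarded and the existing segment is returned locked instead.
 */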
559 static NTSTATUS
560 CcRosCreateCacheSegment(PBCB Bcb,
561 ULONG FileOffset,
562 PCACHE_SEGMENT* CacheSeg)
563 {
564 ULONG i;
565 PCACHE_SEGMENT current;
566 PCACHE_SEGMENT previous;
567 PLIST_ENTRY current_entry;
568 NTSTATUS Status;
569 KIRQL oldIrql;
570 PPFN_TYPE Pfn;
571 #ifdef CACHE_BITMAP
572 ULONG StartingOffset;
573 #else
574 #endif
575 PHYSICAL_ADDRESS BoundaryAddressMultiple;
576
577 ASSERT(Bcb);
578
579 DPRINT("CcRosCreateCacheSegment()\n");
580
581 BoundaryAddressMultiple.QuadPart = 0;
582 if (FileOffset >= Bcb->FileSize.u.LowPart)
583 {
584 *CacheSeg = NULL;
585 return STATUS_INVALID_PARAMETER;
586 }
587
588 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
589 current->Valid = FALSE;
590 current->Dirty = FALSE;
591 current->PageOut = FALSE;
592 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
593 current->Bcb = Bcb;
594 #if defined(DBG) || defined(KDBG)
595 if ( Bcb->Trace )
596 {
597 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
598 }
599 #endif
600 current->MappedCount = 0;
601 current->DirtySegmentListEntry.Flink = NULL;
602 current->DirtySegmentListEntry.Blink = NULL;
603 current->ReferenceCount = 1;
604 ExInitializeFastMutex(&current->Lock);
605 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
606 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
607
608 *CacheSeg = current;
609 /* There is a window between the call to CcRosLookupCacheSegment
610 * and CcRosCreateCacheSegment. We must check whether a segment for
611 * this file offset already exists. If one does, we release our
612 * newly created segment and return the existing one.
613 */
614 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
615 current_entry = Bcb->BcbSegmentListHead.Flink;
616 previous = NULL;
617 while (current_entry != &Bcb->BcbSegmentListHead)
618 {
619 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
620 BcbSegmentListEntry);
621 if (current->FileOffset <= FileOffset &&
622 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
623 {
624 CcRosCacheSegmentIncRefCount(current);
625 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
626 #if defined(DBG) || defined(KDBG)
627 if ( Bcb->Trace )
628 {
629 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
630 Bcb,
631 (*CacheSeg),
632 current );
633 }
634 #endif
635 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&(*CacheSeg)->Lock);
636 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
637 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
638 *CacheSeg = current;
639 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
640 return STATUS_SUCCESS;
641 }
642 if (current->FileOffset < FileOffset)
643 {
644 if (previous == NULL)
645 {
646 previous = current;
647 }
648 else
649 {
650 if (previous->FileOffset < current->FileOffset)
651 {
652 previous = current;
653 }
654 }
655 }
656 current_entry = current_entry->Flink;
657 }
658 /* There was no existing segment. */
659 current = *CacheSeg;
660 if (previous)
661 {
662 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
663 }
664 else
665 {
666 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
667 }
668 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
669 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
670 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
671 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
672 #ifdef CACHE_BITMAP
673 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
674
675 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
676
677 if (StartingOffset == 0xffffffff)
678 {
679 DPRINT1("Out of CacheSeg mapping space\n");
680 KEBUGCHECKCC;
681 }
682
683 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
684
685 if (CiCacheSegMappingRegionHint == StartingOffset)
686 {
687 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
688 }
689
690 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
691 #else
692 MmLockAddressSpace(MmGetKernelAddressSpace());
693 current->BaseAddress = NULL;
694 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
695 MEMORY_AREA_CACHE_SEGMENT,
696 &current->BaseAddress,
697 Bcb->CacheSegmentSize,
698 PAGE_READWRITE,
699 (PMEMORY_AREA*)&current->MemoryArea,
700 FALSE,
701 0,
702 BoundaryAddressMultiple);
703 MmUnlockAddressSpace(MmGetKernelAddressSpace());
704 if (!NT_SUCCESS(Status))
705 {
706 KEBUGCHECKCC;
707 }
708 #endif
709 Pfn = alloca(sizeof(PFN_TYPE) * (Bcb->CacheSegmentSize / PAGE_SIZE));
710 for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++)
711 {
712 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Pfn[i]);
713 if (!NT_SUCCESS(Status))
714 {
715 KEBUGCHECKCC;
716 }
717 }
718 Status = MmCreateVirtualMapping(NULL,
719 current->BaseAddress,
720 PAGE_READWRITE,
721 Pfn,
722 Bcb->CacheSegmentSize / PAGE_SIZE);
723 if (!NT_SUCCESS(Status))
724 {
725 KEBUGCHECKCC;
726 }
727 return(STATUS_SUCCESS);
728 }
729
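/*
 * Collects the cache segments covering [FileOffset, FileOffset + Length),
 * creating any that do not exist yet, and links them through NextInChain into
 * a NULL-terminated chain returned via *CacheSeg. Each segment in the chain
 * is referenced and locked.
 */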
730 NTSTATUS
731 NTAPI
732 CcRosGetCacheSegmentChain(PBCB Bcb,
733 ULONG FileOffset,
734 ULONG Length,
735 PCACHE_SEGMENT* CacheSeg)
736 {
737 PCACHE_SEGMENT current;
738 ULONG i;
739 PCACHE_SEGMENT* CacheSegList;
740 PCACHE_SEGMENT Previous = NULL;
741
742 ASSERT(Bcb);
743
744 DPRINT("CcRosGetCacheSegmentChain()\n");
745
746 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
747
748 #if defined(__GNUC__)
749 CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
750 (Length / Bcb->CacheSegmentSize));
751 #elif defined(_MSC_VER)
752 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
753 (Length / Bcb->CacheSegmentSize));
754 #else
755 #error Unknown compiler for alloca intrinsic stack allocation "function"
756 #endif
757
758 /*
759 * Look for a cache segment already mapping the same data.
760 */
761 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
762 {
763 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
764 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
765 if (current != NULL)
766 {
767 CacheSegList[i] = current;
768 }
769 else
770 {
771 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
772 CacheSegList[i] = current;
773 }
774 }
775
776 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
777 {
778 if (i == 0)
779 {
780 *CacheSeg = CacheSegList[i];
781 Previous = CacheSegList[i];
782 }
783 else
784 {
785 Previous->NextInChain = CacheSegList[i];
786 Previous = CacheSegList[i];
787 }
788 }
789 ASSERT(Previous);
790 Previous->NextInChain = NULL;
791
792 return(STATUS_SUCCESS);
793 }
794
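/*
 * Looks up (or creates) the cache segment covering FileOffset and returns its
 * base address, segment-aligned base offset and whether its contents are
 * already valid. The segment is returned referenced and locked.
 */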
795 NTSTATUS
796 NTAPI
797 CcRosGetCacheSegment(PBCB Bcb,
798 ULONG FileOffset,
799 PULONG BaseOffset,
800 PVOID* BaseAddress,
801 PBOOLEAN UptoDate,
802 PCACHE_SEGMENT* CacheSeg)
803 {
804 PCACHE_SEGMENT current;
805 NTSTATUS Status;
806
807 ASSERT(Bcb);
808
809 DPRINT("CcRosGetCacheSegment()\n");
810
811 /*
812 * Look for a cache segment already mapping the same data.
813 */
814 current = CcRosLookupCacheSegment(Bcb, FileOffset);
815 if (current == NULL)
816 {
817 /*
818 * Otherwise create a new segment.
819 */
820 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
821 if (!NT_SUCCESS(Status))
822 {
823 return Status;
824 }
825 }
826 /*
827 * Return information about the segment to the caller.
828 */
829 *UptoDate = current->Valid;
830 *BaseAddress = current->BaseAddress;
831 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
832 *CacheSeg = current;
833 *BaseOffset = current->FileOffset;
834 return(STATUS_SUCCESS);
835 }
836
837 NTSTATUS STDCALL
838 CcRosRequestCacheSegment(PBCB Bcb,
839 ULONG FileOffset,
840 PVOID* BaseAddress,
841 PBOOLEAN UptoDate,
842 PCACHE_SEGMENT* CacheSeg)
843 /*
844 * FUNCTION: Request a page mapping for a BCB
845 */
846 {
847 ULONG BaseOffset;
848
849 ASSERT(Bcb);
850
851 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
852 {
853 CPRINT("Bad FileOffset %x, should be a multiple of %x",
854 FileOffset, Bcb->CacheSegmentSize);
855 KEBUGCHECKCC;
856 }
857
858 return(CcRosGetCacheSegment(Bcb,
859 FileOffset,
860 &BaseOffset,
861 BaseAddress,
862 UptoDate,
863 CacheSeg));
864 }
865 #ifdef CACHE_BITMAP
866 #else
867 static VOID
868 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
869 PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
870 {
871 ASSERT(SwapEntry == 0);
872 if (Page != 0)
873 {
874 MmReleasePageMemoryConsumer(MC_CACHE, Page);
875 }
876 }
877 #endif
878 NTSTATUS
879 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
880 /*
881 * FUNCTION: Releases a cache segment associated with a BCB
882 */
883 {
884 #ifdef CACHE_BITMAP
885 ULONG i;
886 ULONG RegionSize;
887 ULONG Base;
888 PFN_TYPE Page;
889 KIRQL oldIrql;
890 #endif
891 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
892 #if defined(DBG) || defined(KDBG)
893 if ( CacheSeg->Bcb->Trace )
894 {
895 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
896 }
897 #endif
898 #ifdef CACHE_BITMAP
899 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
900
901 /* Unmap all the pages. */
902 for (i = 0; i < RegionSize; i++)
903 {
904 MmDeleteVirtualMapping(NULL,
905 CacheSeg->BaseAddress + (i * PAGE_SIZE),
906 FALSE,
907 NULL,
908 &Page);
909 MmReleasePageMemoryConsumer(MC_CACHE, Page);
910 }
911
912 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
913 /* Deallocate all the pages used. */
914 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
915
916 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
917
918 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
919
920 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
921 #else
922 MmLockAddressSpace(MmGetKernelAddressSpace());
923 MmFreeMemoryArea(MmGetKernelAddressSpace(),
924 CacheSeg->MemoryArea,
925 CcFreeCachePage,
926 NULL);
927 MmUnlockAddressSpace(MmGetKernelAddressSpace());
928 #endif
929 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
930 return(STATUS_SUCCESS);
931 }
932
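/*
 * Unlinks a cache segment from the BCB, global, LRU and (if dirty) dirty
 * lists, then releases its mapping and pages.
 */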
933 NTSTATUS
934 NTAPI
935 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
936 {
937 NTSTATUS Status;
938 KIRQL oldIrql;
939
940 ASSERT(Bcb);
941
942 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
943 Bcb, CacheSeg);
944
945 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
946 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
947 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
948 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
949 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
950 if (CacheSeg->Dirty)
951 {
952 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
953 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
954
955 }
956 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
957 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
958
959 Status = CcRosInternalFreeCacheSegment(CacheSeg);
960 return(Status);
961 }
962
963 /*
964 * @implemented
965 */
966 VOID STDCALL
967 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
968 IN PLARGE_INTEGER FileOffset OPTIONAL,
969 IN ULONG Length,
970 OUT PIO_STATUS_BLOCK IoStatus)
971 {
972 PBCB Bcb;
973 LARGE_INTEGER Offset;
974 PCACHE_SEGMENT current;
975 NTSTATUS Status;
976 KIRQL oldIrql;
977
978 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
979 SectionObjectPointers, FileOffset, Length, IoStatus);
980
981 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
982 {
983 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
984 ASSERT(Bcb);
985 if (FileOffset)
986 {
987 Offset = *FileOffset;
988 }
989 else
990 {
991 Offset.QuadPart = (LONGLONG)0;
992 Length = Bcb->FileSize.u.LowPart;
993 }
994
995 if (IoStatus)
996 {
997 IoStatus->Status = STATUS_SUCCESS;
998 IoStatus->Information = 0;
999 }
1000
1001 while (Length > 0)
1002 {
1003 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
1004 if (current != NULL)
1005 {
1006 if (current->Dirty)
1007 {
1008 Status = CcRosFlushCacheSegment(current);
1009 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1010 {
1011 IoStatus->Status = Status;
1012 }
1013 }
1014 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1015 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
1016 CcRosCacheSegmentDecRefCount(current);
1017 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1018 }
1019
1020 Offset.QuadPart += Bcb->CacheSegmentSize;
1021 if (Length > Bcb->CacheSegmentSize)
1022 {
1023 Length -= Bcb->CacheSegmentSize;
1024 }
1025 else
1026 {
1027 Length = 0;
1028 }
1029 }
1030 }
1031 else
1032 {
1033 if (IoStatus)
1034 {
1035 IoStatus->Status = STATUS_INVALID_PARAMETER;
1036 }
1037 }
1038 }
1039
1040 NTSTATUS
1041 NTAPI
1042 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1043 /*
1044 * FUNCTION: Releases the BCB associated with a file object. Must be called
1044 * with ViewLock held; the lock is dropped and reacquired internally.
1045 */
1046 {
1047 PLIST_ENTRY current_entry;
1048 PCACHE_SEGMENT current;
1049 NTSTATUS Status;
1050 LIST_ENTRY FreeList;
1051 KIRQL oldIrql;
1052
1053 ASSERT(Bcb);
1054
1055 Bcb->RefCount++;
1056 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1057
1058 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1059
1060 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1061 Bcb->RefCount--;
1062 if (Bcb->RefCount == 0)
1063 {
1064 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1065 {
1066 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1067 Bcb->BcbRemoveListEntry.Flink = NULL;
1068 }
1069
1070 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1071
1072 /*
1073 * Release all cache segments.
1074 */
1075 InitializeListHead(&FreeList);
1076 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1077 current_entry = Bcb->BcbSegmentListHead.Flink;
1078 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1079 {
1080 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1081 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1082 RemoveEntryList(&current->CacheSegmentListEntry);
1083 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1084 if (current->Dirty)
1085 {
1086 RemoveEntryList(&current->DirtySegmentListEntry);
1087 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1088 DPRINT1("Freeing dirty segment\n");
1089 }
1090 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1091 }
1092 #if defined(DBG) || defined(KDBG)
1093 Bcb->Trace = FALSE;
1094 #endif
1095 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1096
1097 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1098 ObDereferenceObject (Bcb->FileObject);
1099
1100 while (!IsListEmpty(&FreeList))
1101 {
1102 current_entry = RemoveTailList(&FreeList);
1103 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1104 Status = CcRosInternalFreeCacheSegment(current);
1105 }
1106 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1107 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1108 }
1109 return(STATUS_SUCCESS);
1110 }
1111
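/*
 * Takes an additional reference on the file's BCB, removing it from the
 * lazy-close list if the reference count was zero.
 */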
1112 VOID
1113 NTAPI
1114 CcRosReferenceCache(PFILE_OBJECT FileObject)
1115 {
1116 PBCB Bcb;
1117 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1118 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1119 ASSERT(Bcb);
1120 if (Bcb->RefCount == 0)
1121 {
1122 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1123 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1124 Bcb->BcbRemoveListEntry.Flink = NULL;
1125
1126 }
1127 else
1128 {
1129 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1130 }
1131 Bcb->RefCount++;
1132 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1133 }
1134
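/*
 * Marks the cache map behind SectionObjectPointer for removal on last close;
 * if it is already unreferenced, the file cache is deleted right away.
 */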
1135 VOID
1136 NTAPI
1137 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1138 {
1139 PBCB Bcb;
1140 DPRINT("CcRosSetRemoveOnClose()\n");
1141 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1142 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1143 if (Bcb)
1144 {
1145 Bcb->RemoveOnClose = TRUE;
1146 if (Bcb->RefCount == 0)
1147 {
1148 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1149 }
1150 }
1151 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1152 }
1153
1154
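/*
 * Drops one reference on the file's BCB. When the count reaches zero the
 * section segments are freed and the BCB is either deleted (RemoveOnClose) or
 * time-stamped and queued on the closed list for the lazy-close thread.
 */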
1155 VOID
1156 NTAPI
1157 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1158 {
1159 PBCB Bcb;
1160 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1161 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1162 ASSERT(Bcb);
1163 if (Bcb->RefCount > 0)
1164 {
1165 Bcb->RefCount--;
1166 if (Bcb->RefCount == 0)
1167 {
1168 MmFreeSectionSegments(Bcb->FileObject);
1169 if (Bcb->RemoveOnClose)
1170 {
1171 CcRosDeleteFileCache(FileObject, Bcb);
1172 }
1173 else
1174 {
1175 Bcb->TimeStamp = CcTimeStamp;
1176 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1177 }
1178 }
1179 }
1180 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1181 }
1182
1183 NTSTATUS STDCALL
1184 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1185 /*
1186 * FUNCTION: Called by the file system when a handle to a file object
1187 * has been closed.
1188 */
1189 {
1190 PBCB Bcb;
1191
1192 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1193
1194 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1195 {
1196 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1197 if (FileObject->PrivateCacheMap != NULL)
1198 {
1199 FileObject->PrivateCacheMap = NULL;
1200 if (Bcb->RefCount > 0)
1201 {
1202 Bcb->RefCount--;
1203 if (Bcb->RefCount == 0)
1204 {
1205 MmFreeSectionSegments(Bcb->FileObject);
1206 if (Bcb->RemoveOnClose)
1207 {
1208 CcRosDeleteFileCache(FileObject, Bcb);
1209 }
1210 else
1211 {
1212 Bcb->TimeStamp = CcTimeStamp;
1213 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1214 }
1215 }
1216 }
1217 }
1218 }
1219 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1220 return(STATUS_SUCCESS);
1221 }
1222
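/*
 * Attaches the file object to an already existing shared cache map, if any.
 * Returns STATUS_UNSUCCESSFUL when the file has no shared cache map yet.
 */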
1223 NTSTATUS
1224 NTAPI
1225 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1226 {
1227 PBCB Bcb;
1228 NTSTATUS Status;
1229
1230 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1231
1232 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1233 if (Bcb == NULL)
1234 {
1235 Status = STATUS_UNSUCCESSFUL;
1236 }
1237 else
1238 {
1239 if (FileObject->PrivateCacheMap == NULL)
1240 {
1241 FileObject->PrivateCacheMap = Bcb;
1242 Bcb->RefCount++;
1243 }
1244 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1245 {
1246 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1247 Bcb->BcbRemoveListEntry.Flink = NULL;
1248 }
1249 Status = STATUS_SUCCESS;
1250 }
1251 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1252
1253 return Status;
1254 }
1255
1256
1257 NTSTATUS STDCALL
1258 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1259 ULONG CacheSegmentSize)
1260 /*
1261 * FUNCTION: Initializes a BCB for a file object
1262 */
1263 {
1264 PBCB Bcb;
1265
1266 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1267 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1268 FileObject, Bcb, CacheSegmentSize);
1269
1270 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1271 if (Bcb == NULL)
1272 {
1273 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1274 if (Bcb == NULL)
1275 {
1276 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1277 return(STATUS_UNSUCCESSFUL);
1278 }
1279 memset(Bcb, 0, sizeof(BCB));
1280 ObReferenceObjectByPointer(FileObject,
1281 FILE_ALL_ACCESS,
1282 NULL,
1283 KernelMode);
1284 Bcb->FileObject = FileObject;
1285 Bcb->CacheSegmentSize = CacheSegmentSize;
1286 if (FileObject->FsContext)
1287 {
1288 Bcb->AllocationSize =
1289 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1290 Bcb->FileSize =
1291 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1292 }
1293 KeInitializeSpinLock(&Bcb->BcbLock);
1294 InitializeListHead(&Bcb->BcbSegmentListHead);
1295 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1296 }
1297 if (FileObject->PrivateCacheMap == NULL)
1298 {
1299 FileObject->PrivateCacheMap = Bcb;
1300 Bcb->RefCount++;
1301 }
1302 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1303 {
1304 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1305 Bcb->BcbRemoveListEntry.Flink = NULL;
1306 }
1307 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1308
1309 return(STATUS_SUCCESS);
1310 }
1311
1312 /*
1313 * @implemented
1314 */
1315 PFILE_OBJECT STDCALL
1316 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1317 {
1318 PBCB Bcb;
1319 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1320 {
1321 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1322 ASSERT(Bcb);
1323 return Bcb->FileObject;
1324 }
1325 return NULL;
1326 }
1327
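/*
 * Lazy-close worker thread. Wakes up roughly every 10 seconds, advances
 * CcTimeStamp and deletes BCBs that have been sitting on the closed list for
 * about 5 minutes (30 ticks), unless it is asked to terminate first.
 */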
1328 VOID STDCALL
1329 CmLazyCloseThreadMain(PVOID Ignored)
1330 {
1331 LARGE_INTEGER Timeout;
1332 PLIST_ENTRY current_entry;
1333 PBCB current;
1334 ULONG RemoveTimeStamp;
1335 NTSTATUS Status;
1336
1337 KeQuerySystemTime (&Timeout);
1338
1339 while (1)
1340 {
1341 Timeout.QuadPart += (LONGLONG)100000000; // 10sec
1342 Status = KeWaitForSingleObject(&LazyCloseThreadEvent,
1343 0,
1344 KernelMode,
1345 FALSE,
1346 &Timeout);
1347
1348 DPRINT("LazyCloseThreadMain %d\n", CcTimeStamp);
1349
1350 if (!NT_SUCCESS(Status))
1351 {
1352 DbgPrint("LazyCloseThread: Wait failed\n");
1353 KEBUGCHECKCC;
1354 break;
1355 }
1356 if (LazyCloseThreadShouldTerminate)
1357 {
1358 DbgPrint("LazyCloseThread: Terminating\n");
1359 break;
1360 }
1361
1362 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1363 CcTimeStamp++;
1364 if (CcTimeStamp >= 30)
1365 {
1366 RemoveTimeStamp = CcTimeStamp - 30; /* 5min = 10sec * 30 */
1367 while (!IsListEmpty(&ClosedListHead))
1368 {
1369 current_entry = ClosedListHead.Blink;
1370 current = CONTAINING_RECORD(current_entry, BCB, BcbRemoveListEntry);
1371 if (current->TimeStamp >= RemoveTimeStamp)
1372 {
1373 break;
1374 }
1375 CcRosDeleteFileCache(current->FileObject, current);
1376 }
1377 }
1378 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1379 }
1380 }
1381
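/*
 * One-time cache manager initialization: sets up the global segment lists and
 * ViewLock, the lookaside lists for BCBs and cache segments, registers
 * CcRosTrimCache as the MC_CACHE trim routine and starts the lazy-close
 * thread.
 */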
1382 VOID
1383 INIT_FUNCTION
1384 NTAPI
1385 CcInitView(VOID)
1386 {
1387 #ifdef CACHE_BITMAP
1388 PMEMORY_AREA marea;
1389 PVOID Buffer;
1390 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1391 #endif
1392 NTSTATUS Status;
1393 KPRIORITY Priority;
1394
1395 DPRINT("CcInitView()\n");
1396 #ifdef CACHE_BITMAP
1397 BoundaryAddressMultiple.QuadPart = 0;
1398 CiCacheSegMappingRegionHint = 0;
1399 CiCacheSegMappingRegionBase = NULL;
1400
1401 MmLockAddressSpace(MmGetKernelAddressSpace());
1402
1403 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1404 MEMORY_AREA_CACHE_SEGMENT,
1405 &CiCacheSegMappingRegionBase,
1406 CI_CACHESEG_MAPPING_REGION_SIZE,
1407 PAGE_READWRITE,
1408 &marea,
1409 FALSE,
1410 0,
1411 BoundaryAddressMultiple);
1412 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1413 if (!NT_SUCCESS(Status))
1414 {
1415 KEBUGCHECKCC;
1416 }
1417
1418 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1419
1420 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1421 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1422
1423 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1424 #endif
1425 InitializeListHead(&CacheSegmentListHead);
1426 InitializeListHead(&DirtySegmentListHead);
1427 InitializeListHead(&CacheSegmentLRUListHead);
1428 InitializeListHead(&ClosedListHead);
1429 ExInitializeFastMutex(&ViewLock);
1430 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1431 NULL,
1432 NULL,
1433 0,
1434 sizeof(INTERNAL_BCB),
1435 TAG_IBCB,
1436 20);
1437 ExInitializeNPagedLookasideList (&BcbLookasideList,
1438 NULL,
1439 NULL,
1440 0,
1441 sizeof(BCB),
1442 TAG_BCB,
1443 20);
1444 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1445 NULL,
1446 NULL,
1447 0,
1448 sizeof(CACHE_SEGMENT),
1449 TAG_CSEG,
1450 20);
1451
1452 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1453
1454 CcInitCacheZeroPage();
1455
1456 CcTimeStamp = 0;
1457 LazyCloseThreadShouldTerminate = FALSE;
1458 KeInitializeEvent (&LazyCloseThreadEvent, SynchronizationEvent, FALSE);
1459 Status = PsCreateSystemThread(&LazyCloseThreadHandle,
1460 THREAD_ALL_ACCESS,
1461 NULL,
1462 NULL,
1463 &LazyCloseThreadId,
1464 (PKSTART_ROUTINE)CmLazyCloseThreadMain,
1465 NULL);
1466 if (NT_SUCCESS(Status))
1467 {
1468 Priority = LOW_REALTIME_PRIORITY;
1469 NtSetInformationThread(LazyCloseThreadHandle,
1470 ThreadPriority,
1471 &Priority,
1472 sizeof(Priority));
1473 }
1474
1475 }
1476
1477 /* EOF */
1478
1479
1480
1481
1482
1483
1484