Lock the MainResource from the FCB if we are trying to write back a modified cache segment.
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/cc/view.c
6 * PURPOSE: Cache manager
7 *
8 * PROGRAMMERS: David Welch (welch@mcmail.com)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If the page is being read, or is only partially written, and it is
26 * not up to date, then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
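/*
 * Illustrative sketch only (not compiled as part of this file): roughly how a
 * filesystem read handler might follow the procedure above, using the CcRos*
 * routines defined below. MyFsdCachedRead and MyReadBlockFromDisk are
 * hypothetical names standing in for filesystem-specific code, and for
 * simplicity the read is assumed to fall inside a single cache segment.
 *
 *   NTSTATUS MyFsdCachedRead(PFILE_OBJECT FileObject, ULONG FileOffset,
 *                            ULONG Length, PVOID Buffer)
 *   {
 *      PBCB Bcb;
 *      PCACHE_SEGMENT CacheSeg;
 *      PVOID BaseAddress;
 *      BOOLEAN UptoDate;
 *      NTSTATUS Status;
 *
 *      (1) Make sure caching is set up for this file object.
 *      if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
 *      {
 *         Status = CcRosInitializeFileCache(FileObject, PAGE_SIZE);
 *         if (!NT_SUCCESS(Status)) return Status;
 *      }
 *      Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
 *
 *      (2) Obtain the cache segment covering this region; the offset passed
 *          to CcRosRequestCacheSegment must be cache-segment aligned.
 *      Status = CcRosRequestCacheSegment(Bcb,
 *                                        ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize),
 *                                        &BaseAddress, &UptoDate, &CacheSeg);
 *      if (!NT_SUCCESS(Status)) return Status;
 *
 *      (3) If the segment is not up to date, read it from the medium and
 *          release it as invalid on failure.
 *      if (!UptoDate)
 *      {
 *         Status = MyReadBlockFromDisk(FileObject, CacheSeg->FileOffset,
 *                                      Bcb->CacheSegmentSize, BaseAddress);
 *         if (!NT_SUCCESS(Status))
 *         {
 *            CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
 *            return Status;
 *         }
 *      }
 *
 *      (4) Copy the data out of the segment.
 *      RtlCopyMemory(Buffer,
 *                    (PUCHAR)BaseAddress + (FileOffset - CacheSeg->FileOffset),
 *                    Length);
 *
 *      (5) Release the segment: now valid, not dirtied, not mapped.
 *      return CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
 *   }
 */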
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <internal/debug.h>
38
39 /* GLOBALS *******************************************************************/
40
41 /*
42 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
43 * within the kernel address space and allocates/deallocates space from this
44 * block by means of a bitmap. If CACHE_BITMAP is used, the size of the MDL
45 * mapping region must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
46 */
47 //#define CACHE_BITMAP
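/*
 * A minimal sketch of the bitmap idiom used when CACHE_BITMAP is defined
 * (the real code lives in CcRosCreateCacheSegment and
 * CcRosInternalFreeCacheSegment below); Pages, Offset and Base are
 * illustrative local names only:
 *
 *    Pages  = Bcb->CacheSegmentSize / PAGE_SIZE;
 *    Offset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap,
 *                                    Pages, CiCacheSegMappingRegionHint);
 *    Base   = (char*)CiCacheSegMappingRegionBase + Offset * PAGE_SIZE;
 *    ... map the segment's pages at Base ...
 *    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Offset, Pages);
 */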
48
49 static LIST_ENTRY DirtySegmentListHead;
50 static LIST_ENTRY CacheSegmentListHead;
51 static LIST_ENTRY CacheSegmentLRUListHead;
52 static LIST_ENTRY ClosedListHead;
53 ULONG DirtyPageCount=0;
54
55 FAST_MUTEX ViewLock;
56
57 #ifdef CACHE_BITMAP
58 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
59
60 static PVOID CiCacheSegMappingRegionBase = NULL;
61 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
62 static ULONG CiCacheSegMappingRegionHint;
63 static KSPIN_LOCK CiCacheSegMappingRegionLock;
64 #endif
65
66 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
67 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
68 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
69
70 static ULONG CcTimeStamp;
71 static KEVENT LazyCloseThreadEvent;
72 static HANDLE LazyCloseThreadHandle;
73 static CLIENT_ID LazyCloseThreadId;
74 static volatile BOOLEAN LazyCloseThreadShouldTerminate;
75
76 #if defined(__GNUC__)
77 /* void * alloca(size_t size); */
78 #elif defined(_MSC_VER)
79 void* _alloca(size_t size);
80 #else
81 #error Unknown compiler for alloca intrinsic stack allocation "function"
82 #endif
83
84 #if defined(DBG) || defined(KDBG)
85 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
86 {
87 ++cs->ReferenceCount;
88 if ( cs->Bcb->Trace )
89 {
90 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
91 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
92 }
93 }
94 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
95 {
96 --cs->ReferenceCount;
97 if ( cs->Bcb->Trace )
98 {
99 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
100 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
101 }
102 }
103 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
104 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
105 #else
106 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
107 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
108 #endif
109
110 NTSTATUS
111 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
112
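/*
 * Try to acquire a fast mutex without blocking. On success the thread stays
 * inside the critical region entered below and must release the mutex with
 * ExReleaseFastMutexUnsafeAndLeaveCriticalRegion; on failure the critical
 * region is left again and FALSE is returned.
 */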
113 BOOLEAN
114 FASTCALL
115 CcTryToAcquireBrokenMutex(PFAST_MUTEX FastMutex)
116 {
117 KeEnterCriticalRegion();
118 if (InterlockedCompareExchange(&FastMutex->Count, 0, 1) == 1)
119 {
120 FastMutex->Owner = KeGetCurrentThread();
121 return(TRUE);
122 }
123 else
124 {
125 KeLeaveCriticalRegion();
126 return(FALSE);
127 }
128 }
129
130 /* FUNCTIONS *****************************************************************/
131
132 VOID
133 STDCALL
134 CcRosTraceCacheMap (
135 PBCB Bcb,
136 BOOLEAN Trace )
137 {
138 #if defined(DBG) || defined(KDBG)
139 KIRQL oldirql;
140 PLIST_ENTRY current_entry;
141 PCACHE_SEGMENT current;
142
143 if ( !Bcb )
144 return;
145
146 Bcb->Trace = Trace;
147
148 if ( Trace )
149 {
150 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
151
152 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
153 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
154
155 current_entry = Bcb->BcbSegmentListHead.Flink;
156 while (current_entry != &Bcb->BcbSegmentListHead)
157 {
158 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
159 current_entry = current_entry->Flink;
160
161 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
162 current, current->ReferenceCount, current->Dirty, current->PageOut );
163 }
164 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
165 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
166 }
167 else
168 {
169 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
170 }
171
172 #else
173 Bcb = Bcb;
174 Trace = Trace;
175 #endif
176 }
177
178 NTSTATUS
179 NTAPI
180 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
181 {
182 NTSTATUS Status;
183 KIRQL oldIrql;
184 Status = WriteCacheSegment(CacheSegment);
185 if (NT_SUCCESS(Status))
186 {
187 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
188 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
189 CacheSegment->Dirty = FALSE;
190 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
191 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
192 CcRosCacheSegmentDecRefCount ( CacheSegment );
193 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
194 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
195 }
196 return(Status);
197 }
198
199 NTSTATUS
200 NTAPI
201 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
202 {
203 PLIST_ENTRY current_entry;
204 PCACHE_SEGMENT current;
205 ULONG PagesPerSegment;
206 BOOLEAN Locked;
207 NTSTATUS Status;
208 static ULONG WriteCount[4] = {0, 0, 0, 0};
209 ULONG NewTarget;
210
211 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
212
213 (*Count) = 0;
214
215 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
216
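/*
 * Spread the flushing of the dirty-page backlog over several calls: age the
 * per-call quotas, schedule a quarter of the outstanding backlog for each of
 * the next four calls, and (at the end of this routine) carry any quota that
 * was not met over to the next call.
 */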
217 WriteCount[0] = WriteCount[1];
218 WriteCount[1] = WriteCount[2];
219 WriteCount[2] = WriteCount[3];
220 WriteCount[3] = 0;
221
222 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
223
224 if (NewTarget < DirtyPageCount)
225 {
226 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
227 WriteCount[0] += NewTarget;
228 WriteCount[1] += NewTarget;
229 WriteCount[2] += NewTarget;
230 WriteCount[3] += NewTarget;
231 }
232
233 NewTarget = WriteCount[0];
234
235 Target = max(NewTarget, Target);
236
237 current_entry = DirtySegmentListHead.Flink;
238 if (current_entry == &DirtySegmentListHead)
239 {
240 DPRINT("No Dirty pages\n");
241 }
242 while (current_entry != &DirtySegmentListHead && Target > 0)
243 {
244 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
245 DirtySegmentListEntry);
246 current_entry = current_entry->Flink;
247
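/*
 * Lock the FCB's MainResource (through FsContext) before writing the segment
 * back, in place of the AcquireForLazyWrite callback shown in the
 * commented-out line below; skip the segment if the resource is busy.
 */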
248 // Locked = current->Bcb->Callbacks.AcquireForLazyWrite(current->Bcb->Context, FALSE);
249 Locked = ExTryToAcquireResourceExclusiveLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
250 if (!Locked)
251 {
252 continue;
253 }
254 Locked = CcTryToAcquireBrokenMutex(&current->Lock);
255 if (!Locked)
256 {
257 // current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
258 ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
259 continue;
260 }
261 ASSERT(current->Dirty);
262 if (current->ReferenceCount > 1)
263 {
264 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
265 // current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
266 ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
267 continue;
268 }
269 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
270 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
271 Status = CcRosFlushCacheSegment(current);
272 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
273 // current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
274 ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
275 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
276 {
277 DPRINT1("CC: Failed to flush cache segment.\n");
278 }
279 else
280 {
281 (*Count) += PagesPerSegment;
282 Target -= PagesPerSegment;
283 }
284 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
285 current_entry = DirtySegmentListHead.Flink;
286 }
287 if (*Count < NewTarget)
288 {
289 WriteCount[1] += (NewTarget - *Count);
290 }
291 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
292 DPRINT("CcRosFlushDirtyPages() finished\n");
293
294 return(STATUS_SUCCESS);
295 }
296
297 NTSTATUS
298 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
299 /*
300 * FUNCTION: Try to free some memory from the file cache.
301 * ARGUMENTS:
302 * Target - The number of pages to be freed.
303 * Priority - The priority of the free operation (currently unused).
304 * NrFreed - Points to a variable where the number of pages
305 * actually freed is returned.
306 */
307 {
308 PLIST_ENTRY current_entry;
309 PCACHE_SEGMENT current, last = NULL;
310 ULONG PagesPerSegment;
311 ULONG PagesFreed;
312 KIRQL oldIrql;
313 LIST_ENTRY FreeList;
314
315 DPRINT("CcRosTrimCache(Target %d)\n", Target);
316
317 *NrFreed = 0;
318
319 InitializeListHead(&FreeList);
320
321 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
322 current_entry = CacheSegmentLRUListHead.Flink;
323 while (current_entry != &CacheSegmentLRUListHead && Target > 0)
324 {
325 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
326 CacheSegmentLRUListEntry);
327 current_entry = current_entry->Flink;
328
329 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
330 if (current->ReferenceCount == 0)
331 {
332 RemoveEntryList(&current->BcbSegmentListEntry);
333 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
334 RemoveEntryList(&current->CacheSegmentListEntry);
335 RemoveEntryList(&current->CacheSegmentLRUListEntry);
336 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
337 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
338 PagesFreed = min(PagesPerSegment, Target);
339 Target -= PagesFreed;
340 (*NrFreed) += PagesFreed;
341 }
342 else
343 {
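/*
 * The segment is still referenced and cannot be freed yet. If it is mapped,
 * clean and not already being paged out, try to page out its pages so that
 * a later pass can free the segment.
 */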
344 if (last != current && current->MappedCount > 0 && !current->Dirty && !current->PageOut)
345 {
346 ULONG i;
347 NTSTATUS Status;
348
349 CcRosCacheSegmentIncRefCount(current);
350 last = current;
351 current->PageOut = TRUE;
352 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
353 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
354 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
355 {
356 PFN_TYPE Page;
357 Page = MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT;
358 Status = MmPageOutPhysicalAddress(Page);
359 if (!NT_SUCCESS(Status))
360 {
361 break;
362 }
363 }
364 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
365 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
366 CcRosCacheSegmentDecRefCount(current);
367 current->PageOut = FALSE;
368 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
369 current_entry = &current->CacheSegmentLRUListEntry;
370 continue;
371 }
372 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
373 }
374 }
375 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
376
377 while (!IsListEmpty(&FreeList))
378 {
379 current_entry = RemoveHeadList(&FreeList);
380 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
381 BcbSegmentListEntry);
382 CcRosInternalFreeCacheSegment(current);
383 }
384
385 DPRINT("CcRosTrimCache() finished\n");
386 return(STATUS_SUCCESS);
387 }
388
389 NTSTATUS
390 NTAPI
391 CcRosReleaseCacheSegment(PBCB Bcb,
392 PCACHE_SEGMENT CacheSeg,
393 BOOLEAN Valid,
394 BOOLEAN Dirty,
395 BOOLEAN Mapped)
396 {
397 BOOLEAN WasDirty = CacheSeg->Dirty;
398 KIRQL oldIrql;
399
400 ASSERT(Bcb);
401
402 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
403 Bcb, CacheSeg, Valid);
404
405 CacheSeg->Valid = Valid;
406 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
407
408 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
409 if (!WasDirty && CacheSeg->Dirty)
410 {
411 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
412 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
413 }
414 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
415 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
416
417 if (Mapped)
418 {
419 CacheSeg->MappedCount++;
420 }
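/*
 * Drop the reference taken when the segment was requested, but keep one
 * extra reference while the segment is mapped and another while it sits on
 * the dirty list, so it is not freed prematurely.
 */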
421 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
422 CcRosCacheSegmentDecRefCount(CacheSeg);
423 if (Mapped && CacheSeg->MappedCount == 1)
424 {
425 CcRosCacheSegmentIncRefCount(CacheSeg);
426 }
427 if (!WasDirty && CacheSeg->Dirty)
428 {
429 CcRosCacheSegmentIncRefCount(CacheSeg);
430 }
431 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
432 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
433 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
434
435 return(STATUS_SUCCESS);
436 }
437
438 PCACHE_SEGMENT
439 NTAPI
440 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
441 {
442 PLIST_ENTRY current_entry;
443 PCACHE_SEGMENT current;
444 KIRQL oldIrql;
445
446 ASSERT(Bcb);
447
448 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
449
450 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
451 current_entry = Bcb->BcbSegmentListHead.Flink;
452 while (current_entry != &Bcb->BcbSegmentListHead)
453 {
454 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
455 BcbSegmentListEntry);
456 if (current->FileOffset <= FileOffset &&
457 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
458 {
459 CcRosCacheSegmentIncRefCount(current);
460 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
461 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
462 return(current);
463 }
464 current_entry = current_entry->Flink;
465 }
466 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
467 return(NULL);
468 }
469
470 NTSTATUS
471 NTAPI
472 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
473 {
474 PCACHE_SEGMENT CacheSeg;
475 KIRQL oldIrql;
476
477 ASSERT(Bcb);
478
479 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
480
481 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
482 if (CacheSeg == NULL)
483 {
484 KEBUGCHECKCC;
485 }
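/*
 * A segment on the dirty list holds an extra reference: if the segment was
 * clean, the reference taken by CcRosLookupCacheSegment above is kept for
 * the dirty list; if it was already dirty, that reference is dropped here.
 */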
486 if (!CacheSeg->Dirty)
487 {
488 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
489 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
490 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
491 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
492 }
493 else
494 {
495 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
496 CcRosCacheSegmentDecRefCount(CacheSeg);
497 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
498 }
499
500
501 CacheSeg->Dirty = TRUE;
502 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
503
504 return(STATUS_SUCCESS);
505 }
506
507 NTSTATUS
508 NTAPI
509 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
510 {
511 PCACHE_SEGMENT CacheSeg;
512 BOOLEAN WasDirty;
513 KIRQL oldIrql;
514
515 ASSERT(Bcb);
516
517 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
518 Bcb, FileOffset, NowDirty);
519
520 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
521 if (CacheSeg == NULL)
522 {
523 return(STATUS_UNSUCCESSFUL);
524 }
525
526 WasDirty = CacheSeg->Dirty;
527 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
528
529 CacheSeg->MappedCount--;
530
531 if (!WasDirty && NowDirty)
532 {
533 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
534 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
535 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
536 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
537 }
538
539 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
540 CcRosCacheSegmentDecRefCount(CacheSeg);
541 if (!WasDirty && NowDirty)
542 {
543 CcRosCacheSegmentIncRefCount(CacheSeg);
544 }
545 if (CacheSeg->MappedCount == 0)
546 {
547 CcRosCacheSegmentDecRefCount(CacheSeg);
548 }
549 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
550
551 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
552 return(STATUS_SUCCESS);
553 }
554
555 NTSTATUS STATIC
556 CcRosCreateCacheSegment(PBCB Bcb,
557 ULONG FileOffset,
558 PCACHE_SEGMENT* CacheSeg)
559 {
560 ULONG i;
561 PCACHE_SEGMENT current;
562 PCACHE_SEGMENT previous;
563 PLIST_ENTRY current_entry;
564 NTSTATUS Status;
565 KIRQL oldIrql;
566 PPFN_TYPE Pfn;
567 #ifdef CACHE_BITMAP
568 ULONG StartingOffset;
569 #else
570 #endif
571 PHYSICAL_ADDRESS BoundaryAddressMultiple;
572
573 ASSERT(Bcb);
574
575 DPRINT("CcRosCreateCacheSegment()\n");
576
577 BoundaryAddressMultiple.QuadPart = 0;
578 if (FileOffset >= Bcb->FileSize.u.LowPart)
579 {
580 *CacheSeg = NULL;
581 return STATUS_INVALID_PARAMETER;
582 }
583
584 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
585 current->Valid = FALSE;
586 current->Dirty = FALSE;
587 current->PageOut = FALSE;
588 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
589 current->Bcb = Bcb;
590 #if defined(DBG) || defined(KDBG)
591 if ( Bcb->Trace )
592 {
593 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
594 }
595 #endif
596 current->MappedCount = 0;
597 current->DirtySegmentListEntry.Flink = NULL;
598 current->DirtySegmentListEntry.Blink = NULL;
599 current->ReferenceCount = 1;
600 ExInitializeFastMutex(&current->Lock);
601 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
602 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
603
604 *CacheSeg = current;
605 /* There is a window between the call to CcRosLookupCacheSegment
606 * and CcRosCreateCacheSegment. We must therefore check whether a
607 * segment for this file offset already exists. If one does, we release
608 * our newly created segment and return the existing one.
609 */
610 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
611 current_entry = Bcb->BcbSegmentListHead.Flink;
612 previous = NULL;
613 while (current_entry != &Bcb->BcbSegmentListHead)
614 {
615 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
616 BcbSegmentListEntry);
617 if (current->FileOffset <= FileOffset &&
618 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
619 {
620 CcRosCacheSegmentIncRefCount(current);
621 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
622 #if defined(DBG) || defined(KDBG)
623 if ( Bcb->Trace )
624 {
625 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
626 Bcb,
627 (*CacheSeg),
628 current );
629 }
630 #endif
631 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&(*CacheSeg)->Lock);
632 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
633 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
634 *CacheSeg = current;
635 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
636 return STATUS_SUCCESS;
637 }
638 if (current->FileOffset < FileOffset)
639 {
640 if (previous == NULL)
641 {
642 previous = current;
643 }
644 else
645 {
646 if (previous->FileOffset < current->FileOffset)
647 {
648 previous = current;
649 }
650 }
651 }
652 current_entry = current_entry->Flink;
653 }
654 /* There was no existing segment. */
655 current = *CacheSeg;
656 if (previous)
657 {
658 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
659 }
660 else
661 {
662 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
663 }
664 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
665 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
666 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
667 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
668 #ifdef CACHE_BITMAP
669 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
670
671 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
672
673 if (StartingOffset == 0xffffffff)
674 {
675 DPRINT1("Out of CacheSeg mapping space\n");
676 KEBUGCHECKCC;
677 }
678
679 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
680
681 if (CiCacheSegMappingRegionHint == StartingOffset)
682 {
683 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
684 }
685
686 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
687 #else
688 MmLockAddressSpace(MmGetKernelAddressSpace());
689 current->BaseAddress = NULL;
690 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
691 MEMORY_AREA_CACHE_SEGMENT,
692 &current->BaseAddress,
693 Bcb->CacheSegmentSize,
694 PAGE_READWRITE,
695 (PMEMORY_AREA*)&current->MemoryArea,
696 FALSE,
697 0,
698 BoundaryAddressMultiple);
699 MmUnlockAddressSpace(MmGetKernelAddressSpace());
700 if (!NT_SUCCESS(Status))
701 {
702 KEBUGCHECKCC;
703 }
704 #endif
705 Pfn = alloca(sizeof(PFN_TYPE) * (Bcb->CacheSegmentSize / PAGE_SIZE));
706 for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++)
707 {
708 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Pfn[i]);
709 if (!NT_SUCCESS(Status))
710 {
711 KEBUGCHECKCC;
712 }
713 }
714 Status = MmCreateVirtualMapping(NULL,
715 current->BaseAddress,
716 PAGE_READWRITE,
717 Pfn,
718 Bcb->CacheSegmentSize / PAGE_SIZE);
719 if (!NT_SUCCESS(Status))
720 {
721 KEBUGCHECKCC;
722 }
723 return(STATUS_SUCCESS);
724 }
725
726 NTSTATUS
727 NTAPI
728 CcRosGetCacheSegmentChain(PBCB Bcb,
729 ULONG FileOffset,
730 ULONG Length,
731 PCACHE_SEGMENT* CacheSeg)
732 {
733 PCACHE_SEGMENT current;
734 ULONG i;
735 PCACHE_SEGMENT* CacheSegList;
736 PCACHE_SEGMENT Previous = NULL;
737
738 ASSERT(Bcb);
739
740 DPRINT("CcRosGetCacheSegmentChain()\n");
741
742 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
743
744 #if defined(__GNUC__)
745 CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
746 (Length / Bcb->CacheSegmentSize));
747 #elif defined(_MSC_VER)
748 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
749 (Length / Bcb->CacheSegmentSize));
750 #else
751 #error Unknown compiler for alloca intrinsic stack allocation "function"
752 #endif
753
754 /*
755 * Look for a cache segment already mapping the same data.
756 */
757 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
758 {
759 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
760 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
761 if (current != NULL)
762 {
763 CacheSegList[i] = current;
764 }
765 else
766 {
767 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
768 CacheSegList[i] = current;
769 }
770 }
771
772 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
773 {
774 if (i == 0)
775 {
776 *CacheSeg = CacheSegList[i];
777 Previous = CacheSegList[i];
778 }
779 else
780 {
781 Previous->NextInChain = CacheSegList[i];
782 Previous = CacheSegList[i];
783 }
784 }
785 Previous->NextInChain = NULL;
786
787 return(STATUS_SUCCESS);
788 }
789
790 NTSTATUS
791 NTAPI
792 CcRosGetCacheSegment(PBCB Bcb,
793 ULONG FileOffset,
794 PULONG BaseOffset,
795 PVOID* BaseAddress,
796 PBOOLEAN UptoDate,
797 PCACHE_SEGMENT* CacheSeg)
798 {
799 PCACHE_SEGMENT current;
800 NTSTATUS Status;
801
802 ASSERT(Bcb);
803
804 DPRINT("CcRosGetCacheSegment()\n");
805
806 /*
807 * Look for a cache segment already mapping the same data.
808 */
809 current = CcRosLookupCacheSegment(Bcb, FileOffset);
810 if (current == NULL)
811 {
812 /*
813 * Otherwise create a new segment.
814 */
815 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
816 if (!NT_SUCCESS(Status))
817 {
818 return Status;
819 }
820 }
821 /*
822 * Return information about the segment to the caller.
823 */
824 *UptoDate = current->Valid;
825 *BaseAddress = current->BaseAddress;
826 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
827 *CacheSeg = current;
828 *BaseOffset = current->FileOffset;
829 return(STATUS_SUCCESS);
830 }
831
832 NTSTATUS STDCALL
833 CcRosRequestCacheSegment(PBCB Bcb,
834 ULONG FileOffset,
835 PVOID* BaseAddress,
836 PBOOLEAN UptoDate,
837 PCACHE_SEGMENT* CacheSeg)
838 /*
839 * FUNCTION: Request a page mapping for a BCB
840 */
841 {
842 ULONG BaseOffset;
843
844 ASSERT(Bcb);
845
846 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
847 {
848 CPRINT("Bad fileoffset %x should be multiple of %x",
849 FileOffset, Bcb->CacheSegmentSize);
850 KEBUGCHECKCC;
851 }
852
853 return(CcRosGetCacheSegment(Bcb,
854 FileOffset,
855 &BaseOffset,
856 BaseAddress,
857 UptoDate,
858 CacheSeg));
859 }
860 #ifdef CACHE_BITMAP
861 #else
862 STATIC VOID
863 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
864 PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
865 {
866 ASSERT(SwapEntry == 0);
867 if (Page != 0)
868 {
869 MmReleasePageMemoryConsumer(MC_CACHE, Page);
870 }
871 }
872 #endif
873 NTSTATUS
874 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
875 /*
876 * FUNCTION: Releases a cache segment associated with a BCB
877 */
878 {
879 #ifdef CACHE_BITMAP
880 ULONG i;
881 ULONG RegionSize;
882 ULONG Base;
883 PFN_TYPE Page;
884 KIRQL oldIrql;
885 #endif
886 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
887 #if defined(DBG) || defined(KDBG)
888 if ( CacheSeg->Bcb->Trace )
889 {
890 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
891 }
892 #endif
893 #ifdef CACHE_BITMAP
894 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
895
896 /* Unmap all the pages. */
897 for (i = 0; i < RegionSize; i++)
898 {
899 MmDeleteVirtualMapping(NULL,
900 CacheSeg->BaseAddress + (i * PAGE_SIZE),
901 FALSE,
902 NULL,
903 &Page);
904 MmReleasePageMemoryConsumer(MC_CACHE, Page);
905 }
906
907 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
908 /* Deallocate all the pages used. */
909 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
910
911 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
912
913 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
914
915 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
916 #else
917 MmLockAddressSpace(MmGetKernelAddressSpace());
918 MmFreeMemoryArea(MmGetKernelAddressSpace(),
919 CacheSeg->MemoryArea,
920 CcFreeCachePage,
921 NULL);
922 MmUnlockAddressSpace(MmGetKernelAddressSpace());
923 #endif
924 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
925 return(STATUS_SUCCESS);
926 }
927
928 NTSTATUS
929 NTAPI
930 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
931 {
932 NTSTATUS Status;
933 KIRQL oldIrql;
934
935 ASSERT(Bcb);
936
937 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
938 Bcb, CacheSeg);
939
940 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
941 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
942 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
943 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
944 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
945 if (CacheSeg->Dirty)
946 {
947 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
948 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
949
950 }
951 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
952 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
953
954 Status = CcRosInternalFreeCacheSegment(CacheSeg);
955 return(Status);
956 }
957
958 /*
959 * @implemented
960 */
961 VOID STDCALL
962 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
963 IN PLARGE_INTEGER FileOffset OPTIONAL,
964 IN ULONG Length,
965 OUT PIO_STATUS_BLOCK IoStatus)
966 {
967 PBCB Bcb;
968 LARGE_INTEGER Offset;
969 PCACHE_SEGMENT current;
970 NTSTATUS Status;
971 KIRQL oldIrql;
972
973 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
974 SectionObjectPointers, FileOffset, Length, IoStatus);
975
976 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
977 {
978 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
979 ASSERT(Bcb);
980 if (FileOffset)
981 {
982 Offset = *FileOffset;
983 }
984 else
985 {
986 Offset.QuadPart = (LONGLONG)0;
987 Length = Bcb->FileSize.u.LowPart;
988 }
989
990 if (IoStatus)
991 {
992 IoStatus->Status = STATUS_SUCCESS;
993 IoStatus->Information = 0;
994 }
995
996 while (Length > 0)
997 {
998 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
999 if (current != NULL)
1000 {
1001 if (current->Dirty)
1002 {
1003 Status = CcRosFlushCacheSegment(current);
1004 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1005 {
1006 IoStatus->Status = Status;
1007 }
1008 }
1009 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1010 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
1011 CcRosCacheSegmentDecRefCount(current);
1012 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1013 }
1014
1015 Offset.QuadPart += Bcb->CacheSegmentSize;
1016 if (Length > Bcb->CacheSegmentSize)
1017 {
1018 Length -= Bcb->CacheSegmentSize;
1019 }
1020 else
1021 {
1022 Length = 0;
1023 }
1024 }
1025 }
1026 else
1027 {
1028 if (IoStatus)
1029 {
1030 IoStatus->Status = STATUS_INVALID_PARAMETER;
1031 }
1032 }
1033 }
1034
1035 NTSTATUS
1036 NTAPI
1037 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1038 /*
1039 * FUNCTION: Releases the BCB associated with a file object
1040 */
1041 {
1042 PLIST_ENTRY current_entry;
1043 PCACHE_SEGMENT current;
1044 NTSTATUS Status;
1045 LIST_ENTRY FreeList;
1046 KIRQL oldIrql;
1047
1048 ASSERT(Bcb);
1049
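/*
 * Take a temporary reference and drop ViewLock (held by the caller) while
 * flushing, so that the BCB is not deleted underneath us.
 */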
1050 Bcb->RefCount++;
1051 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1052
1053 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1054
1055 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1056 Bcb->RefCount--;
1057 if (Bcb->RefCount == 0)
1058 {
1059 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1060 {
1061 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1062 Bcb->BcbRemoveListEntry.Flink = NULL;
1063 }
1064
1065 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1066
1067 /*
1068 * Release all cache segments.
1069 */
1070 InitializeListHead(&FreeList);
1071 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1072 current_entry = Bcb->BcbSegmentListHead.Flink;
1073 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1074 {
1075 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1076 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1077 RemoveEntryList(&current->CacheSegmentListEntry);
1078 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1079 if (current->Dirty)
1080 {
1081 RemoveEntryList(&current->DirtySegmentListEntry);
1082 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1083 DPRINT1("Freeing dirty segment\n");
1084 }
1085 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1086 }
1087 #if defined(DBG) || defined(KDBG)
1088 Bcb->Trace = FALSE;
1089 #endif
1090 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1091
1092 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1093 ObDereferenceObject (Bcb->FileObject);
1094
1095 while (!IsListEmpty(&FreeList))
1096 {
1097 current_entry = RemoveTailList(&FreeList);
1098 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1099 Status = CcRosInternalFreeCacheSegment(current);
1100 }
1101 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1102 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1103 }
1104 return(STATUS_SUCCESS);
1105 }
1106
1107 VOID
1108 NTAPI
1109 CcRosReferenceCache(PFILE_OBJECT FileObject)
1110 {
1111 PBCB Bcb;
1112 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1113 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1114 ASSERT(Bcb);
1115 if (Bcb->RefCount == 0)
1116 {
1117 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1118 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1119 Bcb->BcbRemoveListEntry.Flink = NULL;
1120
1121 }
1122 else
1123 {
1124 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1125 }
1126 Bcb->RefCount++;
1127 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1128 }
1129
1130 VOID
1131 NTAPI
1132 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1133 {
1134 PBCB Bcb;
1135 DPRINT("CcRosSetRemoveOnClose()\n");
1136 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1137 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1138 if (Bcb)
1139 {
1140 Bcb->RemoveOnClose = TRUE;
1141 if (Bcb->RefCount == 0)
1142 {
1143 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1144 }
1145 }
1146 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1147 }
1148
1149
1150 VOID
1151 NTAPI
1152 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1153 {
1154 PBCB Bcb;
1155 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1156 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1157 ASSERT(Bcb);
1158 if (Bcb->RefCount > 0)
1159 {
1160 Bcb->RefCount--;
1161 if (Bcb->RefCount == 0)
1162 {
1163 MmFreeSectionSegments(Bcb->FileObject);
1164 if (Bcb->RemoveOnClose)
1165 {
1166 CcRosDeleteFileCache(FileObject, Bcb);
1167 }
1168 else
1169 {
1170 Bcb->TimeStamp = CcTimeStamp;
1171 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1172 }
1173 }
1174 }
1175 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1176 }
1177
1178 NTSTATUS STDCALL
1179 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1180 /*
1181 * FUNCTION: Called by the file system when a handle to a file object
1182 * has been closed.
1183 */
1184 {
1185 PBCB Bcb;
1186
1187 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1188
1189 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1190 {
1191 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1192 if (FileObject->PrivateCacheMap != NULL)
1193 {
1194 FileObject->PrivateCacheMap = NULL;
1195 if (Bcb->RefCount > 0)
1196 {
1197 Bcb->RefCount--;
1198 if (Bcb->RefCount == 0)
1199 {
1200 MmFreeSectionSegments(Bcb->FileObject);
1201 if (Bcb->RemoveOnClose)
1202 {
1203 CcRosDeleteFileCache(FileObject, Bcb);
1204 }
1205 else
1206 {
1207 Bcb->TimeStamp = CcTimeStamp;
1208 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1209 }
1210 }
1211 }
1212 }
1213 }
1214 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1215 return(STATUS_SUCCESS);
1216 }
1217
1218 NTSTATUS
1219 NTAPI
1220 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1221 {
1222 PBCB Bcb;
1223 NTSTATUS Status;
1224
1225 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1226
1227 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1228 if (Bcb == NULL)
1229 {
1230 Status = STATUS_UNSUCCESSFUL;
1231 }
1232 else
1233 {
1234 if (FileObject->PrivateCacheMap == NULL)
1235 {
1236 FileObject->PrivateCacheMap = Bcb;
1237 Bcb->RefCount++;
1238 }
1239 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1240 {
1241 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1242 Bcb->BcbRemoveListEntry.Flink = NULL;
1243 }
1244 Status = STATUS_SUCCESS;
1245 }
1246 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1247
1248 return Status;
1249 }
1250
1251
1252 NTSTATUS STDCALL
1253 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1254 ULONG CacheSegmentSize)
1255 /*
1256 * FUNCTION: Initializes a BCB for a file object
1257 */
1258 {
1259 PBCB Bcb;
1260
1261 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1262 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1263 FileObject, Bcb, CacheSegmentSize);
1264
1265 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1266 if (Bcb == NULL)
1267 {
1268 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1269 if (Bcb == NULL)
1270 {
1271 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1272 return(STATUS_UNSUCCESSFUL);
1273 }
1274 memset(Bcb, 0, sizeof(BCB));
1275 ObReferenceObjectByPointer(FileObject,
1276 FILE_ALL_ACCESS,
1277 NULL,
1278 KernelMode);
1279 Bcb->FileObject = FileObject;
1280 Bcb->CacheSegmentSize = CacheSegmentSize;
1281 if (FileObject->FsContext)
1282 {
1283 Bcb->AllocationSize =
1284 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1285 Bcb->FileSize =
1286 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1287 }
1288 KeInitializeSpinLock(&Bcb->BcbLock);
1289 InitializeListHead(&Bcb->BcbSegmentListHead);
1290 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1291 }
1292 if (FileObject->PrivateCacheMap == NULL)
1293 {
1294 FileObject->PrivateCacheMap = Bcb;
1295 Bcb->RefCount++;
1296 }
1297 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1298 {
1299 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1300 Bcb->BcbRemoveListEntry.Flink = NULL;
1301 }
1302 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1303
1304 return(STATUS_SUCCESS);
1305 }
1306
1307 /*
1308 * @implemented
1309 */
1310 PFILE_OBJECT STDCALL
1311 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1312 {
1313 PBCB Bcb;
1314 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1315 {
1316 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1317 ASSERT(Bcb);
1318 return Bcb->FileObject;
1319 }
1320 return NULL;
1321 }
1322
1323 VOID STDCALL
1324 CmLazyCloseThreadMain(PVOID Ignored)
1325 {
1326 LARGE_INTEGER Timeout;
1327 PLIST_ENTRY current_entry;
1328 PBCB current;
1329 ULONG RemoveTimeStamp;
1330 NTSTATUS Status;
1331
1332 KeQuerySystemTime (&Timeout);
1333
1334 while (1)
1335 {
1336 Timeout.QuadPart += (LONGLONG)100000000; // 10sec
1337 Status = KeWaitForSingleObject(&LazyCloseThreadEvent,
1338 0,
1339 KernelMode,
1340 FALSE,
1341 &Timeout);
1342
1343 DPRINT("LazyCloseThreadMain %d\n", CcTimeStamp);
1344
1345 if (!NT_SUCCESS(Status))
1346 {
1347 DbgPrint("LazyCloseThread: Wait failed\n");
1348 KEBUGCHECKCC;
1349 break;
1350 }
1351 if (LazyCloseThreadShouldTerminate)
1352 {
1353 DbgPrint("LazyCloseThread: Terminating\n");
1354 break;
1355 }
1356
1357 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1358 CcTimeStamp++;
1359 if (CcTimeStamp >= 30)
1360 {
1361 RemoveTimeStamp = CcTimeStamp - 30; /* 5min = 10sec * 30 */
1362 while (!IsListEmpty(&ClosedListHead))
1363 {
1364 current_entry = ClosedListHead.Blink;
1365 current = CONTAINING_RECORD(current_entry, BCB, BcbRemoveListEntry);
1366 if (current->TimeStamp >= RemoveTimeStamp)
1367 {
1368 break;
1369 }
1370 CcRosDeleteFileCache(current->FileObject, current);
1371 }
1372 }
1373 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1374 }
1375 }
1376
1377 VOID
1378 INIT_FUNCTION
1379 NTAPI
1380 CcInitView(VOID)
1381 {
1382 #ifdef CACHE_BITMAP
1383 PMEMORY_AREA marea;
1384 PVOID Buffer;
1385 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1386 #endif
1387 NTSTATUS Status;
1388 KPRIORITY Priority;
1389
1390 DPRINT("CcInitView()\n");
1391 #ifdef CACHE_BITMAP
1392 BoundaryAddressMultiple.QuadPart = 0;
1393 CiCacheSegMappingRegionHint = 0;
1394 CiCacheSegMappingRegionBase = NULL;
1395
1396 MmLockAddressSpace(MmGetKernelAddressSpace());
1397
1398 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1399 MEMORY_AREA_CACHE_SEGMENT,
1400 &CiCacheSegMappingRegionBase,
1401 CI_CACHESEG_MAPPING_REGION_SIZE,
1402 PAGE_READWRITE,
1403 &marea,
1404 FALSE,
1405 0,
1406 BoundaryAddressMultiple);
1407 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1408 if (!NT_SUCCESS(Status))
1409 {
1410 KEBUGCHECKCC;
1411 }
1412
1413 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1414
1415 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1416 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1417
1418 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1419 #endif
1420 InitializeListHead(&CacheSegmentListHead);
1421 InitializeListHead(&DirtySegmentListHead);
1422 InitializeListHead(&CacheSegmentLRUListHead);
1423 InitializeListHead(&ClosedListHead);
1424 ExInitializeFastMutex(&ViewLock);
1425 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1426 NULL,
1427 NULL,
1428 0,
1429 sizeof(INTERNAL_BCB),
1430 TAG_IBCB,
1431 20);
1432 ExInitializeNPagedLookasideList (&BcbLookasideList,
1433 NULL,
1434 NULL,
1435 0,
1436 sizeof(BCB),
1437 TAG_BCB,
1438 20);
1439 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1440 NULL,
1441 NULL,
1442 0,
1443 sizeof(CACHE_SEGMENT),
1444 TAG_CSEG,
1445 20);
1446
1447 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1448
1449 CcInitCacheZeroPage();
1450
1451 CcTimeStamp = 0;
1452 LazyCloseThreadShouldTerminate = FALSE;
1453 KeInitializeEvent (&LazyCloseThreadEvent, SynchronizationEvent, FALSE);
1454 Status = PsCreateSystemThread(&LazyCloseThreadHandle,
1455 THREAD_ALL_ACCESS,
1456 NULL,
1457 NULL,
1458 &LazyCloseThreadId,
1459 (PKSTART_ROUTINE)CmLazyCloseThreadMain,
1460 NULL);
1461 if (NT_SUCCESS(Status))
1462 {
1463 Priority = LOW_REALTIME_PRIORITY;
1464 NtSetInformationThread(LazyCloseThreadHandle,
1465 ThreadPriority,
1466 &Priority,
1467 sizeof(Priority));
1468 }
1469
1470 }
1471
1472 /* EOF */
1473
1474
1475
1476
1477
1478
1479