1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/cc/view.c
6 * PURPOSE: Cache manager
7 *
8 * PROGRAMMERS: David Welch (welch@mcmail.com)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19  * (1) If caching for the FCB hasn't been initiated, then do so by calling
20  * CcInitializeFileCache.
21  *
22  * (2) For each 4k region which is being read or written, obtain a cache page
23  * by calling CcRequestCachePage.
24  *
25  * (3) If the page is being read, or is not being completely overwritten, and
26  * it is not up to date, then read its data from the underlying medium. If the
27  * read fails, call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
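/*
 * Illustrative sketch only (it is not compiled as part of this module): how a
 * filesystem read path might follow the five steps above, using the routines
 * implemented in this file (CcRosInitializeFileCache, CcRosRequestCacheSegment,
 * CcRosReleaseCacheSegment). MyFsReadRegion and MyFsReadFromDisk are
 * hypothetical names, FileOffset is assumed to be CacheSegmentSize aligned,
 * and FCB locking and partial-length handling are omitted.
 *
 *   NTSTATUS MyFsReadRegion(PFILE_OBJECT FileObject, ULONG FileOffset,
 *                           ULONG Length, PVOID Buffer)
 *   {
 *       PBCB Bcb;
 *       PCACHE_SEGMENT CacheSeg;
 *       PVOID BaseAddress;
 *       BOOLEAN UptoDate;
 *       NTSTATUS Status;
 *       ULONG Offset;
 *
 *       -- (1) Initiate caching for the FCB if it hasn't been done yet.
 *       if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
 *       {
 *           Status = CcRosInitializeFileCache(FileObject, PAGE_SIZE);
 *           if (!NT_SUCCESS(Status)) return Status;
 *       }
 *       Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
 *
 *       for (Offset = 0; Offset < Length; Offset += Bcb->CacheSegmentSize)
 *       {
 *           -- (2) Obtain the cache segment covering this region.
 *           Status = CcRosRequestCacheSegment(Bcb, FileOffset + Offset,
 *                                             &BaseAddress, &UptoDate,
 *                                             &CacheSeg);
 *           if (!NT_SUCCESS(Status)) return Status;
 *
 *           -- (3) If the view is not up to date, fill it from the medium.
 *           if (!UptoDate)
 *           {
 *               Status = MyFsReadFromDisk(FileObject, FileOffset + Offset,
 *                                         Bcb->CacheSegmentSize, BaseAddress);
 *               if (!NT_SUCCESS(Status))
 *               {
 *                   CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
 *                   return Status;
 *               }
 *           }
 *
 *           -- (4) Copy the cached data into the caller's buffer.
 *           RtlCopyMemory((PUCHAR)Buffer + Offset, BaseAddress,
 *                         Bcb->CacheSegmentSize);
 *
 *           -- (5) Release the cache segment, now marked valid.
 *           CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
 *       }
 *       return STATUS_SUCCESS;
 *   }
 */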
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <internal/debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 /*
46 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
47  * within the kernel address space and allocates/deallocates space from this
48  * block via a bitmap. If CACHE_BITMAP is used, the size of the MDL mapping region
49 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
50 */
51 //#define CACHE_BITMAP
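
/*
 * Condensed illustration of the CACHE_BITMAP scheme described above (the real
 * code lives in CcRosCreateCacheSegment and CcRosInternalFreeCacheSegment
 * further down); sizes and error handling are omitted:
 *
 *   -- allocate: claim a run of PagesPerSegment pages, starting near the hint
 *   StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap,
 *                                           PagesPerSegment,
 *                                           CiCacheSegMappingRegionHint);
 *   BaseAddress = (char*)CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
 *
 *   -- free: clear the same run and move the hint back if possible
 *   RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, PagesPerSegment);
 *   CiCacheSegMappingRegionHint = min(CiCacheSegMappingRegionHint, Base);
 */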
52
53 static LIST_ENTRY DirtySegmentListHead;
54 static LIST_ENTRY CacheSegmentListHead;
55 static LIST_ENTRY CacheSegmentLRUListHead;
56 static LIST_ENTRY ClosedListHead;
57 ULONG DirtyPageCount=0;
58
59 FAST_MUTEX ViewLock;
60
61 #ifdef CACHE_BITMAP
62 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
63
64 static PVOID CiCacheSegMappingRegionBase = NULL;
65 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
66 static ULONG CiCacheSegMappingRegionHint;
67 static KSPIN_LOCK CiCacheSegMappingRegionLock;
68 #endif
69
70 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
72 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
73
74 static ULONG CcTimeStamp;
75 static KEVENT LazyCloseThreadEvent;
76 static HANDLE LazyCloseThreadHandle;
77 static CLIENT_ID LazyCloseThreadId;
78 static volatile BOOLEAN LazyCloseThreadShouldTerminate;
79
80 #if defined(__GNUC__)
81 /* void * alloca(size_t size); */
82 #elif defined(_MSC_VER)
83 void* _alloca(size_t size);
84 #else
85 #error Unknown compiler for alloca intrinsic stack allocation "function"
86 #endif
87
88 #if defined(DBG) || defined(KDBG)
89 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
90 {
91 ++cs->ReferenceCount;
92 if ( cs->Bcb->Trace )
93 {
94 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
95 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
96 }
97 }
98 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
99 {
100 --cs->ReferenceCount;
101 if ( cs->Bcb->Trace )
102 {
103 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
104 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
105 }
106 }
107 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
108 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
109 #else
110 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
111 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
112 #endif
113
114 NTSTATUS
115 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
116
117 BOOLEAN
118 FASTCALL
119 CcTryToAcquireBrokenMutex(PFAST_MUTEX FastMutex)
120 {
121 KeEnterCriticalRegion();
122 if (InterlockedCompareExchange(&FastMutex->Count, 0, 1) == 1)
123 {
124 FastMutex->Owner = KeGetCurrentThread();
125 return(TRUE);
126 }
127 else
128 {
129 KeLeaveCriticalRegion();
130 return(FALSE);
131 }
132 }
133
134 /* FUNCTIONS *****************************************************************/
135
136 VOID
137 STDCALL
138 CcRosTraceCacheMap (
139 PBCB Bcb,
140 BOOLEAN Trace )
141 {
142 #if defined(DBG) || defined(KDBG)
143 KIRQL oldirql;
144 PLIST_ENTRY current_entry;
145 PCACHE_SEGMENT current;
146
147 if ( !Bcb )
148 return;
149
150 Bcb->Trace = Trace;
151
152 if ( Trace )
153 {
154 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
155
156 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
157 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
158
159 current_entry = Bcb->BcbSegmentListHead.Flink;
160 while (current_entry != &Bcb->BcbSegmentListHead)
161 {
162 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
163 current_entry = current_entry->Flink;
164
165 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
166 current, current->ReferenceCount, current->Dirty, current->PageOut );
167 }
168 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
169 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
170 }
171 else
172 {
173 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
174 }
175
176 #else
177 Bcb = Bcb;
178 Trace = Trace;
179 #endif
180 }
181
182 NTSTATUS
183 NTAPI
184 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
185 {
186 NTSTATUS Status;
187 KIRQL oldIrql;
188 Status = WriteCacheSegment(CacheSegment);
189 if (NT_SUCCESS(Status))
190 {
191 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
192 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
193 CacheSegment->Dirty = FALSE;
194 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
195 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
196 CcRosCacheSegmentDecRefCount ( CacheSegment );
197 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
198 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
199 }
200 return(Status);
201 }
202
203 NTSTATUS
204 NTAPI
205 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
206 {
207 PLIST_ENTRY current_entry;
208 PCACHE_SEGMENT current;
209 ULONG PagesPerSegment;
210 BOOLEAN Locked;
211 NTSTATUS Status;
212 static ULONG WriteCount[4] = {0, 0, 0, 0};
213 ULONG NewTarget;
214
215 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
216
217 (*Count) = 0;
218
219 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
220
221 WriteCount[0] = WriteCount[1];
222 WriteCount[1] = WriteCount[2];
223 WriteCount[2] = WriteCount[3];
224 WriteCount[3] = 0;
225
226 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
227
228 if (NewTarget < DirtyPageCount)
229 {
230 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
231 WriteCount[0] += NewTarget;
232 WriteCount[1] += NewTarget;
233 WriteCount[2] += NewTarget;
234 WriteCount[3] += NewTarget;
235 }
236
237 NewTarget = WriteCount[0];
238
239 Target = max(NewTarget, Target);
240
241 current_entry = DirtySegmentListHead.Flink;
242 if (current_entry == &DirtySegmentListHead)
243 {
244 DPRINT("No Dirty pages\n");
245 }
246 while (current_entry != &DirtySegmentListHead && Target > 0)
247 {
248 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
249 DirtySegmentListEntry);
250 current_entry = current_entry->Flink;
251
252 // Locked = current->Bcb->Callbacks.AcquireForLazyWrite(current->Bcb->Context, FALSE);
253 Locked = ExTryToAcquireResourceExclusiveLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
254 if (!Locked)
255 {
256 continue;
257 }
258 Locked = CcTryToAcquireBrokenMutex(&current->Lock);
259 if (!Locked)
260 {
261 // current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
262 ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
263 continue;
264 }
265 ASSERT(current->Dirty);
266 if (current->ReferenceCount > 1)
267 {
268 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
269 // current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
270 ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
271 continue;
272 }
273 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
274 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
275 Status = CcRosFlushCacheSegment(current);
276 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
277 // current->Bcb->Callbacks.ReleaseFromLazyWrite(current->Bcb->Context);
278 ExReleaseResourceLite(((FSRTL_COMMON_FCB_HEADER*)(current->Bcb->FileObject->FsContext))->Resource);
279 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
280 {
281 DPRINT1("CC: Failed to flush cache segment.\n");
282 }
283 else
284 {
285 (*Count) += PagesPerSegment;
286 Target -= PagesPerSegment;
287 }
288 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
289 current_entry = DirtySegmentListHead.Flink;
290 }
291 if (*Count < NewTarget)
292 {
293 WriteCount[1] += (NewTarget - *Count);
294 }
295 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
296 DPRINT("CcRosFlushDirtyPages() finished\n");
297
298 return(STATUS_SUCCESS);
299 }
300
301 NTSTATUS
302 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
303 /*
304 * FUNCTION: Try to free some memory from the file cache.
305 * ARGUMENTS:
306 * Target - The number of pages to be freed.
307  * Priority - The priority of the free operation (currently unused).
308 * NrFreed - Points to a variable where the number of pages
309 * actually freed is returned.
310 */
311 {
312 PLIST_ENTRY current_entry;
313 PCACHE_SEGMENT current, last = NULL;
314 ULONG PagesPerSegment;
315 ULONG PagesFreed;
316 KIRQL oldIrql;
317 LIST_ENTRY FreeList;
318
319 DPRINT("CcRosTrimCache(Target %d)\n", Target);
320
321 *NrFreed = 0;
322
323 InitializeListHead(&FreeList);
324
325 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
326 current_entry = CacheSegmentLRUListHead.Flink;
327 while (current_entry != &CacheSegmentLRUListHead && Target > 0)
328 {
329 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
330 CacheSegmentLRUListEntry);
331 current_entry = current_entry->Flink;
332
333 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
334 if (current->ReferenceCount == 0)
335 {
336 RemoveEntryList(&current->BcbSegmentListEntry);
337 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
338 RemoveEntryList(&current->CacheSegmentListEntry);
339 RemoveEntryList(&current->CacheSegmentLRUListEntry);
340 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
341 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
342 PagesFreed = min(PagesPerSegment, Target);
343 Target -= PagesFreed;
344 (*NrFreed) += PagesFreed;
345 }
346 else
347 {
348 if (last != current && current->MappedCount > 0 && !current->Dirty && !current->PageOut)
349 {
350 ULONG i;
351 NTSTATUS Status;
352
353 CcRosCacheSegmentIncRefCount(current);
354 last = current;
355 current->PageOut = TRUE;
356 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
357 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
358 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
359 {
360 PFN_TYPE Page;
361 Page = MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE).QuadPart >> PAGE_SHIFT;
362 Status = MmPageOutPhysicalAddress(Page);
363 if (!NT_SUCCESS(Status))
364 {
365 break;
366 }
367 }
368 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
369 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
370 CcRosCacheSegmentDecRefCount(current);
371 current->PageOut = FALSE;
372 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
373 current_entry = &current->CacheSegmentLRUListEntry;
374 continue;
375 }
376 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
377 }
378 }
379 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
380
381 while (!IsListEmpty(&FreeList))
382 {
383 current_entry = RemoveHeadList(&FreeList);
384 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
385 BcbSegmentListEntry);
386 CcRosInternalFreeCacheSegment(current);
387 }
388
389 DPRINT("CcRosTrimCache() finished\n");
390 return(STATUS_SUCCESS);
391 }
392
393 NTSTATUS
394 NTAPI
395 CcRosReleaseCacheSegment(PBCB Bcb,
396 PCACHE_SEGMENT CacheSeg,
397 BOOLEAN Valid,
398 BOOLEAN Dirty,
399 BOOLEAN Mapped)
400 {
401 BOOLEAN WasDirty = CacheSeg->Dirty;
402 KIRQL oldIrql;
403
404 ASSERT(Bcb);
405
406 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
407 Bcb, CacheSeg, Valid);
408
409 CacheSeg->Valid = Valid;
410 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
411
412 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
413 if (!WasDirty && CacheSeg->Dirty)
414 {
415 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
416 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
417 }
418 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
419 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
420
421 if (Mapped)
422 {
423 CacheSeg->MappedCount++;
424 }
425 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
426 CcRosCacheSegmentDecRefCount(CacheSeg);
427 if (Mapped && CacheSeg->MappedCount == 1)
428 {
429 CcRosCacheSegmentIncRefCount(CacheSeg);
430 }
431 if (!WasDirty && CacheSeg->Dirty)
432 {
433 CcRosCacheSegmentIncRefCount(CacheSeg);
434 }
435 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
436 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
437 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
438
439 return(STATUS_SUCCESS);
440 }
441
442 PCACHE_SEGMENT
443 NTAPI
444 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
445 {
446 PLIST_ENTRY current_entry;
447 PCACHE_SEGMENT current;
448 KIRQL oldIrql;
449
450 ASSERT(Bcb);
451
452 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
453
454 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
455 current_entry = Bcb->BcbSegmentListHead.Flink;
456 while (current_entry != &Bcb->BcbSegmentListHead)
457 {
458 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
459 BcbSegmentListEntry);
460 if (current->FileOffset <= FileOffset &&
461 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
462 {
463 CcRosCacheSegmentIncRefCount(current);
464 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
465 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
466 return(current);
467 }
468 current_entry = current_entry->Flink;
469 }
470 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
471 return(NULL);
472 }
473
474 NTSTATUS
475 NTAPI
476 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
477 {
478 PCACHE_SEGMENT CacheSeg;
479 KIRQL oldIrql;
480
481 ASSERT(Bcb);
482
483 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);
484
485 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
486 if (CacheSeg == NULL)
487 {
488 KEBUGCHECKCC;
489 }
490 if (!CacheSeg->Dirty)
491 {
492 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
493 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
494 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
495 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
496 }
497 else
498 {
499 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
500 CcRosCacheSegmentDecRefCount(CacheSeg);
501 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
502 }
503
504
505 CacheSeg->Dirty = TRUE;
506 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
507
508 return(STATUS_SUCCESS);
509 }
510
511 NTSTATUS
512 NTAPI
513 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
514 {
515 PCACHE_SEGMENT CacheSeg;
516 BOOLEAN WasDirty;
517 KIRQL oldIrql;
518
519 ASSERT(Bcb);
520
521 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
522 Bcb, FileOffset, NowDirty);
523
524 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
525 if (CacheSeg == NULL)
526 {
527 return(STATUS_UNSUCCESSFUL);
528 }
529
530 WasDirty = CacheSeg->Dirty;
531 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
532
533 CacheSeg->MappedCount--;
534
535 if (!WasDirty && NowDirty)
536 {
537 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
538 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
539 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
540 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
541 }
542
543 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
544 CcRosCacheSegmentDecRefCount(CacheSeg);
545 if (!WasDirty && NowDirty)
546 {
547 CcRosCacheSegmentIncRefCount(CacheSeg);
548 }
549 if (CacheSeg->MappedCount == 0)
550 {
551 CcRosCacheSegmentDecRefCount(CacheSeg);
552 }
553 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
554
555 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&CacheSeg->Lock);
556 return(STATUS_SUCCESS);
557 }
558
559 NTSTATUS STATIC
560 CcRosCreateCacheSegment(PBCB Bcb,
561 ULONG FileOffset,
562 PCACHE_SEGMENT* CacheSeg)
563 {
564 ULONG i;
565 PCACHE_SEGMENT current;
566 PCACHE_SEGMENT previous;
567 PLIST_ENTRY current_entry;
568 NTSTATUS Status;
569 KIRQL oldIrql;
570 PPFN_TYPE Pfn;
571 #ifdef CACHE_BITMAP
572 ULONG StartingOffset;
573 #else
574 #endif
575 PHYSICAL_ADDRESS BoundaryAddressMultiple;
576
577 ASSERT(Bcb);
578
579 DPRINT("CcRosCreateCacheSegment()\n");
580
581 BoundaryAddressMultiple.QuadPart = 0;
582 if (FileOffset >= Bcb->FileSize.u.LowPart)
583 {
584 *CacheSeg = NULL;
585 return STATUS_INVALID_PARAMETER;
586 }
587
588 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
589 current->Valid = FALSE;
590 current->Dirty = FALSE;
591 current->PageOut = FALSE;
592 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
593 current->Bcb = Bcb;
594 #if defined(DBG) || defined(KDBG)
595 if ( Bcb->Trace )
596 {
597 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
598 }
599 #endif
600 current->MappedCount = 0;
601 current->DirtySegmentListEntry.Flink = NULL;
602 current->DirtySegmentListEntry.Blink = NULL;
603 current->ReferenceCount = 1;
604 ExInitializeFastMutex(&current->Lock);
605 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
606 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
607
608 *CacheSeg = current;
609 /* There is a window between the call to CcRosLookupCacheSegment
610 * and CcRosCreateCacheSegment. We must check whether a segment for
611 * this file offset already exists. If one exists, we release our
612 * newly created segment and return the existing one.
613 */
614 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
615 current_entry = Bcb->BcbSegmentListHead.Flink;
616 previous = NULL;
617 while (current_entry != &Bcb->BcbSegmentListHead)
618 {
619 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
620 BcbSegmentListEntry);
621 if (current->FileOffset <= FileOffset &&
622 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
623 {
624 CcRosCacheSegmentIncRefCount(current);
625 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
626 #if defined(DBG) || defined(KDBG)
627 if ( Bcb->Trace )
628 {
629 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
630 Bcb,
631 (*CacheSeg),
632 current );
633 }
634 #endif
635 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&(*CacheSeg)->Lock);
636 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
637 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
638 *CacheSeg = current;
639 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&current->Lock);
640 return STATUS_SUCCESS;
641 }
642 if (current->FileOffset < FileOffset)
643 {
644 if (previous == NULL)
645 {
646 previous = current;
647 }
648 else
649 {
650 if (previous->FileOffset < current->FileOffset)
651 {
652 previous = current;
653 }
654 }
655 }
656 current_entry = current_entry->Flink;
657 }
658 /* There was no existing segment. */
659 current = *CacheSeg;
660 if (previous)
661 {
662 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
663 }
664 else
665 {
666 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
667 }
668 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
669 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
670 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
671 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
672 #ifdef CACHE_BITMAP
673 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
674
675 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
676
677 if (StartingOffset == 0xffffffff)
678 {
679 DPRINT1("Out of CacheSeg mapping space\n");
680 KEBUGCHECKCC;
681 }
682
683 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
684
685 if (CiCacheSegMappingRegionHint == StartingOffset)
686 {
687 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
688 }
689
690 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
691 #else
692 MmLockAddressSpace(MmGetKernelAddressSpace());
693 current->BaseAddress = NULL;
694 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
695 MEMORY_AREA_CACHE_SEGMENT,
696 &current->BaseAddress,
697 Bcb->CacheSegmentSize,
698 PAGE_READWRITE,
699 (PMEMORY_AREA*)&current->MemoryArea,
700 FALSE,
701 0,
702 BoundaryAddressMultiple);
703 MmUnlockAddressSpace(MmGetKernelAddressSpace());
704 if (!NT_SUCCESS(Status))
705 {
706 KEBUGCHECKCC;
707 }
708 #endif
709 Pfn = alloca(sizeof(PFN_TYPE) * (Bcb->CacheSegmentSize / PAGE_SIZE));
710 for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++)
711 {
712 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Pfn[i]);
713 if (!NT_SUCCESS(Status))
714 {
715 KEBUGCHECKCC;
716 }
717 }
718 Status = MmCreateVirtualMapping(NULL,
719 current->BaseAddress,
720 PAGE_READWRITE,
721 Pfn,
722 Bcb->CacheSegmentSize / PAGE_SIZE);
723 if (!NT_SUCCESS(Status))
724 {
725 KEBUGCHECKCC;
726 }
727 return(STATUS_SUCCESS);
728 }
729
730 NTSTATUS
731 NTAPI
732 CcRosGetCacheSegmentChain(PBCB Bcb,
733 ULONG FileOffset,
734 ULONG Length,
735 PCACHE_SEGMENT* CacheSeg)
736 {
737 PCACHE_SEGMENT current;
738 ULONG i;
739 PCACHE_SEGMENT* CacheSegList;
740 PCACHE_SEGMENT Previous = NULL;
741
742 ASSERT(Bcb);
743
744 DPRINT("CcRosGetCacheSegmentChain()\n");
745
746 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
747
748 #if defined(__GNUC__)
749 CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
750 (Length / Bcb->CacheSegmentSize));
751 #elif defined(_MSC_VER)
752 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
753 (Length / Bcb->CacheSegmentSize));
754 #else
755 #error Unknown compiler for alloca intrinsic stack allocation "function"
756 #endif
757
758 /*
759 * Look for a cache segment already mapping the same data.
760 */
761 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
762 {
763 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
764 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
765 if (current != NULL)
766 {
767 CacheSegList[i] = current;
768 }
769 else
770 {
771 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
772 CacheSegList[i] = current;
773 }
774 }
775
776 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
777 {
778 if (i == 0)
779 {
780 *CacheSeg = CacheSegList[i];
781 Previous = CacheSegList[i];
782 }
783 else
784 {
785 Previous->NextInChain = CacheSegList[i];
786 Previous = CacheSegList[i];
787 }
788 }
789 Previous->NextInChain = NULL;
790
791 return(STATUS_SUCCESS);
792 }
793
794 NTSTATUS
795 NTAPI
796 CcRosGetCacheSegment(PBCB Bcb,
797 ULONG FileOffset,
798 PULONG BaseOffset,
799 PVOID* BaseAddress,
800 PBOOLEAN UptoDate,
801 PCACHE_SEGMENT* CacheSeg)
802 {
803 PCACHE_SEGMENT current;
804 NTSTATUS Status;
805
806 ASSERT(Bcb);
807
808 DPRINT("CcRosGetCacheSegment()\n");
809
810 /*
811 * Look for a cache segment already mapping the same data.
812 */
813 current = CcRosLookupCacheSegment(Bcb, FileOffset);
814 if (current == NULL)
815 {
816 /*
817 * Otherwise create a new segment.
818 */
819 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
820 if (!NT_SUCCESS(Status))
821 {
822 return Status;
823 }
824 }
825 /*
826 * Return information about the segment to the caller.
827 */
828 *UptoDate = current->Valid;
829 *BaseAddress = current->BaseAddress;
830 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
831 *CacheSeg = current;
832 *BaseOffset = current->FileOffset;
833 return(STATUS_SUCCESS);
834 }
835
836 NTSTATUS STDCALL
837 CcRosRequestCacheSegment(PBCB Bcb,
838 ULONG FileOffset,
839 PVOID* BaseAddress,
840 PBOOLEAN UptoDate,
841 PCACHE_SEGMENT* CacheSeg)
842 /*
843 * FUNCTION: Request a page mapping for a BCB
844 */
845 {
846 ULONG BaseOffset;
847
848 ASSERT(Bcb);
849
850 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
851 {
852 CPRINT("Bad fileoffset %x should be multiple of %x",
853 FileOffset, Bcb->CacheSegmentSize);
854 KEBUGCHECKCC;
855 }
856
857 return(CcRosGetCacheSegment(Bcb,
858 FileOffset,
859 &BaseOffset,
860 BaseAddress,
861 UptoDate,
862 CacheSeg));
863 }
864 #ifdef CACHE_BITMAP
865 #else
866 STATIC VOID
867 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
868 PFN_TYPE Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
869 {
870 ASSERT(SwapEntry == 0);
871 if (Page != 0)
872 {
873 MmReleasePageMemoryConsumer(MC_CACHE, Page);
874 }
875 }
876 #endif
877 NTSTATUS
878 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
879 /*
880 * FUNCTION: Releases a cache segment associated with a BCB
881 */
882 {
883 #ifdef CACHE_BITMAP
884 ULONG i;
885 ULONG RegionSize;
886 ULONG Base;
887 PFN_TYPE Page;
888 KIRQL oldIrql;
889 #endif
890 DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
891 #if defined(DBG) || defined(KDBG)
892 if ( CacheSeg->Bcb->Trace )
893 {
894 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
895 }
896 #endif
897 #ifdef CACHE_BITMAP
898 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
899
900 /* Unmap all the pages. */
901 for (i = 0; i < RegionSize; i++)
902 {
903 MmDeleteVirtualMapping(NULL,
904 CacheSeg->BaseAddress + (i * PAGE_SIZE),
905 FALSE,
906 NULL,
907 &Page);
908 MmReleasePageMemoryConsumer(MC_CACHE, Page);
909 }
910
911 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
912 /* Deallocate all the pages used. */
913 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
914
915 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
916
917 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
918
919 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
920 #else
921 MmLockAddressSpace(MmGetKernelAddressSpace());
922 MmFreeMemoryArea(MmGetKernelAddressSpace(),
923 CacheSeg->MemoryArea,
924 CcFreeCachePage,
925 NULL);
926 MmUnlockAddressSpace(MmGetKernelAddressSpace());
927 #endif
928 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
929 return(STATUS_SUCCESS);
930 }
931
932 NTSTATUS
933 NTAPI
934 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
935 {
936 NTSTATUS Status;
937 KIRQL oldIrql;
938
939 ASSERT(Bcb);
940
941 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
942 Bcb, CacheSeg);
943
944 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
945 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
946 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
947 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
948 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
949 if (CacheSeg->Dirty)
950 {
951 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
952 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
953
954 }
955 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
956 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
957
958 Status = CcRosInternalFreeCacheSegment(CacheSeg);
959 return(Status);
960 }
961
962 /*
963 * @implemented
964 */
965 VOID STDCALL
966 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
967 IN PLARGE_INTEGER FileOffset OPTIONAL,
968 IN ULONG Length,
969 OUT PIO_STATUS_BLOCK IoStatus)
970 {
971 PBCB Bcb;
972 LARGE_INTEGER Offset;
973 PCACHE_SEGMENT current;
974 NTSTATUS Status;
975 KIRQL oldIrql;
976
977 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
978 SectionObjectPointers, FileOffset, Length, IoStatus);
979
980 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
981 {
982 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
983 ASSERT(Bcb);
984 if (FileOffset)
985 {
986 Offset = *FileOffset;
987 }
988 else
989 {
990 Offset.QuadPart = (LONGLONG)0;
991 Length = Bcb->FileSize.u.LowPart;
992 }
993
994 if (IoStatus)
995 {
996 IoStatus->Status = STATUS_SUCCESS;
997 IoStatus->Information = 0;
998 }
999
1000 while (Length > 0)
1001 {
1002 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
1003 if (current != NULL)
1004 {
1005 if (current->Dirty)
1006 {
1007 Status = CcRosFlushCacheSegment(current);
1008 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1009 {
1010 IoStatus->Status = Status;
1011 }
1012 }
1013 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1014 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&current->Lock);
1015 CcRosCacheSegmentDecRefCount(current);
1016 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1017 }
1018
1019 Offset.QuadPart += Bcb->CacheSegmentSize;
1020 if (Length > Bcb->CacheSegmentSize)
1021 {
1022 Length -= Bcb->CacheSegmentSize;
1023 }
1024 else
1025 {
1026 Length = 0;
1027 }
1028 }
1029 }
1030 else
1031 {
1032 if (IoStatus)
1033 {
1034 IoStatus->Status = STATUS_INVALID_PARAMETER;
1035 }
1036 }
1037 }
1038
1039 NTSTATUS
1040 NTAPI
1041 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
1042 /*
1043 * FUNCTION: Releases the BCB associated with a file object
1044 */
1045 {
1046 PLIST_ENTRY current_entry;
1047 PCACHE_SEGMENT current;
1048 NTSTATUS Status;
1049 LIST_ENTRY FreeList;
1050 KIRQL oldIrql;
1051
1052 ASSERT(Bcb);
1053
1054 Bcb->RefCount++;
1055 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1056
1057 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1058
1059 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1060 Bcb->RefCount--;
1061 if (Bcb->RefCount == 0)
1062 {
1063 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1064 {
1065 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1066 Bcb->BcbRemoveListEntry.Flink = NULL;
1067 }
1068
1069 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1070
1071 /*
1072 * Release all cache segments.
1073 */
1074 InitializeListHead(&FreeList);
1075 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
1076 current_entry = Bcb->BcbSegmentListHead.Flink;
1077 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
1078 {
1079 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
1080 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1081 RemoveEntryList(&current->CacheSegmentListEntry);
1082 RemoveEntryList(&current->CacheSegmentLRUListEntry);
1083 if (current->Dirty)
1084 {
1085 RemoveEntryList(&current->DirtySegmentListEntry);
1086 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
1087 DPRINT1("Freeing dirty segment\n");
1088 }
1089 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
1090 }
1091 #if defined(DBG) || defined(KDBG)
1092 Bcb->Trace = FALSE;
1093 #endif
1094 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
1095
1096 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1097 ObDereferenceObject (Bcb->FileObject);
1098
1099 while (!IsListEmpty(&FreeList))
1100 {
1101 current_entry = RemoveTailList(&FreeList);
1102 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
1103 Status = CcRosInternalFreeCacheSegment(current);
1104 }
1105 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
1106 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1107 }
1108 return(STATUS_SUCCESS);
1109 }
1110
1111 VOID
1112 NTAPI
1113 CcRosReferenceCache(PFILE_OBJECT FileObject)
1114 {
1115 PBCB Bcb;
1116 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1117 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1118 ASSERT(Bcb);
1119 if (Bcb->RefCount == 0)
1120 {
1121 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1122 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1123 Bcb->BcbRemoveListEntry.Flink = NULL;
1124
1125 }
1126 else
1127 {
1128 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1129 }
1130 Bcb->RefCount++;
1131 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1132 }
1133
1134 VOID
1135 NTAPI
1136 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1137 {
1138 PBCB Bcb;
1139 DPRINT("CcRosSetRemoveOnClose()\n");
1140 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1141 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1142 if (Bcb)
1143 {
1144 Bcb->RemoveOnClose = TRUE;
1145 if (Bcb->RefCount == 0)
1146 {
1147 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1148 }
1149 }
1150 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1151 }
1152
1153
1154 VOID
1155 NTAPI
1156 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1157 {
1158 PBCB Bcb;
1159 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1160 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1161 ASSERT(Bcb);
1162 if (Bcb->RefCount > 0)
1163 {
1164 Bcb->RefCount--;
1165 if (Bcb->RefCount == 0)
1166 {
1167 MmFreeSectionSegments(Bcb->FileObject);
1168 if (Bcb->RemoveOnClose)
1169 {
1170 CcRosDeleteFileCache(FileObject, Bcb);
1171 }
1172 else
1173 {
1174 Bcb->TimeStamp = CcTimeStamp;
1175 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1176 }
1177 }
1178 }
1179 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1180 }
1181
1182 NTSTATUS STDCALL
1183 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1184 /*
1185 * FUNCTION: Called by the file system when a handle to a file object
1186 * has been closed.
1187 */
1188 {
1189 PBCB Bcb;
1190
1191 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1192
1193 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1194 {
1195 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1196 if (FileObject->PrivateCacheMap != NULL)
1197 {
1198 FileObject->PrivateCacheMap = NULL;
1199 if (Bcb->RefCount > 0)
1200 {
1201 Bcb->RefCount--;
1202 if (Bcb->RefCount == 0)
1203 {
1204 MmFreeSectionSegments(Bcb->FileObject);
1205 if (Bcb->RemoveOnClose)
1206 {
1207 CcRosDeleteFileCache(FileObject, Bcb);
1208 }
1209 else
1210 {
1211 Bcb->TimeStamp = CcTimeStamp;
1212 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1213 }
1214 }
1215 }
1216 }
1217 }
1218 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1219 return(STATUS_SUCCESS);
1220 }
1221
1222 NTSTATUS
1223 NTAPI
1224 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1225 {
1226 PBCB Bcb;
1227 NTSTATUS Status;
1228
1229 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1230
1231 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1232 if (Bcb == NULL)
1233 {
1234 Status = STATUS_UNSUCCESSFUL;
1235 }
1236 else
1237 {
1238 if (FileObject->PrivateCacheMap == NULL)
1239 {
1240 FileObject->PrivateCacheMap = Bcb;
1241 Bcb->RefCount++;
1242 }
1243 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1244 {
1245 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1246 Bcb->BcbRemoveListEntry.Flink = NULL;
1247 }
1248 Status = STATUS_SUCCESS;
1249 }
1250 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1251
1252 return Status;
1253 }
1254
1255
1256 NTSTATUS STDCALL
1257 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1258 ULONG CacheSegmentSize)
1259 /*
1260 * FUNCTION: Initializes a BCB for a file object
1261 */
1262 {
1263 PBCB Bcb;
1264
1265 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1266 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1267 FileObject, Bcb, CacheSegmentSize);
1268
1269 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1270 if (Bcb == NULL)
1271 {
1272 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1273 if (Bcb == NULL)
1274 {
1275 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1276 return(STATUS_UNSUCCESSFUL);
1277 }
1278 memset(Bcb, 0, sizeof(BCB));
1279 ObReferenceObjectByPointer(FileObject,
1280 FILE_ALL_ACCESS,
1281 NULL,
1282 KernelMode);
1283 Bcb->FileObject = FileObject;
1284 Bcb->CacheSegmentSize = CacheSegmentSize;
1285 if (FileObject->FsContext)
1286 {
1287 Bcb->AllocationSize =
1288 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1289 Bcb->FileSize =
1290 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1291 }
1292 KeInitializeSpinLock(&Bcb->BcbLock);
1293 InitializeListHead(&Bcb->BcbSegmentListHead);
1294 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1295 }
1296 if (FileObject->PrivateCacheMap == NULL)
1297 {
1298 FileObject->PrivateCacheMap = Bcb;
1299 Bcb->RefCount++;
1300 }
1301 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1302 {
1303 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1304 Bcb->BcbRemoveListEntry.Flink = NULL;
1305 }
1306 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1307
1308 return(STATUS_SUCCESS);
1309 }
1310
1311 /*
1312 * @implemented
1313 */
1314 PFILE_OBJECT STDCALL
1315 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1316 {
1317 PBCB Bcb;
1318 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1319 {
1320 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1321 ASSERT(Bcb);
1322 return Bcb->FileObject;
1323 }
1324 return NULL;
1325 }
1326
1327 VOID STDCALL
1328 CmLazyCloseThreadMain(PVOID Ignored)
1329 {
1330 LARGE_INTEGER Timeout;
1331 PLIST_ENTRY current_entry;
1332 PBCB current;
1333 ULONG RemoveTimeStamp;
1334 NTSTATUS Status;
1335
1336 KeQuerySystemTime (&Timeout);
1337
1338 while (1)
1339 {
1340 Timeout.QuadPart += (LONGLONG)100000000; // 10sec
1341 Status = KeWaitForSingleObject(&LazyCloseThreadEvent,
1342 0,
1343 KernelMode,
1344 FALSE,
1345 &Timeout);
1346
1347 DPRINT("LazyCloseThreadMain %d\n", CcTimeStamp);
1348
1349 if (!NT_SUCCESS(Status))
1350 {
1351 DbgPrint("LazyCloseThread: Wait failed\n");
1352 KEBUGCHECKCC;
1353 break;
1354 }
1355 if (LazyCloseThreadShouldTerminate)
1356 {
1357 DbgPrint("LazyCloseThread: Terminating\n");
1358 break;
1359 }
1360
1361 ExEnterCriticalRegionAndAcquireFastMutexUnsafe(&ViewLock);
1362 CcTimeStamp++;
1363 if (CcTimeStamp >= 30)
1364 {
1365 RemoveTimeStamp = CcTimeStamp - 30; /* 5min = 10sec * 30 */
1366 while (!IsListEmpty(&ClosedListHead))
1367 {
1368 current_entry = ClosedListHead.Blink;
1369 current = CONTAINING_RECORD(current_entry, BCB, BcbRemoveListEntry);
1370 if (current->TimeStamp >= RemoveTimeStamp)
1371 {
1372 break;
1373 }
1374 CcRosDeleteFileCache(current->FileObject, current);
1375 }
1376 }
1377 ExReleaseFastMutexUnsafeAndLeaveCriticalRegion(&ViewLock);
1378 }
1379 }
1380
1381 VOID
1382 INIT_FUNCTION
1383 NTAPI
1384 CcInitView(VOID)
1385 {
1386 #ifdef CACHE_BITMAP
1387 PMEMORY_AREA marea;
1388 PVOID Buffer;
1389 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1390 #endif
1391 NTSTATUS Status;
1392 KPRIORITY Priority;
1393
1394 DPRINT("CcInitView()\n");
1395 #ifdef CACHE_BITMAP
1396 BoundaryAddressMultiple.QuadPart = 0;
1397 CiCacheSegMappingRegionHint = 0;
1398 CiCacheSegMappingRegionBase = NULL;
1399
1400 MmLockAddressSpace(MmGetKernelAddressSpace());
1401
1402 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1403 MEMORY_AREA_CACHE_SEGMENT,
1404 &CiCacheSegMappingRegionBase,
1405 CI_CACHESEG_MAPPING_REGION_SIZE,
1406 PAGE_READWRITE,
1407 &marea,
1408 FALSE,
1409 0,
1410 BoundaryAddressMultiple);
1411 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1412 if (!NT_SUCCESS(Status))
1413 {
1414 KEBUGCHECKCC;
1415 }
1416
1417 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1418
1419 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1420 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1421
1422 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1423 #endif
1424 InitializeListHead(&CacheSegmentListHead);
1425 InitializeListHead(&DirtySegmentListHead);
1426 InitializeListHead(&CacheSegmentLRUListHead);
1427 InitializeListHead(&ClosedListHead);
1428 ExInitializeFastMutex(&ViewLock);
1429 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1430 NULL,
1431 NULL,
1432 0,
1433 sizeof(INTERNAL_BCB),
1434 TAG_IBCB,
1435 20);
1436 ExInitializeNPagedLookasideList (&BcbLookasideList,
1437 NULL,
1438 NULL,
1439 0,
1440 sizeof(BCB),
1441 TAG_BCB,
1442 20);
1443 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1444 NULL,
1445 NULL,
1446 0,
1447 sizeof(CACHE_SEGMENT),
1448 TAG_CSEG,
1449 20);
1450
1451 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1452
1453 CcInitCacheZeroPage();
1454
1455 CcTimeStamp = 0;
1456 LazyCloseThreadShouldTerminate = FALSE;
1457 KeInitializeEvent (&LazyCloseThreadEvent, SynchronizationEvent, FALSE);
1458 Status = PsCreateSystemThread(&LazyCloseThreadHandle,
1459 THREAD_ALL_ACCESS,
1460 NULL,
1461 NULL,
1462 &LazyCloseThreadId,
1463 (PKSTART_ROUTINE)CmLazyCloseThreadMain,
1464 NULL);
1465 if (NT_SUCCESS(Status))
1466 {
1467 Priority = LOW_REALTIME_PRIORITY;
1468 NtSetInformationThread(LazyCloseThreadHandle,
1469 ThreadPriority,
1470 &Priority,
1471 sizeof(Priority));
1472 }
1473
1474 }
1475
1476 /* EOF */