Continuation of the MSVC compilation changes.
1 /*
2 * ReactOS kernel
3 * Copyright (C) 1998, 1999, 2000, 2001 ReactOS Team
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19 /* $Id: view.c,v 1.70 2003/12/30 18:52:03 fireball Exp $
20 *
21 * PROJECT: ReactOS kernel
22 * FILE: ntoskrnl/cc/view.c
23 * PURPOSE: Cache manager
24 * PROGRAMMER: David Welch (welch@mcmail.com)
25 * PORTABILITY: Checked
26 * UPDATE HISTORY:
27 * Created 22/05/98
28 */
29
30 /* NOTES **********************************************************************
31 *
32 * This is not the NT implementation of a file cache nor anything much like
33 * it.
34 *
35 * The general procedure for a filesystem to implement a read or write
36 * dispatch routine is as follows
37 *
 38  * (1) If caching for the FCB has not been initiated, then do so by calling
 39  * CcInitializeFileCache.
 40  *
 41  * (2) For each 4k region which is being read or written, obtain a cache page
 42  * by calling CcRequestCachePage.
 43  *
 44  * (3) If the page is being read, or is being only partially written, and it
 45  * is not up to date, then read its data from the underlying medium. If the
 46  * read fails, call CcReleaseCachePage with VALID set to FALSE and return an error.
47 *
48 * (4) Copy the data into or out of the page as necessary.
49 *
 50  * (5) Release the cache page. (An illustrative sketch of this sequence
 51  * follows below.) */
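/*
 * A minimal, non-authoritative sketch of the five steps above, written
 * against the Ros-prefixed routines implemented later in this file
 * (CcRosRequestCacheSegment/CcRosReleaseCacheSegment). The helper name
 * FsdReadThroughCache, its parameters and the "(3) read from the medium"
 * placeholder are hypothetical and only illustrate how a filesystem read
 * dispatch routine might drive the cache; the block is excluded from the
 * build with "#if 0".
 */
#if 0
static NTSTATUS
FsdReadThroughCache(PFILE_OBJECT FileObject, ULONG FileOffset,
                    ULONG Length, PVOID Buffer)
{
   /* (1) Caching is assumed to have been set up with CcRosInitializeFileCache,
    * which stores the BCB in the section object pointers. */
   PBCB Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
   PCACHE_SEGMENT CacheSeg;
   PVOID BaseAddress;
   BOOLEAN UptoDate;
   NTSTATUS Status;

   while (Length > 0)
   {
      ULONG SegOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
      ULONG Delta = FileOffset - SegOffset;
      ULONG Chunk = min(Length, Bcb->CacheSegmentSize - Delta);

      /* (2) Obtain the cache segment covering this region. */
      Status = CcRosRequestCacheSegment(Bcb, SegOffset, &BaseAddress,
                                        &UptoDate, &CacheSeg);
      if (!NT_SUCCESS(Status))
      {
         return Status;
      }
      if (!UptoDate)
      {
         /* (3) Fill the segment from the underlying medium here; on failure
          * release it with Valid == FALSE and return the error. */
      }
      /* (4) Copy the data out of the view. */
      memcpy(Buffer, (char*)BaseAddress + Delta, Chunk);
      /* (5) Release the segment: valid after step (3), not dirtied here,
       * and not being kept mapped. */
      CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);

      FileOffset += Chunk;
      Buffer = (char*)Buffer + Chunk;
      Length -= Chunk;
   }
   return STATUS_SUCCESS;
}
#endif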
52 /* INCLUDES ******************************************************************/
53
54 #include <ddk/ntddk.h>
55 #include <ddk/ntifs.h>
56 #include <internal/mm.h>
57 #include <internal/cc.h>
58 #include <internal/pool.h>
59 #include <ntos/minmax.h>
60
61 #define NDEBUG
62 #include <internal/debug.h>
63
64 /* GLOBALS *******************************************************************/
65
66 /*
 67  * If CACHE_BITMAP is defined, the cache manager uses one large memory region
 68  * within the kernel address space and allocates/deallocates space from this
 69  * block by means of a bitmap. If CACHE_BITMAP is used, the size of the MDL
 70  * mapping region must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
71 */
72 //#define CACHE_BITMAP
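/*
 * A minimal sketch of the bitmap scheme described above, using the Rtl bitmap
 * routines that the CACHE_BITMAP code further down relies on. A Bcb is assumed
 * to be in scope, and the spinlock protecting the bitmap is omitted; this is
 * for illustration only and is excluded from the build.
 */
#if 0
ULONG Pages = Bcb->CacheSegmentSize / PAGE_SIZE;
ULONG StartingPage;

/* Allocate: find a run of clear bits, set them and compute the base address. */
StartingPage = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap,
                                      Pages, CiCacheSegMappingRegionHint);
if (StartingPage != 0xffffffff)
{
   PVOID Base = (char*)CiCacheSegMappingRegionBase + StartingPage * PAGE_SIZE;
   /* ... map the segment's pages at Base ... */

   /* Free: clear the same run of bits when the segment is destroyed. */
   RtlClearBits(&CiCacheSegMappingRegionAllocMap, StartingPage, Pages);
}
#endif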
73
74 #define ROUND_UP(N, S) ((((N) + (S) - 1) / (S)) * (S))
75 #define ROUND_DOWN(N, S) (((N) % (S)) ? ROUND_UP(N, S) - S : N)
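/* For example, with S == 4096: ROUND_UP(5000, 4096) == 8192 and
 * ROUND_DOWN(5000, 4096) == 4096; values that are already multiples of S
 * are returned unchanged by both macros. */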
76
77 #define TAG_CSEG TAG('C', 'S', 'E', 'G')
78 #define TAG_BCB TAG('B', 'C', 'B', ' ')
79 #define TAG_IBCB TAG('i', 'B', 'C', 'B')
80
81 static LIST_ENTRY DirtySegmentListHead;
82 static LIST_ENTRY CacheSegmentListHead;
83 static LIST_ENTRY CacheSegmentLRUListHead;
84 static LIST_ENTRY ClosedListHead;
85 ULONG DirtyPageCount=0;
86
87 FAST_MUTEX ViewLock;
88
89 #ifdef CACHE_BITMAP
90 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
91
92 static PVOID CiCacheSegMappingRegionBase = NULL;
93 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
94 static ULONG CiCacheSegMappingRegionHint;
95 static KSPIN_LOCK CiCacheSegMappingRegionLock;
96 #endif
97
98 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
99 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
100 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
101
102 static ULONG CcTimeStamp;
103 static KEVENT LazyCloseThreadEvent;
104 static HANDLE LazyCloseThreadHandle;
105 static CLIENT_ID LazyCloseThreadId;
106 static volatile BOOLEAN LazyCloseThreadShouldTerminate;
107
108 #if defined(__GNUC__)
109 void * alloca(size_t size);
110 #elif defined(_MSC_VER)
111 void* _alloca(size_t size);
112 #else
113 #error Unknown compiler for alloca intrinsic stack allocation "function"
114 #endif
115
116
117 NTSTATUS
118 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
119
120 /* FUNCTIONS *****************************************************************/
121
122 NTSTATUS STATIC
123 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
124 {
125 NTSTATUS Status;
126 KIRQL oldIrql;
127 Status = WriteCacheSegment(CacheSegment);
128 if (NT_SUCCESS(Status))
129 {
130 ExAcquireFastMutex(&ViewLock);
131 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
132 CacheSegment->Dirty = FALSE;
133 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
134 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
135 CacheSegment->ReferenceCount--;
136 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
137 ExReleaseFastMutex(&ViewLock);
138 }
139 return(Status);
140 }
141
142 NTSTATUS
143 CcRosFlushDirtyPages(ULONG Target, PULONG Count)
144 {
145 PLIST_ENTRY current_entry;
146 PCACHE_SEGMENT current;
147 ULONG PagesPerSegment;
148 BOOLEAN Locked;
149 NTSTATUS Status;
150 static ULONG WriteCount[4] = {0, 0, 0, 0};
151 ULONG NewTarget;
152
153 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);
154
155 (*Count) = 0;
156
157 ExAcquireFastMutex(&ViewLock);
158
159 WriteCount[0] = WriteCount[1];
160 WriteCount[1] = WriteCount[2];
161 WriteCount[2] = WriteCount[3];
162 WriteCount[3] = 0;
163
164 NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];
165
166 if (NewTarget < DirtyPageCount)
167 {
168 NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
169 WriteCount[0] += NewTarget;
170 WriteCount[1] += NewTarget;
171 WriteCount[2] += NewTarget;
172 WriteCount[3] += NewTarget;
173 }
174
175 NewTarget = WriteCount[0];
176
177 Target = max(NewTarget, Target);
178
179 current_entry = DirtySegmentListHead.Flink;
180 if (current_entry == &DirtySegmentListHead)
181 {
182 DPRINT("No Dirty pages\n");
183 }
184 while (current_entry != &DirtySegmentListHead && Target > 0)
185 {
186 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
187 DirtySegmentListEntry);
188 current_entry = current_entry->Flink;
189 Locked = ExTryToAcquireFastMutex(&current->Lock);
190 if (!Locked)
191 {
192 continue;
193 }
194 assert(current->Dirty);
195 if (current->ReferenceCount > 1)
196 {
197 ExReleaseFastMutex(&current->Lock);
198 continue;
199 }
200 ExReleaseFastMutex(&ViewLock);
201 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
202 Status = CcRosFlushCacheSegment(current);
203 ExReleaseFastMutex(&current->Lock);
204 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
205 {
206 DPRINT1("CC: Failed to flush cache segment.\n");
207 }
208 else
209 {
210 (*Count) += PagesPerSegment;
211 Target -= PagesPerSegment;
212 }
213 ExAcquireFastMutex(&ViewLock);
214 current_entry = DirtySegmentListHead.Flink;
215 }
216 if (*Count < NewTarget)
217 {
218 WriteCount[1] += (NewTarget - *Count);
219 }
220 ExReleaseFastMutex(&ViewLock);
221 DPRINT("CcRosFlushDirtyPages() finished\n");
222
223 return(STATUS_SUCCESS);
224 }
225
226 NTSTATUS
227 CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
228 /*
229 * FUNCTION: Try to free some memory from the file cache.
230 * ARGUMENTS:
231 * Target - The number of pages to be freed.
 232  * Priority - The priority of the free operation (currently unused).
233 * NrFreed - Points to a variable where the number of pages
234 * actually freed is returned.
235 */
236 {
237 PLIST_ENTRY current_entry;
238 PCACHE_SEGMENT current, last = NULL;
239 ULONG PagesPerSegment;
240 ULONG PagesFreed;
241 KIRQL oldIrql;
242 LIST_ENTRY FreeList;
243
244 DPRINT("CcRosTrimCache(Target %d)\n", Target);
245
246 *NrFreed = 0;
247
248 InitializeListHead(&FreeList);
249
250 ExAcquireFastMutex(&ViewLock);
251 current_entry = CacheSegmentLRUListHead.Flink;
252 while (current_entry != &CacheSegmentLRUListHead && Target > 0)
253 {
254 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
255 CacheSegmentLRUListEntry);
256 current_entry = current_entry->Flink;
257
258 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
259 if (current->ReferenceCount == 0)
260 {
261 RemoveEntryList(&current->BcbSegmentListEntry);
262 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
263 RemoveEntryList(&current->CacheSegmentListEntry);
264 RemoveEntryList(&current->CacheSegmentLRUListEntry);
265 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
266 PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
267 PagesFreed = min(PagesPerSegment, Target);
268 Target -= PagesFreed;
269 (*NrFreed) += PagesFreed;
270 }
271 else
272 {
273 if (last != current && current->MappedCount > 0 && !current->Dirty)
274 {
275 ULONG i;
276 NTSTATUS Status;
277
278 current->ReferenceCount++;
279 last = current;
280 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
281 ExReleaseFastMutex(&ViewLock);
282 for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
283 {
284 PHYSICAL_ADDRESS Page;
285 Page = MmGetPhysicalAddress((char*)current->BaseAddress + i * PAGE_SIZE);
286 Status = MmPageOutPhysicalAddress(Page);
287 if (!NT_SUCCESS(Status))
288 {
289 break;
290 }
291 }
292 ExAcquireFastMutex(&ViewLock);
293 KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
294 current->ReferenceCount--;
295 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
296 current_entry = &current->CacheSegmentLRUListEntry;
297 continue;
298 }
299 KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
300 }
301 }
302 ExReleaseFastMutex(&ViewLock);
303
304 while (!IsListEmpty(&FreeList))
305 {
306 current_entry = RemoveHeadList(&FreeList);
307 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
308 BcbSegmentListEntry);
309 CcRosInternalFreeCacheSegment(current);
310 }
311
312 DPRINT("CcRosTrimCache() finished\n");
313 return(STATUS_SUCCESS);
314 }
315
316 NTSTATUS
317 CcRosReleaseCacheSegment(PBCB Bcb,
318 PCACHE_SEGMENT CacheSeg,
319 BOOLEAN Valid,
320 BOOLEAN Dirty,
321 BOOLEAN Mapped)
322 {
323 BOOLEAN WasDirty = CacheSeg->Dirty;
324 KIRQL oldIrql;
325
326 assert(Bcb);
327
328 DPRINT("CcReleaseCacheSegment(Bcb %x, CacheSeg %x, Valid %d)\n",
329 Bcb, CacheSeg, Valid);
330
331 CacheSeg->Valid = Valid;
332 CacheSeg->Dirty = CacheSeg->Dirty || Dirty;
333
334 ExAcquireFastMutex(&ViewLock);
335 if (!WasDirty && CacheSeg->Dirty)
336 {
337 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
338 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
339 }
340 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
341 InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);
342
343 if (Mapped)
344 {
345 CacheSeg->MappedCount++;
346 }
347 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
348 CacheSeg->ReferenceCount--;
349 if (Mapped && CacheSeg->MappedCount == 1)
350 {
351 CacheSeg->ReferenceCount++;
352 }
353 if (!WasDirty && CacheSeg->Dirty)
354 {
355 CacheSeg->ReferenceCount++;
356 }
357 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
358 ExReleaseFastMutex(&ViewLock);
359 ExReleaseFastMutex(&CacheSeg->Lock);
360
361 return(STATUS_SUCCESS);
362 }
363
364 PCACHE_SEGMENT
365 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
366 {
367 PLIST_ENTRY current_entry;
368 PCACHE_SEGMENT current;
369 KIRQL oldIrql;
370
371 assert(Bcb);
372
373 DPRINT("CcRosLookupCacheSegment(Bcb %x, FileOffset %d)\n", Bcb, FileOffset);
374
375 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
376 current_entry = Bcb->BcbSegmentListHead.Flink;
377 while (current_entry != &Bcb->BcbSegmentListHead)
378 {
379 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
380 BcbSegmentListEntry);
381 if (current->FileOffset <= FileOffset &&
382 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
383 {
384 current->ReferenceCount++;
385 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
386 ExAcquireFastMutex(&current->Lock);
387 return(current);
388 }
389 current_entry = current_entry->Flink;
390 }
391 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
392 return(NULL);
393 }
394
395 NTSTATUS
396 CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
397 {
398 PCACHE_SEGMENT CacheSeg;
399 KIRQL oldIrql;
400
401 assert(Bcb);
402
403 DPRINT("CcRosMarkDirtyCacheSegment(Bcb %x, FileOffset %d)\n", Bcb, FileOffset);
404
405 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
406 if (CacheSeg == NULL)
407 {
408 KEBUGCHECK(0);
409 }
410 if (!CacheSeg->Dirty)
411 {
412 ExAcquireFastMutex(&ViewLock);
413 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
414 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
415 ExReleaseFastMutex(&ViewLock);
416 }
417 else
418 {
419 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
420 CacheSeg->ReferenceCount--;
421 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
422 }
423
424
425 CacheSeg->Dirty = TRUE;
426 ExReleaseFastMutex(&CacheSeg->Lock);
427
428 return(STATUS_SUCCESS);
429 }
430
431 NTSTATUS
432 CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
433 {
434 PCACHE_SEGMENT CacheSeg;
435 BOOLEAN WasDirty;
436 KIRQL oldIrql;
437
438 assert(Bcb);
439
440 DPRINT("CcRosUnmapCacheSegment(Bcb %x, FileOffset %d, NowDirty %d)\n",
441 Bcb, FileOffset, NowDirty);
442
443 CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
444 if (CacheSeg == NULL)
445 {
446 return(STATUS_UNSUCCESSFUL);
447 }
448
449 WasDirty = CacheSeg->Dirty;
450 CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;
451
452 CacheSeg->MappedCount--;
453
454 if (!WasDirty && NowDirty)
455 {
456 ExAcquireFastMutex(&ViewLock);
457 InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
458 DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
459 ExReleaseFastMutex(&ViewLock);
460 }
461
462 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
463 CacheSeg->ReferenceCount--;
464 if (!WasDirty && NowDirty)
465 {
466 CacheSeg->ReferenceCount++;
467 }
468 if (CacheSeg->MappedCount == 0)
469 {
470 CacheSeg->ReferenceCount--;
471 }
472 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
473
474 ExReleaseFastMutex(&CacheSeg->Lock);
475 return(STATUS_SUCCESS);
476 }
477
478 NTSTATUS STATIC
479 CcRosCreateCacheSegment(PBCB Bcb,
480 ULONG FileOffset,
481 PCACHE_SEGMENT* CacheSeg)
482 {
483 ULONG i;
484 PCACHE_SEGMENT current;
485 PCACHE_SEGMENT previous;
486 PLIST_ENTRY current_entry;
487 NTSTATUS Status;
488 KIRQL oldIrql;
489 #ifdef CACHE_BITMAP
490 ULONG StartingOffset;
491 #endif
492
493 assert(Bcb);
494
495 DPRINT("CcRosCreateCacheSegment()\n");
496
497 if (FileOffset >= Bcb->FileSize.u.LowPart)
498 {
 499      *CacheSeg = NULL;
500 return STATUS_INVALID_PARAMETER;
501 }
502
503 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
504 current->Valid = FALSE;
505 current->Dirty = FALSE;
506 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
507 current->Bcb = Bcb;
508 current->MappedCount = 0;
509 current->DirtySegmentListEntry.Flink = NULL;
510 current->DirtySegmentListEntry.Blink = NULL;
511 current->ReferenceCount = 1;
512 ExInitializeFastMutex(&current->Lock);
513 ExAcquireFastMutex(&current->Lock);
514 ExAcquireFastMutex(&ViewLock);
515
516 *CacheSeg = current;
 517   /* There is a window between the call to CcRosLookupCacheSegment
 518    * and CcRosCreateCacheSegment. We must check whether a segment
 519    * for this file offset already exists. If one does, we release
 520    * our newly created segment and return the existing one.
 521    */
522 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
523 current_entry = Bcb->BcbSegmentListHead.Flink;
524 previous = NULL;
525 while (current_entry != &Bcb->BcbSegmentListHead)
526 {
527 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
528 BcbSegmentListEntry);
529 if (current->FileOffset <= FileOffset &&
530 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
531 {
532 current->ReferenceCount++;
533 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
534 ExReleaseFastMutex(&(*CacheSeg)->Lock);
535 ExReleaseFastMutex(&ViewLock);
536 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
537 *CacheSeg = current;
538 ExAcquireFastMutex(&current->Lock);
539 return STATUS_SUCCESS;
540 }
541 if (current->FileOffset < FileOffset)
542 {
543 if (previous == NULL)
544 {
545 previous = current;
546 }
547 else
548 {
549 if (previous->FileOffset < current->FileOffset)
550 {
551 previous = current;
552 }
553 }
554 }
555 current_entry = current_entry->Flink;
556 }
557 /* There was no existing segment. */
558 current = *CacheSeg;
559 if (previous)
560 {
561 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
562 }
563 else
564 {
565 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
566 }
567 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
568 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
569 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
570 ExReleaseFastMutex(&ViewLock);
571 #ifdef CACHE_BITMAP
572 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
573
574 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
575
576 if (StartingOffset == 0xffffffff)
577 {
578 DPRINT1("Out of CacheSeg mapping space\n");
579 KEBUGCHECK(0);
580 }
581
582 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
583
584 if (CiCacheSegMappingRegionHint == StartingOffset)
585 {
586 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
587 }
588
589 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
590 #else
591 MmLockAddressSpace(MmGetKernelAddressSpace());
592 current->BaseAddress = NULL;
593 Status = MmCreateMemoryArea(NULL,
594 MmGetKernelAddressSpace(),
595 MEMORY_AREA_CACHE_SEGMENT,
596 &current->BaseAddress,
597 Bcb->CacheSegmentSize,
598 PAGE_READWRITE,
599 (PMEMORY_AREA*)&current->MemoryArea,
600 FALSE,
601 FALSE);
602 MmUnlockAddressSpace(MmGetKernelAddressSpace());
603 if (!NT_SUCCESS(Status))
604 {
605 KEBUGCHECK(0);
606 }
607 #endif
608 for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++)
609 {
610 PHYSICAL_ADDRESS Page;
611
612 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &Page);
613 if (!NT_SUCCESS(Status))
614 {
615 KEBUGCHECK(0);
616 }
617
618 Status = MmCreateVirtualMapping(NULL,
619 (char*)current->BaseAddress + (i * PAGE_SIZE),
620 PAGE_READWRITE,
621 Page,
622 TRUE);
623 if (!NT_SUCCESS(Status))
624 {
625 KEBUGCHECK(0);
626 }
627 }
628 return(STATUS_SUCCESS);
629 }
630
631 NTSTATUS
632 CcRosGetCacheSegmentChain(PBCB Bcb,
633 ULONG FileOffset,
634 ULONG Length,
635 PCACHE_SEGMENT* CacheSeg)
636 {
637 PCACHE_SEGMENT current;
638 ULONG i;
639 PCACHE_SEGMENT* CacheSegList;
640 PCACHE_SEGMENT Previous = NULL;
641
642 assert(Bcb);
643
644 DPRINT("CcRosGetCacheSegmentChain()\n");
645
646 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
647
648 #if defined(__GNUC__)
649 CacheSegList = alloca(sizeof(PCACHE_SEGMENT) *
650 (Length / Bcb->CacheSegmentSize));
651 #elif defined(_MSC_VER)
652 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
653 (Length / Bcb->CacheSegmentSize));
654 #else
655 #error Unknown compiler for alloca intrinsic stack allocation "function"
656 #endif
657
658 /*
659 * Look for a cache segment already mapping the same data.
660 */
661 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
662 {
663 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
664 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
665 if (current != NULL)
666 {
667 CacheSegList[i] = current;
668 }
669 else
670 {
671 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
672 CacheSegList[i] = current;
673 }
674 }
675
676 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
677 {
678 if (i == 0)
679 {
680 *CacheSeg = CacheSegList[i];
681 Previous = CacheSegList[i];
682 }
683 else
684 {
685 Previous->NextInChain = CacheSegList[i];
686 Previous = CacheSegList[i];
687 }
688 }
689 Previous->NextInChain = NULL;
690
691 return(STATUS_SUCCESS);
692 }
693
694 NTSTATUS
695 CcRosGetCacheSegment(PBCB Bcb,
696 ULONG FileOffset,
697 PULONG BaseOffset,
698 PVOID* BaseAddress,
699 PBOOLEAN UptoDate,
700 PCACHE_SEGMENT* CacheSeg)
701 {
702 PCACHE_SEGMENT current;
703 NTSTATUS Status;
704
705 assert(Bcb);
706
707 DPRINT("CcRosGetCacheSegment()\n");
708
709 /*
710 * Look for a cache segment already mapping the same data.
711 */
712 current = CcRosLookupCacheSegment(Bcb, FileOffset);
713 if (current == NULL)
714 {
715 /*
716 * Otherwise create a new segment.
717 */
718 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
719 if (!NT_SUCCESS(Status))
720 {
721 return Status;
722 }
723 }
724 /*
725 * Return information about the segment to the caller.
726 */
727 *UptoDate = current->Valid;
728 *BaseAddress = current->BaseAddress;
729 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
730 *CacheSeg = current;
731 *BaseOffset = current->FileOffset;
732 return(STATUS_SUCCESS);
733 }
734
735 NTSTATUS STDCALL
736 CcRosRequestCacheSegment(PBCB Bcb,
737 ULONG FileOffset,
738 PVOID* BaseAddress,
739 PBOOLEAN UptoDate,
740 PCACHE_SEGMENT* CacheSeg)
741 /*
742 * FUNCTION: Request a page mapping for a BCB
743 */
744 {
745 ULONG BaseOffset;
746
747 assert(Bcb);
748
749 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
750 {
 751       CPRINT("Bad file offset %x, should be a multiple of %x\n",
 752              FileOffset, Bcb->CacheSegmentSize);
753 KEBUGCHECK(0);
754 }
755
756 return(CcRosGetCacheSegment(Bcb,
757 FileOffset,
758 &BaseOffset,
759 BaseAddress,
760 UptoDate,
761 CacheSeg));
762 }
763 #ifdef CACHE_BITMAP
764 #else
765 STATIC VOID
766 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
767 PHYSICAL_ADDRESS PhysAddr, SWAPENTRY SwapEntry, BOOLEAN Dirty)
768 {
769 assert(SwapEntry == 0);
770 if (PhysAddr.QuadPart != 0)
771 {
772 MmReleasePageMemoryConsumer(MC_CACHE, PhysAddr);
773 }
774 }
775 #endif
776 NTSTATUS
777 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
778 /*
779 * FUNCTION: Releases a cache segment associated with a BCB
780 */
781 {
782 #ifdef CACHE_BITMAP
783 ULONG i;
784 ULONG RegionSize;
785 ULONG Base;
786 PHYSICAL_ADDRESS PhysicalAddr;
787 KIRQL oldIrql;
788 #endif
789 DPRINT("Freeing cache segment %x\n", CacheSeg);
790 #ifdef CACHE_BITMAP
791 RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
792
793 /* Unmap all the pages. */
794 for (i = 0; i < RegionSize; i++)
795 {
796 MmDeleteVirtualMapping(NULL,
797 CacheSeg->BaseAddress + (i * PAGE_SIZE),
798 FALSE,
799 NULL,
800 &PhysicalAddr);
801 MmReleasePageMemoryConsumer(MC_CACHE, PhysicalAddr);
802 }
803
804 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
 805   /* Release this segment's space in the mapping-region bitmap. */
806 Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;
807
808 RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);
809
810 CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);
811
812 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
813 #else
814 MmLockAddressSpace(MmGetKernelAddressSpace());
815 MmFreeMemoryArea(MmGetKernelAddressSpace(),
816 CacheSeg->BaseAddress,
817 CacheSeg->Bcb->CacheSegmentSize,
818 CcFreeCachePage,
819 NULL);
820 MmUnlockAddressSpace(MmGetKernelAddressSpace());
821 #endif
822 ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
823 return(STATUS_SUCCESS);
824 }
825
826 NTSTATUS
827 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
828 {
829 NTSTATUS Status;
830 KIRQL oldIrql;
831
832 assert(Bcb);
833
834 DPRINT("CcRosFreeCacheSegment(Bcb %x, CacheSeg %x)\n",
835 Bcb, CacheSeg);
836
837 ExAcquireFastMutex(&ViewLock);
838 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
839 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
840 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
841 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
842 if (CacheSeg->Dirty)
843 {
844 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
845 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
846
847 }
848 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
849 ExReleaseFastMutex(&ViewLock);
850
851 Status = CcRosInternalFreeCacheSegment(CacheSeg);
852 return(Status);
853 }
854
855 /*
856 * @implemented
857 */
858 VOID STDCALL
859 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
860 IN PLARGE_INTEGER FileOffset OPTIONAL,
861 IN ULONG Length,
862 OUT PIO_STATUS_BLOCK IoStatus)
863 {
864 PBCB Bcb;
865 LARGE_INTEGER Offset;
866 PCACHE_SEGMENT current;
867 NTSTATUS Status;
868 KIRQL oldIrql;
869
870 DPRINT("CcFlushCache(SectionObjectPointers %x, FileOffset %x, Length %d, IoStatus %x)\n",
871 SectionObjectPointers, FileOffset, Length, IoStatus);
872
873 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
874 {
875 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
876 assert(Bcb);
877 if (FileOffset)
878 {
879 Offset = *FileOffset;
880 }
881 else
882 {
883 #if defined(__GNUC__)
884 Offset.QuadPart = 0LL;
885 #else
886 Offset.QuadPart = 0;
887 #endif
888 Length = Bcb->FileSize.u.LowPart;
889 }
890
891 if (IoStatus)
892 {
893 IoStatus->Status = STATUS_SUCCESS;
894 IoStatus->Information = 0;
895 }
896
897 while (Length > 0)
898 {
899 current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
900 if (current != NULL)
901 {
902 if (current->Dirty)
903 {
904 Status = CcRosFlushCacheSegment(current);
905 if (!NT_SUCCESS(Status) && IoStatus != NULL)
906 {
907 IoStatus->Status = Status;
908 }
909 }
910 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
911 ExReleaseFastMutex(&current->Lock);
912 current->ReferenceCount--;
913 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
914 }
915
916 Offset.QuadPart += Bcb->CacheSegmentSize;
917 if (Length > Bcb->CacheSegmentSize)
918 {
919 Length -= Bcb->CacheSegmentSize;
920 }
921 else
922 {
923 Length = 0;
924 }
925 }
926 }
927 else
928 {
929 if (IoStatus)
930 {
931 IoStatus->Status = STATUS_INVALID_PARAMETER;
932 }
933 }
934 }
935
936 NTSTATUS
937 CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
938 /*
939 * FUNCTION: Releases the BCB associated with a file object
940 */
941 {
942 PLIST_ENTRY current_entry;
943 PCACHE_SEGMENT current;
944 NTSTATUS Status;
945 LIST_ENTRY FreeList;
946 KIRQL oldIrql;
947
948 assert(Bcb);
949
950 Bcb->RefCount++;
951 ExReleaseFastMutex(&ViewLock);
952
953 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
954
955 ExAcquireFastMutex(&ViewLock);
956 Bcb->RefCount--;
957 if (Bcb->RefCount == 0)
958 {
959 if (Bcb->BcbRemoveListEntry.Flink != NULL)
960 {
961 RemoveEntryList(&Bcb->BcbRemoveListEntry);
962 Bcb->BcbRemoveListEntry.Flink = NULL;
963 }
964
965 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
966
967 /*
968 * Release all cache segments.
969 */
970 InitializeListHead(&FreeList);
971 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
972 current_entry = Bcb->BcbSegmentListHead.Flink;
973 while (!IsListEmpty(&Bcb->BcbSegmentListHead))
974 {
975 current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
976 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
977 RemoveEntryList(&current->CacheSegmentListEntry);
978 RemoveEntryList(&current->CacheSegmentLRUListEntry);
979 if (current->Dirty)
980 {
981 RemoveEntryList(&current->DirtySegmentListEntry);
982 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
983 DPRINT1("Freeing dirty segment\n");
984 }
985 InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
986 }
987 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
988
989 ExReleaseFastMutex(&ViewLock);
990 ObDereferenceObject (Bcb->FileObject);
991
992 while (!IsListEmpty(&FreeList))
993 {
994 current_entry = RemoveTailList(&FreeList);
995 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
996 Status = CcRosInternalFreeCacheSegment(current);
997 }
998 ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
999 ExAcquireFastMutex(&ViewLock);
1000 }
1001 return(STATUS_SUCCESS);
1002 }
1003
1004 VOID CcRosReferenceCache(PFILE_OBJECT FileObject)
1005 {
1006 PBCB Bcb;
1007 ExAcquireFastMutex(&ViewLock);
1008 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1009 assert(Bcb);
1010 if (Bcb->RefCount == 0)
1011 {
1012 assert(Bcb->BcbRemoveListEntry.Flink != NULL);
1013 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1014 Bcb->BcbRemoveListEntry.Flink = NULL;
1015
1016 }
1017 else
1018 {
1019 assert(Bcb->BcbRemoveListEntry.Flink == NULL);
1020 }
1021 Bcb->RefCount++;
1022 ExReleaseFastMutex(&ViewLock);
1023 }
1024
1025 VOID CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1026 {
1027 PBCB Bcb;
1028 DPRINT("CcRosSetRemoveOnClose()\n");
1029 ExAcquireFastMutex(&ViewLock);
1030 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1031 if (Bcb)
1032 {
1033 Bcb->RemoveOnClose = TRUE;
1034 if (Bcb->RefCount == 0)
1035 {
1036 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1037 }
1038 }
1039 ExReleaseFastMutex(&ViewLock);
1040 }
1041
1042
1043 VOID CcRosDereferenceCache(PFILE_OBJECT FileObject)
1044 {
1045 PBCB Bcb;
1046 ExAcquireFastMutex(&ViewLock);
1047 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1048 assert(Bcb);
1049 if (Bcb->RefCount > 0)
1050 {
1051 Bcb->RefCount--;
1052 if (Bcb->RefCount == 0)
1053 {
1054 MmFreeSectionSegments(Bcb->FileObject);
1055 if (Bcb->RemoveOnClose)
1056 {
1057 CcRosDeleteFileCache(FileObject, Bcb);
1058 }
1059 else
1060 {
1061 Bcb->TimeStamp = CcTimeStamp;
1062 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1063 }
1064 }
1065 }
1066 ExReleaseFastMutex(&ViewLock);
1067 }
1068
1069 NTSTATUS STDCALL
1070 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1071 /*
1072 * FUNCTION: Called by the file system when a handle to a file object
1073 * has been closed.
1074 */
1075 {
1076 PBCB Bcb;
1077
1078 ExAcquireFastMutex(&ViewLock);
1079
1080 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1081 {
1082 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1083 if (FileObject->PrivateCacheMap != NULL)
1084 {
1085 FileObject->PrivateCacheMap = NULL;
1086 if (Bcb->RefCount > 0)
1087 {
1088 Bcb->RefCount--;
1089 if (Bcb->RefCount == 0)
1090 {
1091 if (Bcb->RemoveOnClose)
1092 {
1093 CcRosDeleteFileCache(FileObject, Bcb);
1094 }
1095 else
1096 {
1097 Bcb->TimeStamp = CcTimeStamp;
1098 InsertHeadList(&ClosedListHead, &Bcb->BcbRemoveListEntry);
1099 }
1100 }
1101 }
1102 }
1103 }
1104 ExReleaseFastMutex(&ViewLock);
1105 return(STATUS_SUCCESS);
1106 }
1107
1108 NTSTATUS
1109 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1110 {
1111 PBCB Bcb;
1112 NTSTATUS Status;
1113
1114 ExAcquireFastMutex(&ViewLock);
1115
1116 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1117 if (Bcb == NULL)
1118 {
1119 Status = STATUS_UNSUCCESSFUL;
1120 }
1121 else
1122 {
1123 if (FileObject->PrivateCacheMap == NULL)
1124 {
1125 FileObject->PrivateCacheMap = Bcb;
1126 Bcb->RefCount++;
1127 }
1128 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1129 {
1130 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1131 Bcb->BcbRemoveListEntry.Flink = NULL;
1132 }
1133 Status = STATUS_SUCCESS;
1134 }
1135 ExReleaseFastMutex(&ViewLock);
1136
1137 return Status;
1138 }
1139
1140
1141 NTSTATUS STDCALL
1142 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1143 ULONG CacheSegmentSize)
1144 /*
1145 * FUNCTION: Initializes a BCB for a file object
1146 */
1147 {
1148 PBCB Bcb;
 1149    DPRINT("CcRosInitializeFileCache(FileObject %x, CacheSegmentSize %d)\n",
 1150           FileObject, CacheSegmentSize);
1151
1152 ExAcquireFastMutex(&ViewLock);
1153
1154 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1155 if (Bcb == NULL)
1156 {
1157 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1158 if (Bcb == NULL)
1159 {
1160 ExReleaseFastMutex(&ViewLock);
1161 return(STATUS_UNSUCCESSFUL);
1162 }
1163 memset(Bcb, 0, sizeof(BCB));
1164 ObReferenceObjectByPointer(FileObject,
1165 FILE_ALL_ACCESS,
1166 NULL,
1167 KernelMode);
1168 Bcb->FileObject = FileObject;
1169 Bcb->CacheSegmentSize = CacheSegmentSize;
1170 if (FileObject->FsContext)
1171 {
1172 Bcb->AllocationSize =
1173 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1174 Bcb->FileSize =
1175 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1176 }
1177 KeInitializeSpinLock(&Bcb->BcbLock);
1178 InitializeListHead(&Bcb->BcbSegmentListHead);
1179 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1180 }
1181 if (FileObject->PrivateCacheMap == NULL)
1182 {
1183 FileObject->PrivateCacheMap = Bcb;
1184 Bcb->RefCount++;
1185 }
1186 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1187 {
1188 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1189 Bcb->BcbRemoveListEntry.Flink = NULL;
1190 }
1191 ExReleaseFastMutex(&ViewLock);
1192
1193 return(STATUS_SUCCESS);
1194 }
1195
1196 /*
1197 * @implemented
1198 */
1199 PFILE_OBJECT STDCALL
1200 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1201 {
1202 PBCB Bcb;
1203 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1204 {
1205 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1206 assert(Bcb);
1207 return Bcb->FileObject;
1208 }
1209 return NULL;
1210 }
1211
1212 VOID STDCALL
1213 CmLazyCloseThreadMain(PVOID Ignored)
1214 {
1215 LARGE_INTEGER Timeout;
1216 PLIST_ENTRY current_entry;
1217 PBCB current;
1218 ULONG RemoveTimeStamp;
1219 NTSTATUS Status;
1220
1221 KeQuerySystemTime (&Timeout);
1222
1223 while (1)
1224 {
1225 #if defined(__GNUC__)
1226 Timeout.QuadPart += 100000000LL; // 10sec
1227 #else
1228 Timeout.QuadPart += 100000000; // 10sec
1229 #endif
1230 Status = KeWaitForSingleObject(&LazyCloseThreadEvent,
1231 0,
1232 KernelMode,
1233 FALSE,
1234 &Timeout);
1235
1236 DPRINT("LazyCloseThreadMain %d\n", CcTimeStamp);
1237
1238 if (!NT_SUCCESS(Status))
1239 {
1240 DbgPrint("LazyCloseThread: Wait failed\n");
1241 KEBUGCHECK(0);
1242 break;
1243 }
1244 if (LazyCloseThreadShouldTerminate)
1245 {
1246 DbgPrint("LazyCloseThread: Terminating\n");
1247 break;
1248 }
1249
1250 ExAcquireFastMutex(&ViewLock);
1251 CcTimeStamp++;
1252 if (CcTimeStamp >= 30)
1253 {
1254 RemoveTimeStamp = CcTimeStamp - 30; /* 5min = 10sec * 30 */
1255 while (!IsListEmpty(&ClosedListHead))
1256 {
1257 current_entry = ClosedListHead.Blink;
1258 current = CONTAINING_RECORD(current_entry, BCB, BcbRemoveListEntry);
1259 if (current->TimeStamp >= RemoveTimeStamp)
1260 {
1261 break;
1262 }
1263 CcRosDeleteFileCache(current->FileObject, current);
1264 }
1265 }
1266 ExReleaseFastMutex(&ViewLock);
1267 }
1268 }
1269
1270 VOID INIT_FUNCTION
1271 CcInitView(VOID)
1272 {
1273 #ifdef CACHE_BITMAP
1274 PMEMORY_AREA marea;
1275 PVOID Buffer;
1276 #endif
1277 NTSTATUS Status;
1278 KPRIORITY Priority;
1279
1280 DPRINT("CcInitView()\n");
1281 #ifdef CACHE_BITMAP
1282 CiCacheSegMappingRegionHint = 0;
1283 CiCacheSegMappingRegionBase = NULL;
1284
1285 MmLockAddressSpace(MmGetKernelAddressSpace());
1286
1287 Status = MmCreateMemoryArea(NULL,
1288 MmGetKernelAddressSpace(),
1289 MEMORY_AREA_CACHE_SEGMENT,
1290 &CiCacheSegMappingRegionBase,
1291 CI_CACHESEG_MAPPING_REGION_SIZE,
1292 0,
1293 &marea,
1294 FALSE,
1295 FALSE);
1296 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1297 if (!NT_SUCCESS(Status))
1298 {
1299 KEBUGCHECK(0);
1300 }
1301
1302 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1303
1304 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1305 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1306
1307 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1308 #endif
1309 InitializeListHead(&CacheSegmentListHead);
1310 InitializeListHead(&DirtySegmentListHead);
1311 InitializeListHead(&CacheSegmentLRUListHead);
1312 InitializeListHead(&ClosedListHead);
1313 ExInitializeFastMutex(&ViewLock);
1314 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1315 NULL,
1316 NULL,
1317 0,
1318 sizeof(INTERNAL_BCB),
1319 TAG_IBCB,
1320 20);
1321 ExInitializeNPagedLookasideList (&BcbLookasideList,
1322 NULL,
1323 NULL,
1324 0,
1325 sizeof(BCB),
1326 TAG_BCB,
1327 20);
1328 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1329 NULL,
1330 NULL,
1331 0,
1332 sizeof(CACHE_SEGMENT),
1333 TAG_CSEG,
1334 20);
1335
1336 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1337
1338 CcInitCacheZeroPage();
1339
1340 CcTimeStamp = 0;
1341 LazyCloseThreadShouldTerminate = FALSE;
1342 KeInitializeEvent (&LazyCloseThreadEvent, SynchronizationEvent, FALSE);
1343 Status = PsCreateSystemThread(&LazyCloseThreadHandle,
1344 THREAD_ALL_ACCESS,
1345 NULL,
1346 NULL,
1347 &LazyCloseThreadId,
1348 (PKSTART_ROUTINE)CmLazyCloseThreadMain,
1349 NULL);
1350 if (NT_SUCCESS(Status))
1351 {
1352 Priority = LOW_REALTIME_PRIORITY;
1353 NtSetInformationThread(LazyCloseThreadHandle,
1354 ThreadPriority,
1355 &Priority,
1356 sizeof(Priority));
1357 }
1358
1359 }
1360
1361 /* EOF */