1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17  * dispatch routine is as follows:
18 *
19  * (1) If caching for the FCB hasn't been initiated then do so by calling
20  * CcInitializeFileCache.
21 *
22  * (2) For each 4k region which is being read or written, obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25  * (3) If the page is being read, or not being completely written, and it is
26  * not up to date, then read its data from the underlying medium. If the read
27  * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
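
/* Illustrative sketch only, not part of the build: a minimal cached read
 * following the steps above, expressed with the ROS-side helpers defined in
 * this file (CcRosRequestVacb / CcRosReleaseVacb), which play the roles the
 * notes describe. The MyFsdReadRaw helper is hypothetical, and the request is
 * assumed not to cross a view boundary; EOF handling is omitted.
 *
 * NTSTATUS
 * MyFsdCachedRead(PROS_SHARED_CACHE_MAP Map, LONGLONG FileOffset, ULONG Length, PVOID Buffer)
 * {
 *     PROS_VACB Vacb;
 *     PVOID BaseAddress;
 *     BOOLEAN UptoDate;
 *     NTSTATUS Status;
 *
 *     // (2) Obtain the view covering this VACB-aligned region
 *     Status = CcRosRequestVacb(Map,
 *                               ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY),
 *                               &BaseAddress,
 *                               &UptoDate,
 *                               &Vacb);
 *     if (!NT_SUCCESS(Status))
 *     {
 *         return Status;
 *     }
 *
 *     // (3) If the view is not up to date, read it from the medium first
 *     if (!UptoDate)
 *     {
 *         Status = MyFsdReadRaw(Map->FileObject,
 *                               Vacb->FileOffset.QuadPart,
 *                               VACB_MAPPING_GRANULARITY,
 *                               BaseAddress);
 *         if (!NT_SUCCESS(Status))
 *         {
 *             // Release with Valid == FALSE and fail the request
 *             CcRosReleaseVacb(Map, Vacb, FALSE, FALSE, FALSE);
 *             return Status;
 *         }
 *     }
 *
 *     // (4) Copy the data out of the view
 *     RtlCopyMemory(Buffer,
 *                   (PUCHAR)BaseAddress + (FileOffset % VACB_MAPPING_GRANULARITY),
 *                   Length);
 *
 *     // (5) Release the view: it is now valid, and a read does not dirty it
 *     return CcRosReleaseVacb(Map, Vacb, TRUE, FALSE, FALSE);
 * }
 */
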
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Counters:
55  * - Number of pages flushed by the lazy writer
56  * - Number of times the lazy writer ran
57  */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
61 /* Internal vars (MS):
62  * - Threshold above which the lazy writer starts flushing
63  * - Number of dirty pages
64  * - List for deferred writes
65  * - Spinlock protecting the deferred write list
66  * - List for "clean" shared cache maps
67  * - One second delay for the lazy writer
68  */
69 ULONG CcDirtyPageThreshold = 0;
70 ULONG CcTotalDirtyPages = 0;
71 LIST_ENTRY CcDeferredWrites;
72 KSPIN_LOCK CcDeferredWriteSpinLock;
73 LIST_ENTRY CcCleanSharedCacheMapList;
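/* CcIdleDelay is a relative KeWaitForSingleObject timeout: negative values
 * mean "relative", in 100-nanosecond units, so -10,000,000 units is one
 * second. The #ifdef below only exists because MSVC does not accept the
 * designated-initializer form.
 */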
74 #ifndef _MSC_VER
75 LARGE_INTEGER CcIdleDelay = {.QuadPart = (LONGLONG)-1*1000*1000*10};
76 #else
77 LARGE_INTEGER CcIdleDelay = {(LONGLONG)-1*1000*1000*10};
78 #endif
79
80 /* Internal vars (ROS):
81  * - Event to notify the lazy writer to shut down
82  * - Event to inform watchers that the lazy writer is done for this pass
83  * - Lock for the CcCleanSharedCacheMapList list
84  */
85 KEVENT iLazyWriterShutdown;
86 KEVENT iLazyWriterNotify;
87 KSPIN_LOCK iSharedCacheMapLock;
88
89 #if DBG
90 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
91 {
92 ++vacb->ReferenceCount;
93 if (vacb->SharedCacheMap->Trace)
94 {
95 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
96 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
97 }
98 }
99 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
100 {
101 --vacb->ReferenceCount;
102 if (vacb->SharedCacheMap->Trace)
103 {
104 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
105 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
106 }
107 }
108 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
109 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
110 #else
111 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
112 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
113 #endif
114
115 NTSTATUS
116 CcRosInternalFreeVacb(PROS_VACB Vacb);
117
118
119 /* FUNCTIONS *****************************************************************/
120
121 VOID
122 NTAPI
123 CcRosTraceCacheMap (
124 PROS_SHARED_CACHE_MAP SharedCacheMap,
125 BOOLEAN Trace )
126 {
127 #if DBG
128 KIRQL oldirql;
129 PLIST_ENTRY current_entry;
130 PROS_VACB current;
131
132 if (!SharedCacheMap)
133 return;
134
135 SharedCacheMap->Trace = Trace;
136
137 if (Trace)
138 {
139 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
140
141 KeAcquireGuardedMutex(&ViewLock);
142 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
143
144 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
145 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
146 {
147 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
148 current_entry = current_entry->Flink;
149
150 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
151 current, current->ReferenceCount, current->Dirty, current->PageOut );
152 }
153 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
154 KeReleaseGuardedMutex(&ViewLock);
155 }
156 else
157 {
158 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
159 }
160
161 #else
162 UNREFERENCED_PARAMETER(SharedCacheMap);
163 UNREFERENCED_PARAMETER(Trace);
164 #endif
165 }
166
167 NTSTATUS
168 NTAPI
169 CcRosFlushVacb (
170 PROS_VACB Vacb)
171 {
172 NTSTATUS Status;
173 KIRQL oldIrql;
174
175 Status = CcWriteVirtualAddress(Vacb);
176 if (NT_SUCCESS(Status))
177 {
178 KeAcquireGuardedMutex(&ViewLock);
179 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
180
181 Vacb->Dirty = FALSE;
182 RemoveEntryList(&Vacb->DirtyVacbListEntry);
183 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
184 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
185 CcRosVacbDecRefCount(Vacb);
186
187 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
188 KeReleaseGuardedMutex(&ViewLock);
189 }
190
191 return Status;
192 }
193
194 NTSTATUS
195 NTAPI
196 CcRosFlushDirtyPages (
197 ULONG Target,
198 PULONG Count,
199 BOOLEAN Wait,
200 BOOLEAN CalledFromLazy)
201 {
202 PLIST_ENTRY current_entry;
203 PROS_VACB current;
204 BOOLEAN Locked;
205 NTSTATUS Status;
206 LARGE_INTEGER ZeroTimeout;
207
208 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
209
210 (*Count) = 0;
211 ZeroTimeout.QuadPart = 0;
212
213 KeEnterCriticalRegion();
214 KeAcquireGuardedMutex(&ViewLock);
215
216 current_entry = DirtyVacbListHead.Flink;
217 if (current_entry == &DirtyVacbListHead)
218 {
219 DPRINT("No Dirty pages\n");
220 }
221
222 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
223 {
224 current = CONTAINING_RECORD(current_entry,
225 ROS_VACB,
226 DirtyVacbListEntry);
227 current_entry = current_entry->Flink;
228
229 CcRosVacbIncRefCount(current);
230
231 /* When performing lazy write, don't handle temporary files */
232 if (CalledFromLazy &&
233 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
234 {
235 CcRosVacbDecRefCount(current);
236 continue;
237 }
238
239 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
240 current->SharedCacheMap->LazyWriteContext, Wait);
241 if (!Locked)
242 {
243 CcRosVacbDecRefCount(current);
244 continue;
245 }
246
247 Status = CcRosAcquireVacbLock(current,
248 Wait ? NULL : &ZeroTimeout);
249 if (Status != STATUS_SUCCESS)
250 {
251 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
252 current->SharedCacheMap->LazyWriteContext);
253 CcRosVacbDecRefCount(current);
254 continue;
255 }
256
257 ASSERT(current->Dirty);
258
259 /* One reference is added above */
260 if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
261 (current->ReferenceCount > 3 && current->PinCount > 1))
262 {
263 CcRosReleaseVacbLock(current);
264 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
265 current->SharedCacheMap->LazyWriteContext);
266 CcRosVacbDecRefCount(current);
267 continue;
268 }
269
270 KeReleaseGuardedMutex(&ViewLock);
271
272 Status = CcRosFlushVacb(current);
273
274 CcRosReleaseVacbLock(current);
275 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
276 current->SharedCacheMap->LazyWriteContext);
277
278 KeAcquireGuardedMutex(&ViewLock);
279 CcRosVacbDecRefCount(current);
280
281 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
282 (Status != STATUS_MEDIA_WRITE_PROTECTED))
283 {
284 DPRINT1("CC: Failed to flush VACB.\n");
285 }
286 else
287 {
288 ULONG PagesFreed;
289
290 /* How many pages did we free? */
291 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
292 (*Count) += PagesFreed;
293
294 /* Make sure Target doesn't underflow! */
295 if (Target < PagesFreed)
296 {
297 /* If we would have, jump to zero directly */
298 Target = 0;
299 }
300 else
301 {
302 Target -= PagesFreed;
303 }
304 }
305
306 current_entry = DirtyVacbListHead.Flink;
307 }
308
309 KeReleaseGuardedMutex(&ViewLock);
310 KeLeaveCriticalRegion();
311
312 DPRINT("CcRosFlushDirtyPages() finished\n");
313 return STATUS_SUCCESS;
314 }
315
316 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
317 VOID
318 NTAPI
319 CciLazyWriter(PVOID Unused)
320 {
321 while (TRUE)
322 {
323 NTSTATUS Status;
324 PLIST_ENTRY ListEntry;
325 ULONG Target, Count = 0;
326
327 /* Once per second, or until we have to stop */
328 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
329 Executive,
330 KernelMode,
331 FALSE,
332 &CcIdleDelay);
333
334 /* If we succeed, we have to stop running! */
335 if (Status == STATUS_SUCCESS)
336 {
337 break;
338 }
339
340 /* We're not sleeping anymore */
341 KeClearEvent(&iLazyWriterNotify);
342
343 /* Our target is one-eighth of the dirty pages */
344 Target = CcTotalDirtyPages / 8;
345 if (Target != 0)
346 {
347 /* Flush! */
348 DPRINT("Lazy writer starting (%lu)\n", Target);
349 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
350
351 /* And update stats */
352 CcLazyWritePages += Count;
353 ++CcLazyWriteIos;
354 DPRINT("Lazy writer done (%lu)\n", Count);
355 }
356
357 /* Inform people waiting on us that we're done */
358 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
359
360 /* Likely not optimal, but let's handle one deferred write now! */
361 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
362 if (ListEntry != NULL)
363 {
364 PROS_DEFERRED_WRITE_CONTEXT Context;
365
366 /* Extract the context */
367 Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
368
369 /* Can we write now? */
370 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
371 {
372 /* Yes! Do it, and destroy the associated context */
373 Context->PostRoutine(Context->Context1, Context->Context2);
374 ExFreePoolWithTag(Context, 'CcDw');
375 }
376 else
377 {
378 /* Otherwise, requeue it, but at the tail, so that it doesn't block others.
379  * This clearly needs improvement, but given the poor algorithm used for
380  * now, it's better than nothing!
381  */
382 ExInterlockedInsertTailList(&CcDeferredWrites,
383 &Context->CcDeferredWritesEntry,
384 &CcDeferredWriteSpinLock);
385 }
386 }
387 }
388 }
389
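/* For reference, a hedged sketch of what the producer side of the deferred
 * write list (the CcDeferWrite path, implemented outside this file) is
 * expected to do; the field names match the consumer code above, everything
 * else is an assumption:
 *
 * PROS_DEFERRED_WRITE_CONTEXT Context;
 *
 * Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(*Context), 'CcDw');
 * if (Context != NULL)
 * {
 *     Context->FileObject = FileObject;
 *     Context->PostRoutine = PostRoutine;
 *     Context->Context1 = Context1;
 *     Context->Context2 = Context2;
 *     Context->BytesToWrite = BytesToWrite;
 *     Context->Retrying = Retrying;
 *     ExInterlockedInsertTailList(&CcDeferredWrites,
 *                                 &Context->CcDeferredWritesEntry,
 *                                 &CcDeferredWriteSpinLock);
 * }
 */
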
390 NTSTATUS
391 CcRosTrimCache (
392 ULONG Target,
393 ULONG Priority,
394 PULONG NrFreed)
395 /*
396 * FUNCTION: Try to free some memory from the file cache.
397 * ARGUMENTS:
398 * Target - The number of pages to be freed.
399  * Priority - The priority of the free operation (currently unused).
400 * NrFreed - Points to a variable where the number of pages
401 * actually freed is returned.
402 */
403 {
404 PLIST_ENTRY current_entry;
405 PROS_VACB current;
406 ULONG PagesFreed;
407 KIRQL oldIrql;
408 LIST_ENTRY FreeList;
409 PFN_NUMBER Page;
410 ULONG i;
411 BOOLEAN FlushedPages = FALSE;
412
413 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
414
415 InitializeListHead(&FreeList);
416
417 *NrFreed = 0;
418
419 retry:
420 KeAcquireGuardedMutex(&ViewLock);
421
422 current_entry = VacbLruListHead.Flink;
423 while (current_entry != &VacbLruListHead)
424 {
425 current = CONTAINING_RECORD(current_entry,
426 ROS_VACB,
427 VacbLruListEntry);
428 current_entry = current_entry->Flink;
429
430 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
431
432 /* Reference the VACB */
433 CcRosVacbIncRefCount(current);
434
435 /* Check if it's mapped and not dirty */
436 if (current->MappedCount > 0 && !current->Dirty)
437 {
438 /* We have to break these locks because Cc sucks */
439 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
440 KeReleaseGuardedMutex(&ViewLock);
441
442 /* Page out the VACB */
443 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
444 {
445 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
446
447 MmPageOutPhysicalAddress(Page);
448 }
449
450 /* Reacquire the locks */
451 KeAcquireGuardedMutex(&ViewLock);
452 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
453 }
454
455 /* Dereference the VACB */
456 CcRosVacbDecRefCount(current);
457
458 /* Check if we can free this entry now */
459 if (current->ReferenceCount == 0)
460 {
461 ASSERT(!current->Dirty);
462 ASSERT(!current->MappedCount);
463
464 RemoveEntryList(&current->CacheMapVacbListEntry);
465 RemoveEntryList(&current->VacbLruListEntry);
466 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
467
468 /* Calculate how many pages we freed for Mm */
469 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
470 Target -= PagesFreed;
471 (*NrFreed) += PagesFreed;
472 }
473
474 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
475 }
476
477 KeReleaseGuardedMutex(&ViewLock);
478
479 /* Try flushing pages if we haven't met our target */
480 if ((Target > 0) && !FlushedPages)
481 {
482 /* Flush dirty pages to disk */
483 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
484 FlushedPages = TRUE;
485
486 /* We can only swap as many pages as we flushed */
487 if (PagesFreed < Target) Target = PagesFreed;
488
489 /* Check if we flushed anything */
490 if (PagesFreed != 0)
491 {
492 /* Try again after flushing dirty pages */
493 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
494 goto retry;
495 }
496 }
497
498 while (!IsListEmpty(&FreeList))
499 {
500 current_entry = RemoveHeadList(&FreeList);
501 current = CONTAINING_RECORD(current_entry,
502 ROS_VACB,
503 CacheMapVacbListEntry);
504 CcRosInternalFreeVacb(current);
505 }
506
507 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
508
509 return STATUS_SUCCESS;
510 }
511
512 NTSTATUS
513 NTAPI
514 CcRosReleaseVacb (
515 PROS_SHARED_CACHE_MAP SharedCacheMap,
516 PROS_VACB Vacb,
517 BOOLEAN Valid,
518 BOOLEAN Dirty,
519 BOOLEAN Mapped)
520 {
521 BOOLEAN WasDirty;
522
523 ASSERT(SharedCacheMap);
524
525 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
526 SharedCacheMap, Vacb, Valid);
527
528 Vacb->Valid = Valid;
529
530 WasDirty = FALSE;
531 if (Dirty)
532 {
533 if (!Vacb->Dirty)
534 {
535 CcRosMarkDirtyVacb(Vacb);
536 }
537 else
538 {
539 WasDirty = TRUE;
540 }
541 }
542
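/* Reference accounting: drop the caller's reference, but keep one extra
 * reference while the VACB is mapped (taken when MappedCount first reaches 1,
 * dropped again in CcRosUnmapVacb when it falls back to 0) and one while it
 * is dirty (taken when it becomes newly dirty, dropped when it is flushed in
 * CcRosFlushVacb).
 */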
543 if (Mapped)
544 {
545 Vacb->MappedCount++;
546 }
547 CcRosVacbDecRefCount(Vacb);
548 if (Mapped && (Vacb->MappedCount == 1))
549 {
550 CcRosVacbIncRefCount(Vacb);
551 }
552 if (!WasDirty && Vacb->Dirty)
553 {
554 CcRosVacbIncRefCount(Vacb);
555 }
556
557 CcRosReleaseVacbLock(Vacb);
558
559 return STATUS_SUCCESS;
560 }
561
562 /* Returns with VACB Lock Held! */
563 PROS_VACB
564 NTAPI
565 CcRosLookupVacb (
566 PROS_SHARED_CACHE_MAP SharedCacheMap,
567 LONGLONG FileOffset)
568 {
569 PLIST_ENTRY current_entry;
570 PROS_VACB current;
571 KIRQL oldIrql;
572
573 ASSERT(SharedCacheMap);
574
575 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
576 SharedCacheMap, FileOffset);
577
578 KeAcquireGuardedMutex(&ViewLock);
579 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
580
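/* The per-map VACB list is kept sorted by ascending FileOffset (see the
 * insertion logic in CcRosCreateVacb) and each VACB covers
 * VACB_MAPPING_GRANULARITY bytes, so the scan can stop as soon as it walks
 * past the requested offset.
 */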
581 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
582 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
583 {
584 current = CONTAINING_RECORD(current_entry,
585 ROS_VACB,
586 CacheMapVacbListEntry);
587 if (IsPointInRange(current->FileOffset.QuadPart,
588 VACB_MAPPING_GRANULARITY,
589 FileOffset))
590 {
591 CcRosVacbIncRefCount(current);
592 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
593 KeReleaseGuardedMutex(&ViewLock);
594 CcRosAcquireVacbLock(current, NULL);
595 return current;
596 }
597 if (current->FileOffset.QuadPart > FileOffset)
598 break;
599 current_entry = current_entry->Flink;
600 }
601
602 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
603 KeReleaseGuardedMutex(&ViewLock);
604
605 return NULL;
606 }
607
608 VOID
609 NTAPI
610 CcRosMarkDirtyVacb (
611 PROS_VACB Vacb)
612 {
613 KIRQL oldIrql;
614 PROS_SHARED_CACHE_MAP SharedCacheMap;
615
616 SharedCacheMap = Vacb->SharedCacheMap;
617
618 KeAcquireGuardedMutex(&ViewLock);
619 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
620
621 if (!Vacb->Dirty)
622 {
623 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
624 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
625 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
626 }
627 else
628 {
629 CcRosVacbDecRefCount(Vacb);
630 }
631
632 /* Move to the tail of the LRU list */
633 RemoveEntryList(&Vacb->VacbLruListEntry);
634 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
635
636 Vacb->Dirty = TRUE;
637
638 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
639 KeReleaseGuardedMutex(&ViewLock);
640 }
641
642 NTSTATUS
643 NTAPI
644 CcRosMarkDirtyFile (
645 PROS_SHARED_CACHE_MAP SharedCacheMap,
646 LONGLONG FileOffset)
647 {
648 PROS_VACB Vacb;
649
650 ASSERT(SharedCacheMap);
651
652 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
653 SharedCacheMap, FileOffset);
654
655 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
656 if (Vacb == NULL)
657 {
658 KeBugCheck(CACHE_MANAGER);
659 }
660
661 CcRosMarkDirtyVacb(Vacb);
662
663 CcRosReleaseVacbLock(Vacb);
664
665 return STATUS_SUCCESS;
666 }
667
668 NTSTATUS
669 NTAPI
670 CcRosUnmapVacb (
671 PROS_SHARED_CACHE_MAP SharedCacheMap,
672 LONGLONG FileOffset,
673 BOOLEAN NowDirty)
674 {
675 PROS_VACB Vacb;
676 BOOLEAN WasDirty;
677
678 ASSERT(SharedCacheMap);
679
680 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
681 SharedCacheMap, FileOffset, NowDirty);
682
683 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
684 if (Vacb == NULL)
685 {
686 return STATUS_UNSUCCESSFUL;
687 }
688
689 WasDirty = FALSE;
690 if (NowDirty)
691 {
692 if (!Vacb->Dirty)
693 {
694 CcRosMarkDirtyVacb(Vacb);
695 }
696 else
697 {
698 WasDirty = TRUE;
699 }
700 }
701
702 Vacb->MappedCount--;
703
704 CcRosVacbDecRefCount(Vacb);
705 if (!WasDirty && NowDirty)
706 {
707 CcRosVacbIncRefCount(Vacb);
708 }
709 if (Vacb->MappedCount == 0)
710 {
711 CcRosVacbDecRefCount(Vacb);
712 }
713
714 CcRosReleaseVacbLock(Vacb);
715
716 return STATUS_SUCCESS;
717 }
718
719 static
720 NTSTATUS
721 CcRosMapVacb(
722 PROS_VACB Vacb)
723 {
724 ULONG i;
725 NTSTATUS Status;
726 ULONG_PTR NumberOfPages;
727
728 /* Create a memory area. */
729 MmLockAddressSpace(MmGetKernelAddressSpace());
730 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
731 0, // nothing checks for VACB mareas, so set to 0
732 &Vacb->BaseAddress,
733 VACB_MAPPING_GRANULARITY,
734 PAGE_READWRITE,
735 (PMEMORY_AREA*)&Vacb->MemoryArea,
736 0,
737 PAGE_SIZE);
738 MmUnlockAddressSpace(MmGetKernelAddressSpace());
739 if (!NT_SUCCESS(Status))
740 {
741 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
742 return Status;
743 }
744
745 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
746 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
747
748 /* Create a virtual mapping for this memory area */
749 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
750 for (i = 0; i < NumberOfPages; i++)
751 {
752 PFN_NUMBER PageFrameNumber;
753
754 MI_SET_USAGE(MI_USAGE_CACHE);
755 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
756 if (PageFrameNumber == 0)
757 {
758 DPRINT1("Unable to allocate page\n");
759 KeBugCheck(MEMORY_MANAGEMENT);
760 }
761
762 Status = MmCreateVirtualMapping(NULL,
763 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
764 PAGE_READWRITE,
765 &PageFrameNumber,
766 1);
767 if (!NT_SUCCESS(Status))
768 {
769 DPRINT1("Unable to create virtual mapping\n");
770 KeBugCheck(MEMORY_MANAGEMENT);
771 }
772 }
773
774 return STATUS_SUCCESS;
775 }
776
777 static
778 NTSTATUS
779 CcRosCreateVacb (
780 PROS_SHARED_CACHE_MAP SharedCacheMap,
781 LONGLONG FileOffset,
782 PROS_VACB *Vacb)
783 {
784 PROS_VACB current;
785 PROS_VACB previous;
786 PLIST_ENTRY current_entry;
787 NTSTATUS Status;
788 KIRQL oldIrql;
789
790 ASSERT(SharedCacheMap);
791
792 DPRINT("CcRosCreateVacb()\n");
793
794 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
795 {
796 *Vacb = NULL;
797 return STATUS_INVALID_PARAMETER;
798 }
799
800 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
801 current->BaseAddress = NULL;
802 current->Valid = FALSE;
803 current->Dirty = FALSE;
804 current->PageOut = FALSE;
805 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
806 current->SharedCacheMap = SharedCacheMap;
807 #if DBG
808 if (SharedCacheMap->Trace)
809 {
810 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
811 }
812 #endif
813 current->MappedCount = 0;
814 current->DirtyVacbListEntry.Flink = NULL;
815 current->DirtyVacbListEntry.Blink = NULL;
816 current->ReferenceCount = 1;
817 current->PinCount = 0;
818 KeInitializeMutex(&current->Mutex, 0);
819 CcRosAcquireVacbLock(current, NULL);
820 KeAcquireGuardedMutex(&ViewLock);
821
822 *Vacb = current;
823 /* There is a window between the call to CcRosLookupVacb
824  * and CcRosCreateVacb. We must check whether a VACB for the
825  * file offset already exists. If there is one, we release
826  * our newly created VACB and return the existing one.
827  */
828 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
829 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
830 previous = NULL;
831 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
832 {
833 current = CONTAINING_RECORD(current_entry,
834 ROS_VACB,
835 CacheMapVacbListEntry);
836 if (IsPointInRange(current->FileOffset.QuadPart,
837 VACB_MAPPING_GRANULARITY,
838 FileOffset))
839 {
840 CcRosVacbIncRefCount(current);
841 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
842 #if DBG
843 if (SharedCacheMap->Trace)
844 {
845 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
846 SharedCacheMap,
847 (*Vacb),
848 current);
849 }
850 #endif
851 CcRosReleaseVacbLock(*Vacb);
852 KeReleaseGuardedMutex(&ViewLock);
853 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
854 *Vacb = current;
855 CcRosAcquireVacbLock(current, NULL);
856 return STATUS_SUCCESS;
857 }
858 if (current->FileOffset.QuadPart < FileOffset)
859 {
860 ASSERT(previous == NULL ||
861 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
862 previous = current;
863 }
864 if (current->FileOffset.QuadPart > FileOffset)
865 break;
866 current_entry = current_entry->Flink;
867 }
868 /* There was no existing VACB. */
869 current = *Vacb;
870 if (previous)
871 {
872 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
873 }
874 else
875 {
876 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
877 }
878 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
879 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
880 KeReleaseGuardedMutex(&ViewLock);
881
882 MI_SET_USAGE(MI_USAGE_CACHE);
883 #if MI_TRACE_PFNS
884 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
885 {
886 PWCHAR pos;
887 ULONG len = 0;
888 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
889 if (pos)
890 {
891 len = wcslen(pos) * sizeof(WCHAR);
892 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
893 }
894 else
895 {
896 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
897 }
898 }
899 #endif
900
901 Status = CcRosMapVacb(current);
902 if (!NT_SUCCESS(Status))
903 {
904 RemoveEntryList(&current->CacheMapVacbListEntry);
905 RemoveEntryList(&current->VacbLruListEntry);
906 CcRosReleaseVacbLock(current);
907 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
908 }
909
910 return Status;
911 }
912
913 NTSTATUS
914 NTAPI
915 CcRosGetVacb (
916 PROS_SHARED_CACHE_MAP SharedCacheMap,
917 LONGLONG FileOffset,
918 PLONGLONG BaseOffset,
919 PVOID* BaseAddress,
920 PBOOLEAN UptoDate,
921 PROS_VACB *Vacb)
922 {
923 PROS_VACB current;
924 NTSTATUS Status;
925
926 ASSERT(SharedCacheMap);
927
928 DPRINT("CcRosGetVacb()\n");
929
930 /*
931 * Look for a VACB already mapping the same data.
932 */
933 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
934 if (current == NULL)
935 {
936 /*
937 * Otherwise create a new VACB.
938 */
939 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
940 if (!NT_SUCCESS(Status))
941 {
942 return Status;
943 }
944 }
945
946 KeAcquireGuardedMutex(&ViewLock);
947
948 /* Move to the tail of the LRU list */
949 RemoveEntryList(&current->VacbLruListEntry);
950 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
951
952 KeReleaseGuardedMutex(&ViewLock);
953
954 /*
955 * Return information about the VACB to the caller.
956 */
957 *UptoDate = current->Valid;
958 *BaseAddress = current->BaseAddress;
959 DPRINT("*BaseAddress %p\n", *BaseAddress);
960 *Vacb = current;
961 *BaseOffset = current->FileOffset.QuadPart;
962 return STATUS_SUCCESS;
963 }
964
965 NTSTATUS
966 NTAPI
967 CcRosRequestVacb (
968 PROS_SHARED_CACHE_MAP SharedCacheMap,
969 LONGLONG FileOffset,
970 PVOID* BaseAddress,
971 PBOOLEAN UptoDate,
972 PROS_VACB *Vacb)
973 /*
974 * FUNCTION: Request a page mapping for a shared cache map
975 */
976 {
977 LONGLONG BaseOffset;
978
979 ASSERT(SharedCacheMap);
980
981 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
982 {
983 DPRINT1("Bad fileoffset %I64x, should be a multiple of %x\n",
984 FileOffset, VACB_MAPPING_GRANULARITY);
985 KeBugCheck(CACHE_MANAGER);
986 }
987
988 return CcRosGetVacb(SharedCacheMap,
989 FileOffset,
990 &BaseOffset,
991 BaseAddress,
992 UptoDate,
993 Vacb);
994 }
995
996 static
997 VOID
998 CcFreeCachePage (
999 PVOID Context,
1000 MEMORY_AREA* MemoryArea,
1001 PVOID Address,
1002 PFN_NUMBER Page,
1003 SWAPENTRY SwapEntry,
1004 BOOLEAN Dirty)
1005 {
1006 ASSERT(SwapEntry == 0);
1007 if (Page != 0)
1008 {
1009 ASSERT(MmGetReferenceCountPage(Page) == 1);
1010 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1011 }
1012 }
1013
1014 NTSTATUS
1015 CcRosInternalFreeVacb (
1016 PROS_VACB Vacb)
1017 /*
1018 * FUNCTION: Releases a VACB associated with a shared cache map
1019 */
1020 {
1021 DPRINT("Freeing VACB 0x%p\n", Vacb);
1022 #if DBG
1023 if (Vacb->SharedCacheMap->Trace)
1024 {
1025 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1026 }
1027 #endif
1028
1029 MmLockAddressSpace(MmGetKernelAddressSpace());
1030 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1031 Vacb->MemoryArea,
1032 CcFreeCachePage,
1033 NULL);
1034 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1035
1036 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1037 return STATUS_SUCCESS;
1038 }
1039
1040 /*
1041 * @implemented
1042 */
1043 VOID
1044 NTAPI
1045 CcFlushCache (
1046 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1047 IN PLARGE_INTEGER FileOffset OPTIONAL,
1048 IN ULONG Length,
1049 OUT PIO_STATUS_BLOCK IoStatus)
1050 {
1051 PROS_SHARED_CACHE_MAP SharedCacheMap;
1052 LARGE_INTEGER Offset;
1053 LONGLONG RemainingLength;
1054 PROS_VACB current;
1055 NTSTATUS Status;
1056 KIRQL oldIrql;
1057
1058 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1059 SectionObjectPointers, FileOffset, Length);
1060
1061 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1062 SectionObjectPointers, FileOffset, Length, IoStatus);
1063
1064 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1065 {
1066 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1067 ASSERT(SharedCacheMap);
1068 if (FileOffset)
1069 {
1070 Offset = *FileOffset;
1071 RemainingLength = Length;
1072 }
1073 else
1074 {
1075 Offset.QuadPart = 0;
1076 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1077 }
1078
1079 if (IoStatus)
1080 {
1081 IoStatus->Status = STATUS_SUCCESS;
1082 IoStatus->Information = 0;
1083 }
1084
1085 while (RemainingLength > 0)
1086 {
1087 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1088 if (current != NULL)
1089 {
1090 if (current->Dirty)
1091 {
1092 Status = CcRosFlushVacb(current);
1093 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1094 {
1095 IoStatus->Status = Status;
1096 }
1097 }
1098
1099 CcRosReleaseVacbLock(current);
1100
1101 KeAcquireGuardedMutex(&ViewLock);
1102 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1103 CcRosVacbDecRefCount(current);
1104 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1105 KeReleaseGuardedMutex(&ViewLock);
1106 }
1107
1108 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1109 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1110 }
1111 }
1112 else
1113 {
1114 if (IoStatus)
1115 {
1116 IoStatus->Status = STATUS_INVALID_PARAMETER;
1117 }
1118 }
1119 }
1120
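/* Typical caller pattern, e.g. from a filesystem's flush/cleanup path (a
 * hedged sketch; as the loop above shows, a NULL FileOffset flushes the
 * whole file regardless of Length):
 *
 * IO_STATUS_BLOCK IoStatus;
 *
 * CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, &IoStatus);
 * Status = IoStatus.Status;
 */
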
1121 NTSTATUS
1122 NTAPI
1123 CcRosDeleteFileCache (
1124 PFILE_OBJECT FileObject,
1125 PROS_SHARED_CACHE_MAP SharedCacheMap)
1126 /*
1127 * FUNCTION: Releases the shared cache map associated with a file object
1128 */
1129 {
1130 PLIST_ENTRY current_entry;
1131 PROS_VACB current;
1132 LIST_ENTRY FreeList;
1133 KIRQL oldIrql;
1134
1135 ASSERT(SharedCacheMap);
1136
1137 SharedCacheMap->OpenCount++;
1138 KeReleaseGuardedMutex(&ViewLock);
1139
1140 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1141
1142 KeAcquireGuardedMutex(&ViewLock);
1143 SharedCacheMap->OpenCount--;
1144 if (SharedCacheMap->OpenCount == 0)
1145 {
1146 KIRQL OldIrql;
1147
1148 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1149
1150 /*
1151 * Release all VACBs
1152 */
1153 InitializeListHead(&FreeList);
1154 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1155 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1156 {
1157 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1158 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1159
1160 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1161 CcRosAcquireVacbLock(current, NULL);
1162 RemoveEntryList(&current->VacbLruListEntry);
1163 if (current->Dirty)
1164 {
1165 RemoveEntryList(&current->DirtyVacbListEntry);
1166 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1167 current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1168 DPRINT1("Freeing dirty VACB\n");
1169 }
1170 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1171 CcRosReleaseVacbLock(current);
1172
1173 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1174 }
1175 #if DBG
1176 SharedCacheMap->Trace = FALSE;
1177 #endif
1178 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1179
1180 KeReleaseGuardedMutex(&ViewLock);
1181 ObDereferenceObject(SharedCacheMap->FileObject);
1182
1183 while (!IsListEmpty(&FreeList))
1184 {
1185 current_entry = RemoveTailList(&FreeList);
1186 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1187 CcRosInternalFreeVacb(current);
1188 }
1189
1190 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1191 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1192 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1193
1194 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1195 KeAcquireGuardedMutex(&ViewLock);
1196 }
1197 return STATUS_SUCCESS;
1198 }
1199
1200 VOID
1201 NTAPI
1202 CcRosReferenceCache (
1203 PFILE_OBJECT FileObject)
1204 {
1205 PROS_SHARED_CACHE_MAP SharedCacheMap;
1206 KeAcquireGuardedMutex(&ViewLock);
1207 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1208 ASSERT(SharedCacheMap);
1209 ASSERT(SharedCacheMap->OpenCount != 0);
1210 SharedCacheMap->OpenCount++;
1211 KeReleaseGuardedMutex(&ViewLock);
1212 }
1213
1214 VOID
1215 NTAPI
1216 CcRosRemoveIfClosed (
1217 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1218 {
1219 PROS_SHARED_CACHE_MAP SharedCacheMap;
1220 DPRINT("CcRosRemoveIfClosed()\n");
1221 KeAcquireGuardedMutex(&ViewLock);
1222 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1223 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1224 {
1225 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1226 }
1227 KeReleaseGuardedMutex(&ViewLock);
1228 }
1229
1230
1231 VOID
1232 NTAPI
1233 CcRosDereferenceCache (
1234 PFILE_OBJECT FileObject)
1235 {
1236 PROS_SHARED_CACHE_MAP SharedCacheMap;
1237 KeAcquireGuardedMutex(&ViewLock);
1238 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1239 ASSERT(SharedCacheMap);
1240 if (SharedCacheMap->OpenCount > 0)
1241 {
1242 SharedCacheMap->OpenCount--;
1243 if (SharedCacheMap->OpenCount == 0)
1244 {
1245 MmFreeSectionSegments(SharedCacheMap->FileObject);
1246 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1247 }
1248 }
1249 KeReleaseGuardedMutex(&ViewLock);
1250 }
1251
1252 NTSTATUS
1253 NTAPI
1254 CcRosReleaseFileCache (
1255 PFILE_OBJECT FileObject)
1256 /*
1257 * FUNCTION: Called by the file system when a handle to a file object
1258 * has been closed.
1259 */
1260 {
1261 PROS_SHARED_CACHE_MAP SharedCacheMap;
1262
1263 KeAcquireGuardedMutex(&ViewLock);
1264
1265 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1266 {
1267 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1268 if (FileObject->PrivateCacheMap != NULL)
1269 {
1270 FileObject->PrivateCacheMap = NULL;
1271 if (SharedCacheMap->OpenCount > 0)
1272 {
1273 SharedCacheMap->OpenCount--;
1274 if (SharedCacheMap->OpenCount == 0)
1275 {
1276 MmFreeSectionSegments(SharedCacheMap->FileObject);
1277 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1278 }
1279 }
1280 }
1281 }
1282 KeReleaseGuardedMutex(&ViewLock);
1283 return STATUS_SUCCESS;
1284 }
1285
1286 NTSTATUS
1287 NTAPI
1288 CcTryToInitializeFileCache (
1289 PFILE_OBJECT FileObject)
1290 {
1291 PROS_SHARED_CACHE_MAP SharedCacheMap;
1292 NTSTATUS Status;
1293
1294 KeAcquireGuardedMutex(&ViewLock);
1295
1296 ASSERT(FileObject->SectionObjectPointer);
1297 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1298 if (SharedCacheMap == NULL)
1299 {
1300 Status = STATUS_UNSUCCESSFUL;
1301 }
1302 else
1303 {
1304 if (FileObject->PrivateCacheMap == NULL)
1305 {
1306 FileObject->PrivateCacheMap = SharedCacheMap;
1307 SharedCacheMap->OpenCount++;
1308 }
1309 Status = STATUS_SUCCESS;
1310 }
1311 KeReleaseGuardedMutex(&ViewLock);
1312
1313 return Status;
1314 }
1315
1316
1317 NTSTATUS
1318 NTAPI
1319 CcRosInitializeFileCache (
1320 PFILE_OBJECT FileObject,
1321 PCC_FILE_SIZES FileSizes,
1322 BOOLEAN PinAccess,
1323 PCACHE_MANAGER_CALLBACKS CallBacks,
1324 PVOID LazyWriterContext)
1325 /*
1326 * FUNCTION: Initializes a shared cache map for a file object
1327 */
1328 {
1329 PROS_SHARED_CACHE_MAP SharedCacheMap;
1330
1331 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1332 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1333 FileObject, SharedCacheMap);
1334
1335 KeAcquireGuardedMutex(&ViewLock);
1336 if (SharedCacheMap == NULL)
1337 {
1338 KIRQL OldIrql;
1339
1340 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1341 if (SharedCacheMap == NULL)
1342 {
1343 KeReleaseGuardedMutex(&ViewLock);
1344 return STATUS_INSUFFICIENT_RESOURCES;
1345 }
1346 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1347 ObReferenceObjectByPointer(FileObject,
1348 FILE_ALL_ACCESS,
1349 NULL,
1350 KernelMode);
1351 SharedCacheMap->FileObject = FileObject;
1352 SharedCacheMap->Callbacks = CallBacks;
1353 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1354 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1355 SharedCacheMap->FileSize = FileSizes->FileSize;
1356 SharedCacheMap->PinAccess = PinAccess;
1357 SharedCacheMap->DirtyPageThreshold = 0;
1358 SharedCacheMap->DirtyPages = 0;
1359 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1360 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1361 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1362
1363 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1364 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1365 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1366 }
1367 if (FileObject->PrivateCacheMap == NULL)
1368 {
1369 FileObject->PrivateCacheMap = SharedCacheMap;
1370 SharedCacheMap->OpenCount++;
1371 }
1372 KeReleaseGuardedMutex(&ViewLock);
1373
1374 return STATUS_SUCCESS;
1375 }
1376
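/* A hedged sketch of the caller side (normally reached through the public
 * CcInitializeCacheMap path rather than called directly); the Fcb layout and
 * MyFsdCacheManagerCallbacks are hypothetical:
 *
 * CC_FILE_SIZES FileSizes;
 *
 * FileSizes.AllocationSize = Fcb->AllocationSize;
 * FileSizes.FileSize = Fcb->FileSize;
 * FileSizes.ValidDataLength = Fcb->ValidDataLength;
 *
 * Status = CcRosInitializeFileCache(FileObject,
 *                                   &FileSizes,
 *                                   FALSE,                       // PinAccess
 *                                   &MyFsdCacheManagerCallbacks,
 *                                   Fcb);                        // LazyWriteContext
 */
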
1377 /*
1378 * @implemented
1379 */
1380 PFILE_OBJECT
1381 NTAPI
1382 CcGetFileObjectFromSectionPtrs (
1383 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1384 {
1385 PROS_SHARED_CACHE_MAP SharedCacheMap;
1386
1387 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1388
1389 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1390 {
1391 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1392 ASSERT(SharedCacheMap);
1393 return SharedCacheMap->FileObject;
1394 }
1395 return NULL;
1396 }
1397
1398 VOID
1399 NTAPI
1400 CcShutdownLazyWriter (
1401 VOID)
1402 {
1403 /* Simply set the event; the lazy writer will stop when it's done */
1404 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1405 }
1406
1407 BOOLEAN
1408 INIT_FUNCTION
1409 NTAPI
1410 CcInitView (
1411 VOID)
1412 {
1413 HANDLE LazyWriter;
1414 NTSTATUS Status;
1415 KPRIORITY Priority;
1416 OBJECT_ATTRIBUTES ObjectAttributes;
1417
1418 DPRINT("CcInitView()\n");
1419
1420 InitializeListHead(&DirtyVacbListHead);
1421 InitializeListHead(&VacbLruListHead);
1422 InitializeListHead(&CcDeferredWrites);
1423 InitializeListHead(&CcCleanSharedCacheMapList);
1424 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1425 KeInitializeSpinLock(&iSharedCacheMapLock);
1426 KeInitializeGuardedMutex(&ViewLock);
1427 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1428 NULL,
1429 NULL,
1430 0,
1431 sizeof(INTERNAL_BCB),
1432 TAG_BCB,
1433 20);
1434 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1435 NULL,
1436 NULL,
1437 0,
1438 sizeof(ROS_SHARED_CACHE_MAP),
1439 TAG_SHARED_CACHE_MAP,
1440 20);
1441 ExInitializeNPagedLookasideList(&VacbLookasideList,
1442 NULL,
1443 NULL,
1444 0,
1445 sizeof(ROS_VACB),
1446 TAG_VACB,
1447 20);
1448
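/* Register CcRosTrimCache as the trim callback for the MC_CACHE memory
 * consumer, so Mm can call back with a page target when cache memory has to
 * be reclaimed (see CcRosTrimCache above).
 */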
1449 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1450
1451 /* Initialize lazy writer events */
1452 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1453 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
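/* iLazyWriterShutdown is signaled once by CcShutdownLazyWriter to make the
 * loop in CciLazyWriter exit; iLazyWriterNotify is cleared at the start of
 * each pass and set again when the pass completes, so that waiters can
 * synchronize with lazy writer activity.
 */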
1454
1455 /* Define lazy writer threshold, depending on system type */
1456 switch (MmQuerySystemSize())
1457 {
1458 case MmSmallSystem:
1459 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1460 break;
1461
1462 case MmMediumSystem:
1463 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1464 break;
1465
1466 case MmLargeSystem:
1467 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1468 break;
1469 }
1470
1471 /* Start the lazy writer thread */
1472 InitializeObjectAttributes(&ObjectAttributes,
1473 NULL,
1474 OBJ_KERNEL_HANDLE,
1475 NULL,
1476 NULL);
1477 Status = PsCreateSystemThread(&LazyWriter,
1478 THREAD_ALL_ACCESS,
1479 &ObjectAttributes,
1480 NULL,
1481 NULL,
1482 CciLazyWriter,
1483 NULL);
1484 if (!NT_SUCCESS(Status))
1485 {
1486 return FALSE;
1487 }
1488
1489 Priority = 27;
1490 Status = NtSetInformationThread(LazyWriter,
1491 ThreadPriority,
1492 &Priority,
1493 sizeof(Priority));
1494 ASSERT(NT_SUCCESS(Status));
1495
1496 /* Handle is not needed */
1497 ObCloseHandle(LazyWriter, KernelMode);
1498
1499 CcInitCacheZeroPage();
1500
1501 return TRUE;
1502 }
1503
1504 #if DBG && defined(KDBG)
1505 BOOLEAN
1506 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1507 {
1508 PLIST_ENTRY ListEntry;
1509 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1510
1511 KdbpPrint(" Usage Summary (in kb)\n");
1512 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1513 /* No need to lock the spin lock here, we're in DBG */
1514 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1515 ListEntry != &CcCleanSharedCacheMapList;
1516 ListEntry = ListEntry->Flink)
1517 {
1518 PLIST_ENTRY Vacbs;
1519 ULONG Valid = 0, Dirty = 0;
1520 PROS_SHARED_CACHE_MAP SharedCacheMap;
1521 PUNICODE_STRING FileName;
1522
1523 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1524
1525 /* Dirty size */
1526 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1527
1528 /* First, sum up the valid size over all associated VACBs */
1529 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1530 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1531 Vacbs = Vacbs->Flink)
1532 {
1533 PROS_VACB Vacb;
1534
1535 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1536 if (Vacb->Valid)
1537 {
1538 Valid += VACB_MAPPING_GRANULARITY / 1024;
1539 }
1540 }
1541
1542 /* Setup name */
1543 if (SharedCacheMap->FileObject != NULL &&
1544 SharedCacheMap->FileObject->FileName.Length != 0)
1545 {
1546 FileName = &SharedCacheMap->FileObject->FileName;
1547 }
1548 else
1549 {
1550 FileName = &NoName;
1551 }
1552
1553 /* And print */
1554 KdbpPrint("%p\t%lu\t%lu\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1555 }
1556
1557 return TRUE;
1558 }
1559 #endif
1560
1561 /* EOF */