[NTOSKRNL] Add the CcDataFlushes and CcDataPages counters
[reactos.git] / ntoskrnl / cc / copy.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
5 * PURPOSE: Implements cache managers copy interface
6 *
7 * PROGRAMMERS: Some people?
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* INCLUDES ******************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 /* GLOBALS *******************************************************************/
18
19 static PFN_NUMBER CcZeroPage = 0;
20
21 #define MAX_ZERO_LENGTH (256 * 1024)
22
23 typedef enum _CC_COPY_OPERATION
24 {
25 CcOperationRead,
26 CcOperationWrite,
27 CcOperationZero
28 } CC_COPY_OPERATION;
29
30 typedef enum _CC_CAN_WRITE_RETRY
31 {
32 FirstTry = 0,
33 RetryAllowRemote = 253,
34 RetryForceCheckPerFile = 254,
35 RetryMasterLocked = 255,
36 } CC_CAN_WRITE_RETRY;
37
38 ULONG CcRosTraceLevel = 0;
39 ULONG CcFastMdlReadWait;
40 ULONG CcFastMdlReadNotPossible;
41 ULONG CcFastReadNotPossible;
42 ULONG CcFastReadWait;
43 ULONG CcFastReadNoWait;
44 ULONG CcFastReadResourceMiss;
45
46 /* Counters:
47 * - Amount of pages flushed to the disk
48 * - Number of flush operations
49 */
50 ULONG CcDataPages = 0;
51 ULONG CcDataFlushes = 0;
52
53 /* FUNCTIONS *****************************************************************/
54
55 VOID
56 NTAPI
57 MiZeroPhysicalPage (
58 IN PFN_NUMBER PageFrameIndex
59 );
60
61 VOID
62 NTAPI
63 CcInitCacheZeroPage (
64 VOID)
65 {
66 NTSTATUS Status;
67
68 MI_SET_USAGE(MI_USAGE_CACHE);
69 //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
70 Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
71 if (!NT_SUCCESS(Status))
72 {
73 DbgPrint("Can't allocate CcZeroPage.\n");
74 KeBugCheck(CACHE_MANAGER);
75 }
76 MiZeroPhysicalPage(CcZeroPage);
77 }
78
/*
 * Fills a VACB's mapped view with data read from the backing file.
 *
 * Vacb - the VACB to populate; its BaseAddress view and FileOffset
 *        describe where to read and where the data lands.
 *
 * Returns STATUS_SUCCESS on success (a short read past end-of-file is
 * treated as success, with the tail of the view zero-filled), or the
 * I/O failure status otherwise.
 */
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    /* Read up to the end of the section, but never more than one view */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    /* Paging I/O works in whole pages, so round the transfer up */
    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* IoWriteAccess: the read operation will WRITE into these pages */
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        /* Probing a kernel view must not fault; treat it as fatal */
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        /* Issue the paging read and wait for it if it goes asynchronous */
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    /* STATUS_END_OF_FILE is not an error here: the caller gets zeroes */
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    /* Zero the part of the view that lies beyond the section size so
     * stale mapping contents never leak to readers */
    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}
146
/*
 * Flushes a VACB's mapped view back to the backing file with a
 * synchronous paging write.
 *
 * Vacb - the VACB whose view contents are written out.
 *
 * Returns STATUS_SUCCESS (end-of-file is tolerated) or the I/O failure
 * status.
 */
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    /* Write up to the end of the section, capped at one view */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        /* Touch every page of the view so its PDE mappings are faulted
         * in before MmProbeAndLockPages walks them */
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* IoReadAccess: the write operation only READS these pages */
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        /* Probing a kernel view must not fault; treat it as fatal */
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        /* Issue the paging write and wait for it if it goes asynchronous */
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    /* End-of-file on a flush is not considered a failure */
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}
213
214 NTSTATUS
215 ReadWriteOrZero(
216 _Inout_ PVOID BaseAddress,
217 _Inout_opt_ PVOID Buffer,
218 _In_ ULONG Length,
219 _In_ CC_COPY_OPERATION Operation)
220 {
221 NTSTATUS Status = STATUS_SUCCESS;
222
223 if (Operation == CcOperationZero)
224 {
225 /* Zero */
226 RtlZeroMemory(BaseAddress, Length);
227 }
228 else
229 {
230 _SEH2_TRY
231 {
232 if (Operation == CcOperationWrite)
233 RtlCopyMemory(BaseAddress, Buffer, Length);
234 else
235 RtlCopyMemory(Buffer, BaseAddress, Length);
236 }
237 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
238 {
239 Status = _SEH2_GetExceptionCode();
240 }
241 _SEH2_END;
242 }
243 return Status;
244 }
245
/*
 * Common engine behind CcCopyRead/CcCopyWrite/CcZeroData for cached files.
 * Walks the file range one VACB at a time, faulting data in from disk as
 * needed, and copies to/from Buffer (or zeroes the cache) per Operation.
 *
 * Returns FALSE only when Wait is FALSE and some needed data is not yet
 * resident; raises (ExRaiseStatus) on VACB/read/buffer failures; returns
 * TRUE with IoStatus filled on success.
 */
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            /* An invalid VACB overlapping the request means a disk read
             * would be needed, which we are not allowed to wait for */
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            /* The list is ordered by offset; past the range we are done */
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    /* Handle a leading partial view if the offset is not view-aligned */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            /* View not resident yet: fault the data in from disk */
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        /* Mark the VACB valid; writes and zeroing also mark it dirty */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        /* Zeroing has no caller buffer to advance */
        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* Remaining data is processed one full (or final partial) view at a time */
    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        /* A full-view write/zero overwrites everything, so the old
         * contents need not be read in; reads and partial writes do */
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* If that was a successful sync read operation, let's handle read ahead */
    if (Operation == CcOperationRead && Length == 0 && Wait)
    {
        /* If file isn't random access, schedule next read */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS))
        {
            CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
        }

        /* And update read history in private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}
397
/*
 * Drains the CcDeferredWrites queue: repeatedly picks an entry whose write
 * can now proceed (per CcCanIWrite) and either signals its waiter or runs
 * its post routine. Stops when no further entry can be satisfied.
 */
VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* We overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, RetryForceCheckPerFile))
            {
                /* We can, so remove it from the list and stop looking for entry */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If we don't accept modified pages, stop here */
            /* (entries without per-file limiting must be serviced in
             * order, so we cannot skip past a blocked one) */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset count as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* Nothing to write found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit */
        /* (the waiter in CcCanIWrite owns the context; it lives on its stack) */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}
471
/*
 * Worker-side read-ahead: brings the range recorded in the file's private
 * cache map (ReadAheadOffset/Length[1]) into the cache, without copying to
 * any user buffer. Consumes the extra file object reference taken by
 * CcScheduleReadAhead and clears the READ_AHEAD_ACTIVE flag on exit.
 */
VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * PrivateCacheMap might disappear in-between if the handle
     * to the file is closed (private is attached to the handle not to
     * the file), so we need to lock the master lock while we deal with
     * it. It won't disappear without attempting to lock such lock.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract read offset and length and release private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* Don't read past the end of the file */
    if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        goto Clear;
    }
    if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
    {
        Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
    }

    /* Next of the algorithm will lock like CcCopyData with the slight
     * difference that we don't copy data back to an user-backed buffer
     * We just bring data into Cc
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        /* Leading unaligned part: populate the view containing the offset */
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        /* Release valid and clean: read ahead never dirties pages */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    /* Remaining range is view-aligned: populate one view per iteration */
    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See previous comment about private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as unactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}
630
/*
 * @unimplemented
 *
 * Decides whether a write of BytesToWrite may proceed now given global and
 * per-file dirty-page throttling. If Wait is TRUE and the write must be
 * throttled, queues a stack-based DEFERRED_WRITE and blocks until
 * CcPostDeferredWrites signals that the write can go ahead.
 *
 * Retrying carries a CC_CAN_WRITE_RETRY value (passed as BOOLEAN) that
 * encodes the call context: first try, posted retry, forced per-file
 * check, or "master lock already held".
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Allow remote file if not from posted */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed max tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    /* Convert it to pages count */
    Pages = (Length + PAGE_SIZE - 1) >> PAGE_SHIFT;

    /* By default, assume limits per file won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check for limits per file? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If master is not locked, lock it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Do we have limits per file set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether they are blocking */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release master */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }

    /* So, now allow write if:
     * - Not the first try or we have no throttling yet
     * AND:
     * - We don't exceed threshold!
     * - We don't exceed what Mm can allow us to use
     *   + If we're above top, that's fine
     *   + If we're above bottom with limited modified pages, that's fine
     *   + Otherwise, throttle!
     */
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* If we can wait, we'll start the wait loop for waiting till we can
     * write for real
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    /* (stack-allocated: CcPostDeferredWrites signals Event instead of
     * freeing it, see the Event branch there) */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Now, we'll loop until our event is set. When it is set, it means that caller
     * can immediately write, and has to
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}
787
788 /*
789 * @implemented
790 */
791 BOOLEAN
792 NTAPI
793 CcCopyRead (
794 IN PFILE_OBJECT FileObject,
795 IN PLARGE_INTEGER FileOffset,
796 IN ULONG Length,
797 IN BOOLEAN Wait,
798 OUT PVOID Buffer,
799 OUT PIO_STATUS_BLOCK IoStatus)
800 {
801 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
802 FileObject, FileOffset->QuadPart, Length, Wait);
803
804 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
805 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
806 FileObject, FileOffset->QuadPart, Length, Wait,
807 Buffer, IoStatus);
808
809 return CcCopyData(FileObject,
810 FileOffset->QuadPart,
811 Buffer,
812 Length,
813 CcOperationRead,
814 Wait,
815 IoStatus);
816 }
817
818 /*
819 * @implemented
820 */
821 BOOLEAN
822 NTAPI
823 CcCopyWrite (
824 IN PFILE_OBJECT FileObject,
825 IN PLARGE_INTEGER FileOffset,
826 IN ULONG Length,
827 IN BOOLEAN Wait,
828 IN PVOID Buffer)
829 {
830 IO_STATUS_BLOCK IoStatus;
831
832 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
833 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
834
835 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
836 "Length %lu, Wait %u, Buffer 0x%p)\n",
837 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
838
839 return CcCopyData(FileObject,
840 FileOffset->QuadPart,
841 Buffer,
842 Length,
843 CcOperationWrite,
844 Wait,
845 &IoStatus);
846 }
847
848 /*
849 * @implemented
850 */
851 VOID
852 NTAPI
853 CcDeferWrite (
854 IN PFILE_OBJECT FileObject,
855 IN PCC_POST_DEFERRED_WRITE PostRoutine,
856 IN PVOID Context1,
857 IN PVOID Context2,
858 IN ULONG BytesToWrite,
859 IN BOOLEAN Retrying)
860 {
861 KIRQL OldIrql;
862 PDEFERRED_WRITE Context;
863 PFSRTL_COMMON_FCB_HEADER Fcb;
864
865 CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
866 FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);
867
868 /* Try to allocate a context for queueing the write operation */
869 Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
870 /* If it failed, immediately execute the operation! */
871 if (Context == NULL)
872 {
873 PostRoutine(Context1, Context2);
874 return;
875 }
876
877 Fcb = FileObject->FsContext;
878
879 /* Otherwise, initialize the context */
880 RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
881 Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
882 Context->NodeByteSize = sizeof(DEFERRED_WRITE);
883 Context->FileObject = FileObject;
884 Context->PostRoutine = PostRoutine;
885 Context->Context1 = Context1;
886 Context->Context2 = Context2;
887 Context->BytesToWrite = BytesToWrite;
888 Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
889
890 /* And queue it */
891 if (Retrying)
892 {
893 /* To the top, if that's a retry */
894 ExInterlockedInsertHeadList(&CcDeferredWrites,
895 &Context->DeferredWriteLinks,
896 &CcDeferredWriteSpinLock);
897 }
898 else
899 {
900 /* To the bottom, if that's a first time */
901 ExInterlockedInsertTailList(&CcDeferredWrites,
902 &Context->DeferredWriteLinks,
903 &CcDeferredWriteSpinLock);
904 }
905
906 /* Try to execute the posted writes */
907 CcPostDeferredWrites();
908
909 /* Schedule a lazy writer run to handle deferred writes */
910 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
911 if (!LazyWriter.ScanActive)
912 {
913 CcScheduleLazyWriteScan(FALSE);
914 }
915 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
916 }
917
918 /*
919 * @unimplemented
920 */
921 VOID
922 NTAPI
923 CcFastCopyRead (
924 IN PFILE_OBJECT FileObject,
925 IN ULONG FileOffset,
926 IN ULONG Length,
927 IN ULONG PageCount,
928 OUT PVOID Buffer,
929 OUT PIO_STATUS_BLOCK IoStatus)
930 {
931 LARGE_INTEGER LargeFileOffset;
932 BOOLEAN Success;
933
934 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
935 FileObject, FileOffset, Length, PageCount, Buffer);
936
937 DBG_UNREFERENCED_PARAMETER(PageCount);
938
939 LargeFileOffset.QuadPart = FileOffset;
940 Success = CcCopyRead(FileObject,
941 &LargeFileOffset,
942 Length,
943 TRUE,
944 Buffer,
945 IoStatus);
946 ASSERT(Success == TRUE);
947 }
948
949 /*
950 * @unimplemented
951 */
952 VOID
953 NTAPI
954 CcFastCopyWrite (
955 IN PFILE_OBJECT FileObject,
956 IN ULONG FileOffset,
957 IN ULONG Length,
958 IN PVOID Buffer)
959 {
960 LARGE_INTEGER LargeFileOffset;
961 BOOLEAN Success;
962
963 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
964 FileObject, FileOffset, Length, Buffer);
965
966 LargeFileOffset.QuadPart = FileOffset;
967 Success = CcCopyWrite(FileObject,
968 &LargeFileOffset,
969 Length,
970 TRUE,
971 Buffer);
972 ASSERT(Success == TRUE);
973 }
974
975 /*
976 * @implemented
977 */
978 BOOLEAN
979 NTAPI
980 CcZeroData (
981 IN PFILE_OBJECT FileObject,
982 IN PLARGE_INTEGER StartOffset,
983 IN PLARGE_INTEGER EndOffset,
984 IN BOOLEAN Wait)
985 {
986 NTSTATUS Status;
987 LARGE_INTEGER WriteOffset;
988 LONGLONG Length;
989 ULONG CurrentLength;
990 PMDL Mdl;
991 ULONG i;
992 IO_STATUS_BLOCK Iosb;
993 KEVENT Event;
994
995 CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
996 FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);
997
998 DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
999 "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
1000 Wait);
1001
1002 Length = EndOffset->QuadPart - StartOffset->QuadPart;
1003 WriteOffset.QuadPart = StartOffset->QuadPart;
1004
1005 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
1006 {
1007 /* File is not cached */
1008
1009 Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));
1010
1011 while (Length > 0)
1012 {
1013 if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
1014 {
1015 CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
1016 }
1017 else
1018 {
1019 CurrentLength = Length;
1020 }
1021 MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
1022 Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
1023 for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
1024 {
1025 ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
1026 }
1027 KeInitializeEvent(&Event, NotificationEvent, FALSE);
1028 Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
1029 if (Status == STATUS_PENDING)
1030 {
1031 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
1032 Status = Iosb.Status;
1033 }
1034 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
1035 {
1036 MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
1037 }
1038 if (!NT_SUCCESS(Status))
1039 {
1040 return FALSE;
1041 }
1042 WriteOffset.QuadPart += CurrentLength;
1043 Length -= CurrentLength;
1044 }
1045 }
1046 else
1047 {
1048 IO_STATUS_BLOCK IoStatus;
1049
1050 return CcCopyData(FileObject,
1051 WriteOffset.QuadPart,
1052 NULL,
1053 Length,
1054 CcOperationZero,
1055 Wait,
1056 &IoStatus);
1057 }
1058
1059 return TRUE;
1060 }