/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH (256 * 1024)

typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

typedef enum _CC_CAN_WRITE_RETRY
{
    FirstTry = 0,
    RetryAllowRemote = 253,
    RetryForceCheckPerFile = 254,
    RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;
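/*
 * NOTE: apart from FirstTry, these are internal sentinels passed through the
 * BOOLEAN Retrying parameter of CcCanIWrite(); CcPostDeferredWrites() below,
 * for instance, passes RetryForceCheckPerFile to force the per-file dirty
 * page check on its retry path.
 */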

ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %lx\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %lx\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}

NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }
    return Status;
}

BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

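    /*
     * First handle a leading chunk when the offset is not VACB-aligned.
     * A worked example, assuming VACB_MAPPING_GRANULARITY is 256 KiB: for a
     * 300 KiB request at file offset 100 KiB, the block below copies
     * min(300, 256 - 100) = 156 KiB out of the VACB mapping [0, 256) KiB,
     * and the aligned loop that follows handles the remaining 144 KiB.
     */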
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* If that was a successful sync read operation, let's handle read ahead */
    if (Operation == CcOperationRead && Length == 0 && Wait)
    {
        /* If file isn't random access, schedule next read */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS))
        {
            CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
        }

        /* And update the read history in the private cache map: the previous
         * read slides into slot 1 and the current one is recorded in slot 2 */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}

VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* The unsigned sum wrapped around (e.g. 0xFFFFFF00 + 0x200
             * yields 0x100): we overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, RetryForceCheckPerFile))
            {
                /* We can, so remove it from the list and stop looking for an entry */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If we don't accept modified pages, stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset count as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* Nothing to write found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}

VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * PrivateCacheMap might disappear in-between if the handle to the file
     * is closed (the private map is attached to the handle, not to the
     * file), so we need to hold the master lock while we deal with it.
     * It cannot be torn down without that lock being acquired first.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract read offset and length and release private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* The rest of the algorithm works like CcCopyData, with the slight
     * difference that we don't copy the data back into a user-backed buffer:
     * we just bring it into Cc.
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See previous comment about private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as inactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, 0xFFFEFFFF);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}

/*
 * @unimplemented
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Always allow remote files, unless this retry comes from the posted writes path */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed max tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    /* Convert it to a page count */
    Pages = (Length + PAGE_SIZE - 1) >> PAGE_SHIFT;

    /* By default, assume limits per file won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check for limits per file? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If master is not locked, lock it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Do we have limits per file set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether they are blocking */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release master */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }

    /* So, now allow the write if:
     * - this is not the first try, or nothing is queued for deferral yet
     * AND:
     * - we don't exceed the global dirty page threshold
     * - we don't exceed what Mm can allow us to use:
     *   + if we're above the top throttle limit, that's fine
     *   + if we're above the bottom limit with few modified pages, that's fine
     *   + otherwise, throttle!
     * - we don't hit the per-file limits checked above
     */
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* We cannot write right now. If the caller can't wait, fail immediately;
     * otherwise, enter the wait loop below until we can write for real.
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Now, we'll loop until our event is set. When it is, it means the
     * caller can, and must, perform the write immediately.
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}

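/*
 * A sketch of the intended caller pattern in a filesystem write path (not
 * code from this file; FsdWritePostRoutine, CanWait and the two contexts are
 * hypothetical):
 *
 *     if (!CcCanIWrite(FileObject, Length, CanWait, FALSE))
 *     {
 *         // Too many dirty pages: queue the write; CcPostDeferredWrites()
 *         // will run it once there is room again.
 *         CcDeferWrite(FileObject, FsdWritePostRoutine,
 *                      Context1, Context2, Length, FALSE);
 *         return STATUS_PENDING;
 *     }
 *     CcCopyWrite(FileObject, &FileOffset, Length, CanWait, Buffer);
 */
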
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}

/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

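        /*
         * Build one stack-allocated MDL whose PFN array entries all refer to
         * the shared zero page, so a single physical page backs each write of
         * up to MAX_ZERO_LENGTH bytes. For instance, zeroing 1 MiB at a
         * page-aligned offset is issued as four 256 KiB page writes reusing
         * that same page.
         */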
        while (Length > 0)
        {
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(PFN_NUMBER)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}