/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

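/* Maximum number of bytes zeroed in one chunk by the non-cached path of
 * CcZeroData below; also the cap CcCanIWrite applies when sizing a write
 * for throttling purposes */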
#define MAX_ZERO_LENGTH    (256 * 1024)

typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

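/* Retry contexts carried through the BOOLEAN Retrying parameter of
 * CcCanIWrite; FirstTry (FALSE) means this is not a retry at all, the
 * higher values tell CcCanIWrite which checks to (re)run */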
typedef enum _CC_CAN_WRITE_RETRY
{
    FirstTry = 0,
    RetryAllowRemote = 253,
    RetryForceCheckPerFile = 254,
    RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;

ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* Counters:
 * - Number of pages flushed to disk
 * - Number of flush operations
 */
ULONG CcDataPages = 0;
ULONG CcDataFlushes = 0;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

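/* Allocates and zeroes the single physical page (CcZeroPage) whose PFN
 * backs every page of the dummy MDL used by the non-cached path of
 * CcZeroData */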
VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

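/* Fills a VACB with data from the file: builds an MDL over the mapped
 * view, issues a paging read, and zeroes whatever lies past the end of
 * the section. STATUS_END_OF_FILE is treated as success. */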
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    ULARGE_INTEGER LargeSize;

    LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
    if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
    {
        LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
    }
    Size = LargeSize.LowPart;

    Size = ROUND_TO_PAGES(Size);
    ASSERT(Size <= VACB_MAPPING_GRANULARITY);
    ASSERT(Size > 0);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}

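/* Counterpart of CcReadVirtualAddress: flushes the (possibly partial)
 * content of a VACB back to the file with a synchronous paging write */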
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    ULARGE_INTEGER LargeSize;

    LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
    if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
    {
        LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
    }
    Size = LargeSize.LowPart;
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    ASSERT(Size <= VACB_MAPPING_GRANULARITY);
    ASSERT(Size > 0);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}

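/* Performs the actual data move for CcCopyData: zeroes the cache view,
 * or copies between it and the caller's buffer under SEH so that a fault
 * on a user-backed buffer comes back as a status instead of a crash */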
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }
    return Status;
}

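/* Common worker for CcCopyRead, CcCopyWrite and CcZeroData: walks the
 * file range VACB by VACB, faulting data in as needed. Raises on hard
 * failures; returns FALSE only when Wait is FALSE and the data isn't
 * resident yet. */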
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* Test whether the requested data is already available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* Data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* If that was a successful sync read operation, let's handle read ahead */
    if (Operation == CcOperationRead && Length == 0 && Wait)
    {
        /* If the file isn't random access and the next read may take us
         * across a VACB boundary, schedule the next read
         */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
            (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + BytesCopied - 1) / VACB_MAPPING_GRANULARITY)
        {
            CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
        }

        /* And update the read history in the private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}

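/* Walks the deferred write list and, for every entry the throttling logic
 * now accepts, either signals the waiter (entries queued by CcCanIWrite)
 * or invokes the posted write routine (entries queued by CcDeferWrite) */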
VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* We overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check whether we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, RetryForceCheckPerFile))
            {
                /* We can, so remove it from the list and stop looking for an entry */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If we don't accept modified pages, stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset the count, as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* No writable entry found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}

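/* Worker executing the read ahead scheduled by CcScheduleReadAhead:
 * brings the next chunk of the file into the cache without copying
 * anything back to a user buffer */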
VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * PrivateCacheMap might disappear in-between if the handle to the
     * file is closed (the private map is attached to the handle, not to
     * the file), so we must hold the master lock while we deal with it.
     * It cannot disappear without first acquiring that lock.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract the read offset and length and release the private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* Don't read past the end of the file */
    if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        goto Clear;
    }
    if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
    {
        Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
    }

    /* The rest of the algorithm looks like CcCopyData, with the slight
     * difference that we don't copy data back to a user-backed buffer.
     * We just bring the data into Cc.
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See the previous comment about the private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as inactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If the file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}

/*
 * @unimplemented
 */
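/* A hypothetical FSD write path would use this function roughly like the
 * sketch below (names such as MyFsdDeferredWrite are assumptions, not
 * part of this file):
 *
 *     if (!CcCanIWrite(FileObject, Length, CanWait, FALSE))
 *     {
 *         // Can't write now and can't block: post the write and let
 *         // CcPostDeferredWrites call us back once throttling allows it
 *         CcDeferWrite(FileObject, MyFsdDeferredWrite, Context, NULL,
 *                      Length, FALSE);
 *         return STATUS_PENDING;
 *     }
 *     CcCopyWrite(FileObject, &Offset, Length, CanWait, Buffer);
 */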
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
            FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Allow remote files, unless this is a retry of a posted write */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed the max tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    Pages = BYTES_TO_PAGES(Length);

    /* By default, assume the per-file limits won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check the per-file limits? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If the master lock is not held yet, acquire it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Do we have per-file limits set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether they are blocking */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release the master lock */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }

    /* Now, allow the write if:
     * - This is not the first try, or nothing is deferred yet
     * AND:
     * - We don't exceed the global dirty page threshold
     * - We don't exceed what Mm can allow us to use
     *   + If we're above the top, that's fine
     *   + If we're above the bottom with limited modified pages, that's fine
     *   + Otherwise, throttle!
     */
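    /* Illustrative numbers (made up, the real thresholds are tuned at
     * runtime): with CcDirtyPageThreshold = 1000 and CcTotalDirtyPages
     * = 990, a 16-page write fails the second test and gets deferred,
     * while an 8-page write passes as long as Mm isn't short on pages.
     */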
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* If the caller can't wait, give up now. Otherwise, we'll start the
     * wait loop and wait until we can write for real
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's the first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    DPRINT1("Actively deferring write for: %p\n", FileObject);
    /* Now, we'll loop until our event is set. Once it is set, the caller
     * can, and must, immediately write
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}

/*
 * @implemented
 */
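/* Typically reached from a filesystem's IRP_MJ_READ handler or its
 * FastIoRead callback. A minimal sketch of such a fast I/O caller
 * (the surrounding dispatch code is an assumption):
 *
 *     if (!CcCopyRead(FileObject, &ByteOffset, Length,
 *                     Wait, Buffer, IoStatus))
 *     {
 *         // Wait was FALSE and the data isn't resident yet: tell the
 *         // fast I/O dispatcher to fall back to the IRP path
 *         return FALSE;
 *     }
 *     return TRUE;
 */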
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
            FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
            FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}

/*
 * @implemented
 */
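/* Callers pass a PostRoutine that performs the write they had to give up
 * on; a minimal sketch (MyFsdDeferredWrite is an assumption):
 *
 *     VOID NTAPI MyFsdDeferredWrite(PVOID Context1, PVOID Context2)
 *     {
 *         // Invoked by CcPostDeferredWrites once the write fits under
 *         // the throttling limits: redo the write sequence from here
 *     }
 */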
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
            FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If that failed, execute the operation immediately! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's the first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
            FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
            FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
            FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}