/* Source: reactos.git — ntoskrnl/cc/copy.c (web-listing residue removed) */
/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */
10
11 /* INCLUDES ******************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 /* GLOBALS *******************************************************************/
18
/* Physical page kept permanently zeroed; CcZeroData points MDL PFN arrays at
 * it to write zeroes to non-cached files without allocating a buffer
 * (initialized in CcInitCacheZeroPage) */
static PFN_NUMBER CcZeroPage = 0;

/* Largest chunk (in bytes) handled in one pass when zeroing or when sizing
 * the write-throttling check in CcCanIWrite */
#define MAX_ZERO_LENGTH    (256 * 1024)

/* Selects what the common worker CcCopyData() does with each VACB window */
typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

/* Retry progression for CcCanIWrite. NOTE(review): callers smuggle these
 * values through the BOOLEAN 'Retrying' parameter (see CcPostDeferredWrites
 * passing RetryForceCheckPerFile) — confirm BOOLEAN is wide enough on all
 * targets. */
typedef enum _CC_CAN_WRITE_RETRY
{
    FirstTry = 0,
    RetryAllowRemote = 253,
    RetryForceCheckPerFile = 254,
    RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;

/* Debug trace verbosity for the ROS cache manager (used by CCTRACE) */
ULONG CcRosTraceLevel = 0;

/* Fast-I/O statistics counters exposed by the cache manager */
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* Counters:
 * - Amount of pages flushed to the disk
 * - Number of flush operations
 */
ULONG CcDataPages = 0;
ULONG CcDataFlushes = 0;
52
53 /* FUNCTIONS *****************************************************************/
54
55 VOID
56 NTAPI
57 MiZeroPhysicalPage (
58 IN PFN_NUMBER PageFrameIndex
59 );
60
61 VOID
62 NTAPI
63 CcInitCacheZeroPage (
64 VOID)
65 {
66 NTSTATUS Status;
67
68 MI_SET_USAGE(MI_USAGE_CACHE);
69 //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
70 Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
71 if (!NT_SUCCESS(Status))
72 {
73 DbgPrint("Can't allocate CcZeroPage.\n");
74 KeBugCheck(CACHE_MANAGER);
75 }
76 MiZeroPhysicalPage(CcZeroPage);
77 }
78
79 NTSTATUS
80 NTAPI
81 CcReadVirtualAddress (
82 PROS_VACB Vacb)
83 {
84 ULONG Size, Pages;
85 PMDL Mdl;
86 NTSTATUS Status;
87 IO_STATUS_BLOCK IoStatus;
88 KEVENT Event;
89
90 Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
91 if (Size > VACB_MAPPING_GRANULARITY)
92 {
93 Size = VACB_MAPPING_GRANULARITY;
94 }
95
96 Pages = BYTES_TO_PAGES(Size);
97 ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);
98
99 Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
100 if (!Mdl)
101 {
102 return STATUS_INSUFFICIENT_RESOURCES;
103 }
104
105 Status = STATUS_SUCCESS;
106 _SEH2_TRY
107 {
108 MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
109 }
110 _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
111 {
112 Status = _SEH2_GetExceptionCode();
113 KeBugCheck(CACHE_MANAGER);
114 } _SEH2_END;
115
116 if (NT_SUCCESS(Status))
117 {
118 Mdl->MdlFlags |= MDL_IO_PAGE_READ;
119 KeInitializeEvent(&Event, NotificationEvent, FALSE);
120 Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
121 if (Status == STATUS_PENDING)
122 {
123 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
124 Status = IoStatus.Status;
125 }
126
127 MmUnlockPages(Mdl);
128 }
129
130 IoFreeMdl(Mdl);
131
132 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
133 {
134 DPRINT1("IoPageRead failed, Status %x\n", Status);
135 return Status;
136 }
137
138 if (Size < VACB_MAPPING_GRANULARITY)
139 {
140 RtlZeroMemory((char*)Vacb->BaseAddress + Size,
141 VACB_MAPPING_GRANULARITY - Size);
142 }
143
144 return STATUS_SUCCESS;
145 }
146
147 NTSTATUS
148 NTAPI
149 CcWriteVirtualAddress (
150 PROS_VACB Vacb)
151 {
152 ULONG Size;
153 PMDL Mdl;
154 NTSTATUS Status;
155 IO_STATUS_BLOCK IoStatus;
156 KEVENT Event;
157
158 Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
159 if (Size > VACB_MAPPING_GRANULARITY)
160 {
161 Size = VACB_MAPPING_GRANULARITY;
162 }
163 //
164 // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
165 // MmGlobalPageDirectory and the real system PDE directory. What a mess...
166 //
167 {
168 ULONG i = 0;
169 do
170 {
171 MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
172 } while (++i < (Size >> PAGE_SHIFT));
173 }
174
175 Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
176 if (!Mdl)
177 {
178 return STATUS_INSUFFICIENT_RESOURCES;
179 }
180
181 Status = STATUS_SUCCESS;
182 _SEH2_TRY
183 {
184 MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
185 }
186 _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
187 {
188 Status = _SEH2_GetExceptionCode();
189 KeBugCheck(CACHE_MANAGER);
190 } _SEH2_END;
191
192 if (NT_SUCCESS(Status))
193 {
194 KeInitializeEvent(&Event, NotificationEvent, FALSE);
195 Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
196 if (Status == STATUS_PENDING)
197 {
198 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
199 Status = IoStatus.Status;
200 }
201
202 MmUnlockPages(Mdl);
203 }
204 IoFreeMdl(Mdl);
205 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
206 {
207 DPRINT1("IoPageWrite failed, Status %x\n", Status);
208 return Status;
209 }
210
211 return STATUS_SUCCESS;
212 }
213
214 NTSTATUS
215 ReadWriteOrZero(
216 _Inout_ PVOID BaseAddress,
217 _Inout_opt_ PVOID Buffer,
218 _In_ ULONG Length,
219 _In_ CC_COPY_OPERATION Operation)
220 {
221 NTSTATUS Status = STATUS_SUCCESS;
222
223 if (Operation == CcOperationZero)
224 {
225 /* Zero */
226 RtlZeroMemory(BaseAddress, Length);
227 }
228 else
229 {
230 _SEH2_TRY
231 {
232 if (Operation == CcOperationWrite)
233 RtlCopyMemory(BaseAddress, Buffer, Length);
234 else
235 RtlCopyMemory(Buffer, BaseAddress, Length);
236 }
237 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
238 {
239 Status = _SEH2_GetExceptionCode();
240 }
241 _SEH2_END;
242 }
243 return Status;
244 }
245
/*
 * Common worker for CcCopyRead, CcCopyWrite and CcZeroData.
 *
 * Walks [FileOffset, FileOffset + Length) one VACB window at a time,
 * faulting data in from disk when needed, and copies to/from Buffer
 * (Buffer is ignored and may be NULL for CcOperationZero).
 *
 * Returns FALSE only when Wait is FALSE and part of the range is not yet
 * cached. Hard failures are reported by raising (ExRaiseStatus), matching
 * the native Cc copy-interface contract. On success IoStatus receives
 * STATUS_SUCCESS and the number of bytes processed.
 */
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* Caller cannot block: test if the requested data is available
         * without doing any I/O */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            /* An invalid VACB overlapping the range would require a disk
             * read, so the no-wait request must fail */
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            /* List is offset-ordered: nothing beyond the range matters */
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    /* Handle a leading partial VACB window first, so the main loop below
     * always starts on a window boundary */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            /* Window not backed by data yet: fault it in from disk */
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        /* Release valid; mark dirty for anything but a read */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        /* ReadWriteOrZero only fails on a faulting caller buffer */
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        /* Zeroing has no user buffer to advance */
        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* Main loop: one (possibly trailing-partial) window per pass */
    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        /* Reads always need valid data; writes/zeroes only when they
         * don't overwrite the whole window */
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* If that was a successful sync read operation, let's handle read ahead */
    if (Operation == CcOperationRead && Length == 0 && Wait)
    {
        /* If file isn't random access and next read may get us cross VACB boundary,
         * schedule next read
         */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
            (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + BytesCopied - 1) / VACB_MAPPING_GRANULARITY)
        {
            CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
        }

        /* And update read history in private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}
400
/*
 * Drains the global deferred-write list (filled by CcCanIWrite and
 * CcDeferWrite), executing or releasing every queued entry that current
 * dirty-page conditions allow. Loops until no serviceable entry remains.
 */
VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* We overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, RetryForceCheckPerFile))
            {
                /* We can, so remove it from the list and stop looking for entry */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If we don't accept modified pages, stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset count as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* Nothing to write found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit.
         * NOTE: event-style entries live on the poster's stack
         * (see CcCanIWrite) and must NOT be freed here. */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the pool-allocated
         * context (allocated in CcDeferWrite with tag 'CcDw') */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}
474
/*
 * Worker executing a scheduled read ahead for FileObject.
 *
 * Reads the range recorded in the private cache map
 * (ReadAheadOffset[1] / ReadAheadLength[1]) into the cache, clears the
 * read-ahead-active flag and drops the object reference taken by
 * CcScheduleReadAhead. Failures abort silently: read ahead is best effort.
 */
VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * PrivateCacheMap might disappear in-between if the handle
     * to the file is closed (private is attached to the handle not to
     * the file), so we need to lock the master lock while we deal with
     * it. It won't disappear without attempting to lock such lock.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        /* Drop the reference taken by CcScheduleReadAhead */
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract read offset and length and release private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* Don't read past the end of the file */
    if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        goto Clear;
    }
    if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
    {
        Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
    }

    /* Next of the algorithm will lock like CcCopyData with the slight
     * difference that we don't copy data back to an user-backed buffer
     * We just bring data into Cc
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        /* Leading partial window, so the loop below starts aligned */
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            /* Fault the window in from disk */
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        /* Release valid and clean: we only populated the cache */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See previous comment about private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as unactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}
633
634 /*
635 * @unimplemented
636 */
/*
 * Decides whether a write of BytesToWrite to FileObject may proceed now
 * under the cache manager's dirty-page throttling.
 *
 * When Wait is TRUE and throttling would block, a deferred-write entry
 * living on this stack frame is queued and the routine blocks until
 * CcPostDeferredWrites signals it; in that case the return is always TRUE
 * and the caller must write immediately.
 *
 * NOTE: internal callers smuggle CC_CAN_WRITE_RETRY values through the
 * BOOLEAN 'Retrying' parameter (see CcPostDeferredWrites).
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Allow remote file if not from posted */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed max tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    /* Convert it to pages count (round up) */
    Pages = (Length + PAGE_SIZE - 1) >> PAGE_SHIFT;

    /* By default, assume limits per file won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check for limits per file? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If master is not locked, lock it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Do we have limits per file set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether they are blocking */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release master */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }

    /* So, now allow write if:
     * - Not the first try or we have no throttling yet
     * AND:
     * - We don't exceed threshold!
     * - We don't exceed what Mm can allow us to use
     *   + If we're above top, that's fine
     *   + If we're above bottom with limited modified pages, that's fine
     *   + Otherwise, throttle!
     */
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* If we can wait, we'll start the wait loop for waiting till we can
     * write for real
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql; /* shadows the outer OldIrql */

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context.
     * It lives on this stack frame; CcPostDeferredWrites signals the event
     * instead of freeing event-style entries. */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Now, we'll loop until our event is set. When it is set, it means that caller
     * can immediately write, and has to
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}
790
791 /*
792 * @implemented
793 */
794 BOOLEAN
795 NTAPI
796 CcCopyRead (
797 IN PFILE_OBJECT FileObject,
798 IN PLARGE_INTEGER FileOffset,
799 IN ULONG Length,
800 IN BOOLEAN Wait,
801 OUT PVOID Buffer,
802 OUT PIO_STATUS_BLOCK IoStatus)
803 {
804 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
805 FileObject, FileOffset->QuadPart, Length, Wait);
806
807 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
808 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
809 FileObject, FileOffset->QuadPart, Length, Wait,
810 Buffer, IoStatus);
811
812 return CcCopyData(FileObject,
813 FileOffset->QuadPart,
814 Buffer,
815 Length,
816 CcOperationRead,
817 Wait,
818 IoStatus);
819 }
820
821 /*
822 * @implemented
823 */
824 BOOLEAN
825 NTAPI
826 CcCopyWrite (
827 IN PFILE_OBJECT FileObject,
828 IN PLARGE_INTEGER FileOffset,
829 IN ULONG Length,
830 IN BOOLEAN Wait,
831 IN PVOID Buffer)
832 {
833 IO_STATUS_BLOCK IoStatus;
834
835 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
836 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
837
838 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
839 "Length %lu, Wait %u, Buffer 0x%p)\n",
840 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
841
842 return CcCopyData(FileObject,
843 FileOffset->QuadPart,
844 Buffer,
845 Length,
846 CcOperationWrite,
847 Wait,
848 &IoStatus);
849 }
850
851 /*
852 * @implemented
853 */
854 VOID
855 NTAPI
856 CcDeferWrite (
857 IN PFILE_OBJECT FileObject,
858 IN PCC_POST_DEFERRED_WRITE PostRoutine,
859 IN PVOID Context1,
860 IN PVOID Context2,
861 IN ULONG BytesToWrite,
862 IN BOOLEAN Retrying)
863 {
864 KIRQL OldIrql;
865 PDEFERRED_WRITE Context;
866 PFSRTL_COMMON_FCB_HEADER Fcb;
867
868 CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
869 FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);
870
871 /* Try to allocate a context for queueing the write operation */
872 Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
873 /* If it failed, immediately execute the operation! */
874 if (Context == NULL)
875 {
876 PostRoutine(Context1, Context2);
877 return;
878 }
879
880 Fcb = FileObject->FsContext;
881
882 /* Otherwise, initialize the context */
883 RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
884 Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
885 Context->NodeByteSize = sizeof(DEFERRED_WRITE);
886 Context->FileObject = FileObject;
887 Context->PostRoutine = PostRoutine;
888 Context->Context1 = Context1;
889 Context->Context2 = Context2;
890 Context->BytesToWrite = BytesToWrite;
891 Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
892
893 /* And queue it */
894 if (Retrying)
895 {
896 /* To the top, if that's a retry */
897 ExInterlockedInsertHeadList(&CcDeferredWrites,
898 &Context->DeferredWriteLinks,
899 &CcDeferredWriteSpinLock);
900 }
901 else
902 {
903 /* To the bottom, if that's a first time */
904 ExInterlockedInsertTailList(&CcDeferredWrites,
905 &Context->DeferredWriteLinks,
906 &CcDeferredWriteSpinLock);
907 }
908
909 /* Try to execute the posted writes */
910 CcPostDeferredWrites();
911
912 /* Schedule a lazy writer run to handle deferred writes */
913 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
914 if (!LazyWriter.ScanActive)
915 {
916 CcScheduleLazyWriteScan(FALSE);
917 }
918 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
919 }
920
921 /*
922 * @unimplemented
923 */
924 VOID
925 NTAPI
926 CcFastCopyRead (
927 IN PFILE_OBJECT FileObject,
928 IN ULONG FileOffset,
929 IN ULONG Length,
930 IN ULONG PageCount,
931 OUT PVOID Buffer,
932 OUT PIO_STATUS_BLOCK IoStatus)
933 {
934 LARGE_INTEGER LargeFileOffset;
935 BOOLEAN Success;
936
937 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
938 FileObject, FileOffset, Length, PageCount, Buffer);
939
940 DBG_UNREFERENCED_PARAMETER(PageCount);
941
942 LargeFileOffset.QuadPart = FileOffset;
943 Success = CcCopyRead(FileObject,
944 &LargeFileOffset,
945 Length,
946 TRUE,
947 Buffer,
948 IoStatus);
949 ASSERT(Success == TRUE);
950 }
951
952 /*
953 * @unimplemented
954 */
955 VOID
956 NTAPI
957 CcFastCopyWrite (
958 IN PFILE_OBJECT FileObject,
959 IN ULONG FileOffset,
960 IN ULONG Length,
961 IN PVOID Buffer)
962 {
963 LARGE_INTEGER LargeFileOffset;
964 BOOLEAN Success;
965
966 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
967 FileObject, FileOffset, Length, Buffer);
968
969 LargeFileOffset.QuadPart = FileOffset;
970 Success = CcCopyWrite(FileObject,
971 &LargeFileOffset,
972 Length,
973 TRUE,
974 Buffer);
975 ASSERT(Success == TRUE);
976 }
977
978 /*
979 * @implemented
980 */
/*
 * Zeroes the byte range [StartOffset, EndOffset) of FileObject.
 *
 * Cached files go through the common CcCopyData worker with
 * CcOperationZero. Non-cached files are written directly with a
 * stack-built MDL whose PFN array points every page at the shared
 * CcZeroPage, at most MAX_ZERO_LENGTH bytes per write.
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        /* Stack-allocated MDL sized for the largest chunk we will write */
        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            /* Clamp the chunk so offset-within-page + length never exceeds
             * MAX_ZERO_LENGTH (the size the MDL was built for) */
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            /* Point every page of the MDL at the single shared zero page */
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            /* Undo any system mapping the driver may have created */
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        /* Cached file: zero through the cache */
        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}