[NTOS:CC] CcCanIWrite(): Use BYTES_TO_PAGES(Length)
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
5 * PURPOSE: Implements the cache manager's copy interface
6 *
7 * PROGRAMMERS: Some people?
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* INCLUDES ******************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 /* GLOBALS *******************************************************************/
18
19 static PFN_NUMBER CcZeroPage = 0;
20
21 #define MAX_ZERO_LENGTH (256 * 1024)
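/* 256 kB: the largest chunk CcZeroData() zeroes per I/O on non-cached
 * files, and the cap CcCanIWrite() applies to a write size before
 * converting it to pages */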
22
23 typedef enum _CC_COPY_OPERATION
24 {
25 CcOperationRead,
26 CcOperationWrite,
27 CcOperationZero
28 } CC_COPY_OPERATION;
29
30 typedef enum _CC_CAN_WRITE_RETRY
31 {
32 FirstTry = 0,
33 RetryAllowRemote = 253,
34 RetryForceCheckPerFile = 254,
35 RetryMasterLocked = 255,
36 } CC_CAN_WRITE_RETRY;
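/* NB: these values travel through the BOOLEAN 'Retrying' parameter of
 * CcCanIWrite(): FALSE (0) is FirstTry, TRUE (1) a plain retry, and the
 * high values encode special retry modes used internally, e.g.
 * CcPostDeferredWrites() passes RetryForceCheckPerFile. See the
 * TryContext checks in CcCanIWrite() below. */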
37
38 ULONG CcRosTraceLevel = 0;
39 ULONG CcFastMdlReadWait;
40 ULONG CcFastMdlReadNotPossible;
41 ULONG CcFastReadNotPossible;
42 ULONG CcFastReadWait;
43 ULONG CcFastReadNoWait;
44 ULONG CcFastReadResourceMiss;
45
46 /* Counters:
47 * - Number of pages flushed to the disk
48 * - Number of flush operations
49 */
50 ULONG CcDataPages = 0;
51 ULONG CcDataFlushes = 0;
52
53 /* FUNCTIONS *****************************************************************/
54
55 VOID
56 NTAPI
57 MiZeroPhysicalPage (
58 IN PFN_NUMBER PageFrameIndex
59 );
60
61 VOID
62 NTAPI
63 CcInitCacheZeroPage (
64 VOID)
65 {
66 NTSTATUS Status;
67
68 MI_SET_USAGE(MI_USAGE_CACHE);
69 //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
70 Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
71 if (!NT_SUCCESS(Status))
72 {
73 DbgPrint("Can't allocate CcZeroPage.\n");
74 KeBugCheck(CACHE_MANAGER);
75 }
76 MiZeroPhysicalPage(CcZeroPage);
77 }
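/* From now on CcZeroPage names a single physical page of zeroes;
 * CcZeroData() reuses its PFN for every entry of a write MDL so that
 * ranges can be zeroed on disk without allocating a zeroed buffer */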
78
79 NTSTATUS
80 NTAPI
81 CcReadVirtualAddress (
82 PROS_VACB Vacb)
83 {
84 ULONG Size;
85 PMDL Mdl;
86 NTSTATUS Status;
87 IO_STATUS_BLOCK IoStatus;
88 KEVENT Event;
89 ULARGE_INTEGER LargeSize;
90
91 LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
92 if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
93 {
94 LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
95 }
96 Size = LargeSize.LowPart;
97
98 Size = ROUND_TO_PAGES(Size);
99 ASSERT(Size <= VACB_MAPPING_GRANULARITY);
100 ASSERT(Size > 0);
101
102 Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
103 if (!Mdl)
104 {
105 return STATUS_INSUFFICIENT_RESOURCES;
106 }
107
108 Status = STATUS_SUCCESS;
109 _SEH2_TRY
110 {
111 MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
112 }
113 _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
114 {
115 Status = _SEH2_GetExceptionCode();
116 DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
117 KeBugCheck(CACHE_MANAGER);
118 } _SEH2_END;
119
120 if (NT_SUCCESS(Status))
121 {
122 Mdl->MdlFlags |= MDL_IO_PAGE_READ;
123 KeInitializeEvent(&Event, NotificationEvent, FALSE);
124 Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
125 if (Status == STATUS_PENDING)
126 {
127 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
128 Status = IoStatus.Status;
129 }
130
131 MmUnlockPages(Mdl);
132 }
133
134 IoFreeMdl(Mdl);
135
136 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
137 {
138 DPRINT1("IoPageRead failed, Status %x\n", Status);
139 return Status;
140 }
141
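/* Pad the unread tail of the view with zeroes, so no stale data past
 * the end of the file can leak out of the cache */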
142 if (Size < VACB_MAPPING_GRANULARITY)
143 {
144 RtlZeroMemory((char*)Vacb->BaseAddress + Size,
145 VACB_MAPPING_GRANULARITY - Size);
146 }
147
148 return STATUS_SUCCESS;
149 }
150
151 NTSTATUS
152 NTAPI
153 CcWriteVirtualAddress (
154 PROS_VACB Vacb)
155 {
156 ULONG Size;
157 PMDL Mdl;
158 NTSTATUS Status;
159 IO_STATUS_BLOCK IoStatus;
160 KEVENT Event;
161 ULARGE_INTEGER LargeSize;
162
163 LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
164 if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
165 {
166 LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
167 }
168 Size = LargeSize.LowPart;
169 //
170 // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
171 // MmGlobalPageDirectory and the real system PDE directory. What a mess...
172 //
173 {
174 ULONG i = 0;
175 do
176 {
177 MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
178 } while (++i < (Size >> PAGE_SHIFT));
179 }
180
181 Size = ROUND_TO_PAGES(Size);
182 ASSERT(Size <= VACB_MAPPING_GRANULARITY);
183 ASSERT(Size > 0);
184
185 Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
186 if (!Mdl)
187 {
188 return STATUS_INSUFFICIENT_RESOURCES;
189 }
190
191 Status = STATUS_SUCCESS;
192 _SEH2_TRY
193 {
194 MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
195 }
196 _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
197 {
198 Status = _SEH2_GetExceptionCode();
199 DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
200 KeBugCheck(CACHE_MANAGER);
201 } _SEH2_END;
202
203 if (NT_SUCCESS(Status))
204 {
205 KeInitializeEvent(&Event, NotificationEvent, FALSE);
206 Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
207 if (Status == STATUS_PENDING)
208 {
209 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
210 Status = IoStatus.Status;
211 }
212
213 MmUnlockPages(Mdl);
214 }
215 IoFreeMdl(Mdl);
216 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
217 {
218 DPRINT1("IoSynchronousPageWrite failed, Status %lx\n", Status);
219 return Status;
220 }
221
222 return STATUS_SUCCESS;
223 }
224
225 NTSTATUS
226 ReadWriteOrZero(
227 _Inout_ PVOID BaseAddress,
228 _Inout_opt_ PVOID Buffer,
229 _In_ ULONG Length,
230 _In_ CC_COPY_OPERATION Operation)
231 {
232 NTSTATUS Status = STATUS_SUCCESS;
233
234 if (Operation == CcOperationZero)
235 {
236 /* Zero */
237 RtlZeroMemory(BaseAddress, Length);
238 }
239 else
240 {
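/* Buffer may be a user-mode address: wrap the copy in SEH so that a
 * bad buffer surfaces as a status code (the caller then raises
 * STATUS_INVALID_USER_BUFFER) instead of taking the system down */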
241 _SEH2_TRY
242 {
243 if (Operation == CcOperationWrite)
244 RtlCopyMemory(BaseAddress, Buffer, Length);
245 else
246 RtlCopyMemory(Buffer, BaseAddress, Length);
247 }
248 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
249 {
250 Status = _SEH2_GetExceptionCode();
251 }
252 _SEH2_END;
253 }
254 return Status;
255 }
256
257 BOOLEAN
258 CcCopyData (
259 _In_ PFILE_OBJECT FileObject,
260 _In_ LONGLONG FileOffset,
261 _Inout_ PVOID Buffer,
262 _In_ LONGLONG Length,
263 _In_ CC_COPY_OPERATION Operation,
264 _In_ BOOLEAN Wait,
265 _Out_ PIO_STATUS_BLOCK IoStatus)
266 {
267 NTSTATUS Status;
268 LONGLONG CurrentOffset;
269 ULONG BytesCopied;
270 KIRQL OldIrql;
271 PROS_SHARED_CACHE_MAP SharedCacheMap;
272 PLIST_ENTRY ListEntry;
273 PROS_VACB Vacb;
274 ULONG PartialLength;
275 PVOID BaseAddress;
276 BOOLEAN Valid;
277 PPRIVATE_CACHE_MAP PrivateCacheMap;
278
279 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
280 PrivateCacheMap = FileObject->PrivateCacheMap;
281 CurrentOffset = FileOffset;
282 BytesCopied = 0;
283
284 if (!Wait)
285 {
286 /* Test whether the requested data is available */
287 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
288 /* FIXME: this loop doesn't take into account areas that don't have
289 * a VACB in the list yet */
290 ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
291 while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
292 {
293 Vacb = CONTAINING_RECORD(ListEntry,
294 ROS_VACB,
295 CacheMapVacbListEntry);
296 ListEntry = ListEntry->Flink;
297 if (!Vacb->Valid &&
298 DoRangesIntersect(Vacb->FileOffset.QuadPart,
299 VACB_MAPPING_GRANULARITY,
300 CurrentOffset, Length))
301 {
302 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
303 /* data not available */
304 return FALSE;
305 }
306 if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
307 break;
308 }
309 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
310 }
311
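/* First handle the head of the request, up to the next VACB boundary */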
312 PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
313 if (PartialLength != 0)
314 {
315 PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
316 Status = CcRosRequestVacb(SharedCacheMap,
317 ROUND_DOWN(CurrentOffset,
318 VACB_MAPPING_GRANULARITY),
319 &BaseAddress,
320 &Valid,
321 &Vacb);
322 if (!NT_SUCCESS(Status))
323 ExRaiseStatus(Status);
324 if (!Valid)
325 {
326 Status = CcReadVirtualAddress(Vacb);
327 if (!NT_SUCCESS(Status))
328 {
329 CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
330 ExRaiseStatus(Status);
331 }
332 }
333 Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
334 Buffer,
335 PartialLength,
336 Operation);
337
338 CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);
339
340 if (!NT_SUCCESS(Status))
341 ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
342
343 Length -= PartialLength;
344 CurrentOffset += PartialLength;
345 BytesCopied += PartialLength;
346
347 if (Operation != CcOperationZero)
348 Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
349 }
350
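/* The unaligned head (if any) is done; now process the rest in
 * VACB-aligned chunks of up to VACB_MAPPING_GRANULARITY bytes */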
351 while (Length > 0)
352 {
353 ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
354 PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
355 Status = CcRosRequestVacb(SharedCacheMap,
356 CurrentOffset,
357 &BaseAddress,
358 &Valid,
359 &Vacb);
360 if (!NT_SUCCESS(Status))
361 ExRaiseStatus(Status);
362 if (!Valid &&
363 (Operation == CcOperationRead ||
364 PartialLength < VACB_MAPPING_GRANULARITY))
365 {
366 Status = CcReadVirtualAddress(Vacb);
367 if (!NT_SUCCESS(Status))
368 {
369 CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
370 ExRaiseStatus(Status);
371 }
372 }
373 Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);
374
375 CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);
376
377 if (!NT_SUCCESS(Status))
378 ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
379
380 Length -= PartialLength;
381 CurrentOffset += PartialLength;
382 BytesCopied += PartialLength;
383
384 if (Operation != CcOperationZero)
385 Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
386 }
387
388 /* If that was a successful sync read operation, let's handle read ahead */
389 if (Operation == CcOperationRead && Length == 0 && Wait)
390 {
391 /* If the file isn't random access and the next read may take us
392 * across a VACB boundary, schedule the next read ahead
393 */
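/* Worked example, assuming the usual 256 kB VACB_MAPPING_GRANULARITY:
 * a 64 kB read ending at CurrentOffset = 224 kB gives
 * (224k - 1) / 256k == 0 but (224k + 64k - 1) / 256k == 1, i.e. a
 * similar next read would cross into the next VACB, so scheduling
 * read ahead now is worthwhile */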
394 if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
395 (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + BytesCopied - 1) / VACB_MAPPING_GRANULARITY)
396 {
397 CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
398 }
399
400 /* And update read history in private cache map */
401 PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
402 PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
403 PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
404 PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
405 }
406
407 IoStatus->Status = STATUS_SUCCESS;
408 IoStatus->Information = BytesCopied;
409 return TRUE;
410 }
411
412 VOID
413 CcPostDeferredWrites(VOID)
414 {
415 ULONG WrittenBytes;
416
417 /* We'll try to write as much as we can */
418 WrittenBytes = 0;
419 while (TRUE)
420 {
421 KIRQL OldIrql;
422 PLIST_ENTRY ListEntry;
423 PDEFERRED_WRITE DeferredWrite;
424
425 DeferredWrite = NULL;
426
427 /* Lock our deferred writes list */
428 KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
429 for (ListEntry = CcDeferredWrites.Flink;
430 ListEntry != &CcDeferredWrites;
431 ListEntry = ListEntry->Flink)
432 {
433 /* Extract an entry */
434 DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
435
436 /* Compute the modified bytes, based on what we already wrote */
437 WrittenBytes += DeferredWrite->BytesToWrite;
438 /* We overflowed, give up */
439 if (WrittenBytes < DeferredWrite->BytesToWrite)
440 {
441 DeferredWrite = NULL;
442 break;
443 }
444
445 /* Check we can write */
446 if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, RetryForceCheckPerFile))
447 {
448 /* We can, so remove it from the list and stop looking for more entries */
449 RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
450 break;
451 }
452
453 /* If this entry isn't subject to the per-file modified-page limit, give up: deferred writes must stay in order */
454 if (!DeferredWrite->LimitModifiedPages)
455 {
456 DeferredWrite = NULL;
457 break;
458 }
459
460 /* Take this entry's bytes back out of the count, as it won't be written yet */
461 WrittenBytes -= DeferredWrite->BytesToWrite;
462 DeferredWrite = NULL;
463 }
464 KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);
465
466 /* Nothing to write found, give up */
467 if (DeferredWrite == NULL)
468 {
469 break;
470 }
471
472 /* If we have an event, set it and quit */
473 if (DeferredWrite->Event)
474 {
475 KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
476 }
477 /* Otherwise, call the write routine and free the context */
478 else
479 {
480 DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
481 ExFreePoolWithTag(DeferredWrite, 'CcDw');
482 }
483 }
484 }
485
486 VOID
487 CcPerformReadAhead(
488 IN PFILE_OBJECT FileObject)
489 {
490 NTSTATUS Status;
491 LONGLONG CurrentOffset;
492 KIRQL OldIrql;
493 PROS_SHARED_CACHE_MAP SharedCacheMap;
494 PROS_VACB Vacb;
495 ULONG PartialLength;
496 PVOID BaseAddress;
497 BOOLEAN Valid;
498 ULONG Length;
499 PPRIVATE_CACHE_MAP PrivateCacheMap;
500 BOOLEAN Locked;
501
502 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
503
504 /* Critical:
505 * The PrivateCacheMap can disappear from under us if the handle
506 * to the file is closed (the private map is attached to the handle,
507 * not to the file), so we must hold the master lock while we deal
508 * with it: the map cannot go away without that lock being acquired.
509 */
510 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
511 PrivateCacheMap = FileObject->PrivateCacheMap;
512 /* If the handle was closed since the read ahead was scheduled, just quit */
513 if (PrivateCacheMap == NULL)
514 {
515 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
516 ObDereferenceObject(FileObject);
517 return;
518 }
519 /* Otherwise, extract read offset and length and release private map */
520 else
521 {
522 KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
523 CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
524 Length = PrivateCacheMap->ReadAheadLength[1];
525 KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
526 }
527 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
528
529 /* Time to go! */
530 DPRINT("Doing ReadAhead for %p\n", FileObject);
531 /* Lock the file, first */
532 if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
533 {
534 Locked = FALSE;
535 goto Clear;
536 }
537
538 /* Remember it's locked */
539 Locked = TRUE;
540
541 /* Don't read past the end of the file */
542 if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
543 {
544 goto Clear;
545 }
546 if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
547 {
548 Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
549 }
550
551 /* The rest of the algorithm works like CcCopyData, with the slight
552 * difference that we don't copy data back to a user-backed buffer:
553 * we just bring the data into Cc
554 */
555 PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
556 if (PartialLength != 0)
557 {
558 PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
559 Status = CcRosRequestVacb(SharedCacheMap,
560 ROUND_DOWN(CurrentOffset,
561 VACB_MAPPING_GRANULARITY),
562 &BaseAddress,
563 &Valid,
564 &Vacb);
565 if (!NT_SUCCESS(Status))
566 {
567 DPRINT1("Failed to request VACB: %lx!\n", Status);
568 goto Clear;
569 }
570
571 if (!Valid)
572 {
573 Status = CcReadVirtualAddress(Vacb);
574 if (!NT_SUCCESS(Status))
575 {
576 CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
577 DPRINT1("Failed to read data: %lx!\n", Status);
578 goto Clear;
579 }
580 }
581
582 CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
583
584 Length -= PartialLength;
585 CurrentOffset += PartialLength;
586 }
587
588 while (Length > 0)
589 {
590 ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
591 PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
592 Status = CcRosRequestVacb(SharedCacheMap,
593 CurrentOffset,
594 &BaseAddress,
595 &Valid,
596 &Vacb);
597 if (!NT_SUCCESS(Status))
598 {
599 DPRINT1("Failed to request VACB: %lx!\n", Status);
600 goto Clear;
601 }
602
603 if (!Valid)
604 {
605 Status = CcReadVirtualAddress(Vacb);
606 if (!NT_SUCCESS(Status))
607 {
608 CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
609 DPRINT1("Failed to read data: %lx!\n", Status);
610 goto Clear;
611 }
612 }
613
614 CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
615
616 Length -= PartialLength;
617 CurrentOffset += PartialLength;
618 }
619
620 Clear:
621 /* See previous comment about private cache map */
622 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
623 PrivateCacheMap = FileObject->PrivateCacheMap;
624 if (PrivateCacheMap != NULL)
625 {
626 /* Mark read ahead as inactive */
627 KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
628 InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
629 KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
630 }
631 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
632
633 /* If file was locked, release it */
634 if (Locked)
635 {
636 SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
637 }
638
639 /* And drop our extra reference (See: CcScheduleReadAhead) */
640 ObDereferenceObject(FileObject);
641
642 return;
643 }
644
645 /*
646 * @unimplemented
647 */
648 BOOLEAN
649 NTAPI
650 CcCanIWrite (
651 IN PFILE_OBJECT FileObject,
652 IN ULONG BytesToWrite,
653 IN BOOLEAN Wait,
654 IN BOOLEAN Retrying)
655 {
656 KIRQL OldIrql;
657 KEVENT WaitEvent;
658 ULONG Length, Pages;
659 BOOLEAN PerFileDefer;
660 DEFERRED_WRITE Context;
661 PFSRTL_COMMON_FCB_HEADER Fcb;
662 CC_CAN_WRITE_RETRY TryContext;
663 PROS_SHARED_CACHE_MAP SharedCacheMap;
664
665 CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
666 FileObject, BytesToWrite, Wait, Retrying);
667
668 /* Write through is always OK */
669 if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
670 {
671 return TRUE;
672 }
673
674 TryContext = Retrying;
675 /* Allow remote files immediately, unless this retry comes from the posted-writes path */
676 if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
677 {
678 return TRUE;
679 }
680
681 /* Don't exceed max tolerated size */
682 Length = MAX_ZERO_LENGTH;
683 if (BytesToWrite < MAX_ZERO_LENGTH)
684 {
685 Length = BytesToWrite;
686 }
687
688 Pages = BYTES_TO_PAGES(Length);
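/* BYTES_TO_PAGES() rounds the byte count up to whole pages; the usual
 * NT definition is equivalent to:
 *   (Length >> PAGE_SHIFT) + ((Length & (PAGE_SIZE - 1)) != 0)
 * e.g. with 4 kB pages BYTES_TO_PAGES(0x1001) == 2: a write barely
 * spilling into a second page is charged for both pages when compared
 * against the dirty page thresholds below */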
689
690 /* By default, assume limits per file won't be hit */
691 PerFileDefer = FALSE;
692 Fcb = FileObject->FsContext;
693 /* Do we have to check for limits per file? */
694 if (TryContext >= RetryForceCheckPerFile ||
695 BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
696 {
697 /* If master is not locked, lock it now */
698 if (TryContext != RetryMasterLocked)
699 {
700 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
701 }
702
703 /* Let's not assume the file is cached... */
704 if (FileObject->SectionObjectPointer != NULL &&
705 FileObject->SectionObjectPointer->SharedCacheMap != NULL)
706 {
707 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
708 /* Do we have limits per file set? */
709 if (SharedCacheMap->DirtyPageThreshold != 0 &&
710 SharedCacheMap->DirtyPages != 0)
711 {
712 /* Yes, check whether they are blocking */
713 if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
714 {
715 PerFileDefer = TRUE;
716 }
717 }
718 }
719
720 /* And don't forget to release master */
721 if (TryContext != RetryMasterLocked)
722 {
723 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
724 }
725 }
726
727 /* So, now allow the write if:
728 * - This isn't a first try, or nothing is queued for deferral yet
729 * AND:
730 * - We don't exceed the global dirty page threshold
731 * - We don't hit the per-file limit computed above
732 * - We don't exceed what Mm can afford to give us:
733 * + Above the top limit, or above the bottom limit with few
734 * modified pages system-wide, that's fine; otherwise, throttle!
735 */
736 if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
737 CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
738 (MmAvailablePages > MmThrottleTop ||
739 (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
740 !PerFileDefer)
741 {
742 return TRUE;
743 }
744
745 /* If the caller can't wait, fail the request now; otherwise we'll
746 * enter the wait loop below until the write can go through for real
747 */
748 if (!Wait)
749 {
750 return FALSE;
751 }
752
753 /* Otherwise, if there are no deferred writes yet, start the lazy writer */
754 if (IsListEmpty(&CcDeferredWrites))
755 {
756 KIRQL OldIrql;
757
758 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
759 CcScheduleLazyWriteScan(TRUE);
760 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
761 }
762
763 /* Initialize our wait event */
764 KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);
765
766 /* And prepare a dummy context */
767 Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
768 Context.NodeByteSize = sizeof(DEFERRED_WRITE);
769 Context.FileObject = FileObject;
770 Context.BytesToWrite = BytesToWrite;
771 Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
772 Context.Event = &WaitEvent;
773
774 /* And queue it */
775 if (Retrying)
776 {
777 /* To the top, if that's a retry */
778 ExInterlockedInsertHeadList(&CcDeferredWrites,
779 &Context.DeferredWriteLinks,
780 &CcDeferredWriteSpinLock);
781 }
782 else
783 {
784 /* To the bottom, if that's a first time */
785 ExInterlockedInsertTailList(&CcDeferredWrites,
786 &Context.DeferredWriteLinks,
787 &CcDeferredWriteSpinLock);
788 }
789
790 DPRINT1("Actively deferring write for: %p\n", FileObject);
791 /* Now, loop until our event is set. Once it is, it means the caller
792 * can, and must, perform the write immediately
793 */
794 do
795 {
796 CcPostDeferredWrites();
797 } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);
798
799 return TRUE;
800 }
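/* A minimal usage sketch (the caller, MyFsdWrite/MyFsdPostWrite and its
 * context are hypothetical, not part of this file): an FSD's cached
 * write path asks CcCanIWrite() whether the write may proceed, and if
 * not posts it with CcDeferWrite() instead of blocking the thread:
 *
 *   if (!CcCanIWrite(FileObject, Length, CanWait, FALSE))
 *   {
 *       // Cc will call MyFsdPostWrite(Context, NULL) back once enough
 *       // dirty pages have been flushed (see CcPostDeferredWrites)
 *       CcDeferWrite(FileObject, MyFsdPostWrite, Context, NULL, Length, FALSE);
 *       return STATUS_PENDING;
 *   }
 *   CcCopyWrite(FileObject, &ByteOffset, Length, CanWait, Buffer);
 */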
801
802 /*
803 * @implemented
804 */
805 BOOLEAN
806 NTAPI
807 CcCopyRead (
808 IN PFILE_OBJECT FileObject,
809 IN PLARGE_INTEGER FileOffset,
810 IN ULONG Length,
811 IN BOOLEAN Wait,
812 OUT PVOID Buffer,
813 OUT PIO_STATUS_BLOCK IoStatus)
814 {
815 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
816 FileObject, FileOffset->QuadPart, Length, Wait);
817
818 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
819 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
820 FileObject, FileOffset->QuadPart, Length, Wait,
821 Buffer, IoStatus);
822
823 return CcCopyData(FileObject,
824 FileOffset->QuadPart,
825 Buffer,
826 Length,
827 CcOperationRead,
828 Wait,
829 IoStatus);
830 }
831
832 /*
833 * @implemented
834 */
835 BOOLEAN
836 NTAPI
837 CcCopyWrite (
838 IN PFILE_OBJECT FileObject,
839 IN PLARGE_INTEGER FileOffset,
840 IN ULONG Length,
841 IN BOOLEAN Wait,
842 IN PVOID Buffer)
843 {
844 IO_STATUS_BLOCK IoStatus;
845
846 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
847 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
848
849 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
850 "Length %lu, Wait %u, Buffer 0x%p)\n",
851 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
852
853 return CcCopyData(FileObject,
854 FileOffset->QuadPart,
855 Buffer,
856 Length,
857 CcOperationWrite,
858 Wait,
859 &IoStatus);
860 }
861
862 /*
863 * @implemented
864 */
865 VOID
866 NTAPI
867 CcDeferWrite (
868 IN PFILE_OBJECT FileObject,
869 IN PCC_POST_DEFERRED_WRITE PostRoutine,
870 IN PVOID Context1,
871 IN PVOID Context2,
872 IN ULONG BytesToWrite,
873 IN BOOLEAN Retrying)
874 {
875 KIRQL OldIrql;
876 PDEFERRED_WRITE Context;
877 PFSRTL_COMMON_FCB_HEADER Fcb;
878
879 CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
880 FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);
881
882 /* Try to allocate a context for queueing the write operation */
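/* NB: contexts queued here are freed with the same 'CcDw' tag by
 * CcPostDeferredWrites() after the post routine has run */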
883 Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
884 /* If it failed, immediately execute the operation! */
885 if (Context == NULL)
886 {
887 PostRoutine(Context1, Context2);
888 return;
889 }
890
891 Fcb = FileObject->FsContext;
892
893 /* Otherwise, initialize the context */
894 RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
895 Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
896 Context->NodeByteSize = sizeof(DEFERRED_WRITE);
897 Context->FileObject = FileObject;
898 Context->PostRoutine = PostRoutine;
899 Context->Context1 = Context1;
900 Context->Context2 = Context2;
901 Context->BytesToWrite = BytesToWrite;
902 Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
903
904 /* And queue it */
905 if (Retrying)
906 {
907 /* To the top, if that's a retry */
908 ExInterlockedInsertHeadList(&CcDeferredWrites,
909 &Context->DeferredWriteLinks,
910 &CcDeferredWriteSpinLock);
911 }
912 else
913 {
914 /* To the bottom, if that's a first time */
915 ExInterlockedInsertTailList(&CcDeferredWrites,
916 &Context->DeferredWriteLinks,
917 &CcDeferredWriteSpinLock);
918 }
919
920 /* Try to execute the posted writes */
921 CcPostDeferredWrites();
922
923 /* Schedule a lazy writer run to handle deferred writes */
924 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
925 if (!LazyWriter.ScanActive)
926 {
927 CcScheduleLazyWriteScan(FALSE);
928 }
929 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
930 }
931
932 /*
933 * @unimplemented
934 */
935 VOID
936 NTAPI
937 CcFastCopyRead (
938 IN PFILE_OBJECT FileObject,
939 IN ULONG FileOffset,
940 IN ULONG Length,
941 IN ULONG PageCount,
942 OUT PVOID Buffer,
943 OUT PIO_STATUS_BLOCK IoStatus)
944 {
945 LARGE_INTEGER LargeFileOffset;
946 BOOLEAN Success;
947
948 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
949 FileObject, FileOffset, Length, PageCount, Buffer);
950
951 DBG_UNREFERENCED_PARAMETER(PageCount);
952
953 LargeFileOffset.QuadPart = FileOffset;
954 Success = CcCopyRead(FileObject,
955 &LargeFileOffset,
956 Length,
957 TRUE,
958 Buffer,
959 IoStatus);
960 ASSERT(Success == TRUE);
961 }
962
963 /*
964 * @unimplemented
965 */
966 VOID
967 NTAPI
968 CcFastCopyWrite (
969 IN PFILE_OBJECT FileObject,
970 IN ULONG FileOffset,
971 IN ULONG Length,
972 IN PVOID Buffer)
973 {
974 LARGE_INTEGER LargeFileOffset;
975 BOOLEAN Success;
976
977 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
978 FileObject, FileOffset, Length, Buffer);
979
980 LargeFileOffset.QuadPart = FileOffset;
981 Success = CcCopyWrite(FileObject,
982 &LargeFileOffset,
983 Length,
984 TRUE,
985 Buffer);
986 ASSERT(Success == TRUE);
987 }
988
989 /*
990 * @implemented
991 */
992 BOOLEAN
993 NTAPI
994 CcZeroData (
995 IN PFILE_OBJECT FileObject,
996 IN PLARGE_INTEGER StartOffset,
997 IN PLARGE_INTEGER EndOffset,
998 IN BOOLEAN Wait)
999 {
1000 NTSTATUS Status;
1001 LARGE_INTEGER WriteOffset;
1002 LONGLONG Length;
1003 ULONG CurrentLength;
1004 PMDL Mdl;
1005 ULONG i;
1006 IO_STATUS_BLOCK Iosb;
1007 KEVENT Event;
1008
1009 CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
1010 FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);
1011
1012 DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
1013 "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
1014 Wait);
1015
1016 Length = EndOffset->QuadPart - StartOffset->QuadPart;
1017 WriteOffset.QuadPart = StartOffset->QuadPart;
1018
1019 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
1020 {
1021 /* File is not cached */
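/* Build a stack-allocated MDL whose PFN array entries all point at the
 * single CcZeroPage, so up to MAX_ZERO_LENGTH bytes of zeroes go to
 * disk per I/O without allocating or zero-filling any buffer */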
1022
1023 Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));
1024
1025 while (Length > 0)
1026 {
1027 if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
1028 {
1029 CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
1030 }
1031 else
1032 {
1033 CurrentLength = Length;
1034 }
1035 MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
1036 Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
1037 for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
1038 {
1039 ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
1040 }
1041 KeInitializeEvent(&Event, NotificationEvent, FALSE);
1042 Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
1043 if (Status == STATUS_PENDING)
1044 {
1045 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
1046 Status = Iosb.Status;
1047 }
1048 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
1049 {
1050 MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
1051 }
1052 if (!NT_SUCCESS(Status))
1053 {
1054 return FALSE;
1055 }
1056 WriteOffset.QuadPart += CurrentLength;
1057 Length -= CurrentLength;
1058 }
1059 }
1060 else
1061 {
1062 IO_STATUS_BLOCK IoStatus;
1063
1064 return CcCopyData(FileObject,
1065 WriteOffset.QuadPart,
1066 NULL,
1067 Length,
1068 CcOperationZero,
1069 Wait,
1070 &IoStatus);
1071 }
1072
1073 return TRUE;
1074 }