[NTOSKRNL] Simplify (and speedup!) CcCanIWrite() using the dirty pages count in cache...
/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/cc/copy.c
 * PURPOSE:     Implements the cache manager's copy interface
 *
 * PROGRAMMERS: Some people?
 *              Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH (256 * 1024)

typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

extern KEVENT iLazyWriterNotify;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

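/* Allocate and zero the single physical page that backs all cache-manager
 * zeroing operations (CcZeroPage). Called once during Cc initialization;
 * failure to obtain the page is fatal and bugchecks with CACHE_MANAGER. */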
VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

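/* Page in the file data that backs the given VACB: an MDL is built for the
 * VACB mapping, the pages are locked, and IoPageRead is issued at the VACB's
 * file offset. Any tail of the mapping beyond the section size is zeroed so
 * the whole VACB_MAPPING_GRANULARITY window is defined. */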
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}

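/* Flush the given VACB back to the file: the dirty flag is cleared up front,
 * the VACB pages are locked through an MDL and written out with
 * IoSynchronousPageWrite. On failure the VACB is marked dirty again so the
 * write can be retried later. */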
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Vacb->Dirty = FALSE;
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %x\n", Status);
        Vacb->Dirty = TRUE;
        return Status;
    }

    return STATUS_SUCCESS;
}

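/* Copy helper shared by the Cc copy routines: depending on Operation, zero
 * Length bytes of the cache view, copy from the caller's buffer into the view
 * (write), or copy from the view into the caller's buffer (read). Faults
 * while touching the caller's buffer are caught and returned as a status. */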
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }
    return Status;
}

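/* Core worker for CcCopyRead, CcCopyWrite and CcZeroData: walks the file range
 * one VACB (VACB_MAPPING_GRANULARITY bytes) at a time, requesting each VACB
 * from the shared cache map, paging it in when needed, and then reading,
 * writing or zeroing through the mapping. With Wait == FALSE it first checks
 * that every intersecting VACB is already valid and returns FALSE otherwise.
 * I/O errors are reported by raising the status. */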
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }
    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}

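/* Throttling check used before cached writes: the write is refused when the
 * global dirty page count (CcTotalDirtyPages) is already above
 * CcDirtyPageThreshold, or when adding the pages of this write would push it
 * above. The same two checks are then applied against the per-file threshold
 * when the FCB sets FSRTL_FLAG_LIMIT_MODIFIED_PAGES and the shared cache map
 * carries a non-zero DirtyPageThreshold. */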
/*
 * @unimplemented
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* We cannot write if the dirty page count is already above the threshold */
    if (CcTotalDirtyPages > CcDirtyPageThreshold)
    {
        return FALSE;
    }

    /* We cannot write if this write would bring the dirty page count above
     * the threshold.
     * XXX: Might not be accurate
     */
    if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)
    {
        return FALSE;
    }

    /* Is there a limit per file object? */
    Fcb = FileObject->FsContext;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES) ||
        SharedCacheMap->DirtyPageThreshold == 0)
    {
        /* Nope, so that's fine, allow write operation */
        return TRUE;
    }

    /* Is dirty page count above local threshold? */
    if (SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
    {
        return FALSE;
    }

    /* We cannot write if this write would bring the dirty page count above
     * the local threshold.
     * XXX: Might not be accurate
     */
    if (SharedCacheMap->DirtyPages + (BytesToWrite / PAGE_SIZE) > SharedCacheMap->DirtyPageThreshold)
    {
        return FALSE;
    }

    return TRUE;
}

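/* Public cached-read entry point: a thin wrapper that forwards to CcCopyData
 * with CcOperationRead. */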
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}

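/* Public cached-write entry point: symmetric to CcCopyRead, forwarding to
 * CcCopyData with CcOperationWrite. The I/O status block is local because
 * this API only reports success or failure through its return value. */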
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}

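/* Queue a write that could not be performed immediately (typically after
 * CcCanIWrite refused it) so it can be retried later: a
 * ROS_DEFERRED_WRITE_CONTEXT is allocated and linked into the CcDeferredWrites
 * list (head for retries, tail for first attempts). If the allocation fails,
 * the post routine is simply invoked right away. */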
/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    PROS_DEFERRED_WRITE_CONTEXT Context;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(ROS_DEFERRED_WRITE_CONTEXT), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    /* Otherwise, initialize the context */
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->Retrying = Retrying;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->CcDeferredWritesEntry,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->CcDeferredWritesEntry,
                                    &CcDeferredWriteSpinLock);
    }
}

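/* 32-bit offset variants used by the fast I/O path: CcFastCopyRead and
 * CcFastCopyWrite below just widen the offset to a LARGE_INTEGER and call the
 * regular copy routines with Wait == TRUE, asserting that the copy succeeded. */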
/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

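/* Block the caller until the lazy writer signals iLazyWriterNotify, i.e.
 * until the current round of lazy-write activity has completed. */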
/*
 * @implemented
 */
NTSTATUS
NTAPI
CcWaitForCurrentLazyWriterActivity (
    VOID)
{
    NTSTATUS Status;

    /* Lazy writer is done when its event is set */
    Status = KeWaitForSingleObject(&iLazyWriterNotify,
                                   Executive,
                                   KernelMode,
                                   FALSE,
                                   NULL);
    if (!NT_SUCCESS(Status))
    {
        return Status;
    }

    return STATUS_SUCCESS;
}

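/* Zero a byte range of a file. For cached files this goes through CcCopyData
 * with CcOperationZero; for non-cached files an on-stack MDL whose page array
 * points repeatedly at CcZeroPage is written out in chunks of at most
 * MAX_ZERO_LENGTH bytes with IoSynchronousPageWrite. */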
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}