[NTOSKRNL] Drop the always running thread for lazy writer.
/*
 * COPYRIGHT:    See COPYING in the top level directory
 * PROJECT:      ReactOS kernel
 * FILE:         ntoskrnl/cc/copy.c
 * PURPOSE:      Implements the cache manager's copy interface
 *
 * PROGRAMMERS:  Some people?
 *               Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH (256 * 1024)

typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

extern KEVENT iLazyWriterNotify;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

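/* Allocates the single physical page backing CcZeroPage and zeroes it.
 * CcZeroData() reuses this page for every entry of the MDL it builds when
 * zeroing ranges of non-cached files. Bugchecks if no page is available. */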
VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

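/* Fills a VACB with data from the backing file: builds an MDL over the VACB
 * mapping, issues a paging read via IoPageRead() and zero-fills whatever lies
 * beyond the valid data length up to VACB_MAPPING_GRANULARITY. */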
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}

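/* Flushes the contents of a VACB back to the backing file with a synchronous
 * paging write. The MDL covers at most VACB_MAPPING_GRANULARITY bytes,
 * clamped to the section size. */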
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}

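/* Common worker for CcCopyData(): copies between the cache mapping and the
 * caller's buffer (in the direction given by Operation) or zeroes the
 * mapping. Faults on the caller's buffer are caught and returned as a status
 * instead of propagating. */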
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }
    return Status;
}

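/* Core copy routine shared by CcCopyRead, CcCopyWrite and CcZeroData. Walks
 * the file range VACB by VACB: requests each VACB, reads it in from disk when
 * it is not yet valid and the operation needs the old contents, then lets
 * ReadWriteOrZero() do the actual transfer. With Wait == FALSE it first
 * checks that every intersecting VACB is already valid and returns FALSE
 * otherwise. Errors are reported by raising, as the Cc copy interface
 * requires. */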
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }
    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}

/*
 * @unimplemented
 */
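/* Tells the FSD whether a write of BytesToWrite can be accepted right now,
 * first against the global dirty page threshold, then against the per-file
 * threshold when FSRTL_FLAG_LIMIT_MODIFIED_PAGES is set on the FCB. If this
 * returns FALSE, callers are expected to queue the write with CcDeferWrite()
 * (see the sketch before CcDeferWrite below). */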
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* We cannot write if the dirty page count is already above the threshold */
    if (CcTotalDirtyPages > CcDirtyPageThreshold)
    {
        return FALSE;
    }

    /* We cannot write if this write would bring the dirty page count above
     * the threshold.
     * XXX: Might not be accurate
     */
    if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)
    {
        return FALSE;
    }

    /* Is there a limit per file object? */
    Fcb = FileObject->FsContext;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES) ||
        SharedCacheMap->DirtyPageThreshold == 0)
    {
        /* Nope, so that's fine, allow write operation */
        return TRUE;
    }

    /* Is dirty page count above local threshold? */
    if (SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
    {
        return FALSE;
    }

    /* We cannot write if this write would bring the dirty page count above
     * the local threshold.
     * XXX: Might not be accurate
     */
    if (SharedCacheMap->DirtyPages + (BytesToWrite / PAGE_SIZE) > SharedCacheMap->DirtyPageThreshold)
    {
        return FALSE;
    }

    return TRUE;
}

/*
 * @implemented
 */
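/* Copies cached file data into the caller's buffer by delegating to
 * CcCopyData() with CcOperationRead. Returns FALSE (without copying) when
 * Wait is FALSE and the data is not already resident in the cache.
 *
 * Rough sketch of the usual cached-read pattern on the FSD side; the
 * variable names and surrounding dispatch code are illustrative only, not
 * taken from this file:
 *
 *     if (FileObject->PrivateCacheMap == NULL)
 *     {
 *         CcInitializeCacheMap(FileObject, &FileSizes, FALSE,
 *                              &Callbacks, LazyWriteContext);
 *     }
 *     if (!CcCopyRead(FileObject, &ByteOffset, Length, CanWait,
 *                     Buffer, &Irp->IoStatus))
 *     {
 *         // Data not cached and the caller cannot wait: post the request
 *         return STATUS_PENDING;
 *     }
 */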
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}

/*
 * @implemented
 */
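/* Copies the caller's buffer into the cache by delegating to CcCopyData()
 * with CcOperationWrite; the touched VACBs are released dirty so the lazy
 * writer flushes them later. The IO_STATUS_BLOCK is local because this path
 * only reports success or failure through the return value. */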
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}

/*
 * @implemented
 */
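/* Queues a write that CcCanIWrite() rejected: the request is packaged into a
 * DEFERRED_WRITE entry on CcDeferredWrites (head of the list for retries,
 * tail for first attempts) and a lazy write scan is scheduled to post it once
 * enough dirty pages have been flushed. If no memory is available for the
 * context, the post routine is simply invoked immediately.
 *
 * Typical throttled-write pattern on the FSD side; the names below are
 * illustrative only, not taken from this file:
 *
 *     if (!CcCanIWrite(FileObject, Length, CanWait, FALSE))
 *     {
 *         CcDeferWrite(FileObject, DeferredWritePostRoutine,
 *                      Context1, Context2, Length, FALSE);
 *         return STATUS_PENDING;
 *     }
 *     CcCopyWrite(FileObject, &ByteOffset, Length, CanWait, Buffer);
 */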
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    PDEFERRED_WRITE Context;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if it's the first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* FIXME: lock master */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
}

/*
 * @unimplemented
 */
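/* 32-bit offset convenience wrapper: widens FileOffset and forwards to
 * CcCopyRead() with Wait == TRUE, so it always blocks until the data is
 * cached. PageCount is currently ignored. */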
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
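/* 32-bit offset convenience wrapper around CcCopyWrite(), always called with
 * Wait == TRUE. */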
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

/*
 * @implemented
 */
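/* Blocks until iLazyWriterNotify is signalled, i.e. until the lazy write
 * activity current at the time of the call has completed. With the dedicated
 * lazy writer thread dropped (see the commit title above), the notification
 * is expected to come from the scheduled lazy write scan instead. */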
NTSTATUS
NTAPI
CcWaitForCurrentLazyWriterActivity (
    VOID)
{
    NTSTATUS Status;

    /* Lazy writer is done when its event is set */
    Status = KeWaitForSingleObject(&iLazyWriterNotify,
                                   Executive,
                                   KernelMode,
                                   FALSE,
                                   NULL);
    if (!NT_SUCCESS(Status))
    {
        return Status;
    }

    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
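/* Zeroes the byte range [StartOffset, EndOffset) of a file. For non-cached
 * files it builds MDLs whose pages all point at the shared CcZeroPage and
 * writes them out in chunks of at most MAX_ZERO_LENGTH; for cached files it
 * simply delegates to CcCopyData() with CcOperationZero. */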
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}