/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

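/* Each non-cached zeroing pass in CcZeroData writes at most MAX_ZERO_LENGTH
 * bytes per I/O, which also bounds the size of its stack-allocated MDL */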
#define MAX_ZERO_LENGTH    (256 * 1024)

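/* Selects the direction of ReadWriteOrZero and CcCopyData: copy cache to
 * caller (read), caller to cache (write), or fill the cache with zeroes */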
typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

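/* Brings the file data backing the given VACB into the cache: locks the
 * VACB's pages in an MDL, issues a paging read through IoPageRead, and
 * zero-fills the tail of the mapping that lies beyond the section size */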
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}

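/* Flushes the data mapped by the given VACB back to the file: touches each
 * page to work around PDE synchronization (see the comment below), locks the
 * pages in an MDL, and writes them out via IoSynchronousPageWrite */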
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}

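/* Moves Length bytes between the cache mapping at BaseAddress and the
 * caller's buffer, or zeroes the mapping. Copies are guarded by SEH so that
 * a fault on a caller-supplied buffer comes back as a status code instead
 * of taking the system down */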
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }
    return Status;
}

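/* Common worker for CcCopyRead, CcCopyWrite and CcZeroData. The transfer is
 * split on VACB_MAPPING_GRANULARITY boundaries: an unaligned head chunk
 * first, then whole VACB-sized chunks. Each chunk's VACB is requested,
 * paged in when its current contents still matter (always for reads, and
 * for partial chunks that must preserve surrounding data), operated on,
 * then released. If Wait is FALSE, the request fails without blocking when
 * any needed VACB is still invalid. Failures are raised to the caller; a
 * successful synchronous read also schedules read ahead */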
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* Test whether the requested data is already available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* Data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* If that was a successful sync read operation, handle read ahead */
    if (Operation == CcOperationRead && Length == 0 && Wait)
    {
        /* If the file isn't random access, schedule the next read */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS))
        {
            CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
        }

        /* And update the read history in the private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}

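/* Walks the deferred write list and executes every queued write that the
 * current dirty page situation allows, either by signaling the waiter's
 * event (see CcCanIWrite) or by invoking the posted write routine */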
VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* We overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check whether we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, TRUE))
            {
                /* We can, so remove it from the list and stop looking for entries */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* This entry isn't subject to a per-file dirty page limit, so the
             * refusal is global and no later entry can succeed either: stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset the count, as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* No writable entry found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}

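/* Performs the read ahead previously scheduled for this file (see
 * CcScheduleReadAhead): brings the recorded byte range into the cache,
 * chunked on VACB boundaries exactly like CcCopyData, but without copying
 * anything out to a caller's buffer */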
VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * The private cache map might disappear in-between if the handle to the
     * file is closed (it is attached to the handle, not to the file), so we
     * must hold the master lock while we deal with it: it cannot go away
     * without that lock being acquired first.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract the read offset and length and release the private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* The rest of the algorithm works like CcCopyData, with the slight
     * difference that we don't copy data back to a user-backed buffer:
     * we just bring the data into Cc.
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See previous comment about the private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as inactive (clear the read-ahead-active bit) */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, 0xFFFEFFFF);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If the file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}

/*
 * @unimplemented
 */
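/* Decides whether a write of BytesToWrite bytes may proceed right now.
 * Write-through files always may; otherwise the global and (if enabled)
 * the per-file dirty page thresholds are checked. When the caller can
 * wait, the request is queued as a deferred write and this routine blocks
 * until the write becomes possible */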
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KEVENT WaitEvent;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    /* Grab the FCB now: it's needed both for the per-file threshold check
     * and for the deferred write context at CanIWait */
    Fcb = FileObject->FsContext;

    /* We cannot write if the dirty page count is above the threshold */
    if (CcTotalDirtyPages > CcDirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    /* We cannot write if this write would bring the dirty page count above
     * the threshold
     * XXX: Might not be accurate
     */
    if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    /* Is there a limit per file object? */
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES) ||
        SharedCacheMap->DirtyPageThreshold == 0)
    {
        /* Nope, so that's fine, allow the write operation */
        return TRUE;
    }

    /* Is the dirty page count above the local threshold? */
    if (SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    /* We cannot write if this write would bring the dirty page count above
     * the local threshold
     * XXX: Might not be accurate
     */
    if (SharedCacheMap->DirtyPages + (BytesToWrite / PAGE_SIZE) > SharedCacheMap->DirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    return TRUE;

CanIWait:
    /* If we reached this point, the caller cannot write right now.
     * If it cannot wait either, fail and deny the write.
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if it's the first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Now, loop until our event is set. Once it is set, the caller
     * can, and must, write immediately.
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}

/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if it's the first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

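/* Illustrative sketch (not part of this file): how a file system driver
 * would typically combine CcCanIWrite and CcDeferWrite on its write path.
 * The FSD-side names (MyFsdPostWrite, MyFsdRetryWrite, MyWriteContext) are
 * hypothetical.
 *
 *     VOID NTAPI MyFsdPostWrite(PVOID Context1, PVOID Context2)
 *     {
 *         // Re-issue the write now that Cc signalled it can be handled
 *         MyFsdRetryWrite(Context1, Context2);
 *     }
 *
 *     // In the write dispatch routine:
 *     if (!CcCanIWrite(FileObject, Length, CanWait, FALSE))
 *     {
 *         // Too many dirty pages: queue the write and return pending
 *         CcDeferWrite(FileObject, MyFsdPostWrite, MyWriteContext, NULL,
 *                      Length, FALSE);
 *         return STATUS_PENDING;
 *     }
 *     // Otherwise proceed, e.g. with
 *     // CcCopyWrite(FileObject, &Offset, Length, CanWait, Buffer);
 */
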
/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

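        /* Each pass builds an MDL whose every page frame points at the shared
         * CcZeroPage, so one physical page of zeroes can be written out
         * repeatedly without allocating a zero-filled buffer */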
        while (Length > 0)
        {
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}