[SHELL/EXPERIMENTS]
[reactos.git] / ntoskrnl / cc / copy.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
5 * PURPOSE: Implements the cache manager's copy interface
6 *
7 * PROGRAMMERS:
8 */
9
10 /* INCLUDES ******************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
16 /* GLOBALS *******************************************************************/
17
/* PFN of the single shared zero-filled page used by CcZeroData for
 * non-cached writes; allocated once by CcInitCacheZeroPage. */
static PFN_NUMBER CcZeroPage = 0;

/* Upper bound on the bytes zeroed in one paging-write pass (CcZeroData). */
#define MAX_ZERO_LENGTH (256 * 1024)
/* Upper bound on the bytes transferred in one MDL by ReadCacheSegmentChain;
 * must fit within a single VACB mapping unit. */
#define MAX_RW_LENGTH (256 * 1024)
C_ASSERT(MAX_RW_LENGTH <= VACB_MAPPING_GRANULARITY);

/* Fast-I/O statistics counters. Only defined here -- presumably updated
 * by the fast-read/MDL paths elsewhere; verify before relying on them. */
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;
30
31 /* FUNCTIONS *****************************************************************/
32
/* Mm-internal helper (no public header prototype visible here): zero-fills
 * the physical page identified by PageFrameIndex. Used below to initialize
 * CcZeroPage. */
VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);
38
39 VOID
40 NTAPI
41 CcInitCacheZeroPage (
42 VOID)
43 {
44 NTSTATUS Status;
45
46 MI_SET_USAGE(MI_USAGE_CACHE);
47 //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
48 Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
49 if (!NT_SUCCESS(Status))
50 {
51 DbgPrint("Can't allocate CcZeroPage.\n");
52 KeBugCheck(CACHE_MANAGER);
53 }
54 MiZeroPhysicalPage(CcZeroPage);
55 }
56
/*
 * Reads Length bytes at ReadOffset from the file backing Bcb into Buffer,
 * going through the cache: valid cache segments are copied straight to the
 * buffer, while runs of consecutive invalid segments are paged in with a
 * single IoPageRead and then copied.
 *
 * Assumes ReadOffset is VACB-aligned and Length <= MAX_RW_LENGTH --
 * presumably guaranteed by the caller (CcCopyRead); not re-checked here.
 * Returns STATUS_SUCCESS, or the failure status of the paging read.
 */
NTSTATUS
NTAPI
ReadCacheSegmentChain (
    PBCB Bcb,
    ULONG ReadOffset,
    ULONG Length,
    PVOID Buffer)
{
    PCACHE_SEGMENT head;
    PCACHE_SEGMENT current;
    PCACHE_SEGMENT previous;
    IO_STATUS_BLOCK Iosb;
    LARGE_INTEGER SegOffset;
    NTSTATUS Status;
    ULONG TempLength;
    KEVENT Event;
    PMDL Mdl;

    /* Stack-allocated MDL sized for the largest possible run; reused for
     * every paging read below. */
    Mdl = _alloca(MmSizeOfMdl(NULL, MAX_RW_LENGTH));

    /* Obtain a linked chain of referenced cache segments covering the range. */
    Status = CcRosGetCacheSegmentChain(Bcb, ReadOffset, Length, &head);
    if (!NT_SUCCESS(Status))
    {
        return Status;
    }
    current = head;
    while (current != NULL)
    {
        /*
         * If the current segment is valid then copy it into the
         * user buffer.
         */
        if (current->Valid)
        {
            TempLength = min(VACB_MAPPING_GRANULARITY, Length);
            RtlCopyMemory(Buffer, current->BaseAddress, TempLength);

            Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);

            Length = Length - TempLength;
            previous = current;
            current = current->NextInChain;
            /* Drop our reference; segment remains valid, not dirtied. */
            CcRosReleaseCacheSegment(Bcb, previous, TRUE, FALSE, FALSE);
        }
        /*
         * Otherwise read in as much as we can.
         */
        else
        {
            PCACHE_SEGMENT current2;
            ULONG current_size;
            ULONG i;
            PPFN_NUMBER MdlPages;

            /*
             * Count the maximum number of bytes we could read starting
             * from the current segment.
             */
            current2 = current;
            current_size = 0;
            while ((current2 != NULL) && !current2->Valid && (current_size < MAX_RW_LENGTH))
            {
                current2 = current2->NextInChain;
                current_size += VACB_MAPPING_GRANULARITY;
            }

            /*
             * Create an MDL which contains all their pages.
             */
            MmInitializeMdl(Mdl, NULL, current_size);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            current2 = current;
            current_size = 0;
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            while ((current2 != NULL) && !current2->Valid && (current_size < MAX_RW_LENGTH))
            {
                PVOID address = current2->BaseAddress;
                /* One PFN entry per page of the segment's kernel mapping. */
                for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++, address = RVA(address, PAGE_SIZE))
                {
                    *MdlPages++ = MmGetPfnForProcess(NULL, address);
                }
                current2 = current2->NextInChain;
                current_size += VACB_MAPPING_GRANULARITY;
            }

            /*
             * Read in the information.
             */
            SegOffset.QuadPart = current->FileOffset;
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoPageRead(Bcb->FileObject,
                                Mdl,
                                &SegOffset,
                                &Event,
                                &Iosb);
            if (Status == STATUS_PENDING)
            {
                /* Non-alertable kernel-mode wait for the paging I/O. */
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            /* The storage stack may have mapped the MDL; undo that mapping. */
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            /* A read past end-of-file is not fatal here. */
            if (!NT_SUCCESS(Status) && Status != STATUS_END_OF_FILE)
            {
                /* Hard failure: release every remaining segment of the
                 * chain (still invalid) and bail out. */
                while (current != NULL)
                {
                    previous = current;
                    current = current->NextInChain;
                    CcRosReleaseCacheSegment(Bcb, previous, FALSE, FALSE, FALSE);
                }
                return Status;
            }
            /* Copy the freshly read run to the caller's buffer, releasing
             * each segment as now-valid. */
            current_size = 0;
            while (current != NULL && !current->Valid && current_size < MAX_RW_LENGTH)
            {
                previous = current;
                current = current->NextInChain;
                TempLength = min(VACB_MAPPING_GRANULARITY, Length);
                RtlCopyMemory(Buffer, previous->BaseAddress, TempLength);

                Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);

                Length = Length - TempLength;
                CcRosReleaseCacheSegment(Bcb, previous, TRUE, FALSE, FALSE);
                current_size += VACB_MAPPING_GRANULARITY;
            }
        }
    }
    return STATUS_SUCCESS;
}
189
190 NTSTATUS
191 NTAPI
192 ReadCacheSegment (
193 PCACHE_SEGMENT CacheSeg)
194 {
195 ULONG Size;
196 PMDL Mdl;
197 NTSTATUS Status;
198 LARGE_INTEGER SegOffset;
199 IO_STATUS_BLOCK IoStatus;
200 KEVENT Event;
201
202 SegOffset.QuadPart = CacheSeg->FileOffset;
203 Size = (ULONG)(CacheSeg->Bcb->AllocationSize.QuadPart - CacheSeg->FileOffset);
204 if (Size > VACB_MAPPING_GRANULARITY)
205 {
206 Size = VACB_MAPPING_GRANULARITY;
207 }
208
209 Mdl = IoAllocateMdl(CacheSeg->BaseAddress, Size, FALSE, FALSE, NULL);
210 if (!Mdl)
211 {
212 return STATUS_INSUFFICIENT_RESOURCES;
213 }
214
215 MmBuildMdlForNonPagedPool(Mdl);
216 Mdl->MdlFlags |= MDL_IO_PAGE_READ;
217 KeInitializeEvent(&Event, NotificationEvent, FALSE);
218 Status = IoPageRead(CacheSeg->Bcb->FileObject, Mdl, &SegOffset, &Event, &IoStatus);
219 if (Status == STATUS_PENDING)
220 {
221 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
222 Status = IoStatus.Status;
223 }
224
225 IoFreeMdl(Mdl);
226
227 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
228 {
229 DPRINT1("IoPageRead failed, Status %x\n", Status);
230 return Status;
231 }
232
233 if (Size < VACB_MAPPING_GRANULARITY)
234 {
235 RtlZeroMemory((char*)CacheSeg->BaseAddress + Size,
236 VACB_MAPPING_GRANULARITY - Size);
237 }
238
239 return STATUS_SUCCESS;
240 }
241
242 NTSTATUS
243 NTAPI
244 WriteCacheSegment (
245 PCACHE_SEGMENT CacheSeg)
246 {
247 ULONG Size;
248 PMDL Mdl;
249 NTSTATUS Status;
250 IO_STATUS_BLOCK IoStatus;
251 LARGE_INTEGER SegOffset;
252 KEVENT Event;
253
254 CacheSeg->Dirty = FALSE;
255 SegOffset.QuadPart = CacheSeg->FileOffset;
256 Size = (ULONG)(CacheSeg->Bcb->AllocationSize.QuadPart - CacheSeg->FileOffset);
257 if (Size > VACB_MAPPING_GRANULARITY)
258 {
259 Size = VACB_MAPPING_GRANULARITY;
260 }
261 //
262 // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
263 // MmGlobalPageDirectory and the real system PDE directory. What a mess...
264 //
265 {
266 ULONG i = 0;
267 do
268 {
269 MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)CacheSeg->BaseAddress + (i << PAGE_SHIFT)));
270 } while (++i < (Size >> PAGE_SHIFT));
271 }
272
273 Mdl = IoAllocateMdl(CacheSeg->BaseAddress, Size, FALSE, FALSE, NULL);
274 if (!Mdl)
275 {
276 return STATUS_INSUFFICIENT_RESOURCES;
277 }
278 MmBuildMdlForNonPagedPool(Mdl);
279 Mdl->MdlFlags |= MDL_IO_PAGE_READ;
280 KeInitializeEvent(&Event, NotificationEvent, FALSE);
281 Status = IoSynchronousPageWrite(CacheSeg->Bcb->FileObject, Mdl, &SegOffset, &Event, &IoStatus);
282 if (Status == STATUS_PENDING)
283 {
284 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
285 Status = IoStatus.Status;
286 }
287 IoFreeMdl(Mdl);
288 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
289 {
290 DPRINT1("IoPageWrite failed, Status %x\n", Status);
291 CacheSeg->Dirty = TRUE;
292 return Status;
293 }
294
295 return STATUS_SUCCESS;
296 }
297
298
299 /*
300 * @unimplemented
301 */
/*
 * Should report whether BytesToWrite bytes can be written to FileObject
 * without exceeding dirty-page thresholds. Not implemented: always
 * refuses, which pushes callers onto their deferred-write paths.
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    UNIMPLEMENTED;
    return FALSE;
}
313
314
315 /*
316 * @implemented
317 */
318 BOOLEAN
319 NTAPI
320 CcCopyRead (
321 IN PFILE_OBJECT FileObject,
322 IN PLARGE_INTEGER FileOffset,
323 IN ULONG Length,
324 IN BOOLEAN Wait,
325 OUT PVOID Buffer,
326 OUT PIO_STATUS_BLOCK IoStatus)
327 {
328 ULONG ReadOffset;
329 ULONG TempLength;
330 NTSTATUS Status = STATUS_SUCCESS;
331 PVOID BaseAddress;
332 PCACHE_SEGMENT CacheSeg;
333 BOOLEAN Valid;
334 ULONG ReadLength = 0;
335 PBCB Bcb;
336 KIRQL oldirql;
337 PLIST_ENTRY current_entry;
338 PCACHE_SEGMENT current;
339
340 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
341 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
342 FileObject, FileOffset->QuadPart, Length, Wait,
343 Buffer, IoStatus);
344
345 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
346 ReadOffset = (ULONG)FileOffset->QuadPart;
347
348 DPRINT("AllocationSize %I64d, FileSize %I64d\n",
349 Bcb->AllocationSize.QuadPart,
350 Bcb->FileSize.QuadPart);
351
352 /*
353 * Check for the nowait case that all the cache segments that would
354 * cover this read are in memory.
355 */
356 if (!Wait)
357 {
358 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
359 /* FIXME: this loop doesn't take into account areas that don't have
360 * a segment in the list yet */
361 current_entry = Bcb->BcbSegmentListHead.Flink;
362 while (current_entry != &Bcb->BcbSegmentListHead)
363 {
364 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
365 BcbSegmentListEntry);
366 if (!current->Valid &&
367 DoSegmentsIntersect(current->FileOffset, VACB_MAPPING_GRANULARITY,
368 ReadOffset, Length))
369 {
370 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
371 IoStatus->Status = STATUS_UNSUCCESSFUL;
372 IoStatus->Information = 0;
373 return FALSE;
374 }
375 if (current->FileOffset >= ReadOffset + Length)
376 break;
377 current_entry = current_entry->Flink;
378 }
379 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
380 }
381
382 TempLength = ReadOffset % VACB_MAPPING_GRANULARITY;
383 if (TempLength != 0)
384 {
385 TempLength = min(Length, VACB_MAPPING_GRANULARITY - TempLength);
386 Status = CcRosRequestCacheSegment(Bcb,
387 ROUND_DOWN(ReadOffset,
388 VACB_MAPPING_GRANULARITY),
389 &BaseAddress, &Valid, &CacheSeg);
390 if (!NT_SUCCESS(Status))
391 {
392 IoStatus->Information = 0;
393 IoStatus->Status = Status;
394 DPRINT("CcRosRequestCacheSegment faild, Status %x\n", Status);
395 return FALSE;
396 }
397 if (!Valid)
398 {
399 Status = ReadCacheSegment(CacheSeg);
400 if (!NT_SUCCESS(Status))
401 {
402 IoStatus->Information = 0;
403 IoStatus->Status = Status;
404 CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
405 return FALSE;
406 }
407 }
408 RtlCopyMemory(Buffer,
409 (char*)BaseAddress + ReadOffset % VACB_MAPPING_GRANULARITY,
410 TempLength);
411 CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
412 ReadLength += TempLength;
413 Length -= TempLength;
414 ReadOffset += TempLength;
415 Buffer = (PVOID)((char*)Buffer + TempLength);
416 }
417
418 while (Length > 0)
419 {
420 TempLength = min(VACB_MAPPING_GRANULARITY, Length);
421 Status = ReadCacheSegmentChain(Bcb, ReadOffset, TempLength, Buffer);
422 if (!NT_SUCCESS(Status))
423 {
424 IoStatus->Information = 0;
425 IoStatus->Status = Status;
426 DPRINT("ReadCacheSegmentChain failed, Status %x\n", Status);
427 return FALSE;
428 }
429
430 ReadLength += TempLength;
431 Length -= TempLength;
432 ReadOffset += TempLength;
433
434 Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);
435 }
436
437 IoStatus->Status = STATUS_SUCCESS;
438 IoStatus->Information = ReadLength;
439 DPRINT("CcCopyRead O.K.\n");
440 return TRUE;
441 }
442
443 /*
444 * @implemented
445 */
446 BOOLEAN
447 NTAPI
448 CcCopyWrite (
449 IN PFILE_OBJECT FileObject,
450 IN PLARGE_INTEGER FileOffset,
451 IN ULONG Length,
452 IN BOOLEAN Wait,
453 IN PVOID Buffer)
454 {
455 NTSTATUS Status;
456 ULONG WriteOffset;
457 KIRQL oldirql;
458 PBCB Bcb;
459 PLIST_ENTRY current_entry;
460 PCACHE_SEGMENT CacheSeg;
461 ULONG TempLength;
462 PVOID BaseAddress;
463 BOOLEAN Valid;
464
465 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
466 "Length %lu, Wait %u, Buffer 0x%p)\n",
467 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
468
469 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
470 WriteOffset = (ULONG)FileOffset->QuadPart;
471
472 if (!Wait)
473 {
474 /* testing, if the requested datas are available */
475 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
476 /* FIXME: this loop doesn't take into account areas that don't have
477 * a segment in the list yet */
478 current_entry = Bcb->BcbSegmentListHead.Flink;
479 while (current_entry != &Bcb->BcbSegmentListHead)
480 {
481 CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
482 BcbSegmentListEntry);
483 if (!CacheSeg->Valid &&
484 DoSegmentsIntersect(CacheSeg->FileOffset, VACB_MAPPING_GRANULARITY,
485 WriteOffset, Length))
486 {
487 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
488 /* datas not available */
489 return FALSE;
490 }
491 if (CacheSeg->FileOffset >= WriteOffset + Length)
492 break;
493 current_entry = current_entry->Flink;
494 }
495 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
496 }
497
498 TempLength = WriteOffset % VACB_MAPPING_GRANULARITY;
499 if (TempLength != 0)
500 {
501 ULONG ROffset;
502 ROffset = ROUND_DOWN(WriteOffset, VACB_MAPPING_GRANULARITY);
503 TempLength = min(Length, VACB_MAPPING_GRANULARITY - TempLength);
504 Status = CcRosRequestCacheSegment(Bcb, ROffset,
505 &BaseAddress, &Valid, &CacheSeg);
506 if (!NT_SUCCESS(Status))
507 {
508 return FALSE;
509 }
510 if (!Valid)
511 {
512 if (!NT_SUCCESS(ReadCacheSegment(CacheSeg)))
513 {
514 return FALSE;
515 }
516 }
517 RtlCopyMemory((char*)BaseAddress + WriteOffset % VACB_MAPPING_GRANULARITY,
518 Buffer,
519 TempLength);
520 CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, TRUE, FALSE);
521
522 Length -= TempLength;
523 WriteOffset += TempLength;
524
525 Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);
526 }
527
528 while (Length > 0)
529 {
530 TempLength = min(VACB_MAPPING_GRANULARITY, Length);
531 Status = CcRosRequestCacheSegment(Bcb,
532 WriteOffset,
533 &BaseAddress,
534 &Valid,
535 &CacheSeg);
536 if (!NT_SUCCESS(Status))
537 {
538 return FALSE;
539 }
540 if (!Valid && TempLength < VACB_MAPPING_GRANULARITY)
541 {
542 if (!NT_SUCCESS(ReadCacheSegment(CacheSeg)))
543 {
544 CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
545 return FALSE;
546 }
547 }
548 RtlCopyMemory(BaseAddress, Buffer, TempLength);
549 CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, TRUE, FALSE);
550 Length -= TempLength;
551 WriteOffset += TempLength;
552
553 Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);
554 }
555 return TRUE;
556 }
557
558 /*
559 * @unimplemented
560 */
/*
 * Should queue a write refused by CcCanIWrite for a retry through
 * PostRoutine(Context1, Context2) once cache pressure drops.
 * Not implemented: the request is silently dropped.
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    UNIMPLEMENTED;
}
573
574 /*
575 * @unimplemented
576 */
/*
 * Fast-path variant of CcCopyRead taking a 32-bit file offset.
 * Not implemented: no data is copied and IoStatus is left untouched.
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    UNIMPLEMENTED;
}
589 /*
590 * @unimplemented
591 */
/*
 * Fast-path variant of CcCopyWrite taking a 32-bit file offset.
 * Not implemented: the write is silently dropped.
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    UNIMPLEMENTED;
}
602
603 /*
604 * @unimplemented
605 */
/*
 * Should block until the lazy writer has processed the work queued at the
 * time of the call. Not implemented; returns STATUS_NOT_IMPLEMENTED.
 */
NTSTATUS
NTAPI
CcWaitForCurrentLazyWriterActivity (
    VOID)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
614
615 /*
616 * @implemented
617 */
/*
 * Zero-fills the byte range [*StartOffset, *EndOffset) of a file.
 *
 * Non-cached files: the zeroes are written straight to disk through an MDL
 * whose every page entry is the shared CcZeroPage. Cached files: the
 * covering cache segments are zeroed in memory and released dirty (the
 * lazy writer flushes them later).
 *
 * Returns FALSE on I/O failure, or -- for cached files with Wait == FALSE
 * -- when a segment needed for a partial-segment zero is not yet cached.
 *
 * NOTE(review): Length is computed from the low 32 bits of the offsets
 * only; ranges crossing 4GB appear unsupported -- confirm with callers.
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    ULONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->u.LowPart - StartOffset->u.LowPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        /* Stack MDL reused for each chunk of at most MAX_ZERO_LENGTH. */
        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            /* Chunk so that page offset plus length fits the MDL capacity. */
            if (Length + WriteOffset.u.LowPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.u.LowPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            /* Every page of the MDL points at the single shared zero page. */
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            /* The storage stack may have mapped the MDL; undo that. */
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        /* File is cached */
        KIRQL oldirql;
        PBCB Bcb;
        PLIST_ENTRY current_entry;
        PCACHE_SEGMENT CacheSeg, current, previous;
        ULONG TempLength;

        Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
        if (!Wait)
        {
            /* Fail instead of blocking if an intersecting segment would
             * first have to be read from disk. */
            KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
            /* FIXME: this loop doesn't take into account areas that don't have
             * a segment in the list yet */
            current_entry = Bcb->BcbSegmentListHead.Flink;
            while (current_entry != &Bcb->BcbSegmentListHead)
            {
                CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                             BcbSegmentListEntry);
                if (!CacheSeg->Valid &&
                    DoSegmentsIntersect(CacheSeg->FileOffset, VACB_MAPPING_GRANULARITY,
                                        WriteOffset.u.LowPart, Length))
                {
                    KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
                    /* Data not available without blocking I/O. */
                    return FALSE;
                }
                if (CacheSeg->FileOffset >= WriteOffset.u.LowPart + Length)
                    break;
                current_entry = current_entry->Flink;
            }
            KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        }

        while (Length > 0)
        {
            ULONG Offset;
            Offset = WriteOffset.u.LowPart % VACB_MAPPING_GRANULARITY;
            /* Limit each pass to MAX_ZERO_LENGTH bytes of file data. */
            if (Length + Offset > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - Offset;
            }
            else
            {
                CurrentLength = Length;
            }
            /* Take a referenced chain of segments covering this pass,
             * starting at the VACB-aligned offset below WriteOffset. */
            Status = CcRosGetCacheSegmentChain (Bcb, WriteOffset.u.LowPart - Offset,
                                                Offset + CurrentLength, &CacheSeg);
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            current = CacheSeg;

            while (current != NULL)
            {
                Offset = WriteOffset.u.LowPart % VACB_MAPPING_GRANULARITY;
                if ((Offset != 0) ||
                    (Offset + CurrentLength < VACB_MAPPING_GRANULARITY))
                {
                    /* Partially zeroed segment: the untouched bytes must
                     * hold real file data, so page it in first. */
                    if (!current->Valid)
                    {
                        /* read the segment */
                        Status = ReadCacheSegment(current);
                        if (!NT_SUCCESS(Status))
                        {
                            DPRINT1("ReadCacheSegment failed, status %x\n",
                                    Status);
                        }
                    }
                    TempLength = min(CurrentLength, VACB_MAPPING_GRANULARITY - Offset);
                }
                else
                {
                    /* Whole segment is replaced by zeroes; no read needed. */
                    TempLength = VACB_MAPPING_GRANULARITY;
                }
                RtlZeroMemory((PUCHAR)current->BaseAddress + Offset,
                              TempLength);

                WriteOffset.QuadPart += TempLength;
                CurrentLength -= TempLength;
                Length -= TempLength;

                current = current->NextInChain;
            }

            /* Release the chain, marking every segment valid and dirty. */
            current = CacheSeg;
            while (current != NULL)
            {
                previous = current;
                current = current->NextInChain;
                CcRosReleaseCacheSegment(Bcb, previous, TRUE, TRUE, FALSE);
            }
        }
    }

    return TRUE;
}