/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH (256 * 1024)
#define MAX_RW_LENGTH   (256 * 1024)

ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

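/*
 * Allocate and zero the single physical page backing CcZeroPage. CcZeroData
 * later builds MDLs whose pages all reference this page when zeroing ranges
 * of uncached files. Failure to obtain a page is fatal (CACHE_MANAGER
 * bugcheck).
 */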
VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

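/*
 * Copy Length bytes starting at ReadOffset from the cache into Buffer.
 * Segments that are already valid are copied directly; each run of invalid
 * segments (up to MAX_RW_LENGTH bytes) is faulted in with a single
 * IoPageRead before being copied and released.
 */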
NTSTATUS
NTAPI
ReadCacheSegmentChain (
    PBCB Bcb,
    ULONG ReadOffset,
    ULONG Length,
    PVOID Buffer)
{
    PCACHE_SEGMENT head;
    PCACHE_SEGMENT current;
    PCACHE_SEGMENT previous;
    IO_STATUS_BLOCK Iosb;
    LARGE_INTEGER SegOffset;
    NTSTATUS Status;
    ULONG TempLength;
    KEVENT Event;
    PMDL Mdl;

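    /* A single stack-allocated MDL, sized for MAX_RW_LENGTH, is reused for
     * every run of invalid segments read below. */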
    Mdl = _alloca(MmSizeOfMdl(NULL, MAX_RW_LENGTH));

    Status = CcRosGetCacheSegmentChain(Bcb, ReadOffset, Length, &head);
    if (!NT_SUCCESS(Status))
    {
        return Status;
    }
    current = head;
    while (current != NULL)
    {
        /*
         * If the current segment is valid then copy it into the
         * user buffer.
         */
        if (current->Valid)
        {
            TempLength = min(VACB_MAPPING_GRANULARITY, Length);
            memcpy(Buffer, current->BaseAddress, TempLength);

            Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);

            Length = Length - TempLength;
            previous = current;
            current = current->NextInChain;
            CcRosReleaseCacheSegment(Bcb, previous, TRUE, FALSE, FALSE);
        }
        /*
         * Otherwise read in as much as we can.
         */
        else
        {
            PCACHE_SEGMENT current2;
            ULONG current_size;
            ULONG i;
            PPFN_NUMBER MdlPages;

            /*
             * Count the maximum number of bytes we could read starting
             * from the current segment.
             */
            current2 = current;
            current_size = 0;
            while ((current2 != NULL) && !current2->Valid && (current_size < MAX_RW_LENGTH))
            {
                current2 = current2->NextInChain;
                current_size += VACB_MAPPING_GRANULARITY;
            }

            /*
             * Create an MDL which contains all their pages.
             */
            MmInitializeMdl(Mdl, NULL, current_size);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            current2 = current;
            current_size = 0;
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            while ((current2 != NULL) && !current2->Valid && (current_size < MAX_RW_LENGTH))
            {
                PVOID address = current2->BaseAddress;
                for (i = 0; i < (VACB_MAPPING_GRANULARITY / PAGE_SIZE); i++, address = RVA(address, PAGE_SIZE))
                {
                    *MdlPages++ = MmGetPfnForProcess(NULL, address);
                }
                current2 = current2->NextInChain;
                current_size += VACB_MAPPING_GRANULARITY;
            }

            /*
             * Read in the information.
             */
            SegOffset.QuadPart = current->FileOffset;
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoPageRead(Bcb->FileObject,
                                Mdl,
                                &SegOffset,
                                &Event,
                                &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status) && Status != STATUS_END_OF_FILE)
            {
                while (current != NULL)
                {
                    previous = current;
                    current = current->NextInChain;
                    CcRosReleaseCacheSegment(Bcb, previous, FALSE, FALSE, FALSE);
                }
                return Status;
            }
            current_size = 0;
            while (current != NULL && !current->Valid && current_size < MAX_RW_LENGTH)
            {
                previous = current;
                current = current->NextInChain;
                TempLength = min(VACB_MAPPING_GRANULARITY, Length);
                memcpy(Buffer, previous->BaseAddress, TempLength);

                Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);

                Length = Length - TempLength;
                CcRosReleaseCacheSegment(Bcb, previous, TRUE, FALSE, FALSE);
                current_size += VACB_MAPPING_GRANULARITY;
            }
        }
    }
    return STATUS_SUCCESS;
}

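/*
 * Fault a single cache segment in from its backing file. Data beyond the
 * file's allocation size is zero-filled so the caller always sees a fully
 * initialized VACB_MAPPING_GRANULARITY-sized mapping.
 */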
NTSTATUS
NTAPI
ReadCacheSegment (
    PCACHE_SEGMENT CacheSeg)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    LARGE_INTEGER SegOffset;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    SegOffset.QuadPart = CacheSeg->FileOffset;
    Size = (ULONG)(CacheSeg->Bcb->AllocationSize.QuadPart - CacheSeg->FileOffset);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Mdl = IoAllocateMdl(CacheSeg->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    MmBuildMdlForNonPagedPool(Mdl);
    Mdl->MdlFlags |= MDL_IO_PAGE_READ;
    KeInitializeEvent(&Event, NotificationEvent, FALSE);
    Status = IoPageRead(CacheSeg->Bcb->FileObject, Mdl, &SegOffset, &Event, &IoStatus);
    if (Status == STATUS_PENDING)
    {
        KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
        Status = IoStatus.Status;
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (VACB_MAPPING_GRANULARITY > Size)
    {
        RtlZeroMemory((char*)CacheSeg->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}

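/*
 * Write one cache segment back to its file with IoSynchronousPageWrite. The
 * Dirty flag is cleared up front and restored if the write fails with
 * anything other than STATUS_END_OF_FILE.
 */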
NTSTATUS
NTAPI
WriteCacheSegment (
    PCACHE_SEGMENT CacheSeg)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    LARGE_INTEGER SegOffset;
    KEVENT Event;

    CacheSeg->Dirty = FALSE;
    SegOffset.QuadPart = CacheSeg->FileOffset;
    Size = (ULONG)(CacheSeg->Bcb->AllocationSize.QuadPart - CacheSeg->FileOffset);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)CacheSeg->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(CacheSeg->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    MmBuildMdlForNonPagedPool(Mdl);
    Mdl->MdlFlags |= MDL_IO_PAGE_READ;
    KeInitializeEvent(&Event, NotificationEvent, FALSE);
    Status = IoSynchronousPageWrite(CacheSeg->Bcb->FileObject, Mdl, &SegOffset, &Event, &IoStatus);
    if (Status == STATUS_PENDING)
    {
        KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
        Status = IoStatus.Status;
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %x\n", Status);
        CacheSeg->Dirty = TRUE;
        return Status;
    }

    return STATUS_SUCCESS;
}


/*
 * @unimplemented
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    UNIMPLEMENTED;
    return FALSE;
}


/*
 * @implemented
 */
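/*
 * Copy cached file data into the caller's buffer. With Wait == FALSE the
 * request fails if any cache segment already in the BCB list intersects the
 * range and is not yet valid (see the FIXME below); otherwise the leading
 * partial segment and the remaining range are brought in through
 * ReadCacheSegment and ReadCacheSegmentChain as needed.
 */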
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    ULONG ReadOffset;
    ULONG TempLength;
    NTSTATUS Status = STATUS_SUCCESS;
    PVOID BaseAddress;
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN Valid;
    ULONG ReadLength = 0;
    PBCB Bcb;
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
    ReadOffset = (ULONG)FileOffset->QuadPart;

    DPRINT("AllocationSize %I64d, FileSize %I64d\n",
           Bcb->AllocationSize.QuadPart,
           Bcb->FileSize.QuadPart);

    /*
     * Check for the nowait case that all the cache segments that would
     * cover this read are in memory.
     */
    if (!Wait)
    {
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a segment in the list yet */
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                        BcbSegmentListEntry);
            if (!current->Valid &&
                DoSegmentsIntersect(current->FileOffset, VACB_MAPPING_GRANULARITY,
                                    ReadOffset, Length))
            {
                KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
                IoStatus->Status = STATUS_UNSUCCESSFUL;
                IoStatus->Information = 0;
                return FALSE;
            }
            if (current->FileOffset >= ReadOffset + Length)
                break;
            current_entry = current_entry->Flink;
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
    }

    TempLength = ReadOffset % VACB_MAPPING_GRANULARITY;
    if (TempLength != 0)
    {
        TempLength = min (Length, VACB_MAPPING_GRANULARITY - TempLength);
        Status = CcRosRequestCacheSegment(Bcb,
                                          ROUND_DOWN(ReadOffset,
                                                     VACB_MAPPING_GRANULARITY),
                                          &BaseAddress, &Valid, &CacheSeg);
        if (!NT_SUCCESS(Status))
        {
            IoStatus->Information = 0;
            IoStatus->Status = Status;
            DPRINT("CcRosRequestCacheSegment failed, Status %x\n", Status);
            return FALSE;
        }
        if (!Valid)
        {
            Status = ReadCacheSegment(CacheSeg);
            if (!NT_SUCCESS(Status))
            {
                IoStatus->Information = 0;
                IoStatus->Status = Status;
                CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
                return FALSE;
            }
        }
        memcpy (Buffer, (char*)BaseAddress + ReadOffset % VACB_MAPPING_GRANULARITY,
                TempLength);
        CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
        ReadLength += TempLength;
        Length -= TempLength;
        ReadOffset += TempLength;
        Buffer = (PVOID)((char*)Buffer + TempLength);
    }

    while (Length > 0)
    {
        TempLength = min(max(VACB_MAPPING_GRANULARITY, MAX_RW_LENGTH), Length);
        Status = ReadCacheSegmentChain(Bcb, ReadOffset, TempLength, Buffer);
        if (!NT_SUCCESS(Status))
        {
            IoStatus->Information = 0;
            IoStatus->Status = Status;
            DPRINT("ReadCacheSegmentChain failed, Status %x\n", Status);
            return FALSE;
        }

        ReadLength += TempLength;
        Length -= TempLength;
        ReadOffset += TempLength;

        Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = ReadLength;
    DPRINT("CcCopyRead O.K.\n");
    return TRUE;
}

/*
 * @implemented
 */
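/*
 * Copy the caller's buffer into the cache and mark every touched segment
 * dirty. With Wait == FALSE the request fails if any cache segment already
 * in the BCB list intersects the range and is not yet valid (see the FIXME
 * below). Partially overwritten segments are read in first so the bytes that
 * are not written keep their previous contents.
 */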
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    NTSTATUS Status;
    ULONG WriteOffset;
    KIRQL oldirql;
    PBCB Bcb;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT CacheSeg;
    ULONG TempLength;
    PVOID BaseAddress;
    BOOLEAN Valid;

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
    WriteOffset = (ULONG)FileOffset->QuadPart;

    if (!Wait)
    {
        /* Check whether the requested data is already available. */
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a segment in the list yet */
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                         BcbSegmentListEntry);
            if (!CacheSeg->Valid &&
                DoSegmentsIntersect(CacheSeg->FileOffset, VACB_MAPPING_GRANULARITY,
                                    WriteOffset, Length))
            {
                KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
                /* Data not available. */
                return FALSE;
            }
            if (CacheSeg->FileOffset >= WriteOffset + Length)
                break;
            current_entry = current_entry->Flink;
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
    }

    TempLength = WriteOffset % VACB_MAPPING_GRANULARITY;
    if (TempLength != 0)
    {
        ULONG ROffset;
        ROffset = ROUND_DOWN(WriteOffset, VACB_MAPPING_GRANULARITY);
        TempLength = min (Length, VACB_MAPPING_GRANULARITY - TempLength);
        Status = CcRosRequestCacheSegment(Bcb, ROffset,
                                          &BaseAddress, &Valid, &CacheSeg);
        if (!NT_SUCCESS(Status))
        {
            return FALSE;
        }
        if (!Valid)
        {
            if (!NT_SUCCESS(ReadCacheSegment(CacheSeg)))
            {
                CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
                return FALSE;
            }
        }
        memcpy ((char*)BaseAddress + WriteOffset % VACB_MAPPING_GRANULARITY,
                Buffer, TempLength);
        CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, TRUE, FALSE);

        Length -= TempLength;
        WriteOffset += TempLength;

        Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);
    }

    while (Length > 0)
    {
        TempLength = min (VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestCacheSegment(Bcb,
                                          WriteOffset,
                                          &BaseAddress,
                                          &Valid,
                                          &CacheSeg);
        if (!NT_SUCCESS(Status))
        {
            return FALSE;
        }
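        /* Only a partial overwrite of this segment: fault it in first so the
         * bytes we do not touch keep their on-disk contents. */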
        if (!Valid && TempLength < VACB_MAPPING_GRANULARITY)
        {
            if (!NT_SUCCESS(ReadCacheSegment(CacheSeg)))
            {
                CcRosReleaseCacheSegment(Bcb, CacheSeg, FALSE, FALSE, FALSE);
                return FALSE;
            }
        }
        memcpy (BaseAddress, Buffer, TempLength);
        CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, TRUE, FALSE);
        Length -= TempLength;
        WriteOffset += TempLength;

        Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);
    }
    return TRUE;
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
CcWaitForCurrentLazyWriterActivity (
    VOID)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @implemented
 */
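/*
 * Zero the byte range [StartOffset, EndOffset). If the file is not cached,
 * the range is written directly using MDLs whose pages all map CcZeroPage;
 * if it is cached, the affected cache segments are zeroed in place and
 * released as dirty.
 */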
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    ULONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->u.LowPart - StartOffset->u.LowPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            if (Length + WriteOffset.u.LowPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.u.LowPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
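            /* Every page of this MDL maps the shared zero page, so the write
             * below emits zeroes without needing a temporary buffer. */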
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        /* File is cached */
        KIRQL oldirql;
        PBCB Bcb;
        PLIST_ENTRY current_entry;
        PCACHE_SEGMENT CacheSeg, current, previous;
        ULONG TempLength;

        Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
        if (!Wait)
        {
            /* Check whether the requested data is already available. */
            KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
            /* FIXME: this loop doesn't take into account areas that don't have
             * a segment in the list yet */
            current_entry = Bcb->BcbSegmentListHead.Flink;
            while (current_entry != &Bcb->BcbSegmentListHead)
            {
                CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                             BcbSegmentListEntry);
                if (!CacheSeg->Valid &&
                    DoSegmentsIntersect(CacheSeg->FileOffset, VACB_MAPPING_GRANULARITY,
                                        WriteOffset.u.LowPart, Length))
                {
                    KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
                    /* Data not available. */
                    return FALSE;
                }
                if (CacheSeg->FileOffset >= WriteOffset.u.LowPart + Length)
                    break;
                current_entry = current_entry->Flink;
            }
            KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        }

        while (Length > 0)
        {
            ULONG Offset;
            Offset = WriteOffset.u.LowPart % VACB_MAPPING_GRANULARITY;
            if (Length + Offset > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - Offset;
            }
            else
            {
                CurrentLength = Length;
            }
            Status = CcRosGetCacheSegmentChain (Bcb, WriteOffset.u.LowPart - Offset,
                                                Offset + CurrentLength, &CacheSeg);
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            current = CacheSeg;

            while (current != NULL)
            {
                Offset = WriteOffset.u.LowPart % VACB_MAPPING_GRANULARITY;
                if ((Offset != 0) ||
                    (Offset + CurrentLength < VACB_MAPPING_GRANULARITY))
                {
                    if (!current->Valid)
                    {
                        /* read the segment */
                        Status = ReadCacheSegment(current);
                        if (!NT_SUCCESS(Status))
                        {
                            DPRINT1("ReadCacheSegment failed, status %x\n",
                                    Status);
                        }
                    }
                    TempLength = min (CurrentLength, VACB_MAPPING_GRANULARITY - Offset);
                }
                else
                {
                    TempLength = VACB_MAPPING_GRANULARITY;
                }
                memset ((PUCHAR)current->BaseAddress + Offset, 0, TempLength);

                WriteOffset.QuadPart += TempLength;
                CurrentLength -= TempLength;
                Length -= TempLength;

                current = current->NextInChain;
            }

            current = CacheSeg;
            while (current != NULL)
            {
                previous = current;
                current = current->NextInChain;
                CcRosReleaseCacheSegment(Bcb, previous, TRUE, TRUE, FALSE);
            }
        }
    }

    return TRUE;
}