Remove unnecessary executable bits
[reactos.git] / drivers / filesystems / cdfs_new / deviosup.c
1 /*++
2
3 Copyright (c) 1989-2000 Microsoft Corporation
4
5 Module Name:
6
7 DevIoSup.c
8
9 Abstract:
10
11 This module implements the low lever disk read/write support for Cdfs.
12
13
14 --*/
15
16 #include "cdprocs.h"
17
18 //
19 // The Bug check file id for this module
20 //
21
22 #define BugCheckFileId (CDFS_BUG_CHECK_DEVIOSUP)
23
24 //
25 // Local structure definitions
26 //
27
28 //
29 // An array of these structures is passed to CdMultipleAsync describing
30 // a set of runs to execute in parallel.
31 //
32
//
// Describes one contiguous device transfer. An array of these is filled
// in by CdPrepareBuffers/CdPrepareXABuffers and issued in parallel by
// CdMultipleAsync/CdMultipleXAAsync.
//

typedef struct _IO_RUN {

    //
    // Disk offset to read from and number of bytes to read. These
    // must be a multiple of 2048 and the disk offset is also a
    // multiple of 2048.
    //

    LONGLONG DiskOffset;
    ULONG DiskByteCount;

    //
    // Current position in user buffer. This is the final destination for
    // this portion of the Io transfer.
    //

    PVOID UserBuffer;

    //
    // Buffer to perform the transfer to. If this is the same as the
    // user buffer above then we are using the user's buffer. Otherwise
    // we either allocated a temporary buffer or are using a different portion
    // of the user's buffer.
    //
    // TransferBuffer - Read full sectors into this location. This can
    // be a pointer into the user's buffer at the exact location the
    // data should go. It can also be an earlier point in the user's
    // buffer if the complete I/O doesn't start on a sector boundary.
    // It may also be a pointer into an allocated buffer.
    //
    // TransferByteCount - Count of bytes to transfer to user's buffer. A
    // value of zero indicates that we did do the transfer into the
    // user's buffer directly.
    //
    // TransferBufferOffset - Offset in this buffer to begin the transfer
    // to the user's buffer.
    //

    PVOID TransferBuffer;
    ULONG TransferByteCount;
    ULONG TransferBufferOffset;

    //
    // This is the Mdl describing the locked pages in memory. It may
    // be allocated to describe the allocated buffer. Or it may be
    // the Mdl in the originating Irp. The MdlOffset is the offset of
    // the current buffer from the beginning of the buffer described by
    // the Mdl below. If the TransferMdl is not the same as the Mdl
    // in the user's Irp then we know we have allocated it.
    //

    PMDL TransferMdl;
    PVOID TransferVirtualAddress;

    //
    // Associated Irp used to perform the Io.
    //

    PIRP SavedIrp;

} IO_RUN;
typedef IO_RUN *PIO_RUN;

//
// Maximum number of runs (and hence parallel device reads) handled in
// one pass through the IoRuns array.
//

#define MAX_PARALLEL_IOS 5
97
98 //
99 // Local support routines
100 //
101
//
// Build the IoRuns array for a cooked-sector (2048 byte) read.
//

_Requires_lock_held_(_Global_critical_region_)
BOOLEAN
CdPrepareBuffers (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PIRP Irp,
    _In_ PFCB Fcb,
    _In_reads_bytes_(ByteCount) PVOID UserBuffer,
    _In_ ULONG UserBufferOffset,
    _In_ LONGLONG StartingOffset,
    _In_ ULONG ByteCount,
    _Out_ PIO_RUN IoRuns,
    _Out_ PULONG RunCount,
    _Out_ PULONG ThisByteCount
    );

//
// Build the IoRuns array for a raw-sector (2352 byte) XA/audio read.
//

_Requires_lock_held_(_Global_critical_region_)
VOID
CdPrepareXABuffers (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PIRP Irp,
    _In_ PFCB Fcb,
    _In_reads_bytes_(ByteCount) PVOID UserBuffer,
    _In_ ULONG UserBufferOffset,
    _In_ LONGLONG StartingOffset,
    _In_ ULONG ByteCount,
    _Out_ PIO_RUN IoRuns,
    _Out_ PULONG RunCount,
    _Out_ PULONG ThisByteCount
    );

//
// Copy data back to the user buffer and release per-run resources.
//

BOOLEAN
CdFinishBuffers (
    _In_ PIRP_CONTEXT IrpContext,
    _Inout_ PIO_RUN IoRuns,
    _In_ ULONG RunCount,
    _In_ BOOLEAN FinalCleanup,
    _In_ BOOLEAN SaveXABuffer
    );

//
// Issue the reads described by an IoRuns array to the device.
//

_Requires_lock_held_(_Global_critical_region_)
VOID
CdMultipleAsync (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PFCB Fcb,
    _In_ ULONG RunCount,
    _Inout_ PIO_RUN IoRuns
    );

VOID
CdMultipleXAAsync (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ ULONG RunCount,
    _Inout_ PIO_RUN IoRuns,
    _In_ PRAW_READ_INFO RawReads,
    _In_ TRACK_MODE_TYPE TrackMode
    );

//
// Issue a single run directly on the caller's Irp.
//

_Requires_lock_held_(_Global_critical_region_)
VOID
CdSingleAsync (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PIO_RUN Run,
    _In_ PFCB Fcb
    );

//
// Wait for a previously issued synchronous transfer to complete.
//

VOID
CdWaitSync (
    _In_ PIRP_CONTEXT IrpContext
    );

// Tell prefast this is a completion routine.
IO_COMPLETION_ROUTINE CdMultiSyncCompletionRoutine;

// Tell prefast this is a completion routine
IO_COMPLETION_ROUTINE CdMultiAsyncCompletionRoutine;

// Tell prefast this is a completion routine
IO_COMPLETION_ROUTINE CdSingleSyncCompletionRoutine;

// Tell prefast this is a completion routine
IO_COMPLETION_ROUTINE CdSingleAsyncCompletionRoutine;

//
// Synthesize the pseudo directory/path-table contents for audio disks.
//

_When_(SafeNodeType(Fcb) != CDFS_NTC_FCB_PATH_TABLE && StartingOffset == 0, _At_(ByteCount, _In_range_(>=, CdAudioDirentSize + sizeof(RAW_DIRENT))))
_When_(SafeNodeType(Fcb) != CDFS_NTC_FCB_PATH_TABLE && StartingOffset != 0, _At_(ByteCount, _In_range_(>=, CdAudioDirentSize + SECTOR_SIZE)))
VOID
CdReadAudioSystemFile (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PFCB Fcb,
    _In_ LONGLONG StartingOffset,
    _In_ _In_range_(>=, CdAudioDirentSize) ULONG ByteCount,
    _Out_writes_bytes_(ByteCount) PVOID SystemBuffer
    );

//
// Satisfy a directory-data run from the volume sector cache.
//

_Requires_lock_held_(_Global_critical_region_)
BOOLEAN
CdReadDirDataThroughCache (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PIO_RUN Run
    );
201
202 #ifdef ALLOC_PRAGMA
203 #pragma alloc_text(PAGE, CdCreateUserMdl)
204 #pragma alloc_text(PAGE, CdMultipleAsync)
205 #pragma alloc_text(PAGE, CdMultipleXAAsync)
206 #pragma alloc_text(PAGE, CdNonCachedRead)
207 #pragma alloc_text(PAGE, CdNonCachedXARead)
208 #pragma alloc_text(PAGE, CdVolumeDasdWrite)
209 #pragma alloc_text(PAGE, CdFinishBuffers)
210 #pragma alloc_text(PAGE, CdPerformDevIoCtrl)
211 #pragma alloc_text(PAGE, CdPerformDevIoCtrlEx)
212 #pragma alloc_text(PAGE, CdPrepareBuffers)
213 #pragma alloc_text(PAGE, CdPrepareXABuffers)
214 #pragma alloc_text(PAGE, CdReadAudioSystemFile)
215 #pragma alloc_text(PAGE, CdReadSectors)
216 #pragma alloc_text(PAGE, CdSingleAsync)
217 #pragma alloc_text(PAGE, CdWaitSync)
218 #pragma alloc_text(PAGE, CdReadDirDataThroughCache)
219 #pragma alloc_text(PAGE, CdFreeDirCache)
220 #pragma alloc_text(PAGE, CdLbnToMmSsFf)
221 #pragma alloc_text(PAGE, CdHijackIrpAndFlushDevice)
222 #endif
223
224
225 VOID
226 CdLbnToMmSsFf (
227 _In_ ULONG Blocks,
228 _Out_writes_(3) PUCHAR Msf
229 )
230
231 /*++
232
233 Routine Description:
234
235 Convert Lbn to MSF format.
236
237 Arguments:
238
239 Msf - on output, set to 0xMmSsFf representation of blocks.
240
241 --*/
242
243 {
244 PAGED_CODE();
245
246 Blocks += 150; // Lbn 0 == 00:02:00, 1sec == 75 frames.
247
248 Msf[0] = (UCHAR)(Blocks % 75); // Frames
249 Blocks /= 75; // -> Seconds
250 Msf[1] = (UCHAR)(Blocks % 60); // Seconds
251 Blocks /= 60; // -> Minutes
252 Msf[2] = (UCHAR)Blocks; // Minutes
253 }
254
255
256 __inline
257 TRACK_MODE_TYPE
258 CdFileTrackMode (
259 _In_ PFCB Fcb
260 )
261
262 /*++
263
264 Routine Description:
265
266 This routine converts FCB XA file type flags to the track mode
267 used by the device drivers.
268
269 Arguments:
270
271 Fcb - Fcb representing the file to read.
272
273 Return Value:
274
275 TrackMode of the file represented by the Fcb.
276
277 --*/
278 {
279 NT_ASSERT( FlagOn( Fcb->FcbState, FCB_STATE_MODE2FORM2_FILE |
280 FCB_STATE_MODE2_FILE |
281 FCB_STATE_DA_FILE ));
282
283 if (FlagOn( Fcb->FcbState, FCB_STATE_MODE2FORM2_FILE )) {
284
285 return XAForm2;
286
287 } else if (FlagOn( Fcb->FcbState, FCB_STATE_DA_FILE )) {
288
289 return CDDA;
290
291 }
292
293 //
294 // FCB_STATE_MODE2_FILE
295 //
296
297 return YellowMode2;
298 }
299
300 \f
301 _Requires_lock_held_(_Global_critical_region_)
302 NTSTATUS
303 CdNonCachedRead (
304 _In_ PIRP_CONTEXT IrpContext,
305 _In_ PFCB Fcb,
306 _In_ LONGLONG StartingOffset,
307 _In_ ULONG ByteCount
308 )
309
310 /*++
311
312 Routine Description:
313
314 This routine performs the non-cached reads to 'cooked' sectors (2048 bytes
315 per sector). This is done by performing the following in a loop.
316
317 Fill in the IoRuns array for the next block of Io.
318 Send the Io to the device.
319 Perform any cleanup on the Io runs array.
320
321 We will not do async Io to any request that generates non-aligned Io.
322 Also we will not perform async Io if it will exceed the size of our
323 IoRuns array. These should be the unusual cases but we will raise
324 or return CANT_WAIT in this routine if we detect this case.
325
326 Arguments:
327
328 Fcb - Fcb representing the file to read.
329
330 StartingOffset - Logical offset in the file to read from.
331
332 ByteCount - Number of bytes to read.
333
334 Return Value:
335
336 NTSTATUS - Status indicating the result of the operation.
337
338 --*/
339
340 {
341 NTSTATUS Status = STATUS_SUCCESS;
342
343 IO_RUN IoRuns[MAX_PARALLEL_IOS];
344 ULONG RunCount = 0;
345 ULONG CleanupRunCount = 0;
346
347 PVOID UserBuffer;
348 ULONG UserBufferOffset = 0;
349 LONGLONG CurrentOffset = StartingOffset;
350 ULONG RemainingByteCount = ByteCount;
351 ULONG ThisByteCount;
352
353 BOOLEAN Unaligned;
354 BOOLEAN FlushIoBuffers = FALSE;
355 BOOLEAN FirstPass = TRUE;
356
357 PAGED_CODE();
358
359 //
360 // We want to make sure the user's buffer is locked in all cases.
361 //
362
363 if (IrpContext->Irp->MdlAddress == NULL) {
364
365 CdCreateUserMdl( IrpContext, ByteCount, TRUE, IoWriteAccess );
366 }
367
368 CdMapUserBuffer( IrpContext, &UserBuffer);
369
370 //
371 // Special case the root directory and path table for a music volume.
372 //
373
374 if (FlagOn( Fcb->Vcb->VcbState, VCB_STATE_AUDIO_DISK ) &&
375 ((SafeNodeType( Fcb ) == CDFS_NTC_FCB_INDEX) ||
376 (SafeNodeType( Fcb ) == CDFS_NTC_FCB_PATH_TABLE))) {
377
378 CdReadAudioSystemFile( IrpContext,
379 Fcb,
380 StartingOffset,
381 ByteCount,
382 UserBuffer );
383
384 return STATUS_SUCCESS;
385 }
386
387 //
388 // If we're going to use the sector cache for this request, then
389 // mark the request waitable.
390 //
391
392 if ((SafeNodeType( Fcb) == CDFS_NTC_FCB_INDEX) &&
393 (NULL != Fcb->Vcb->SectorCacheBuffer) &&
394 (VcbMounted == IrpContext->Vcb->VcbCondition)) {
395
396 if (!FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT)) {
397
398 KeInitializeEvent( &IrpContext->IoContext->SyncEvent,
399 NotificationEvent,
400 FALSE );
401
402 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
403 }
404 }
405
406 //
407 // Use a try-finally to perform the final cleanup.
408 //
409
410 _SEH2_TRY {
411
412 //
413 // Loop while there are more bytes to transfer.
414 //
415
416 do {
417
418 //
419 // Call prepare buffers to set up the next entries
420 // in the IoRuns array. Remember if there are any
421 // unaligned entries. This routine will raise CANT_WAIT
422 // if there are unaligned entries for an async request.
423 //
424
425 RtlZeroMemory( IoRuns, sizeof( IoRuns ));
426
427 Unaligned = CdPrepareBuffers( IrpContext,
428 IrpContext->Irp,
429 Fcb,
430 UserBuffer,
431 UserBufferOffset,
432 CurrentOffset,
433 RemainingByteCount,
434 IoRuns,
435 &CleanupRunCount,
436 &ThisByteCount );
437
438
439 RunCount = CleanupRunCount;
440
441 //
442 // If this is an async request and there aren't enough entries
443 // in the Io array then post the request.
444 //
445
446 if ((ThisByteCount < RemainingByteCount) &&
447 !FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT )) {
448
449 CdRaiseStatus( IrpContext, STATUS_CANT_WAIT );
450 }
451
452 //
453 // If the entire Io is contained in a single run then
454 // we can pass the Io down to the driver. Send the driver down
455 // and wait on the result if this is synchronous.
456 //
457
458 if ((RunCount == 1) && !Unaligned && FirstPass) {
459
460 CdSingleAsync( IrpContext,&IoRuns[0], Fcb );
461
462 //
463 // No cleanup needed for the IoRuns array here.
464 //
465
466 CleanupRunCount = 0;
467
468 //
469 // Wait if we are synchronous, otherwise return
470 //
471
472 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT )) {
473
474 CdWaitSync( IrpContext );
475
476 Status = IrpContext->Irp->IoStatus.Status;
477
478 //
479 // Our completion routine will free the Io context but
480 // we do want to return STATUS_PENDING.
481 //
482
483 } else {
484
485 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_ALLOC_IO );
486 Status = STATUS_PENDING;
487 }
488
489 try_return( NOTHING );
490 }
491
492 //
493 // Otherwise we will perform multiple Io to read in the data.
494 //
495
496 CdMultipleAsync( IrpContext, Fcb, RunCount, IoRuns );
497
498 //
499 // If this is a synchronous request then perform any necessary
500 // post-processing.
501 //
502
503 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT )) {
504
505 //
506 // Wait for the request to complete.
507 //
508
509 CdWaitSync( IrpContext );
510
511 Status = IrpContext->Irp->IoStatus.Status;
512
513 //
514 // Exit this loop if there is an error.
515 //
516
517 if (!NT_SUCCESS( Status )) {
518
519 try_return( NOTHING );
520 }
521
522 //
523 // Perform post read operations on the IoRuns if
524 // necessary.
525 //
526
527 if (Unaligned &&
528 CdFinishBuffers( IrpContext, IoRuns, RunCount, FALSE, FALSE )) {
529
530 FlushIoBuffers = TRUE;
531 }
532
533 CleanupRunCount = 0;
534
535 //
536 // Exit this loop if there are no more bytes to transfer
537 // or we have any error.
538 //
539
540 RemainingByteCount -= ThisByteCount;
541 CurrentOffset += ThisByteCount;
542 UserBuffer = Add2Ptr( UserBuffer, ThisByteCount, PVOID );
543 UserBufferOffset += ThisByteCount;
544
545 //
546 // Otherwise this is an asynchronous request. Always return
547 // STATUS_PENDING.
548 //
549
550 } else {
551
552 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_ALLOC_IO );
553 CleanupRunCount = 0;
554 try_return( Status = STATUS_PENDING );
555 break;
556 }
557
558 FirstPass = FALSE;
559 } while (RemainingByteCount != 0);
560
561 //
562 // Flush the hardware cache if we performed any copy operations.
563 //
564
565 if (FlushIoBuffers) {
566
567 KeFlushIoBuffers( IrpContext->Irp->MdlAddress, TRUE, FALSE );
568 }
569
570 try_exit: NOTHING;
571 } _SEH2_FINALLY {
572
573 //
574 // Perform final cleanup on the IoRuns if necessary.
575 //
576
577 if (CleanupRunCount != 0) {
578
579 CdFinishBuffers( IrpContext, IoRuns, CleanupRunCount, TRUE, FALSE );
580 }
581 } _SEH2_END;
582
583 return Status;
584 }
585
586 \f
587
_Requires_lock_held_(_Global_critical_region_)
NTSTATUS
CdNonCachedXARead (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PFCB Fcb,
    _In_ LONGLONG StartingOffset,
    _In_ ULONG ByteCount
    )

/*++

Routine Description:

    This routine performs the non-cached reads for 'raw' sectors (2352 bytes
    per sector). We also prepend a hard-coded RIFF header of 44 bytes to the file.
    All of this is already reflected in the file size.

    We start by checking whether to prepend any portion of the RIFF header. Then we check
    if the last raw sector read was from the beginning portion of this file, deallocating
    that buffer if necessary. Finally we do the following in a loop.

        Fill the IoRuns array for the next block of Io.
        Send the Io to the device driver.
        Perform any cleanup necessary on the IoRuns array.

    We will not do any async request in this path. The request would have been
    posted to a worker thread before getting to this point.

Arguments:

    Fcb - Fcb representing the file to read.

    StartingOffset - Logical offset in the file to read from.

    ByteCount - Number of bytes to read.

Return Value:

    NTSTATUS - Status indicating the result of the operation.

--*/

{
    NTSTATUS Status = STATUS_SUCCESS;

    RIFF_HEADER LocalRiffHeader;
    PRIFF_HEADER RiffHeader;

    RAW_READ_INFO RawReads[MAX_PARALLEL_IOS];
    IO_RUN IoRuns[MAX_PARALLEL_IOS];
    ULONG RunCount = 0;
    ULONG CleanupRunCount = 0;

    PVOID UserBuffer;
    ULONG UserBufferOffset = 0;
    LONGLONG CurrentOffset = StartingOffset;
    ULONG RemainingByteCount = ByteCount;
    ULONG ThisByteCount = 0;
    ULONG Address = 0;

    BOOLEAN TryingYellowbookMode2 = FALSE;

    TRACK_MODE_TYPE TrackMode;

    PAGED_CODE();

    //
    // We want to make sure the user's buffer is locked in all cases.
    //

    if (IrpContext->Irp->MdlAddress == NULL) {

        CdCreateUserMdl( IrpContext, ByteCount, TRUE, IoWriteAccess );
    }

    //
    // The byte count was rounded up to a logical sector boundary. It has
    // nothing to do with the raw sectors on disk. Limit the remaining
    // byte count to file size.
    //

    if (CurrentOffset + RemainingByteCount > Fcb->FileSize.QuadPart) {

        RemainingByteCount = (ULONG) (Fcb->FileSize.QuadPart - CurrentOffset);
    }

    CdMapUserBuffer( IrpContext, &UserBuffer);

    //
    // Use a try-finally to perform the final cleanup.
    //

    _SEH2_TRY {

        //
        // If the initial offset lies within the RIFF header then copy the
        // necessary bytes to the user's buffer.
        //

        if (CurrentOffset < sizeof( RIFF_HEADER )) {

            //
            // Copy the appropriate RIFF header.
            //

            if (FlagOn( Fcb->FcbState, FCB_STATE_DA_FILE )) {

                //
                // Create the pseudo entries for a music disk.
                //

                if (FlagOn( Fcb->Vcb->VcbState, VCB_STATE_AUDIO_DISK )) {

                    PAUDIO_PLAY_HEADER AudioPlayHeader;
                    PTRACK_DATA TrackData;

                    AudioPlayHeader = (PAUDIO_PLAY_HEADER) &LocalRiffHeader;
                    TrackData = &Fcb->Vcb->CdromToc->TrackData[Fcb->XAFileNumber];

                    //
                    // Copy the data header into our local buffer.
                    //

                    RtlCopyMemory( AudioPlayHeader,
                                   CdAudioPlayHeader,
                                   sizeof( AUDIO_PLAY_HEADER ));

                    //
                    // Copy the serial number into the Id field. Also
                    // the track number in the TOC.
                    //

                    AudioPlayHeader->DiskID = Fcb->Vcb->Vpb->SerialNumber;
                    AudioPlayHeader->TrackNumber = TrackData->TrackNumber;

                    //
                    // One frame == One sector.
                    // One second == 75 frames (winds up being a 44.1khz sample)
                    //
                    // Note: LBN 0 == 0:2:0 (MSF)
                    //

                    //
                    // Fill in the address (both MSF and Lbn format) and length fields.
                    //

                    SwapCopyUchar4( &Address, TrackData->Address);
                    CdLbnToMmSsFf( Address, AudioPlayHeader->TrackAddress);

                    SwapCopyUchar4( &AudioPlayHeader->StartingSector, TrackData->Address);

                    //
                    // Go to the next track and find the starting point.
                    //

                    TrackData = &Fcb->Vcb->CdromToc->TrackData[Fcb->XAFileNumber + 1];

                    SwapCopyUchar4( &AudioPlayHeader->SectorCount, TrackData->Address);

                    //
                    // Now compute the difference. If there is an error then use
                    // a length of zero.
                    //

                    if (AudioPlayHeader->SectorCount < AudioPlayHeader->StartingSector) {

                        AudioPlayHeader->SectorCount = 0;

                    } else {

                        AudioPlayHeader->SectorCount -= AudioPlayHeader->StartingSector;
                    }

                    //
                    // Use the sector count to determine the MSF length. Bias by 150 to make
                    // it an "lbn" since the conversion routine corrects for Lbn 0 == 0:2:0;
                    //

                    Address = AudioPlayHeader->SectorCount - 150;
                    CdLbnToMmSsFf( Address, AudioPlayHeader->TrackLength);

                    //
                    // Copy only the tail of the header that lies at or past
                    // the caller's starting offset.
                    //

                    ThisByteCount = sizeof( RIFF_HEADER ) - (ULONG) CurrentOffset;

                    RtlCopyMemory( UserBuffer,
                                   Add2Ptr( AudioPlayHeader,
                                            sizeof( RIFF_HEADER ) - ThisByteCount,
                                            PCHAR ),
                                   ThisByteCount );

                //
                // CD-XA CDDA
                //

                } else {

                    //
                    // The WAVE header format is actually much closer to an audio play
                    // header in format but we only need to modify the filesize fields.
                    //

                    RiffHeader = &LocalRiffHeader;

                    //
                    // Copy the data header into our local buffer and add the file size to it.
                    //

                    RtlCopyMemory( RiffHeader,
                                   CdXAAudioPhileHeader,
                                   sizeof( RIFF_HEADER ));

                    RiffHeader->ChunkSize += Fcb->FileSize.LowPart;
                    RiffHeader->RawSectors += Fcb->FileSize.LowPart;

                    ThisByteCount = sizeof( RIFF_HEADER ) - (ULONG) CurrentOffset;
                    RtlCopyMemory( UserBuffer,
                                   Add2Ptr( RiffHeader,
                                            sizeof( RIFF_HEADER ) - ThisByteCount,
                                            PCHAR ),
                                   ThisByteCount );
                }

            //
            // CD-XA non-audio
            //

            } else {

                NT_ASSERT( FlagOn( Fcb->FcbState, FCB_STATE_MODE2_FILE | FCB_STATE_MODE2FORM2_FILE ));

                RiffHeader = &LocalRiffHeader;

                //
                // Copy the data header into our local buffer and add the file size to it.
                //

                RtlCopyMemory( RiffHeader,
                               CdXAFileHeader,
                               sizeof( RIFF_HEADER ));

                RiffHeader->ChunkSize += Fcb->FileSize.LowPart;
                RiffHeader->RawSectors += Fcb->FileSize.LowPart;

                RiffHeader->Attributes = (USHORT) Fcb->XAAttributes;
                RiffHeader->FileNumber = (UCHAR) Fcb->XAFileNumber;

                ThisByteCount = sizeof( RIFF_HEADER ) - (ULONG) CurrentOffset;
                RtlCopyMemory( UserBuffer,
                               Add2Ptr( RiffHeader,
                                        sizeof( RIFF_HEADER ) - ThisByteCount,
                                        PCHAR ),
                               ThisByteCount );
            }

            //
            // Adjust the starting offset and byte count to reflect that
            // we copied over the RIFF bytes.
            //

            UserBuffer = Add2Ptr( UserBuffer, ThisByteCount, PVOID );
            UserBufferOffset += ThisByteCount;
            CurrentOffset += ThisByteCount;
            RemainingByteCount -= ThisByteCount;
        }

        //
        // Set up the appropriate trackmode
        //

        TrackMode = CdFileTrackMode(Fcb);

        //
        // Loop while there are more bytes to transfer.
        //

        while (RemainingByteCount != 0) {

            //
            // Call prepare buffers to set up the next entries
            // in the IoRuns array. Remember if there are any
            // unaligned entries. If we're just retrying the previous
            // runs with a different track mode, then don't do anything here.
            //

            if (!TryingYellowbookMode2) {

                RtlZeroMemory( IoRuns, sizeof( IoRuns ));
                RtlZeroMemory( RawReads, sizeof( RawReads ));

                CdPrepareXABuffers( IrpContext,
                                    IrpContext->Irp,
                                    Fcb,
                                    UserBuffer,
                                    UserBufferOffset,
                                    CurrentOffset,
                                    RemainingByteCount,
                                    IoRuns,
                                    &CleanupRunCount,
                                    &ThisByteCount );
            }

            //
            // Perform multiple Io to read in the data. Note that
            // there may be no Io to do if we were able to use an
            // existing buffer from the Vcb.
            //

            if (CleanupRunCount != 0) {

                RunCount = CleanupRunCount;

                CdMultipleXAAsync( IrpContext,
                                   RunCount,
                                   IoRuns,
                                   RawReads,
                                   TrackMode );
                //
                // Wait for the request to complete.
                //

                CdWaitSync( IrpContext );

                Status = IrpContext->Irp->IoStatus.Status;

                //
                // Exit this loop if there is an error.
                //

                if (!NT_SUCCESS( Status )) {

                    if (!TryingYellowbookMode2 &&
                        FlagOn( Fcb->FcbState, FCB_STATE_MODE2FORM2_FILE )) {

                        //
                        // There are wacky cases where someone has mastered as CD-XA
                        // but the sectors they claim are Mode2Form2 are really, according
                        // to ATAPI devices, Yellowbook Mode2. We will try once more
                        // with these. Kodak PHOTO-CD has been observed to do this.
                        //

                        TryingYellowbookMode2 = TRUE;
                        TrackMode = YellowMode2;

                        //
                        // Clear our 'cumulative' error status value
                        //

                        IrpContext->IoContext->Status = STATUS_SUCCESS;

                        continue;
                    }

                    try_return( NOTHING );
                }

                CleanupRunCount = 0;

                if (TryingYellowbookMode2) {

                    //
                    // We successfully got data when we tried switching the trackmode,
                    // so change the state of the FCB to remember that.
                    //

                    SetFlag( Fcb->FcbState, FCB_STATE_MODE2_FILE );
                    ClearFlag( Fcb->FcbState, FCB_STATE_MODE2FORM2_FILE );

                    TryingYellowbookMode2 = FALSE;
                }

                //
                // Perform post read operations on the IoRuns if
                // necessary.
                //

                CdFinishBuffers( IrpContext, IoRuns, RunCount, FALSE, TRUE );
            }

            //
            // Adjust our loop variants.
            //

            RemainingByteCount -= ThisByteCount;
            CurrentOffset += ThisByteCount;
            UserBuffer = Add2Ptr( UserBuffer, ThisByteCount, PVOID );
            UserBufferOffset += ThisByteCount;
        }

        //
        // Always flush the hardware cache.
        //

        KeFlushIoBuffers( IrpContext->Irp->MdlAddress, TRUE, FALSE );

    try_exit: NOTHING;
    } _SEH2_FINALLY {

        //
        // Perform final cleanup on the IoRuns if necessary.
        //

        if (CleanupRunCount != 0) {

            CdFinishBuffers( IrpContext, IoRuns, CleanupRunCount, TRUE, FALSE );
        }
    } _SEH2_END;

    return Status;
}
996
997 _Requires_lock_held_(_Global_critical_region_)
998 NTSTATUS
999 CdVolumeDasdWrite (
1000 _In_ PIRP_CONTEXT IrpContext,
1001 _In_ PFCB Fcb,
1002 _In_ LONGLONG StartingOffset,
1003 _In_ ULONG ByteCount
1004 )
1005
1006 /*++
1007
1008 Routine Description:
1009
1010 This routine performs the non-cached writes to 'cooked' sectors (2048 bytes
1011 per sector). This is done by filling the IoRun for the desired request
1012 and send it down to the device.
1013
1014 Arguments:
1015
1016 Fcb - Fcb representing the file to read.
1017
1018 StartingOffset - Logical offset in the file to read from.
1019
1020 ByteCount - Number of bytes to read.
1021
1022 Return Value:
1023
1024 NTSTATUS - Status indicating the result of the operation.
1025
1026 --*/
1027
1028 {
1029 NTSTATUS Status;
1030 IO_RUN IoRun;
1031
1032 PAGED_CODE();
1033
1034 //
1035 // We want to make sure the user's buffer is locked in all cases.
1036 //
1037
1038 CdLockUserBuffer( IrpContext, ByteCount, IoReadAccess );
1039
1040 //
1041 // The entire Io can be contained in a single run, just pass
1042 // the Io down to the driver. Send the driver down
1043 // and wait on the result if this is synchronous.
1044 //
1045
1046 RtlZeroMemory( &IoRun, sizeof( IoRun ) );
1047
1048 IoRun.DiskOffset = StartingOffset;
1049 IoRun.DiskByteCount = ByteCount;
1050
1051 CdSingleAsync( IrpContext, &IoRun, Fcb );
1052
1053 //
1054 // Wait if we are synchronous, otherwise return
1055 //
1056
1057 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT )) {
1058
1059 CdWaitSync( IrpContext );
1060
1061 Status = IrpContext->Irp->IoStatus.Status;
1062
1063 //
1064 // Our completion routine will free the Io context but
1065 // we do want to return STATUS_PENDING.
1066 //
1067
1068 } else {
1069
1070 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_ALLOC_IO );
1071 Status = STATUS_PENDING;
1072 }
1073
1074 return Status;
1075 }
1076
1077
1078 \f
1079 BOOLEAN
1080 CdReadSectors (
1081 _In_ PIRP_CONTEXT IrpContext,
1082 _In_ LONGLONG StartingOffset,
1083 _In_ ULONG ByteCount,
1084 _In_ BOOLEAN ReturnError,
1085 _Out_writes_bytes_(ByteCount) PVOID Buffer,
1086 _In_ PDEVICE_OBJECT TargetDeviceObject
1087 )
1088
1089 /*++
1090
1091 Routine Description:
1092
1093 This routine is called to transfer sectors from the disk to a
1094 specified buffer. It is used for mount and volume verify operations.
1095
1096 This routine is synchronous, it will not return until the operation
1097 is complete or until the operation fails.
1098
1099 The routine allocates an IRP and then passes this IRP to a lower
1100 level driver. Errors may occur in the allocation of this IRP or
1101 in the operation of the lower driver.
1102
1103 Arguments:
1104
1105 StartingOffset - Logical offset on the disk to start the read. This
1106 must be on a sector boundary, no check is made here.
1107
1108 ByteCount - Number of bytes to read. This is an integral number of
1109 2K sectors, no check is made here to confirm this.
1110
1111 ReturnError - Indicates whether we should return TRUE or FALSE
1112 to indicate an error or raise an error condition. This only applies
1113 to the result of the IO. Any other error may cause a raise.
1114
1115 Buffer - Buffer to transfer the disk data into.
1116
1117 TargetDeviceObject - The device object for the volume to be read.
1118
1119 Return Value:
1120
1121 BOOLEAN - Depending on 'RaiseOnError' flag above. TRUE if operation
1122 succeeded, FALSE otherwise.
1123
1124 --*/
1125
1126 {
1127 NTSTATUS Status;
1128 KEVENT Event;
1129 PIRP Irp;
1130
1131 PAGED_CODE();
1132
1133 //
1134 // Initialize the event.
1135 //
1136
1137 KeInitializeEvent( &Event, NotificationEvent, FALSE );
1138
1139 //
1140 // Attempt to allocate the IRP. If unsuccessful, raise
1141 // STATUS_INSUFFICIENT_RESOURCES.
1142 //
1143
1144 Irp = IoBuildSynchronousFsdRequest( IRP_MJ_READ,
1145 TargetDeviceObject,
1146 Buffer,
1147 ByteCount,
1148 (PLARGE_INTEGER) &StartingOffset,
1149 &Event,
1150 &IrpContext->Irp->IoStatus );
1151
1152 if (Irp == NULL) {
1153
1154 CdRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1155 }
1156
1157 //
1158 // Ignore the change line (verify) for mount and verify requests
1159 //
1160
1161 SetFlag( IoGetNextIrpStackLocation( Irp )->Flags, SL_OVERRIDE_VERIFY_VOLUME );
1162
1163 //
1164 // Send the request down to the driver. If an error occurs return
1165 // it to the caller.
1166 //
1167
1168 Status = IoCallDriver( TargetDeviceObject, Irp );
1169
1170 //
1171 // If the status was STATUS_PENDING then wait on the event.
1172 //
1173
1174 if (Status == STATUS_PENDING) {
1175
1176 Status = KeWaitForSingleObject( &Event,
1177 Executive,
1178 KernelMode,
1179 FALSE,
1180 NULL );
1181
1182 //
1183 // On a successful wait pull the status out of the IoStatus block.
1184 //
1185
1186 if (NT_SUCCESS( Status )) {
1187
1188 Status = IrpContext->Irp->IoStatus.Status;
1189 }
1190 }
1191
1192 //
1193 // Check whether we should raise in the error case.
1194 //
1195
1196 if (!NT_SUCCESS( Status )) {
1197
1198 if (!ReturnError) {
1199
1200 CdNormalizeAndRaiseStatus( IrpContext, Status );
1201 }
1202
1203 //
1204 // We don't raise, but return FALSE to indicate an error.
1205 //
1206
1207 return FALSE;
1208
1209 //
1210 // The operation completed successfully.
1211 //
1212
1213 } else {
1214
1215 return TRUE;
1216 }
1217 }
1218
1219 \f
1220 NTSTATUS
1221 CdCreateUserMdl (
1222 _In_ PIRP_CONTEXT IrpContext,
1223 _In_ ULONG BufferLength,
1224 _In_ BOOLEAN RaiseOnError,
1225 _In_ LOCK_OPERATION Operation
1226 )
1227
1228 /*++
1229
1230 Routine Description:
1231
1232 This routine locks the specified buffer for read access (we only write into
1233 the buffer). The file system requires this routine since it does not
1234 ask the I/O system to lock its buffers for direct I/O. This routine
1235 may only be called from the Fsd while still in the user context.
1236
1237 This routine is only called if there is not already an Mdl.
1238
1239 Arguments:
1240
1241 BufferLength - Length of user buffer.
1242
1243 RaiseOnError - Indicates if our caller wants this routine to raise on
1244 an error condition.
1245
1246 Operation - IoWriteAccess or IoReadAccess
1247
1248 Return Value:
1249
1250 NTSTATUS - Status from this routine. Error status only returned if
1251 RaiseOnError is FALSE.
1252
1253 --*/
1254
1255 {
1256 NTSTATUS Status = STATUS_INSUFFICIENT_RESOURCES;
1257 PMDL Mdl;
1258
1259 PAGED_CODE();
1260
1261 UNREFERENCED_PARAMETER( Operation );
1262 UNREFERENCED_PARAMETER( IrpContext );
1263
1264 ASSERT_IRP_CONTEXT( IrpContext );
1265 ASSERT_IRP( IrpContext->Irp );
1266 NT_ASSERT( IrpContext->Irp->MdlAddress == NULL );
1267
1268 //
1269 // Allocate the Mdl, and Raise if we fail.
1270 //
1271
1272 Mdl = IoAllocateMdl( IrpContext->Irp->UserBuffer,
1273 BufferLength,
1274 FALSE,
1275 FALSE,
1276 IrpContext->Irp );
1277
1278 if (Mdl != NULL) {
1279
1280 //
1281 // Now probe the buffer described by the Irp. If we get an exception,
1282 // deallocate the Mdl and return the appropriate "expected" status.
1283 //
1284
1285 _SEH2_TRY {
1286
1287 MmProbeAndLockPages( Mdl, IrpContext->Irp->RequestorMode, IoWriteAccess );
1288
1289 Status = STATUS_SUCCESS;
1290
1291 #ifdef _MSC_VER
1292 #pragma warning(suppress: 6320)
1293 #endif
1294 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
1295
1296 Status = _SEH2_GetExceptionCode();
1297
1298 IoFreeMdl( Mdl );
1299 IrpContext->Irp->MdlAddress = NULL;
1300
1301 if (!FsRtlIsNtstatusExpected( Status )) {
1302
1303 Status = STATUS_INVALID_USER_BUFFER;
1304 }
1305 } _SEH2_END;
1306 }
1307
1308 //
1309 // Check if we are to raise or return
1310 //
1311
1312 if (Status != STATUS_SUCCESS) {
1313
1314 if (RaiseOnError) {
1315
1316 CdRaiseStatus( IrpContext, Status );
1317 }
1318 }
1319
1320 //
1321 // Return the status code.
1322 //
1323
1324 return Status;
1325 }
1326
1327 \f
1328 NTSTATUS
1329 CdPerformDevIoCtrlEx (
1330 _In_ PIRP_CONTEXT IrpContext,
1331 _In_ ULONG IoControlCode,
1332 _In_ PDEVICE_OBJECT Device,
1333 _In_reads_bytes_opt_(InputBufferLength) PVOID InputBuffer,
1334 _In_ ULONG InputBufferLength,
1335 _Out_writes_bytes_opt_(OutputBufferLength) PVOID OutputBuffer,
1336 _In_ ULONG OutputBufferLength,
1337 _In_ BOOLEAN InternalDeviceIoControl,
1338 _In_ BOOLEAN OverrideVerify,
1339 _Out_opt_ PIO_STATUS_BLOCK Iosb
1340 )
1341
1342 /*++
1343
1344 Routine Description:
1345
1346 This routine is called to perform DevIoCtrl functions internally within
1347 the filesystem. We take the status from the driver and return it to our
1348 caller.
1349
1350 Arguments:
1351
1352 IoControlCode - Code to send to driver.
1353
1354 Device - This is the device to send the request to.
1355
1356 OutPutBuffer - Pointer to output buffer.
1357
1358 OutputBufferLength - Length of output buffer above.
1359
1360 InternalDeviceIoControl - Indicates if this is an internal or external
1361 Io control code.
1362
1363 OverrideVerify - Indicates if we should tell the driver not to return
1364 STATUS_VERIFY_REQUIRED for mount and verify.
1365
1366 Iosb - If specified, we return the results of the operation here.
1367
1368 Return Value:
1369
1370 NTSTATUS - Status returned by next lower driver.
1371
1372 --*/
1373
1374 {
1375 NTSTATUS Status;
1376 PIRP Irp;
1377 KEVENT Event;
1378 IO_STATUS_BLOCK LocalIosb;
1379 PIO_STATUS_BLOCK IosbToUse = &LocalIosb;
1380
1381 PAGED_CODE();
1382
1383 UNREFERENCED_PARAMETER( IrpContext );
1384
1385 //
1386 // Check if the user gave us an Iosb.
1387 //
1388
1389 if (ARGUMENT_PRESENT( Iosb )) {
1390
1391 IosbToUse = Iosb;
1392 }
1393
1394 IosbToUse->Status = 0;
1395 IosbToUse->Information = 0;
1396
1397 KeInitializeEvent( &Event, NotificationEvent, FALSE );
1398
1399 Irp = IoBuildDeviceIoControlRequest( IoControlCode,
1400 Device,
1401 InputBuffer,
1402 InputBufferLength,
1403 OutputBuffer,
1404 OutputBufferLength,
1405 InternalDeviceIoControl,
1406 &Event,
1407 IosbToUse );
1408
1409 if (Irp == NULL) {
1410
1411 return STATUS_INSUFFICIENT_RESOURCES;
1412 }
1413
1414 if (OverrideVerify) {
1415
1416 SetFlag( IoGetNextIrpStackLocation( Irp )->Flags, SL_OVERRIDE_VERIFY_VOLUME );
1417 }
1418
1419 Status = IoCallDriver( Device, Irp );
1420
1421 //
1422 // We check for device not ready by first checking Status
1423 // and then if status pending was returned, the Iosb status
1424 // value.
1425 //
1426
1427 if (Status == STATUS_PENDING) {
1428
1429 (VOID) KeWaitForSingleObject( &Event,
1430 Executive,
1431 KernelMode,
1432 FALSE,
1433 (PLARGE_INTEGER)NULL );
1434
1435 Status = IosbToUse->Status;
1436 }
1437
1438 NT_ASSERT( !(OverrideVerify && (STATUS_VERIFY_REQUIRED == Status)));
1439
1440 return Status;
1441 }
1442
1443
1444 NTSTATUS
1445 FASTCALL
1446 CdPerformDevIoCtrl (
1447 _In_ PIRP_CONTEXT IrpContext,
1448 _In_ ULONG IoControlCode,
1449 _In_ PDEVICE_OBJECT Device,
1450 _Out_writes_bytes_opt_(OutputBufferLength) PVOID OutputBuffer,
1451 _In_ ULONG OutputBufferLength,
1452 _In_ BOOLEAN InternalDeviceIoControl,
1453 _In_ BOOLEAN OverrideVerify,
1454 _Out_opt_ PIO_STATUS_BLOCK Iosb
1455 )
1456 {
1457 PAGED_CODE();
1458
1459 return CdPerformDevIoCtrlEx( IrpContext,
1460 IoControlCode,
1461 Device,
1462 NULL,
1463 0,
1464 OutputBuffer,
1465 OutputBufferLength,
1466 InternalDeviceIoControl,
1467 OverrideVerify,
1468 Iosb);
1469 }
1470
1471
1472 \f
1473 //
1474 // Local support routine
1475 //
1476
_Requires_lock_held_(_Global_critical_region_)
BOOLEAN
CdPrepareBuffers (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PIRP Irp,
    _In_ PFCB Fcb,
    _In_reads_bytes_(ByteCount) PVOID UserBuffer,
    _In_ ULONG UserBufferOffset,
    _In_ LONGLONG StartingOffset,
    _In_ ULONG ByteCount,
    _Out_ PIO_RUN IoRuns,
    _Out_ PULONG RunCount,
    _Out_ PULONG ThisByteCount
    )

/*++

Routine Description:

    This routine is the worker routine which looks up each run of an IO
    request and stores an entry for it in the IoRuns array.  If the run
    begins on an unaligned disk boundary then we will allocate a buffer
    and Mdl for the unaligned portion and put it in the IoRuns entry.

    This routine will raise CANT_WAIT if an unaligned transfer is encountered
    and this request can't wait.  It will also raise
    STATUS_INSUFFICIENT_RESOURCES if an Mdl for a temporary buffer cannot
    be allocated; partially built IoRuns entries are released by the
    caller's cleanup path (CdFinishBuffers).

Arguments:

    Irp - Originating Irp for this request.

    Fcb - This is the Fcb for this data stream.  It may be a file, directory,
        path table or the volume file.

    UserBuffer - Current position in the user's buffer.

    UserBufferOffset - Offset from the start of the original user buffer.

    StartingOffset - Offset in the stream to begin the read.

    ByteCount - Number of bytes to read.  We will fill the IoRuns array up
        to this point.  We will stop early if we exceed the maximum number
        of parallel Ios we support (MAX_PARALLEL_IOS).

    IoRuns - Pointer to the IoRuns array.  The entire array is zeroed when
        this routine is called.

    RunCount - Number of entries in the IoRuns array filled here.

    ThisByteCount - Number of bytes described by the IoRun entries.  Will
        not exceed the ByteCount passed in.

Return Value:

    BOOLEAN - TRUE if one of the entries is an unaligned buffer (provided
        this is synchronous).  FALSE otherwise.

--*/

{
    BOOLEAN FoundUnaligned = FALSE;
    PIO_RUN ThisIoRun = IoRuns;

    //
    //  Following indicate where we are in the current transfer.  Current
    //  position in the file and number of bytes yet to transfer from
    //  this position.
    //

    ULONG RemainingByteCount = ByteCount;
    LONGLONG CurrentFileOffset = StartingOffset;

    //
    //  Following indicate the state of the user's buffer.  We have
    //  the destination of the next transfer and its offset in the
    //  buffer.  We also have the next available position in the buffer
    //  available for a scratch buffer.  We will align this up to a sector
    //  boundary.
    //

    PVOID CurrentUserBuffer = UserBuffer;
    ULONG CurrentUserBufferOffset = UserBufferOffset;

    //
    //  The following is the next contiguous bytes on the disk to
    //  transfer.  Read from the allocation package.
    //

    LONGLONG DiskOffset = 0;
    ULONG CurrentByteCount = RemainingByteCount;

    PAGED_CODE();

    //
    //  Initialize the RunCount and ByteCount.
    //

    *RunCount = 0;
    *ThisByteCount = 0;

    //
    //  Loop while there are more bytes to process or there are
    //  available entries in the IoRun array.
    //

    while (TRUE) {

        *RunCount += 1;

        //
        //  Initialize the current position in the IoRuns array.
        //  Find the user's buffer for this portion of the transfer.
        //

        ThisIoRun->UserBuffer = CurrentUserBuffer;

        //
        //  Find the allocation information for the current offset in the
        //  stream.
        //

        CdLookupAllocation( IrpContext,
                            Fcb,
                            CurrentFileOffset,
                            &DiskOffset,
                            &CurrentByteCount );

        //
        //  Limit ourselves to the data requested.
        //

        if (CurrentByteCount > RemainingByteCount) {

            CurrentByteCount = RemainingByteCount;
        }

        //
        //  Handle the case where this is an unaligned transfer.  The
        //  following must all be true for this to be an aligned transfer.
        //
        //      Disk offset on a 2048 byte boundary (Start of transfer)
        //
        //      Byte count is a multiple of 2048 (Length of transfer)
        //
        //  If the ByteCount is at least one sector then do the
        //  unaligned transfer only for the tail.  We can use the
        //  user's buffer for the aligned portion.
        //

        if (FlagOn( (ULONG) DiskOffset, SECTOR_MASK ) ||
            (FlagOn( (ULONG) CurrentByteCount, SECTOR_MASK ) &&
            (CurrentByteCount < SECTOR_SIZE))) {

            NT_ASSERT( SafeNodeType(Fcb) != CDFS_NTC_FCB_INDEX);

            //
            //  If we can't wait then raise.
            //

            if (!FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT )) {

                CdRaiseStatus( IrpContext, STATUS_CANT_WAIT );
            }

            //
            //  Remember the offset and the number of bytes out of
            //  the transfer buffer to copy into the user's buffer.
            //  We will truncate the current read to end on a sector
            //  boundary.
            //

            ThisIoRun->TransferBufferOffset = SectorOffset( DiskOffset );

            //
            //  Make sure this transfer ends on a sector boundary.
            //

            ThisIoRun->DiskOffset = LlSectorTruncate( DiskOffset );

            //
            //  We need to allocate an auxiliary buffer for the next sector.
            //  Read up to a page containing the partial data.
            //

            ThisIoRun->DiskByteCount = SectorAlign( ThisIoRun->TransferBufferOffset + CurrentByteCount );

            if (ThisIoRun->DiskByteCount > PAGE_SIZE) {

                ThisIoRun->DiskByteCount = PAGE_SIZE;
            }

            //
            //  Clip the bytes we can deliver this pass to what actually
            //  fits in the (at most one page) disk transfer.
            //

            if (ThisIoRun->TransferBufferOffset + CurrentByteCount > ThisIoRun->DiskByteCount) {

                CurrentByteCount = ThisIoRun->DiskByteCount - ThisIoRun->TransferBufferOffset;
            }

            ThisIoRun->TransferByteCount = CurrentByteCount;

            //
            //  Allocate a buffer for the non-aligned transfer.
            //  (FsRtlAllocatePoolWithTag raises rather than returning NULL
            //  on failure.)
            //

            ThisIoRun->TransferBuffer = FsRtlAllocatePoolWithTag( CdNonPagedPool, PAGE_SIZE, TAG_IO_BUFFER );

            //
            //  Allocate and build the Mdl to describe this buffer.  On
            //  failure we raise; the buffer just allocated is released
            //  later by CdFinishBuffers.
            //

            ThisIoRun->TransferMdl = IoAllocateMdl( ThisIoRun->TransferBuffer,
                                                    PAGE_SIZE,
                                                    FALSE,
                                                    FALSE,
                                                    NULL );

            ThisIoRun->TransferVirtualAddress = ThisIoRun->TransferBuffer;

            if (ThisIoRun->TransferMdl == NULL) {

                IrpContext->Irp->IoStatus.Information = 0;
                CdRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
            }

            MmBuildMdlForNonPagedPool( ThisIoRun->TransferMdl );

            //
            //  Remember we found an unaligned transfer.
            //

            FoundUnaligned = TRUE;

        //
        //  Otherwise we use the buffer and Mdl from the original request.
        //

        } else {

            //
            //  Truncate the read length to a sector-aligned value.  We know
            //  the length must be at least one sector or we wouldn't be
            //  here now.
            //

            CurrentByteCount = SectorTruncate( CurrentByteCount );

            //
            //  Read these sectors from the disk.
            //

            ThisIoRun->DiskOffset = DiskOffset;
            ThisIoRun->DiskByteCount = CurrentByteCount;

            //
            //  Use the user's buffer and Mdl as our transfer buffer
            //  and Mdl.
            //

            ThisIoRun->TransferBuffer = CurrentUserBuffer;
            ThisIoRun->TransferMdl = Irp->MdlAddress;
            ThisIoRun->TransferVirtualAddress = Add2Ptr( Irp->UserBuffer,
                                                         CurrentUserBufferOffset,
                                                         PVOID );
        }

        //
        //  Update our position in the transfer and the RunCount and
        //  ByteCount for the user.
        //

        RemainingByteCount -= CurrentByteCount;

        //
        //  Break out if no more positions in the IoRuns array or
        //  we have all of the bytes accounted for.
        //

        *ThisByteCount += CurrentByteCount;

        if ((RemainingByteCount == 0) || (*RunCount == MAX_PARALLEL_IOS)) {

            break;
        }

        //
        //  Update our pointers for the user's buffer.
        //

        ThisIoRun += 1;
        CurrentUserBuffer = Add2Ptr( CurrentUserBuffer, CurrentByteCount, PVOID );
        CurrentUserBufferOffset += CurrentByteCount;
        CurrentFileOffset += CurrentByteCount;
    }

    return FoundUnaligned;
}
1771
1772 \f
1773 //
1774 // Local support routine
1775 //
1776
_Requires_lock_held_(_Global_critical_region_)
VOID
CdPrepareXABuffers (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PIRP Irp,
    _In_ PFCB Fcb,
    _In_reads_bytes_(ByteCount) PVOID UserBuffer,
    _In_ ULONG UserBufferOffset,
    _In_ LONGLONG StartingOffset,
    _In_ ULONG ByteCount,
    _Out_ PIO_RUN IoRuns,
    _Out_ PULONG RunCount,
    _Out_ PULONG ThisByteCount
    )

/*++

Routine Description:

    This routine is the worker routine which looks up the individual runs
    of an IO request and stores an entry for it in the IoRuns array.  The
    worker routine is for XA files where we need to convert the raw offset
    in the file to logical cooked sectors.  We store one raw sector in
    the Vcb.  If the current read is to that sector then we can simply copy
    whatever bytes are needed from that sector.

    Note that the file offsets seen by the user are biased by the size of
    the RIFF header we synthesize at the front of an XA file; this routine
    subtracts that header size before doing any sector arithmetic.

Arguments:

    Irp - Originating Irp for this request.

    Fcb - This is the Fcb for this data stream.  It must be a data stream.

    UserBuffer - Current position in the user's buffer.

    UserBufferOffset - Offset of this buffer from the beginning of the user's
        buffer for the original request.

    StartingOffset - Offset in the stream to begin the read.

    ByteCount - Number of bytes to read.  We will fill the IoRuns array up
        to this point.  We will stop early if we exceed the maximum number
        of parallel Ios we support (MAX_PARALLEL_IOS).

    IoRuns - Pointer to the IoRuns array.  The entire array is zeroed when
        this routine is called.

    RunCount - Number of entries in the IoRuns array filled here.

    ThisByteCount - Number of bytes described by the IoRun entries.  Will
        not exceed the ByteCount passed in.

Return Value:

    None

--*/

{
    PIO_RUN ThisIoRun = IoRuns;
    BOOLEAN PerformedCopy;

    //
    //  The following deal with where we are in the range of raw sectors.
    //  Note that we will bias the input file offset by the RIFF header
    //  to deal directly with the raw sectors.
    //

    ULONG RawSectorOffset;
    ULONG RemainingRawByteCount = ByteCount;
    LONGLONG CurrentRawOffset = StartingOffset - sizeof( RIFF_HEADER );

    //
    //  The following is the offset into the cooked sectors for the file.
    //

    LONGLONG CurrentCookedOffset;
    ULONG RemainingCookedByteCount;

    //
    //  Following indicate the state of the user's buffer.  We have
    //  the destination of the next transfer and its offset in the
    //  buffer.  We also have the next available position in the buffer
    //  available for a scratch buffer.
    //

    PVOID CurrentUserBuffer = UserBuffer;
    ULONG CurrentUserBufferOffset = UserBufferOffset;

    //
    //  The following is the next contiguous bytes on the disk to
    //  transfer.  These are represented by cooked byte offset and length.
    //  We also compute the number of raw bytes in the current transfer.
    //

    LONGLONG DiskOffset = 0;
    ULONG CurrentCookedByteCount = 0;
    ULONG CurrentRawByteCount;

    PAGED_CODE();

    //
    //  We need to maintain our position as we walk through the sectors on the disk.
    //  We keep separate values for the cooked offset as well as the raw offset.
    //  These are initialized on sector boundaries and we move through these
    //  the file sector-by-sector.
    //
    //  Try to do 32-bit math.
    //

    if (((PLARGE_INTEGER) &CurrentRawOffset)->HighPart == 0) {

        //
        //  Prefix/fast: Note that the following are safe since we only
        //  take this path for 32bit offsets.
        //

        CurrentRawOffset = (LONGLONG) ((ULONG) CurrentRawOffset / RAW_SECTOR_SIZE);

#ifdef _MSC_VER
#pragma prefast( suppress: __WARNING_RESULTOFSHIFTCASTTOLARGERSIZE, "This is fine beacuse raw sector size > sector shift" )
#endif
        CurrentCookedOffset = (LONGLONG) ((ULONG) CurrentRawOffset << SECTOR_SHIFT );

        CurrentRawOffset = (LONGLONG) ((ULONG) CurrentRawOffset * RAW_SECTOR_SIZE);

    //
    //  Otherwise we need to do 64-bit math (sigh).
    //

    } else {

        CurrentRawOffset /= RAW_SECTOR_SIZE;

        CurrentCookedOffset = CurrentRawOffset << SECTOR_SHIFT;

        CurrentRawOffset *= RAW_SECTOR_SIZE;
    }

    //
    //  Now compute the full number of sectors to be read.  Count all of the raw
    //  sectors that need to be read and convert to cooked bytes.
    //

    RawSectorOffset = (ULONG) ( StartingOffset - CurrentRawOffset) - sizeof( RIFF_HEADER );
    CurrentRawByteCount = (RawSectorOffset + RemainingRawByteCount + RAW_SECTOR_SIZE - 1) / RAW_SECTOR_SIZE;

    RemainingCookedByteCount = CurrentRawByteCount << SECTOR_SHIFT;

    //
    //  Initialize the RunCount and ByteCount.
    //

    *RunCount = 0;
    *ThisByteCount = 0;

    //
    //  Loop while there are more bytes to process or there are
    //  available entries in the IoRun array.
    //

    while (TRUE) {

        PerformedCopy = FALSE;
        *RunCount += 1;

        //
        //  Initialize the current position in the IoRuns array.  Find the
        //  eventual destination in the user's buffer for this portion of the transfer.
        //

        ThisIoRun->UserBuffer = CurrentUserBuffer;

        //
        //  Find the allocation information for the current offset in the
        //  stream.
        //

        CdLookupAllocation( IrpContext,
                            Fcb,
                            CurrentCookedOffset,
                            &DiskOffset,
                            &CurrentCookedByteCount );
        //
        //  Maybe we got lucky and this is the same sector as in the
        //  Vcb.
        //

        if (DiskOffset == Fcb->Vcb->XADiskOffset) {

            //
            //  We will perform safe synchronization.  Check again that
            //  this is the correct sector (the unlocked check above may
            //  have raced with another reader replacing the cached sector).
            //

            CdLockVcb( IrpContext, Fcb->Vcb );

            if ((DiskOffset == Fcb->Vcb->XADiskOffset) &&
                (Fcb->Vcb->XASector != NULL)) {

                //
                //  Copy any bytes we can from the current sector.
                //

                CurrentRawByteCount = RAW_SECTOR_SIZE - RawSectorOffset;

                //
                //  Check whether we don't go to the end of the sector.
                //

                if (CurrentRawByteCount > RemainingRawByteCount) {

                    CurrentRawByteCount = RemainingRawByteCount;
                }

                RtlCopyMemory( CurrentUserBuffer,
                               Add2Ptr( Fcb->Vcb->XASector, RawSectorOffset, PCHAR ),
                               CurrentRawByteCount );

                CdUnlockVcb( IrpContext, Fcb->Vcb );

                //
                //  Adjust the run count and pointer in the IoRuns array
                //  to show that we didn't use a position.
                //

                *RunCount -= 1;
                ThisIoRun -= 1;

                //
                //  Remember that we performed a copy operation.
                //

                PerformedCopy = TRUE;

                CurrentCookedByteCount = SECTOR_SIZE;

            } else {

                //
                //  The safe test showed no available buffer.  Drop down to common code to
                //  perform the Io.
                //

                CdUnlockVcb( IrpContext, Fcb->Vcb );
            }
        }

        //
        //  No work in this pass if we did a copy operation.
        //

        if (!PerformedCopy) {

            //
            //  Limit ourselves by the number of remaining cooked bytes.
            //

            if (CurrentCookedByteCount > RemainingCookedByteCount) {

                CurrentCookedByteCount = RemainingCookedByteCount;
            }

            ThisIoRun->DiskOffset = DiskOffset;
            ThisIoRun->TransferBufferOffset = RawSectorOffset;

            //
            //  We will always need to perform copy operations for XA files.
            //  We allocate an auxiliary buffer to read the start of the
            //  transfer.  Then we can use a range of the user's buffer to
            //  perform the next range of the transfer.  Finally we may
            //  need to allocate a buffer for the tail of the transfer.
            //
            //  We can use the user's buffer (at the current scratch buffer) if the
            //  following are true:
            //
            //  If we are to store the beginning of the raw sector in the user's buffer.
            //  The current scratch buffer precedes the destination in the user's buffer
            //  (and hence also lies within it)
            //  There are enough bytes remaining in the buffer for at least one
            //  raw sector.
            //

            if ((RawSectorOffset == 0) &&
                (RemainingRawByteCount >= RAW_SECTOR_SIZE)) {

                //
                //  We can use the scratch buffer.  We must ensure we don't send down reads
                //  greater than the device can handle, since the driver is unable to split
                //  raw requests.
                //

                if (CurrentCookedByteCount <= Fcb->Vcb->MaximumTransferRawSectors * SECTOR_SIZE) {

                    CurrentRawByteCount = (SectorAlign( CurrentCookedByteCount) >> SECTOR_SHIFT) * RAW_SECTOR_SIZE;

                } else {

                    CurrentCookedByteCount = Fcb->Vcb->MaximumTransferRawSectors * SECTOR_SIZE;
                    CurrentRawByteCount = Fcb->Vcb->MaximumTransferRawSectors * RAW_SECTOR_SIZE;
                }

                //
                //  Now make sure we are within the page transfer limit.
                //

                while (ADDRESS_AND_SIZE_TO_SPAN_PAGES(CurrentUserBuffer, RawSectorAlign( CurrentRawByteCount)) >
                       Fcb->Vcb->MaximumPhysicalPages ) {

                    CurrentRawByteCount -= RAW_SECTOR_SIZE;
                    CurrentCookedByteCount -= SECTOR_SIZE;
                }

                //
                //  Trim the number of bytes to read if it won't fit into the current buffer.  Take
                //  account of the fact that we must read in whole raw sector multiples.
                //

                while (RawSectorAlign( CurrentRawByteCount) > RemainingRawByteCount) {

                    CurrentRawByteCount -= RAW_SECTOR_SIZE;
                    CurrentCookedByteCount -= SECTOR_SIZE;
                }

                //
                //  Now trim the maximum number of raw bytes to the remaining bytes.
                //

                if (CurrentRawByteCount > RemainingRawByteCount) {

                    CurrentRawByteCount = RemainingRawByteCount;
                }

                //
                //  Update the IO run array.  We point to the scratch buffer as
                //  well as the buffer and Mdl in the original Irp.
                //

                ThisIoRun->DiskByteCount = SectorAlign( CurrentCookedByteCount);

                //
                //  Point to the user's buffer and Mdl for this transfer.
                //

                ThisIoRun->TransferBuffer = CurrentUserBuffer;
                ThisIoRun->TransferMdl = Irp->MdlAddress;
                ThisIoRun->TransferVirtualAddress = Add2Ptr( Irp->UserBuffer,
                                                             CurrentUserBufferOffset,
                                                             PVOID);

            } else {

                //
                //  We need to determine the number of bytes to transfer and the
                //  offset into this page to begin the transfer.
                //
                //  We will transfer only one raw sector.
                //

                ThisIoRun->DiskByteCount = SECTOR_SIZE;

                CurrentCookedByteCount = SECTOR_SIZE;

                ThisIoRun->TransferByteCount = RAW_SECTOR_SIZE - RawSectorOffset;
                ThisIoRun->TransferBufferOffset = RawSectorOffset;

                if (ThisIoRun->TransferByteCount > RemainingRawByteCount) {

                    ThisIoRun->TransferByteCount = RemainingRawByteCount;
                }

                CurrentRawByteCount = ThisIoRun->TransferByteCount;

                //
                //  We need to allocate an auxiliary buffer.  We will allocate
                //  a single page.  Then we will build an Mdl to describe the buffer.
                //  (FsRtlAllocatePoolWithTag raises rather than returning NULL
                //  on failure.)
                //

                ThisIoRun->TransferBuffer = FsRtlAllocatePoolWithTag( CdNonPagedPool, PAGE_SIZE, TAG_IO_BUFFER );

                //
                //  Allocate and build the Mdl to describe this buffer.  On
                //  failure we raise; CdFinishBuffers releases the buffer.
                //

                ThisIoRun->TransferMdl = IoAllocateMdl( ThisIoRun->TransferBuffer,
                                                        PAGE_SIZE,
                                                        FALSE,
                                                        FALSE,
                                                        NULL );

                ThisIoRun->TransferVirtualAddress = ThisIoRun->TransferBuffer;

                if (ThisIoRun->TransferMdl == NULL) {

                    IrpContext->Irp->IoStatus.Information = 0;
                    CdRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
                }

                MmBuildMdlForNonPagedPool( ThisIoRun->TransferMdl );
            }
        }

        //
        //  Update the byte count for our caller.
        //

        RemainingRawByteCount -= CurrentRawByteCount;
        *ThisByteCount += CurrentRawByteCount;

        //
        //  Break out if no more positions in the IoRuns array or
        //  we have all of the bytes accounted for.
        //

        if ((RemainingRawByteCount == 0) || (*RunCount == MAX_PARALLEL_IOS)) {

            break;
        }

        //
        //  Update our local pointers to allow for the current range of bytes.
        //

        ThisIoRun += 1;

        CurrentUserBuffer = Add2Ptr( CurrentUserBuffer, CurrentRawByteCount, PVOID );
        CurrentUserBufferOffset += CurrentRawByteCount;

        //
        //  Only the first sector of the request can start at a nonzero
        //  raw-sector offset; every subsequent sector starts at zero.
        //

        RawSectorOffset = 0;

        CurrentCookedOffset += CurrentCookedByteCount;
        RemainingCookedByteCount -= CurrentCookedByteCount;
    }

    return;
}
2212
2213 \f
2214 //
2215 // Local support routine
2216 //
2217
BOOLEAN
CdFinishBuffers (
    _In_ PIRP_CONTEXT IrpContext,
    _Inout_ PIO_RUN IoRuns,
    _In_ ULONG RunCount,
    _In_ BOOLEAN FinalCleanup,
    _In_ BOOLEAN SaveXABuffer
    )

/*++

Routine Description:

    This routine is called to perform any data transferred required for
    unaligned Io or to perform the final cleanup of the IoRuns array.

    In all cases this is where we will deallocate any buffer and mdl
    allocated to perform the unaligned transfer.  If this is not the
    final cleanup then we also transfer the bytes to the user buffer
    and flush the hardware cache.

    We walk backwards through the run array because we may be shifting data
    in the user's buffer.  Typical case is where we allocated a buffer for
    the first part of a read and then used the user's buffer for the
    next section (but stored it at the beginning of the buffer).

Arguments:

    IoRuns - Pointer to the IoRuns array.

    RunCount - Number of entries in the IoRuns array filled here.

    FinalCleanup - Indicates if we should be deallocating temporary buffers
        (TRUE) or transferring bytes for an unaligned transfer and
        deallocating the buffers (FALSE).  Flush the system cache if
        transferring data.

    SaveXABuffer - TRUE if we should try to save an XA buffer, FALSE otherwise.
        At most one buffer (the last unaligned entry encountered, i.e. the
        highest-indexed run) is transferred into the Vcb.

Return Value:

    BOOLEAN - TRUE if this request needs the Io buffers to be flushed, FALSE otherwise.

--*/

{
    BOOLEAN FlushIoBuffers = FALSE;

    ULONG RemainingEntries = RunCount;
    PIO_RUN ThisIoRun = &IoRuns[RunCount - 1];
    PVCB Vcb;

    PAGED_CODE();

    //
    //  Walk through each entry in the IoRun array, from last to first.
    //

    while (RemainingEntries != 0) {

        //
        //  We only need to deal with the case of an unaligned transfer;
        //  a zero TransferByteCount means the run read directly into the
        //  user's buffer.
        //

        if (ThisIoRun->TransferByteCount != 0) {

            //
            //  If not the final cleanup then transfer the data to the
            //  user's buffer and remember that we will need to flush
            //  the user's buffer to memory.
            //

            if (!FinalCleanup) {

                RtlCopyMemory( ThisIoRun->UserBuffer,
                               Add2Ptr( ThisIoRun->TransferBuffer,
                                        ThisIoRun->TransferBufferOffset,
                                        PVOID ),
                               ThisIoRun->TransferByteCount );

                FlushIoBuffers = TRUE;
            }

            //
            //  Free any Mdl we may have allocated.  If the Mdl isn't
            //  present then we must have failed during the allocation
            //  phase.
            //

            if (ThisIoRun->TransferMdl != IrpContext->Irp->MdlAddress) {

                if (ThisIoRun->TransferMdl != NULL) {

                    IoFreeMdl( ThisIoRun->TransferMdl );
                }

                //
                //  Now free any buffer we may have allocated.  If the Mdl
                //  doesn't match the original Mdl then free the buffer.
                //

                if (ThisIoRun->TransferBuffer != NULL) {

                    //
                    //  If this is the final buffer for an XA read then store this buffer
                    //  into the Vcb so that we will have it when reading any remaining
                    //  portion of this buffer.  Ownership of the buffer moves to the
                    //  Vcb here, so it must not be freed below.
                    //

                    if (SaveXABuffer) {

                        Vcb = IrpContext->Vcb;

                        CdLockVcb( IrpContext, Vcb );

                        if (Vcb->XASector != NULL) {

                            CdFreePool( &Vcb->XASector );
                        }

                        Vcb->XASector = ThisIoRun->TransferBuffer;
                        Vcb->XADiskOffset = ThisIoRun->DiskOffset;

                        SaveXABuffer = FALSE;

                        CdUnlockVcb( IrpContext, Vcb );

                    //
                    //  Otherwise just free the buffer.
                    //

                    } else {

                        CdFreePool( &ThisIoRun->TransferBuffer );
                    }
                }
            }
        }

        //
        //  Now handle the case where we failed in the process
        //  of allocating associated Irps and Mdls.
        //

        if (ThisIoRun->SavedIrp != NULL) {

            if (ThisIoRun->SavedIrp->MdlAddress != NULL) {

                IoFreeMdl( ThisIoRun->SavedIrp->MdlAddress );
            }

            IoFreeIrp( ThisIoRun->SavedIrp );
        }

        //
        //  Move to the previous IoRun entry.
        //

        ThisIoRun -= 1;
        RemainingEntries -= 1;
    }

    //
    //  If we copied any data then flush the Io buffers.
    //

    return FlushIoBuffers;
}
2386
2387 // Tell prefast this is a completion routine.
2388 IO_COMPLETION_ROUTINE CdSyncCompletionRoutine;
2389
2390 NTSTATUS
2391 NTAPI /* ReactOS Change: GCC Does not support STDCALL by default */
2392 CdSyncCompletionRoutine (
2393 PDEVICE_OBJECT DeviceObject,
2394 PIRP Irp,
2395 PVOID Contxt
2396 )
2397
2398 /*++
2399
2400 Routine Description:
2401
2402 Completion routine for synchronizing back to dispatch.
2403
2404 Arguments:
2405
2406 Contxt - pointer to KEVENT.
2407
2408 Return Value:
2409
2410 STATUS_MORE_PROCESSING_REQUIRED
2411
2412 --*/
2413
2414 {
2415 PKEVENT Event = (PKEVENT)Contxt;
2416 _Analysis_assume_(Contxt != NULL);
2417
2418 UNREFERENCED_PARAMETER( Irp );
2419 UNREFERENCED_PARAMETER( DeviceObject );
2420
2421 KeSetEvent( Event, 0, FALSE );
2422
2423 //
2424 // We don't want IO to get our IRP and free it.
2425 //
2426
2427 return STATUS_MORE_PROCESSING_REQUIRED;
2428 }
2429
2430
2431 _Requires_lock_held_(_Global_critical_region_)
2432 VOID
2433 CdFreeDirCache (
2434 _In_ PIRP_CONTEXT IrpContext
2435 )
2436
2437 /*++
2438
2439 Routine Description:
2440
2441 Safely frees the sector cache buffer.
2442
2443 Arguments:
2444
2445 Return Value:
2446
2447 None.
2448
2449 --*/
2450
2451 {
2452 PAGED_CODE();
2453
2454 if (NULL != IrpContext->Vcb->SectorCacheBuffer) {
2455
2456 CdAcquireCacheForUpdate( IrpContext);
2457 CdFreePool( &IrpContext->Vcb->SectorCacheBuffer);
2458 CdReleaseCache( IrpContext);
2459 }
2460 }
2461
_Requires_lock_held_(_Global_critical_region_)
BOOLEAN
CdReadDirDataThroughCache (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PIO_RUN Run
    )

/*++

Routine Description:

    Reads blocks through the sector cache.  If the data is present, then it
    is copied from memory.  If not present, one of the cache chunks will be
    replaced with a chunk containing the requested region, and the data
    copied from there.

    Only intended for reading *directory* blocks, for the purpose of pre-caching
    directory information, by reading a chunk of blocks which hopefully contains
    other directory blocks, rather than just the (usually) single block requested.

Arguments:

    Run - description of extent required, and buffer to read into.  DiskOffset
          and DiskByteCount are assumed sector (2048 byte) aligned, per the
          IO_RUN contract.

Return Value:

    TRUE if the whole extent was copied into the caller's buffer, FALSE if the
    backing device read failed (caller should fall back to regular I/O).
    Raises STATUS_VERIFY_REQUIRED / STATUS_INSUFFICIENT_RESOURCES /
    STATUS_INVALID_PARAMETER on error.

--*/

{
    PVCB Vcb = IrpContext->Vcb;
    ULONG Lbn = SectorsFromLlBytes( Run->DiskOffset);
    ULONG Remaining = SectorsFromBytes( Run->DiskByteCount);
    PUCHAR UserBuffer = Run->TransferBuffer;

    NTSTATUS Status;
    ULONG Found;
    ULONG BufferSectorOffset;
    ULONG StartBlock;
    ULONG EndBlock;
    ULONG Blocks;

    PIO_STACK_LOCATION IrpSp;
    IO_STATUS_BLOCK Iosb;

    PTRACK_DATA TrackData;

#if DBG
    BOOLEAN JustRead = FALSE;
#endif

    ULONG Index;
    PCD_SECTOR_CACHE_CHUNK Buffer;
    BOOLEAN Result = FALSE;

    PAGED_CODE();

    //
    //  Hold the cache shared while scanning / copying;  we only go exclusive
    //  when a chunk has to be replaced.
    //

    CdAcquireCacheForRead( IrpContext);

    _SEH2_TRY {

        //
        //  Check the cache hasn't gone away due to volume verify failure (which
        //  is the *only* reason it'll go away).  If this is the case we raise
        //  the same error any I/O would return if the cache weren't here.
        //

        if (NULL == Vcb->SectorCacheBuffer) {

            CdRaiseStatus( IrpContext, STATUS_VERIFY_REQUIRED);
        }

        while (Remaining) {

            Buffer = NULL;

            //
            //  Look to see if any portion is currently cached.  A chunk covers
            //  CD_SEC_CHUNK_BLOCKS sectors starting at BaseLbn;  BaseLbn == -1
            //  marks an unused/invalidated chunk.
            //

            for (Index = 0; Index < CD_SEC_CACHE_CHUNKS; Index++) {

                if ((Vcb->SecCacheChunks[ Index].BaseLbn != -1) &&
                    (Vcb->SecCacheChunks[ Index].BaseLbn <= Lbn) &&
                    ((Vcb->SecCacheChunks[ Index].BaseLbn + CD_SEC_CHUNK_BLOCKS) > Lbn)) {

                    Buffer = &Vcb->SecCacheChunks[ Index];
                    break;
                }
            }

            //
            //  If we found any, copy it out and continue.  We may only get the
            //  tail of the extent from this chunk;  the loop picks up the rest.
            //

            if (NULL != Buffer) {

                BufferSectorOffset = Lbn - Buffer->BaseLbn;
                Found = Min( CD_SEC_CHUNK_BLOCKS - BufferSectorOffset, Remaining);

                RtlCopyMemory( UserBuffer,
                               Buffer->Buffer + BytesFromSectors( BufferSectorOffset),
                               BytesFromSectors( Found));

                Remaining -= Found;
                UserBuffer += BytesFromSectors( Found);
                Lbn += Found;
#if DBG
                //
                //  Update stats.  Don't count a hit if we've just read the data in.
                //

                if (!JustRead) {

                    InterlockedIncrement( (LONG*)&Vcb->SecCacheHits);
                }

                JustRead = FALSE;
#endif
                continue;
            }

            //
            //  Missed the cache, so we need to read a new chunk.  Take the cache
            //  resource exclusive while we do so.
            //

            CdReleaseCache( IrpContext);
            CdAcquireCacheForUpdate( IrpContext);
#if DBG
            Vcb->SecCacheMisses += 1;
#endif
            //
            //  Select the chunk to replace and calculate the start block of the
            //  chunk to cache.  We cache blocks which start on Lbns aligned on
            //  multiples of chunk size,  treating block 16 (VRS start) as block
            //  zero.
            //

            Buffer = &Vcb->SecCacheChunks[ Vcb->SecCacheLRUChunkIndex];

            StartBlock = Lbn - ((Lbn - 16) % CD_SEC_CHUNK_BLOCKS);

            //
            //  Make sure we don't try and read past end of the last track.
            //

#ifdef __REACTOS__
            if (Vcb->CdromToc) {
#endif
            //
            //  The entry one past LastTrack is the lead-out; its address is the
            //  first block beyond the data area.
            //

            TrackData = &Vcb->CdromToc->TrackData[(Vcb->CdromToc->LastTrack - Vcb->CdromToc->FirstTrack + 1)];

            SwapCopyUchar4( &EndBlock, &TrackData->Address );

            Blocks = EndBlock - StartBlock;

            if (Blocks > CD_SEC_CHUNK_BLOCKS) {

                Blocks = CD_SEC_CHUNK_BLOCKS;
            }
#ifdef __REACTOS__
            } else {
                // HACK!!!!!!!! Might cause reads to overrun the end of the partition, no idea what consequences that can have.
                Blocks = CD_SEC_CHUNK_BLOCKS;
            }
#endif

            //
            //  Reject a zero-length read or a request below the VRS start.
            //

            if ((0 == Blocks) || (Lbn < 16))  {

                CdRaiseStatus( IrpContext, STATUS_INVALID_PARAMETER);
            }

            //
            //  Now build / send the read request.  The Vcb's dedicated cache
            //  Irp is reused for every chunk read (we hold the cache exclusive,
            //  so only one can be in flight).
            //

            IoReuseIrp( Vcb->SectorCacheIrp, STATUS_SUCCESS);

            KeClearEvent( &Vcb->SectorCacheEvent);
            Vcb->SectorCacheIrp->Tail.Overlay.Thread = PsGetCurrentThread();

            //
            //  Get a pointer to the stack location of the first driver which will be
            //  invoked.  This is where the function codes and the parameters are set.
            //

            IrpSp = IoGetNextIrpStackLocation( Vcb->SectorCacheIrp);
            IrpSp->MajorFunction = (UCHAR) IRP_MJ_READ;

            //
            //  Build an MDL to describe the buffer.
            //

            IoAllocateMdl( Buffer->Buffer,
                           BytesFromSectors( Blocks),
                           FALSE,
                           FALSE,
                           Vcb->SectorCacheIrp);

            if (NULL == Vcb->SectorCacheIrp->MdlAddress) {

                IrpContext->Irp->IoStatus.Information = 0;
                CdRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES);
            }

            //
            //  We're reading/writing into the block cache (paged pool).  Lock the
            //  pages and update the MDL with physical page information.
            //

            _SEH2_TRY {

                MmProbeAndLockPages( Vcb->SectorCacheIrp->MdlAddress,
                                     KernelMode,
                                     (LOCK_OPERATION) IoWriteAccess );
            }
#ifdef _MSC_VER
#pragma warning(suppress: 6320)
#endif
            _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {

                //
                //  Locking failed:  free the MDL and use the NULL MdlAddress
                //  below as the failure signal (can't raise out of a filter).
                //

                IoFreeMdl( Vcb->SectorCacheIrp->MdlAddress );
                Vcb->SectorCacheIrp->MdlAddress = NULL;
            } _SEH2_END;

            if (NULL == Vcb->SectorCacheIrp->MdlAddress) {

                CdRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
            }

            //
            //  Reset the BaseLbn as we can't trust this Buffer's data until the request
            //  is successfully completed.
            //

            Buffer->BaseLbn = (ULONG)-1;

            IrpSp->Parameters.Read.Length = BytesFromSectors( Blocks);
            IrpSp->Parameters.Read.ByteOffset.QuadPart = LlBytesFromSectors( StartBlock);

            //
            //  The completion routine just signals SectorCacheEvent so we can
            //  wait synchronously below.
            //

            IoSetCompletionRoutine( Vcb->SectorCacheIrp,
                                    CdSyncCompletionRoutine,
                                    &Vcb->SectorCacheEvent,
                                    TRUE,
                                    TRUE,
                                    TRUE );

            Vcb->SectorCacheIrp->UserIosb = &Iosb;

            Status = IoCallDriver( Vcb->TargetDeviceObject, Vcb->SectorCacheIrp );

            if (STATUS_PENDING == Status) {


                (VOID)KeWaitForSingleObject( &Vcb->SectorCacheEvent,
                                             Executive,
                                             KernelMode,
                                             FALSE,
                                             NULL );

                Status = Vcb->SectorCacheIrp->IoStatus.Status;
            }

            Vcb->SectorCacheIrp->UserIosb = NULL;

            //
            //  Unlock the pages and free the MDL.
            //

            MmUnlockPages( Vcb->SectorCacheIrp->MdlAddress );
            IoFreeMdl( Vcb->SectorCacheIrp->MdlAddress );
            Vcb->SectorCacheIrp->MdlAddress = NULL;

            if (!NT_SUCCESS( Status )) {

                //
                //  Bail out (Result is still FALSE);  BaseLbn is already -1 so
                //  the partially-filled chunk will never be matched.
                //

                try_leave( Status );
            }

            //
            //  Update the buffer information, and drop the cache resource to shared
            //  to allow in reads.
            //

            Buffer->BaseLbn = StartBlock;
            Vcb->SecCacheLRUChunkIndex = (Vcb->SecCacheLRUChunkIndex + 1) % CD_SEC_CACHE_CHUNKS;

            CdConvertCacheToShared( IrpContext);
#if DBG
            JustRead = TRUE;
#endif
        }

        Result = TRUE;
    }
    _SEH2_FINALLY {

        CdReleaseCache( IrpContext);
    } _SEH2_END;

    return Result;
}
2764
2765
2766 //
2767 // Local support routine
2768 //
2769
2770 _Requires_lock_held_(_Global_critical_region_)
2771 VOID
2772 CdMultipleAsync (
2773 _In_ PIRP_CONTEXT IrpContext,
2774 _In_ PFCB Fcb,
2775 _In_ ULONG RunCount,
2776 _Inout_ PIO_RUN IoRuns
2777 )
2778
2779 /*++
2780
2781 Routine Description:
2782
2783 This routine first does the initial setup required of a Master IRP that is
2784 going to be completed using associated IRPs. This routine should not
2785 be used if only one async request is needed, instead the single read
2786 async routines should be called.
2787
2788 A context parameter is initialized, to serve as a communications area
2789 between here and the common completion routine.
2790
2791 Next this routine reads or writes one or more contiguous sectors from
2792 a device asynchronously, and is used if there are multiple reads for a
2793 master IRP. A completion routine is used to synchronize with the
2794 completion of all of the I/O requests started by calls to this routine.
2795
2796 Also, prior to calling this routine the caller must initialize the
2797 IoStatus field in the Context, with the correct success status and byte
2798 count which are expected if all of the parallel transfers complete
2799 successfully. After return this status will be unchanged if all requests
2800 were, in fact, successful. However, if one or more errors occur, the
2801 IoStatus will be modified to reflect the error status and byte count
2802 from the first run (by Vbo) which encountered an error. I/O status
2803 from all subsequent runs will not be indicated.
2804
2805 Arguments:
2806
2807 RunCount - Supplies the number of multiple async requests
2808 that will be issued against the master irp.
2809
2810 IoRuns - Supplies an array containing the Offset and ByteCount for the
2811 separate requests.
2812
2813 Return Value:
2814
2815 None.
2816
2817 --*/
2818
2819 {
2820 PIO_COMPLETION_ROUTINE CompletionRoutine;
2821 PIO_STACK_LOCATION IrpSp;
2822 PMDL Mdl;
2823 PIRP Irp;
2824 PIRP MasterIrp;
2825 ULONG UnwindRunCount;
2826 BOOLEAN UseSectorCache;
2827
2828 PAGED_CODE();
2829
2830 //
2831 // Set up things according to whether this is truely async.
2832 //
2833
2834 CompletionRoutine = CdMultiSyncCompletionRoutine;
2835
2836 if (!FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT )) {
2837
2838 CompletionRoutine = CdMultiAsyncCompletionRoutine;
2839 }
2840
2841 //
2842 // For directories, use the sector cache.
2843 //
2844
2845 if ((SafeNodeType( Fcb) == CDFS_NTC_FCB_INDEX) &&
2846 (NULL != Fcb->Vcb->SectorCacheBuffer) &&
2847 (VcbMounted == IrpContext->Vcb->VcbCondition)) {
2848
2849 UseSectorCache = TRUE;
2850 }
2851 else {
2852
2853 UseSectorCache = FALSE;
2854 }
2855
2856 //
2857 // Initialize some local variables.
2858 //
2859
2860 MasterIrp = IrpContext->Irp;
2861
2862 //
2863 // Itterate through the runs, doing everything that can fail.
2864 // We let the cleanup in CdFinishBuffers clean up on error.
2865 //
2866
2867 for (UnwindRunCount = 0;
2868 UnwindRunCount < RunCount;
2869 UnwindRunCount += 1) {
2870
2871 if (UseSectorCache) {
2872
2873 if (!CdReadDirDataThroughCache( IrpContext, &IoRuns[ UnwindRunCount])) {
2874
2875 //
2876 // Turn off using directory cache and restart all over again.
2877 //
2878
2879 UseSectorCache = FALSE;
2880 UnwindRunCount = 0;
2881 }
2882
2883 continue;
2884 }
2885
2886 //
2887 // Create an associated IRP, making sure there is one stack entry for
2888 // us, as well.
2889 //
2890
2891 IoRuns[UnwindRunCount].SavedIrp =
2892 Irp = IoMakeAssociatedIrp( MasterIrp, (CCHAR)(IrpContext->Vcb->TargetDeviceObject->StackSize + 1) );
2893
2894 if (Irp == NULL) {
2895
2896 IrpContext->Irp->IoStatus.Information = 0;
2897 CdRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
2898 }
2899
2900 //
2901 // Allocate and build a partial Mdl for the request.
2902 //
2903
2904 Mdl = IoAllocateMdl( IoRuns[UnwindRunCount].TransferVirtualAddress,
2905 IoRuns[UnwindRunCount].DiskByteCount,
2906 FALSE,
2907 FALSE,
2908 Irp );
2909
2910 if (Mdl == NULL) {
2911
2912 IrpContext->Irp->IoStatus.Information = 0;
2913 CdRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
2914 }
2915
2916 IoBuildPartialMdl( IoRuns[UnwindRunCount].TransferMdl,
2917 Mdl,
2918 IoRuns[UnwindRunCount].TransferVirtualAddress,
2919 IoRuns[UnwindRunCount].DiskByteCount );
2920
2921 //
2922 // Get the first IRP stack location in the associated Irp
2923 //
2924
2925 IoSetNextIrpStackLocation( Irp );
2926 IrpSp = IoGetCurrentIrpStackLocation( Irp );
2927
2928 //
2929 // Setup the Stack location to describe our read.
2930 //
2931
2932 IrpSp->MajorFunction = IRP_MJ_READ;
2933 IrpSp->Parameters.Read.Length = IoRuns[UnwindRunCount].DiskByteCount;
2934 IrpSp->Parameters.Read.ByteOffset.QuadPart = IoRuns[UnwindRunCount].DiskOffset;
2935
2936 //
2937 // Set up the completion routine address in our stack frame.
2938 //
2939
2940 IoSetCompletionRoutine( Irp,
2941 CompletionRoutine,
2942 IrpContext->IoContext,
2943 TRUE,
2944 TRUE,
2945 TRUE );
2946
2947 //
2948 // Setup the next IRP stack location in the associated Irp for the disk
2949 // driver beneath us.
2950 //
2951
2952 IrpSp = IoGetNextIrpStackLocation( Irp );
2953
2954 //
2955 // Setup the Stack location to do a read from the disk driver.
2956 //
2957
2958 IrpSp->MajorFunction = IRP_MJ_READ;
2959 IrpSp->Parameters.Read.Length = IoRuns[UnwindRunCount].DiskByteCount;
2960 IrpSp->Parameters.Read.ByteOffset.QuadPart = IoRuns[UnwindRunCount].DiskOffset;
2961 }
2962
2963 //
2964 // If we used the cache, we're done.
2965 //
2966
2967 if (UseSectorCache) {
2968
2969 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT)) {
2970
2971 IrpContext->Irp->IoStatus.Status = STATUS_SUCCESS;
2972 KeSetEvent( &IrpContext->IoContext->SyncEvent, 0, FALSE );
2973 }
2974
2975 return;
2976 }
2977
2978 //
2979 // We only need to set the associated IRP count in the master irp to
2980 // make it a master IRP. But we set the count to one more than our
2981 // caller requested, because we do not want the I/O system to complete
2982 // the I/O. We also set our own count.
2983 //
2984
2985 IrpContext->IoContext->IrpCount = RunCount;
2986 IrpContext->IoContext->MasterIrp = MasterIrp;
2987
2988 //
2989 // We set the count in the master Irp to 1 since typically we
2990 // will clean up the associated irps ourselves. Setting this to one
2991 // means completing the last associated Irp with SUCCESS (in the async
2992 // case) will complete the master irp.
2993 //
2994
2995 MasterIrp->AssociatedIrp.IrpCount = 1;
2996
2997 //
2998 // If we (FS) acquired locks, transition the lock owners to an object, since
2999 // when we return this thread could go away before request completion, and
3000 // the resource package may otherwise try to boost priority, etc.
3001 //
3002
3003 if (!FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ) &&
3004 FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_TOP_LEVEL )) {
3005
3006 NT_ASSERT( IrpContext->IoContext->ResourceThreadId == (ERESOURCE_THREAD)PsGetCurrentThread() );
3007
3008 IrpContext->IoContext->ResourceThreadId = ((ULONG_PTR)IrpContext->IoContext) | 3;
3009
3010 ExSetResourceOwnerPointer( IrpContext->IoContext->Resource,
3011 (PVOID)IrpContext->IoContext->ResourceThreadId );
3012 }
3013
3014 //
3015 // Now that all the dangerous work is done, issue the Io requests
3016 //
3017
3018 for (UnwindRunCount = 0;
3019 UnwindRunCount < RunCount;
3020 UnwindRunCount++) {
3021
3022 Irp = IoRuns[UnwindRunCount].SavedIrp;
3023 IoRuns[UnwindRunCount].SavedIrp = NULL;
3024
3025 if (NULL != Irp) {
3026
3027 //
3028 // If IoCallDriver returns an error, it has completed the Irp
3029 // and the error will be caught by our completion routines
3030 // and dealt with as a normal IO error.
3031 //
3032
3033 (VOID) IoCallDriver( IrpContext->Vcb->TargetDeviceObject, Irp );
3034 }
3035 }
3036 }
3037
3038 \f
3039 //
3040 // Local support routine
3041 //
3042
VOID
CdMultipleXAAsync (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ ULONG RunCount,
    _Inout_ PIO_RUN IoRuns,
    _In_ PRAW_READ_INFO RawReads,
    _In_ TRACK_MODE_TYPE TrackMode
    )

/*++

Routine Description:

    This routine first does the initial setup required of a Master IRP that is
    going to be completed using associated IRPs.  This routine is used to generate
    the associated Irps used to read raw sectors from the disk.

    A context parameter is initialized, to serve as a communications area
    between here and the common completion routine.

    Next this routine reads or writes one or more contiguous sectors from
    a device asynchronously, and is used if there are multiple reads for a
    master IRP.  A completion routine is used to synchronize with the
    completion of all of the I/O requests started by calls to this routine.

    Also, prior to calling this routine the caller must initialize the
    IoStatus field in the Context, with the correct success status and byte
    count which are expected if all of the parallel transfers complete
    successfully.  After return this status will be unchanged if all requests
    were, in fact, successful.  However, if one or more errors occur, the
    IoStatus will be modified to reflect the error status and byte count
    from the first run (by Vbo) which encountered an error.  I/O status
    from all subsequent runs will not be indicated.

Arguments:

    RunCount - Supplies the number of multiple async requests
        that will be issued against the master irp.

    IoRuns - Supplies an array containing the Offset and ByteCount for the
        separate requests.

    RawReads - Supplies an array of structures to store in the Irps passed to the
        device driver to perform the low-level Io.  Must remain valid until the
        I/O completes (the Irps reference these as Type3InputBuffer).

    TrackMode - Supplies the recording mode of sectors in these IoRuns

Return Value:

    None.

--*/

{
    PIO_STACK_LOCATION IrpSp;
    PMDL Mdl;
    PIRP Irp;
    PIRP MasterIrp;
    ULONG UnwindRunCount;
    ULONG RawByteCount;

    PIO_RUN ThisIoRun = IoRuns;
    PRAW_READ_INFO ThisRawRead = RawReads;

    PAGED_CODE();

    //
    //  Initialize some local variables.
    //

    MasterIrp = IrpContext->Irp;

    //
    //  Itterate through the runs, doing everything that can fail.
    //  We let the cleanup in CdFinishBuffers clean up on error.
    //

    for (UnwindRunCount = 0;
         UnwindRunCount < RunCount;
         UnwindRunCount += 1, ThisIoRun += 1, ThisRawRead += 1) {

        //
        //  Create an associated IRP, making sure there is one stack entry for
        //  us, as well.
        //

        ThisIoRun->SavedIrp =
        Irp = IoMakeAssociatedIrp( MasterIrp, (CCHAR)(IrpContext->Vcb->TargetDeviceObject->StackSize + 1) );

        if (Irp == NULL) {

            IrpContext->Irp->IoStatus.Information = 0;
            CdRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
        }

        //
        //  Should have been passed a byte count of at least one sector, and
        //  must be a multiple of sector size
        //

        NT_ASSERT( ThisIoRun->DiskByteCount && !SectorOffset(ThisIoRun->DiskByteCount));

        //
        //  Scale the cooked (2048 byte sector) transfer length up to the raw
        //  (2352 byte) sector size the device will actually return.
        //

        RawByteCount = SectorsFromBytes( ThisIoRun->DiskByteCount) * RAW_SECTOR_SIZE;

        //
        //  Allocate and build a partial Mdl for the request.
        //

        Mdl = IoAllocateMdl( ThisIoRun->TransferVirtualAddress,
                             RawByteCount,
                             FALSE,
                             FALSE,
                             Irp );

        if (Mdl == NULL) {

            IrpContext->Irp->IoStatus.Information = 0;
            CdRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
        }

        IoBuildPartialMdl( ThisIoRun->TransferMdl,
                           Mdl,
                           ThisIoRun->TransferVirtualAddress,
                           RawByteCount);
        //
        //  Get the first IRP stack location in the associated Irp
        //

        IoSetNextIrpStackLocation( Irp );
        IrpSp = IoGetCurrentIrpStackLocation( Irp );

        //
        //  Setup the Stack location to describe our read (using cooked values)
        //  These values won't be used for the raw read in any case.
        //

        IrpSp->MajorFunction = IRP_MJ_READ;
        IrpSp->Parameters.Read.Length = ThisIoRun->DiskByteCount;
        IrpSp->Parameters.Read.ByteOffset.QuadPart = ThisIoRun->DiskOffset;

        //
        //  Set up the completion routine address in our stack frame.
        //

        IoSetCompletionRoutine( Irp,
                                CdMultiSyncCompletionRoutine,
                                IrpContext->IoContext,
                                TRUE,
                                TRUE,
                                TRUE );

        //
        //  Setup the next IRP stack location in the associated Irp for the disk
        //  driver beneath us.
        //

        IrpSp = IoGetNextIrpStackLocation( Irp );

        //
        //  Setup the stack location to do a read of raw sectors at this location.
        //  Note that the storage stack always reads multiples of whole XA sectors.
        //

        ThisRawRead->DiskOffset.QuadPart = ThisIoRun->DiskOffset;
        ThisRawRead->SectorCount = ThisIoRun->DiskByteCount >> SECTOR_SHIFT;
        ThisRawRead->TrackMode = TrackMode;

        IrpSp->MajorFunction = IRP_MJ_DEVICE_CONTROL;

        IrpSp->Parameters.DeviceIoControl.OutputBufferLength = ThisRawRead->SectorCount * RAW_SECTOR_SIZE;
        Irp->UserBuffer = ThisIoRun->TransferVirtualAddress;

        IrpSp->Parameters.DeviceIoControl.InputBufferLength = sizeof( RAW_READ_INFO );
        IrpSp->Parameters.DeviceIoControl.Type3InputBuffer = ThisRawRead;

        IrpSp->Parameters.DeviceIoControl.IoControlCode = IOCTL_CDROM_RAW_READ;
    }

    //
    //  We only need to set the associated IRP count in the master irp to
    //  make it a master IRP.  But we set the count to one more than our
    //  caller requested, because we do not want the I/O system to complete
    //  the I/O.  We also set our own count.
    //

    IrpContext->IoContext->IrpCount = RunCount;
    IrpContext->IoContext->MasterIrp = MasterIrp;

    //
    //  We set the count in the master Irp to 1 since typically we
    //  will clean up the associated irps ourselves.  Setting this to one
    //  means completing the last associated Irp with SUCCESS (in the async
    //  case) will complete the master irp.
    //

    MasterIrp->AssociatedIrp.IrpCount = 1;

    //
    //  Now that all the dangerous work is done, issue the Io requests
    //

    for (UnwindRunCount = 0;
         UnwindRunCount < RunCount;
         UnwindRunCount++) {

        Irp = IoRuns[UnwindRunCount].SavedIrp;
        IoRuns[UnwindRunCount].SavedIrp = NULL;

        //
        //
        //  If IoCallDriver returns an error, it has completed the Irp
        //  and the error will be caught by our completion routines
        //  and dealt with as a normal IO error.
        //

        (VOID) IoCallDriver( IrpContext->Vcb->TargetDeviceObject, Irp );
    }

    return;
}
3263
3264 \f
3265 //
3266 // Local support routine
3267 //
3268
_Requires_lock_held_(_Global_critical_region_)
VOID
CdSingleAsync (
    _In_ PIRP_CONTEXT IrpContext,
    _In_ PIO_RUN Run,
    _In_ PFCB Fcb
    )

/*++

Routine Description:

    This routine reads one or more contiguous sectors from a device
    asynchronously, and is used if there is only one read necessary to
    complete the IRP.  It implements the read by simply filling
    in the next stack frame in the Irp, and passing it on.  The transfer
    occurs to the single buffer originally specified in the user request.

Arguments:

    Run - Supplies the disk offset and byte count to transfer, and the
        buffer description for the read.

    Fcb - Supplies the file being read.  Directory (index) Fcbs may be
        satisfied from the sector cache instead of issuing device I/O.

Return Value:

    None.

--*/

{
    PIO_STACK_LOCATION IrpSp;
    PIO_COMPLETION_ROUTINE CompletionRoutine;

    PAGED_CODE();

    //
    //  For directories, look in the sector cache,
    //

    if ((SafeNodeType( Fcb) == CDFS_NTC_FCB_INDEX) &&
        (NULL != Fcb->Vcb->SectorCacheBuffer) &&
        (VcbMounted == IrpContext->Vcb->VcbCondition)) {

        if (CdReadDirDataThroughCache( IrpContext, Run )) {

            //
            //  Cache satisfied the read.  For the synchronous case signal the
            //  sync event ourselves since no completion routine will run.
            //

            if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT)) {

                IrpContext->Irp->IoStatus.Status = STATUS_SUCCESS;
                KeSetEvent( &IrpContext->IoContext->SyncEvent, 0, FALSE );
            }

            return;
        }
    }

    //
    //  Set up things according to whether this is truely async.
    //

    if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT )) {

        CompletionRoutine = CdSingleSyncCompletionRoutine;

    } else {

        CompletionRoutine = CdSingleAsyncCompletionRoutine;

        //
        //  If we (FS) acquired locks, transition the lock owners to an object, since
        //  when we return this thread could go away before request completion, and
        //  the resource package may otherwise try to boost priority, etc.
        //

        if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_TOP_LEVEL )) {

            NT_ASSERT( IrpContext->IoContext->ResourceThreadId == (ERESOURCE_THREAD)PsGetCurrentThread() );

            IrpContext->IoContext->ResourceThreadId = ((ULONG_PTR)IrpContext->IoContext) | 3;

            ExSetResourceOwnerPointer( IrpContext->IoContext->Resource,
                                       (PVOID)IrpContext->IoContext->ResourceThreadId );
        }
    }

    //
    //  Set up the completion routine address in our stack frame.
    //

    IoSetCompletionRoutine( IrpContext->Irp,
                            CompletionRoutine,
                            IrpContext->IoContext,
                            TRUE,
                            TRUE,
                            TRUE );

    //
    //  Setup the next IRP stack location in the associated Irp for the disk
    //  driver beneath us.
    //

    IrpSp = IoGetNextIrpStackLocation( IrpContext->Irp );

    //
    //  Setup the Stack location to do a read from the disk driver.
    //

    IrpSp->MajorFunction = IrpContext->MajorFunction;
    IrpSp->Parameters.Read.Length = Run->DiskByteCount;
    IrpSp->Parameters.Read.ByteOffset.QuadPart = Run->DiskOffset;

    //
    //  Issue the Io request
    //

    //
    //  If IoCallDriver returns an error, it has completed the Irp
    //  and the error will be caught by our completion routines
    //  and dealt with as a normal IO error.
    //

    (VOID)IoCallDriver( IrpContext->Vcb->TargetDeviceObject, IrpContext->Irp );
}
3392
3393 \f
3394 //
3395 // Local support routine
3396 //
3397
3398 VOID
3399 CdWaitSync (
3400 _In_ PIRP_CONTEXT IrpContext
3401 )
3402
3403 /*++
3404
3405 Routine Description:
3406
3407 This routine waits for one or more previously started I/O requests
3408 from the above routines, by simply waiting on the event.
3409
3410 Arguments:
3411
3412 Return Value:
3413
3414 None
3415
3416 --*/
3417
3418 {
3419 PAGED_CODE();
3420
3421
3422 (VOID)KeWaitForSingleObject( &IrpContext->IoContext->SyncEvent,
3423 Executive,
3424 KernelMode,
3425 FALSE,
3426 NULL );
3427
3428 KeClearEvent( &IrpContext->IoContext->SyncEvent );
3429 }
3430
3431 \f
3432 //
3433 // Local support routine
3434 //
3435
NTSTATUS
NTAPI /* ReactOS Change: GCC Does not support STDCALL by default */
CdMultiSyncCompletionRoutine (
    PDEVICE_OBJECT DeviceObject,
    PIRP Irp,
    PVOID Context
    )

/*++

Routine Description:

    This is the completion routine for all synchronous reads
    started via CdMultipleAsynch.

    The completion routine has the following responsibilities:

        If the individual request was completed with an error, then
        this completion routine must see if this is the first error
        and remember the error status in the Context.

        If the IrpCount goes to 1, then it sets the event in the Context
        parameter to signal the caller that all of the asynch requests
        are done.

Arguments:

    DeviceObject - Pointer to the file system device object.

    Irp - Pointer to the associated Irp which is being completed.  (This
        Irp will no longer be accessible after this routine returns.)

    Context - The context parameter which was specified for all of
        the multiple asynch I/O requests for this MasterIrp.

Return Value:

    The routine returns STATUS_MORE_PROCESSING_REQUIRED so that we can
    immediately complete the Master Irp without being in a race condition
    with the IoCompleteRequest thread trying to decrement the IrpCount in
    the Master Irp.

--*/

{
    PCD_IO_CONTEXT IoContext = Context;
    _Analysis_assume_(Context != NULL);

    AssertVerifyDeviceIrp( Irp );

    //
    //  If we got an error (or verify required), remember it in the Irp
    //

    if (!NT_SUCCESS( Irp->IoStatus.Status )) {

        //
        //  NOTE(review): a later-completing run can overwrite an earlier
        //  error status here;  only one failure status is reported.
        //

        InterlockedExchange( &IoContext->Status, Irp->IoStatus.Status );
        IoContext->MasterIrp->IoStatus.Information = 0;
    }

    //
    //  We must do this here since IoCompleteRequest won't get a chance
    //  on this associated Irp.
    //

    IoFreeMdl( Irp->MdlAddress );
    IoFreeIrp( Irp );

    //
    //  Only the last completing associated Irp signals the waiter;  the
    //  interlocked decrement makes the last-one-out decision race-free.
    //

    if (InterlockedDecrement( &IoContext->IrpCount ) == 0) {

        //
        //  Update the Master Irp with any error status from the associated Irps.
        //

        IoContext->MasterIrp->IoStatus.Status = IoContext->Status;
        KeSetEvent( &IoContext->SyncEvent, 0, FALSE );
    }

    UNREFERENCED_PARAMETER( DeviceObject );

    return STATUS_MORE_PROCESSING_REQUIRED;
}
3518
3519 \f
3520 //
3521 // Local support routine
3522 //
3523
3524 NTSTATUS
3525 NTAPI /* ReactOS Change: GCC Does not support STDCALL by default */
3526 CdMultiAsyncCompletionRoutine (
3527 PDEVICE_OBJECT DeviceObject,
3528 PIRP Irp,
3529 PVOID Context
3530 )
3531
3532 /*++
3533
3534 Routine Description:
3535
3536 This is the completion routine for all asynchronous reads
3537 started via CdMultipleAsynch.
3538
3539 The completion routine has has the following responsibilities:
3540
3541 If the individual request was completed with an error, then
3542 this completion routine must see if this is the first error
3543 and remember the error status in the Context.
3544
3545 Arguments:
3546
3547 DeviceObject - Pointer to the file system device object.
3548
3549 Irp - Pointer to the associated Irp which is being completed. (This
3550 Irp will no longer be accessible after this routine returns.)
3551