1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/io/iomgr/irp.c
5 * PURPOSE: IRP Handling Functions
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 * Gunnar Dalsnes
8 * Filip Navara (navaraf@reactos.org)
9 * Pierre Schweitzer (pierre@reactos.org)
10 */
11
12 /* INCLUDES ****************************************************************/
13
14 #include <ntoskrnl.h>
15 #define NDEBUG
16 #include <debug.h>
17
18 PIRP IopDeadIrp;
19 RESERVE_IRP_ALLOCATOR IopReserveIrpAllocator;
20
21 /* PRIVATE FUNCTIONS ********************************************************/
22
23 VOID
24 NTAPI
25 IopFreeIrpKernelApc(IN PKAPC Apc,
26 IN PKNORMAL_ROUTINE *NormalRoutine,
27 IN PVOID *NormalContext,
28 IN PVOID *SystemArgument1,
29 IN PVOID *SystemArgument2)
30 {
31 /* Free the IRP */
32 IoFreeIrp(CONTAINING_RECORD(Apc, IRP, Tail.Apc));
33 }
34
35 VOID
36 NTAPI
37 IopAbortIrpKernelApc(IN PKAPC Apc)
38 {
39 /* Free the IRP */
40 IoFreeIrp(CONTAINING_RECORD(Apc, IRP, Tail.Apc));
41 }
42
43 NTSTATUS
44 NTAPI
45 IopCleanupFailedIrp(IN PFILE_OBJECT FileObject,
46 IN PKEVENT EventObject OPTIONAL,
47 IN PVOID Buffer OPTIONAL)
48 {
49 PAGED_CODE();
50
51 /* Dereference the event */
52 if (EventObject) ObDereferenceObject(EventObject);
53
54 /* Free a buffer, if any */
55 if (Buffer) ExFreePool(Buffer);
56
57 /* If this was a file opened for synch I/O, then unlock it */
58 if (FileObject->Flags & FO_SYNCHRONOUS_IO) IopUnlockFileObject(FileObject);
59
60 /* Now dereference it and return */
61 ObDereferenceObject(FileObject);
62 return STATUS_INSUFFICIENT_RESOURCES;
63 }
64
65 VOID
66 NTAPI
67 IopAbortInterruptedIrp(IN PKEVENT EventObject,
68 IN PIRP Irp)
69 {
70 KIRQL OldIrql;
71 BOOLEAN CancelResult;
72 LARGE_INTEGER Wait;
73 PAGED_CODE();
74
75 /* Raise IRQL to APC */
76 KeRaiseIrql(APC_LEVEL, &OldIrql);
77
78 /* Check if nobody completed it yet */
79 if (!KeReadStateEvent(EventObject))
80 {
81 /* First, cancel it */
82 CancelResult = IoCancelIrp(Irp);
83 KeLowerIrql(OldIrql);
84
85 /* Check if we cancelled it */
86 if (CancelResult)
87 {
88 /* Wait for the IRP to be cancelled */
89 Wait.QuadPart = -100000;
90 while (!KeReadStateEvent(EventObject))
91 {
92 /* Wait a little while (10 ms) before checking again */
93 KeDelayExecutionThread(KernelMode, FALSE, &Wait);
94 }
95 }
96 else
97 {
98 /* No cancellation done, so wait for the I/O system to kill it */
99 KeWaitForSingleObject(EventObject,
100 Executive,
101 KernelMode,
102 FALSE,
103 NULL);
104 }
105 }
106 else
107 {
108 /* The request was completed in the meantime, just lower the IRQL */
109 KeLowerIrql(OldIrql);
110 }
111 }
112
113 VOID
114 NTAPI
115 IopDisassociateThreadIrp(VOID)
116 {
117 KIRQL OldIrql, LockIrql;
118 PETHREAD IrpThread;
119 PLIST_ENTRY IrpEntry;
120 PIO_ERROR_LOG_PACKET ErrorLogEntry;
121 PDEVICE_OBJECT DeviceObject = NULL;
122 PIO_STACK_LOCATION IoStackLocation;
123
124 /* First, raise to APC to protect IrpList */
125 KeRaiseIrql(APC_LEVEL, &OldIrql);
126
127 /* Get the Thread and check the list */
128 IrpThread = PsGetCurrentThread();
129 if (IsListEmpty(&IrpThread->IrpList))
130 {
131 /* It got completed now, so quit */
132 KeLowerIrql(OldIrql);
133 return;
134 }
135
136 /* Acquire the completion lock so no one else can touch the list */
137 LockIrql = KeAcquireQueuedSpinLock(LockQueueIoCompletionLock);
138
139 /* Get the misbehaving IRP */
140 IrpEntry = IrpThread->IrpList.Flink;
141 IopDeadIrp = CONTAINING_RECORD(IrpEntry, IRP, ThreadListEntry);
142 IOTRACE(IO_IRP_DEBUG,
143 "%s - Deassociating IRP %p for %p\n",
144 __FUNCTION__,
145 IopDeadIrp,
146 IrpThread);
147
148 /* Don't touch the IRP if it has already been completed */
149 if (IopDeadIrp->CurrentLocation == (IopDeadIrp->StackCount + 2))
150 {
151 /* Return */
152 KeReleaseQueuedSpinLock(LockQueueIoCompletionLock, LockIrql);
153 KeLowerIrql(OldIrql);
154 return;
155 }
156
157 /* Disown the IRP! */
158 IopDeadIrp->Tail.Overlay.Thread = NULL;
159 RemoveHeadList(&IrpThread->IrpList);
160 InitializeListHead(&IopDeadIrp->ThreadListEntry);
161
162 /* Get the stack location and check if it's valid */
163 IoStackLocation = IoGetCurrentIrpStackLocation(IopDeadIrp);
164 if (IopDeadIrp->CurrentLocation <= IopDeadIrp->StackCount)
165 {
166 /* Get the device object */
167 DeviceObject = IoStackLocation->DeviceObject;
168 }
169
170 KeReleaseQueuedSpinLock(LockQueueIoCompletionLock, LockIrql);
171 /* Lower IRQL now, since we have the pointers we need */
172 KeLowerIrql(OldIrql);
173
174 /* Check if we can send an Error Log Entry */
175 if (DeviceObject)
176 {
177 /* Allocate an entry */
178 ErrorLogEntry = IoAllocateErrorLogEntry(DeviceObject,
179 sizeof(IO_ERROR_LOG_PACKET));
180 if (ErrorLogEntry)
181 {
182 /* Write the entry */
183 ErrorLogEntry->ErrorCode = IO_DRIVER_CANCEL_TIMEOUT;
184 IoWriteErrorLogEntry(ErrorLogEntry);
185 }
186 }
187 }
188
189 VOID
190 NTAPI
191 IopCleanupIrp(IN PIRP Irp,
192 IN PFILE_OBJECT FileObject)
193 {
194 PMDL Mdl;
195 IOTRACE(IO_IRP_DEBUG,
196 "%s - Cleaning IRP %p for %p\n",
197 __FUNCTION__,
198 Irp,
199 FileObject);
200
201 /* Check if there's an MDL */
202 while ((Mdl = Irp->MdlAddress))
203 {
204 /* Clear all of them */
205 Irp->MdlAddress = Mdl->Next;
206 IoFreeMdl(Mdl);
207 }
208
209 /* Check if the IRP has a system buffer to deallocate */
210 if (Irp->Flags & IRP_DEALLOCATE_BUFFER)
211 {
212 /* Free the buffer */
213 ExFreePoolWithTag(Irp->AssociatedIrp.SystemBuffer, TAG_SYS_BUF);
214 }
215
216 /* Check if this IRP has a user event, a file object, and is async */
217 if ((Irp->UserEvent) &&
218 !(Irp->Flags & IRP_SYNCHRONOUS_API) &&
219 (FileObject))
220 {
221 /* Dereference the User Event */
222 ObDereferenceObject(Irp->UserEvent);
223 }
224
225 /* Check if we have a file object and this isn't a create operation */
226 if ((FileObject) && !(Irp->Flags & IRP_CREATE_OPERATION))
227 {
228 /* Dereference the file object */
229 ObDereferenceObject(FileObject);
230 }
231
232 /* Free the IRP */
233 IoFreeIrp(Irp);
234 }
235
236 VOID
237 NTAPI
238 IopCompleteRequest(IN PKAPC Apc,
239 IN PKNORMAL_ROUTINE* NormalRoutine,
240 IN PVOID* NormalContext,
241 IN PVOID* SystemArgument1,
242 IN PVOID* SystemArgument2)
243 {
244 PFILE_OBJECT FileObject;
245 PIRP Irp;
246 PMDL Mdl, NextMdl;
247 PVOID Port = NULL, Key = NULL;
248 BOOLEAN SignaledCreateRequest = FALSE;
249
250 /* Get data from the APC */
251 FileObject = (PFILE_OBJECT)*SystemArgument1;
252 Irp = CONTAINING_RECORD(Apc, IRP, Tail.Apc);
253 IOTRACE(IO_IRP_DEBUG,
254 "%s - Completing IRP %p for %p\n",
255 __FUNCTION__,
256 Irp,
257 FileObject);
258
259 /* Sanity check */
260 ASSERT(Irp->IoStatus.Status != (NTSTATUS)0xFFFFFFFF);
261
262 /* Check if we have a reparse data buffer */
263 if (*SystemArgument2)
264 {
265 /* Check if we're reparsing */
266 if ((Irp->IoStatus.Status == STATUS_REPARSE) &&
267 (Irp->IoStatus.Information == IO_REPARSE_TAG_MOUNT_POINT))
268 {
269 PREPARSE_DATA_BUFFER ReparseData;
270
271 ReparseData = (PREPARSE_DATA_BUFFER)*SystemArgument2;
272
273 ASSERT(ReparseData->ReparseTag == IO_REPARSE_TAG_MOUNT_POINT);
274 ASSERT(ReparseData->ReparseDataLength < MAXIMUM_REPARSE_DATA_BUFFER_SIZE);
275 ASSERT(ReparseData->Reserved < MAXIMUM_REPARSE_DATA_BUFFER_SIZE);
276
277 IopDoNameTransmogrify(Irp, FileObject, ReparseData);
278 }
279 }
280
281 /* Handle Buffered case first */
282 if (Irp->Flags & IRP_BUFFERED_IO)
283 {
284 /* Check if we have an input buffer and if we succeeded */
285 if ((Irp->Flags & IRP_INPUT_OPERATION) &&
286 (Irp->IoStatus.Status != STATUS_VERIFY_REQUIRED) &&
287 !(NT_ERROR(Irp->IoStatus.Status)))
288 {
289 _SEH2_TRY
290 {
291 /* Copy the buffer back to the user */
292 RtlCopyMemory(Irp->UserBuffer,
293 Irp->AssociatedIrp.SystemBuffer,
294 Irp->IoStatus.Information);
295 }
296 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
297 {
298 /* Fail the IRP */
299 Irp->IoStatus.Status = _SEH2_GetExceptionCode();
300 }
301 _SEH2_END;
302 }
303
304 /* Also check if we should de-allocate it */
305 if (Irp->Flags & IRP_DEALLOCATE_BUFFER)
306 {
307 /* Deallocate it */
308 ExFreePool(Irp->AssociatedIrp.SystemBuffer);
309 }
310 }
311
312 /* Now we got rid of these two... */
313 Irp->Flags &= ~(IRP_BUFFERED_IO | IRP_DEALLOCATE_BUFFER);
314
315 /* Free all the MDLs, if any */
316 for (Mdl = Irp->MdlAddress; Mdl; Mdl = NextMdl)
317 {
318 /* Free it */
319 NextMdl = Mdl->Next;
320 IoFreeMdl(Mdl);
321 }
322
323 /* No MDLs left */
324 Irp->MdlAddress = NULL;
325
326 /*
327 * Check if either the request was completed without any errors
328 * (but warnings are OK!), or if it was completed with an error, but
329 * did return from a pending I/O Operation and is not synchronous.
330 */
331 if (!NT_ERROR(Irp->IoStatus.Status) ||
332 (Irp->PendingReturned &&
333 !IsIrpSynchronous(Irp, FileObject)))
334 {
335 /* Get any information we need from the FO before we kill it */
336 if ((FileObject) && (FileObject->CompletionContext))
337 {
338 /* Save Completion Data */
339 Port = FileObject->CompletionContext->Port;
340 Key = FileObject->CompletionContext->Key;
341 }
342
343 /* Check for a user IOSB */
344 if (Irp->UserIosb != NULL)
345 {
346 /* Use SEH to make sure we don't write somewhere invalid */
347 _SEH2_TRY
348 {
349 /* Save the IOSB Information */
350 *Irp->UserIosb = Irp->IoStatus;
351 }
352 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
353 {
354 /* Ignore any error */
355 }
356 _SEH2_END;
357 }
358
359 /* Check if we have an event or a file object */
360 if (Irp->UserEvent)
361 {
362 /* At the very least, this is a PKEVENT, so signal it always */
363 KeSetEvent(Irp->UserEvent, 0, FALSE);
364
365 /* Check if we also have a File Object */
366 if (FileObject)
367 {
368 /* Check if this is an Asynch API */
369 if (!(Irp->Flags & IRP_SYNCHRONOUS_API))
370 {
371 /* Dereference the event */
372 ObDereferenceObject(Irp->UserEvent);
373 }
374
375 /*
376 * Now, if this is a Synch I/O File Object, then this event is
377 * NOT an actual Executive Event, so we won't dereference it,
378 * and instead, we will signal the File Object
379 */
380 if ((FileObject->Flags & FO_SYNCHRONOUS_IO) &&
381 !(Irp->Flags & IRP_OB_QUERY_NAME))
382 {
383 /* Signal the file object and set the status */
384 KeSetEvent(&FileObject->Event, 0, FALSE);
385 FileObject->FinalStatus = Irp->IoStatus.Status;
386 }
387
388 /*
389 * This could also be a create operation, in which case we want
390 * to make sure there's no APC fired.
391 */
392 if (Irp->Flags & IRP_CREATE_OPERATION)
393 {
394 /* Clear the APC Routine and remember this */
395 Irp->Overlay.AsynchronousParameters.UserApcRoutine = NULL;
396 SignaledCreateRequest = TRUE;
397 }
398 }
399 }
400 else if (FileObject)
401 {
402 /* Signal the file object and set the status */
403 KeSetEvent(&FileObject->Event, 0, FALSE);
404 FileObject->FinalStatus = Irp->IoStatus.Status;
405
406 /*
407 * This could also be a create operation, in which case we want
408 * to make sure there's no APC fired.
409 */
410 if (Irp->Flags & IRP_CREATE_OPERATION)
411 {
412 /* Clear the APC Routine and remember this */
413 Irp->Overlay.AsynchronousParameters.UserApcRoutine = NULL;
414 SignaledCreateRequest = TRUE;
415 }
416 }
417
418 /* Update transfer count for everything but create operation */
419 if (!(Irp->Flags & IRP_CREATE_OPERATION))
420 {
421 if (Irp->Flags & IRP_WRITE_OPERATION)
422 {
423 /* Update write transfer count */
424 IopUpdateTransferCount(IopWriteTransfer,
425 (ULONG)Irp->IoStatus.Information);
426 }
427 else if (Irp->Flags & IRP_READ_OPERATION)
428 {
429 /* Update read transfer count */
430 IopUpdateTransferCount(IopReadTransfer,
431 (ULONG)Irp->IoStatus.Information);
432 }
433 else
434 {
435 /* Update other transfer count */
436 IopUpdateTransferCount(IopOtherTransfer,
437 (ULONG)Irp->IoStatus.Information);
438 }
439 }
440
441 /* Now that we've signaled the events, de-associate the IRP */
442 IopUnQueueIrpFromThread(Irp);
443
444 /* Now check if a User APC Routine was requested */
445 if (Irp->Overlay.AsynchronousParameters.UserApcRoutine)
446 {
447 /* Initialize it */
448 KeInitializeApc(&Irp->Tail.Apc,
449 KeGetCurrentThread(),
450 CurrentApcEnvironment,
451 IopFreeIrpKernelApc,
452 IopAbortIrpKernelApc,
453 (PKNORMAL_ROUTINE)Irp->
454 Overlay.AsynchronousParameters.UserApcRoutine,
455 Irp->RequestorMode,
456 Irp->
457 Overlay.AsynchronousParameters.UserApcContext);
458
459 /* Queue it */
460 KeInsertQueueApc(&Irp->Tail.Apc, Irp->UserIosb, NULL, 2);
461 }
462 else if ((Port) &&
463 (Irp->Overlay.AsynchronousParameters.UserApcContext))
464 {
465 /* We have an I/O Completion setup... create the special Overlay */
466 Irp->Tail.CompletionKey = Key;
467 Irp->Tail.Overlay.PacketType = IopCompletionPacketIrp;
468 KeInsertQueue(Port, &Irp->Tail.Overlay.ListEntry);
469 }
470 else
471 {
472 /* Free the IRP since we don't need it anymore */
473 IoFreeIrp(Irp);
474 }
475
476 /* Check if we have a file object that wasn't part of a create */
477 if ((FileObject) && !(SignaledCreateRequest))
478 {
479 /* Dereference it, since it's not needed anymore either */
480 ObDereferenceObjectDeferDelete(FileObject);
481 }
482 }
483 else
484 {
485 /*
486 * Either we didn't return from the request, or we did return but this
487 * request was synchronous.
488 */
489 if ((Irp->PendingReturned) && (FileObject))
490 {
491 /* So we did return with a synch operation, was it the IRP? */
492 if (Irp->Flags & IRP_SYNCHRONOUS_API)
493 {
494 /* Yes, this IRP was synchronous, so return the I/O Status */
495 *Irp->UserIosb = Irp->IoStatus;
496
497 /* Now check if the user gave an event */
498 if (Irp->UserEvent)
499 {
500 /* Signal it */
501 KeSetEvent(Irp->UserEvent, 0, FALSE);
502 }
503 else
504 {
505 /* No event was given, so signal the FO instead */
506 KeSetEvent(&FileObject->Event, 0, FALSE);
507 }
508 }
509 else
510 {
511 /*
512 * It's not the IRP that was synchronous, it was the FO
513 * that was opened this way. Signal its event.
514 */
515 FileObject->FinalStatus = Irp->IoStatus.Status;
516 KeSetEvent(&FileObject->Event, 0, FALSE);
517 }
518 }
519
520 /* Now that we got here, we do this for incomplete I/Os as well */
521 if ((FileObject) && !(Irp->Flags & IRP_CREATE_OPERATION))
522 {
523 /* Dereference the File Object unless this was a create */
524 ObDereferenceObjectDeferDelete(FileObject);
525 }
526
527 /*
528 * Check if this was an Executive Event (remember that we know this
529 * by checking if the IRP is synchronous)
530 */
531 if ((Irp->UserEvent) &&
532 (FileObject) &&
533 !(Irp->Flags & IRP_SYNCHRONOUS_API))
534 {
535 /* This isn't a PKEVENT, so dereference it */
536 ObDereferenceObject(Irp->UserEvent);
537 }
538
539 /* Now that we've signaled the events, de-associate the IRP */
540 IopUnQueueIrpFromThread(Irp);
541
542 /* Free the IRP as well */
543 IoFreeIrp(Irp);
544 }
545 }
546
547 BOOLEAN
548 NTAPI
549 IopInitializeReserveIrp(IN PRESERVE_IRP_ALLOCATOR ReserveIrpAllocator)
550 {
551 /* Our allocated stack size */
552 ReserveIrpAllocator->StackSize = 20;
553
554 /* Allocate the IRP now */
555 ReserveIrpAllocator->ReserveIrp = IoAllocateIrp(ReserveIrpAllocator->StackSize, FALSE);
556 /* If we cannot, abort system boot */
557 if (ReserveIrpAllocator->ReserveIrp == NULL)
558 {
559 return FALSE;
560 }
561
562 /* It's not in use */
563 ReserveIrpAllocator->ReserveIrpInUse = 0;
564 /* And init the event */
565 KeInitializeEvent(&ReserveIrpAllocator->WaitEvent, SynchronizationEvent, FALSE);
566
567 /* All good, keep booting */
568 return TRUE;
569 }
570
571 PIRP
572 NTAPI
573 IopAllocateReserveIrp(IN CCHAR StackSize)
574 {
575 /* If we need a stack size higher than what was allocated, then fail */
576 if (StackSize > IopReserveIrpAllocator.StackSize)
577 {
578 return NULL;
579 }
580
581 /* Now, wait until the IRP becomes available and reserve it immediately */
582 while (InterlockedExchange(&IopReserveIrpAllocator.ReserveIrpInUse, 1) == 1)
583 {
584 KeWaitForSingleObject(&IopReserveIrpAllocator.WaitEvent,
585 Executive,
586 KernelMode,
587 FALSE,
588 NULL);
589 }
590
591 /* It's ours! Initialize it */
592 IoInitializeIrp(IopReserveIrpAllocator.ReserveIrp, IoSizeOfIrp(StackSize), StackSize);
593
594 /* And return it to the caller */
595 return IopReserveIrpAllocator.ReserveIrp;
596 }
597
598 VOID
599 IopFreeReserveIrp(IN CCHAR PriorityBoost)
600 {
601 /* Mark that the IRP is no longer in use */
602 InterlockedExchange(&IopReserveIrpAllocator.ReserveIrpInUse, 0);
603
604 /* And set the event if someone is waiting on the IRP */
605 KeSetEvent(&IopReserveIrpAllocator.WaitEvent, PriorityBoost, FALSE);
606 }
607
608 /* FUNCTIONS *****************************************************************/
609
610 /*
611 * @implemented
612 */
613 PIRP
614 NTAPI
615 IoAllocateIrp(IN CCHAR StackSize,
616 IN BOOLEAN ChargeQuota)
617 {
618 PIRP Irp = NULL;
619 USHORT Size = IoSizeOfIrp(StackSize);
620 PKPRCB Prcb;
621 UCHAR Flags = 0;
622 PNPAGED_LOOKASIDE_LIST List = NULL;
623 PP_NPAGED_LOOKASIDE_NUMBER ListType = LookasideSmallIrpList;
624
625 /* Set Charge Quota Flag */
626 if (ChargeQuota) Flags |= IRP_QUOTA_CHARGED;
627
628 /* Get the PRCB */
629 Prcb = KeGetCurrentPrcb();
630
631 /* Figure out which Lookaside List to use */
632 if ((StackSize <= 8) && (ChargeQuota == FALSE || Prcb->LookasideIrpFloat > 0))
633 {
634 /* Set Fixed Size Flag */
635 Flags |= IRP_ALLOCATED_FIXED_SIZE;
636
637 /* See if we should use big list */
638 if (StackSize != 1)
639 {
640 Size = IoSizeOfIrp(8);
641 ListType = LookasideLargeIrpList;
642 }
643
644 /* Get the P List First */
645 List = (PNPAGED_LOOKASIDE_LIST)Prcb->PPLookasideList[ListType].P;
646
647 /* Attempt allocation */
648 List->L.TotalAllocates++;
649 Irp = (PIRP)InterlockedPopEntrySList(&List->L.ListHead);
650
651 /* Check if the P List failed */
652 if (!Irp)
653 {
654 /* Let the balancer know */
655 List->L.AllocateMisses++;
656
657 /* Try the L List */
658 List = (PNPAGED_LOOKASIDE_LIST)Prcb->PPLookasideList[ListType].L;
659 List->L.TotalAllocates++;
660 Irp = (PIRP)InterlockedPopEntrySList(&List->L.ListHead);
661 }
662 }
663
664 /* Check if we have to use the pool */
665 if (!Irp)
666 {
667 /* Did we try lookaside and fail? */
668 if (Flags & IRP_ALLOCATED_FIXED_SIZE) List->L.AllocateMisses++;
669
670 /* Check if we should charge quota */
671 if (ChargeQuota)
672 {
673 Irp = ExAllocatePoolWithQuotaTag(NonPagedPool | POOL_QUOTA_FAIL_INSTEAD_OF_RAISE,
674 Size,
675 TAG_IRP);
676 }
677 else
678 {
679 /* Allocate the IRP with no quota charge */
680 Irp = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_IRP);
681 }
682
683 /* Make sure it was successful */
684 if (!Irp) return NULL;
685 }
686 else if (Flags & IRP_QUOTA_CHARGED)
687 {
688 /* Decrement lookaside float */
689 InterlockedDecrement(&Prcb->LookasideIrpFloat);
690 Flags |= IRP_LOOKASIDE_ALLOCATION;
691
692 /* In this case no quota is actually charged */
693 Flags &= ~IRP_QUOTA_CHARGED;
694 }
695
696 /* Now Initialize it */
697 IoInitializeIrp(Irp, Size, StackSize);
698
699 /* Set the Allocation Flags */
700 Irp->AllocationFlags = Flags;
701
702 /* Return it */
703 IOTRACE(IO_IRP_DEBUG,
704 "%s - Allocated IRP %p with allocation flags %lx\n",
705 __FUNCTION__,
706 Irp,
707 Flags);
708 return Irp;
709 }
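
/*
 * Usage sketch (illustrative only; "LowerDeviceObject" and "MyCompletion"
 * are hypothetical names): a driver allocating its own IRP for a lower
 * device. The completion routine is assumed to free the IRP with IoFreeIrp
 * and return STATUS_MORE_PROCESSING_REQUIRED.
 *
 *     PIRP Irp;
 *     PIO_STACK_LOCATION IoStack;
 *     NTSTATUS Status;
 *
 *     Irp = IoAllocateIrp(LowerDeviceObject->StackSize, FALSE);
 *     if (Irp == NULL) return STATUS_INSUFFICIENT_RESOURCES;
 *
 *     IoStack = IoGetNextIrpStackLocation(Irp);
 *     IoStack->MajorFunction = IRP_MJ_FLUSH_BUFFERS;
 *
 *     IoSetCompletionRoutine(Irp, MyCompletion, NULL, TRUE, TRUE, TRUE);
 *     Status = IoCallDriver(LowerDeviceObject, Irp);
 */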
710
711 /*
712 * @implemented
713 */
714 PIRP
715 NTAPI
716 IopAllocateIrpMustSucceed(IN CCHAR StackSize)
717 {
718 LONG i;
719 PIRP Irp;
720 LARGE_INTEGER Sleep;
721
722 /* Try to get an IRP */
723 Irp = IoAllocateIrp(StackSize, FALSE);
724 if (Irp)
725 return Irp;
726
727 /* If we failed, keep retrying until we get one */
728 i = LONG_MAX;
729 do {
730 i--;
731
732 /* First, sleep for 10ms */
733 Sleep.QuadPart = -10 * 1000 * 10;
734 KeDelayExecutionThread(KernelMode, FALSE, &Sleep);
735
736 /* Then, retry allocation */
737 Irp = IoAllocateIrp(StackSize, FALSE);
738 if (Irp)
739 return Irp;
740 } while (i > 0);
741
742 return Irp;
743 }
744
745 /*
746 * @implemented
747 */
748 PIRP
749 NTAPI
750 IoBuildAsynchronousFsdRequest(IN ULONG MajorFunction,
751 IN PDEVICE_OBJECT DeviceObject,
752 IN PVOID Buffer,
753 IN ULONG Length,
754 IN PLARGE_INTEGER StartingOffset,
755 IN PIO_STATUS_BLOCK IoStatusBlock)
756 {
757 PIRP Irp;
758 PIO_STACK_LOCATION StackPtr;
759
760 /* Allocate IRP */
761 Irp = IoAllocateIrp(DeviceObject->StackSize, FALSE);
762 if (!Irp) return NULL;
763
764 /* Get the Stack */
765 StackPtr = IoGetNextIrpStackLocation(Irp);
766
767 /* Write the Major function and then deal with it */
768 StackPtr->MajorFunction = (UCHAR)MajorFunction;
769
770 /* Do not handle the following here */
771 if ((MajorFunction != IRP_MJ_FLUSH_BUFFERS) &&
772 (MajorFunction != IRP_MJ_SHUTDOWN) &&
773 (MajorFunction != IRP_MJ_PNP) &&
774 (MajorFunction != IRP_MJ_POWER))
775 {
776 /* Check if this is Buffered IO */
777 if (DeviceObject->Flags & DO_BUFFERED_IO)
778 {
779 /* Allocate the System Buffer */
780 Irp->AssociatedIrp.SystemBuffer =
781 ExAllocatePoolWithTag(NonPagedPool, Length, TAG_SYS_BUF);
782 if (!Irp->AssociatedIrp.SystemBuffer)
783 {
784 /* Free the IRP and fail */
785 IoFreeIrp(Irp);
786 return NULL;
787 }
788
789 /* Set flags */
790 Irp->Flags = IRP_BUFFERED_IO | IRP_DEALLOCATE_BUFFER;
791
792 /* Handle special IRP_MJ_WRITE Case */
793 if (MajorFunction == IRP_MJ_WRITE)
794 {
795 /* Copy the buffer data */
796 RtlCopyMemory(Irp->AssociatedIrp.SystemBuffer, Buffer, Length);
797 }
798 else
799 {
800 /* Set the Input Operation flag and set this as a User Buffer */
801 Irp->Flags |= IRP_INPUT_OPERATION;
802 Irp->UserBuffer = Buffer;
803 }
804 }
805 else if (DeviceObject->Flags & DO_DIRECT_IO)
806 {
807 /* Use an MDL for Direct I/O */
808 Irp->MdlAddress = IoAllocateMdl(Buffer,
809 Length,
810 FALSE,
811 FALSE,
812 NULL);
813 if (!Irp->MdlAddress)
814 {
815 /* Free the IRP and fail */
816 IoFreeIrp(Irp);
817 return NULL;
818 }
819
820 /* Probe and Lock */
821 _SEH2_TRY
822 {
823 /* Do the probe */
824 MmProbeAndLockPages(Irp->MdlAddress,
825 KernelMode,
826 MajorFunction == IRP_MJ_READ ?
827 IoWriteAccess : IoReadAccess);
828 }
829 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
830 {
831 /* Free the IRP and its MDL */
832 IoFreeMdl(Irp->MdlAddress);
833 IoFreeIrp(Irp);
834
835 /* Fail */
836 _SEH2_YIELD(return NULL);
837 }
838 _SEH2_END;
839 }
840 else
841 {
842 /* Neither, use the buffer */
843 Irp->UserBuffer = Buffer;
844 }
845
846 /* Check if this is a read */
847 if (MajorFunction == IRP_MJ_READ)
848 {
849 /* Set the parameters for a read */
850 StackPtr->Parameters.Read.Length = Length;
851 StackPtr->Parameters.Read.ByteOffset = *StartingOffset;
852 }
853 else if (MajorFunction == IRP_MJ_WRITE)
854 {
855 /* Otherwise, set write parameters */
856 StackPtr->Parameters.Write.Length = Length;
857 StackPtr->Parameters.Write.ByteOffset = *StartingOffset;
858 }
859 }
860
861 /* Set the Current Thread and IOSB */
862 Irp->UserIosb = IoStatusBlock;
863 Irp->Tail.Overlay.Thread = PsGetCurrentThread();
864
865 /* Return the IRP */
866 IOTRACE(IO_IRP_DEBUG,
867 "%s - Built IRP %p with Major, Buffer, DO %lx %p %p\n",
868 __FUNCTION__,
869 Irp,
870 MajorFunction,
871 Buffer,
872 DeviceObject);
873 return Irp;
874 }
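
/*
 * Usage sketch (illustrative only; "LowerDeviceObject", "ReadBuffer",
 * "DeviceExtension" and "ReadDone" are hypothetical names): building an
 * asynchronous read for a lower device. The buffer and the IOSB must stay
 * valid until completion, and the completion routine is assumed to release
 * any remaining MDL, free the IRP and return STATUS_MORE_PROCESSING_REQUIRED.
 *
 *     LARGE_INTEGER Offset;
 *     PIRP Irp;
 *     NTSTATUS Status;
 *
 *     Offset.QuadPart = 0;
 *     Irp = IoBuildAsynchronousFsdRequest(IRP_MJ_READ,
 *                                         LowerDeviceObject,
 *                                         ReadBuffer,
 *                                         PAGE_SIZE,
 *                                         &Offset,
 *                                         &DeviceExtension->Iosb);
 *     if (Irp == NULL) return STATUS_INSUFFICIENT_RESOURCES;
 *
 *     IoSetCompletionRoutine(Irp, ReadDone, DeviceExtension, TRUE, TRUE, TRUE);
 *     Status = IoCallDriver(LowerDeviceObject, Irp);
 */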
875
876 /*
877 * @implemented
878 */
879 PIRP
880 NTAPI
881 IoBuildDeviceIoControlRequest(IN ULONG IoControlCode,
882 IN PDEVICE_OBJECT DeviceObject,
883 IN PVOID InputBuffer,
884 IN ULONG InputBufferLength,
885 IN PVOID OutputBuffer,
886 IN ULONG OutputBufferLength,
887 IN BOOLEAN InternalDeviceIoControl,
888 IN PKEVENT Event,
889 IN PIO_STATUS_BLOCK IoStatusBlock)
890 {
891 PIRP Irp;
892 PIO_STACK_LOCATION StackPtr;
893 ULONG BufferLength;
894
895 /* Allocate IRP */
896 Irp = IoAllocateIrp(DeviceObject->StackSize, FALSE);
897 if (!Irp) return NULL;
898
899 /* Get the Stack */
900 StackPtr = IoGetNextIrpStackLocation(Irp);
901
902 /* Set the DevCtl Type */
903 StackPtr->MajorFunction = InternalDeviceIoControl ?
904 IRP_MJ_INTERNAL_DEVICE_CONTROL :
905 IRP_MJ_DEVICE_CONTROL;
906
907 /* Set the IOCTL Data */
908 StackPtr->Parameters.DeviceIoControl.IoControlCode = IoControlCode;
909 StackPtr->Parameters.DeviceIoControl.InputBufferLength = InputBufferLength;
910 StackPtr->Parameters.DeviceIoControl.OutputBufferLength =
911 OutputBufferLength;
912
913 /* Handle the Methods */
914 switch (IO_METHOD_FROM_CTL_CODE(IoControlCode))
915 {
916 /* Buffered I/O */
917 case METHOD_BUFFERED:
918
919 /* Select the right Buffer Length */
920 BufferLength = InputBufferLength > OutputBufferLength ?
921 InputBufferLength : OutputBufferLength;
922
923 /* Make sure there is one */
924 if (BufferLength)
925 {
926 /* Allocate the System Buffer */
927 Irp->AssociatedIrp.SystemBuffer =
928 ExAllocatePoolWithTag(NonPagedPool,
929 BufferLength,
930 TAG_SYS_BUF);
931 if (!Irp->AssociatedIrp.SystemBuffer)
932 {
933 /* Free the IRP and fail */
934 IoFreeIrp(Irp);
935 return NULL;
936 }
937
938 /* Check if we got a buffer */
939 if (InputBuffer)
940 {
941 /* Copy into the System Buffer */
942 RtlCopyMemory(Irp->AssociatedIrp.SystemBuffer,
943 InputBuffer,
944 InputBufferLength);
945 }
946
947 /* Write the flags */
948 Irp->Flags = IRP_BUFFERED_IO | IRP_DEALLOCATE_BUFFER;
949 if (OutputBuffer) Irp->Flags |= IRP_INPUT_OPERATION;
950
951 /* Save the Buffer */
952 Irp->UserBuffer = OutputBuffer;
953 }
954 else
955 {
956 /* Clear the Flags and Buffer */
957 Irp->Flags = 0;
958 Irp->UserBuffer = NULL;
959 }
960 break;
961
962 /* Direct I/O */
963 case METHOD_IN_DIRECT:
964 case METHOD_OUT_DIRECT:
965
966 /* Check if we got an input buffer */
967 if (InputBuffer)
968 {
969 /* Allocate the System Buffer */
970 Irp->AssociatedIrp.SystemBuffer =
971 ExAllocatePoolWithTag(NonPagedPool,
972 InputBufferLength,
973 TAG_SYS_BUF);
974 if (!Irp->AssociatedIrp.SystemBuffer)
975 {
976 /* Free the IRP and fail */
977 IoFreeIrp(Irp);
978 return NULL;
979 }
980
981 /* Copy into the System Buffer */
982 RtlCopyMemory(Irp->AssociatedIrp.SystemBuffer,
983 InputBuffer,
984 InputBufferLength);
985
986 /* Write the flags */
987 Irp->Flags = IRP_BUFFERED_IO | IRP_DEALLOCATE_BUFFER;
988 }
989 else
990 {
991 /* Clear the flags */
992 Irp->Flags = 0;
993 }
994
995 /* Check if we got an output buffer */
996 if (OutputBuffer)
997 {
998 /* Allocate an MDL for the output buffer */
999 Irp->MdlAddress = IoAllocateMdl(OutputBuffer,
1000 OutputBufferLength,
1001 FALSE,
1002 FALSE,
1003 Irp);
1004 if (!Irp->MdlAddress)
1005 {
1006 /* Free the IRP and fail */
1007 IoFreeIrp(Irp);
1008 return NULL;
1009 }
1010
1011 /* Probe and Lock */
1012 _SEH2_TRY
1013 {
1014 /* Do the probe */
1015 MmProbeAndLockPages(Irp->MdlAddress,
1016 KernelMode,
1017 IO_METHOD_FROM_CTL_CODE(IoControlCode) ==
1018 METHOD_IN_DIRECT ?
1019 IoReadAccess : IoWriteAccess);
1020 }
1021 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
1022 {
1023 /* Free the MDL */
1024 IoFreeMdl(Irp->MdlAddress);
1025
1026 /* Free the input buffer and IRP */
1027 if (InputBuffer) ExFreePool(Irp->AssociatedIrp.SystemBuffer);
1028 IoFreeIrp(Irp);
1029
1030 /* Fail */
1031 _SEH2_YIELD(return NULL);
1032 }
1033 _SEH2_END;
1034 }
1035 break;
1036
1037 case METHOD_NEITHER:
1038
1039 /* Just save the Buffer */
1040 Irp->UserBuffer = OutputBuffer;
1041 StackPtr->Parameters.DeviceIoControl.Type3InputBuffer = InputBuffer;
1042 }
1043
1044 /* Now write the Event and IoSB */
1045 Irp->UserIosb = IoStatusBlock;
1046 Irp->UserEvent = Event;
1047
1048 /* Sync IRPs are queued to requestor thread's irp cancel/cleanup list */
1049 Irp->Tail.Overlay.Thread = PsGetCurrentThread();
1050 IoQueueThreadIrp(Irp);
1051
1052 /* Return the IRP */
1053 IOTRACE(IO_IRP_DEBUG,
1054 "%s - Built IRP %p with IOCTL, Buffers, DO %lx %p %p %p\n",
1055 __FUNCTION__,
1056 Irp,
1057 IoControlCode,
1058 InputBuffer,
1059 OutputBuffer,
1060 DeviceObject);
1061 return Irp;
1062 }
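
/*
 * Usage sketch (illustrative only; "LowerDeviceObject" is a hypothetical
 * name): the common synchronous IOCTL pattern. The I/O manager frees the
 * IRP itself once it has been completed.
 *
 *     KEVENT Event;
 *     IO_STATUS_BLOCK Iosb;
 *     DISK_GEOMETRY Geometry;
 *     PIRP Irp;
 *     NTSTATUS Status;
 *
 *     KeInitializeEvent(&Event, NotificationEvent, FALSE);
 *     Irp = IoBuildDeviceIoControlRequest(IOCTL_DISK_GET_DRIVE_GEOMETRY,
 *                                         LowerDeviceObject,
 *                                         NULL,
 *                                         0,
 *                                         &Geometry,
 *                                         sizeof(Geometry),
 *                                         FALSE,
 *                                         &Event,
 *                                         &Iosb);
 *     if (Irp == NULL) return STATUS_INSUFFICIENT_RESOURCES;
 *
 *     Status = IoCallDriver(LowerDeviceObject, Irp);
 *     if (Status == STATUS_PENDING)
 *     {
 *         KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
 *         Status = Iosb.Status;
 *     }
 */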
1063
1064 /*
1065 * @implemented
1066 */
1067 PIRP
1068 NTAPI
1069 IoBuildSynchronousFsdRequest(IN ULONG MajorFunction,
1070 IN PDEVICE_OBJECT DeviceObject,
1071 IN PVOID Buffer,
1072 IN ULONG Length,
1073 IN PLARGE_INTEGER StartingOffset,
1074 IN PKEVENT Event,
1075 IN PIO_STATUS_BLOCK IoStatusBlock)
1076 {
1077 PIRP Irp;
1078
1079 /* Do the big work to set up the IRP */
1080 Irp = IoBuildAsynchronousFsdRequest(MajorFunction,
1081 DeviceObject,
1082 Buffer,
1083 Length,
1084 StartingOffset,
1085 IoStatusBlock );
1086 if (!Irp) return NULL;
1087
1088 /* Set the Event, which makes it synchronous */
1089 Irp->UserEvent = Event;
1090
1091 /* Sync IRPs are queued to requestor thread's irp cancel/cleanup list */
1092 IoQueueThreadIrp(Irp);
1093 return Irp;
1094 }
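
/*
 * Usage sketch (illustrative only; "LowerDeviceObject", "ReadBuffer" and
 * "ReadLength" are hypothetical names): a synchronous read issued from a
 * thread that can block. The I/O manager frees the IRP once the request
 * completes.
 *
 *     KEVENT Event;
 *     IO_STATUS_BLOCK Iosb;
 *     LARGE_INTEGER Offset;
 *     PIRP Irp;
 *     NTSTATUS Status;
 *
 *     KeInitializeEvent(&Event, NotificationEvent, FALSE);
 *     Offset.QuadPart = 0;
 *
 *     Irp = IoBuildSynchronousFsdRequest(IRP_MJ_READ,
 *                                        LowerDeviceObject,
 *                                        ReadBuffer,
 *                                        ReadLength,
 *                                        &Offset,
 *                                        &Event,
 *                                        &Iosb);
 *     if (Irp == NULL) return STATUS_INSUFFICIENT_RESOURCES;
 *
 *     Status = IoCallDriver(LowerDeviceObject, Irp);
 *     if (Status == STATUS_PENDING)
 *     {
 *         KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
 *         Status = Iosb.Status;
 *     }
 */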
1095
1096 /*
1097 * @implemented
1098 */
1099 BOOLEAN
1100 NTAPI
1101 IoCancelIrp(IN PIRP Irp)
1102 {
1103 KIRQL OldIrql;
1104 PDRIVER_CANCEL CancelRoutine;
1105 IOTRACE(IO_IRP_DEBUG,
1106 "%s - Canceling IRP %p\n",
1107 __FUNCTION__,
1108 Irp);
1109 ASSERT(Irp->Type == IO_TYPE_IRP);
1110
1111 /* Acquire the cancel lock and cancel the IRP */
1112 IoAcquireCancelSpinLock(&OldIrql);
1113 Irp->Cancel = TRUE;
1114
1115 /* Clear the cancel routine and get the old one */
1116 CancelRoutine = IoSetCancelRoutine(Irp, NULL);
1117 if (CancelRoutine)
1118 {
1119 /* We had a routine, make sure the IRP isn't completed */
1120 if (Irp->CurrentLocation > (Irp->StackCount + 1))
1121 {
1122 /* It is, bugcheck */
1123 KeBugCheckEx(CANCEL_STATE_IN_COMPLETED_IRP,
1124 (ULONG_PTR)Irp,
1125 (ULONG_PTR)CancelRoutine,
1126 0,
1127 0);
1128 }
1129
1130 /* Set the cancel IRQL And call the routine */
1131 Irp->CancelIrql = OldIrql;
1132 CancelRoutine(IoGetCurrentIrpStackLocation(Irp)->DeviceObject, Irp);
1133 return TRUE;
1134 }
1135
1136 /* Otherwise, release the cancel lock and fail */
1137 IoReleaseCancelSpinLock(OldIrql);
1138 return FALSE;
1139 }
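
/*
 * Driver-side sketch (illustrative only; "MyCancelRoutine" is a hypothetical
 * name): the cancel routine that IoCancelIrp invokes is expected to release
 * the cancel spin lock at Irp->CancelIrql and then complete the IRP with
 * STATUS_CANCELLED.
 *
 *     VOID NTAPI MyCancelRoutine(PDEVICE_OBJECT DeviceObject, PIRP Irp)
 *     {
 *         // Called with the cancel spin lock held and Irp->Cancel set
 *         IoReleaseCancelSpinLock(Irp->CancelIrql);
 *
 *         // Remove the IRP from the driver's own queue (not shown),
 *         // then fail it
 *         Irp->IoStatus.Status = STATUS_CANCELLED;
 *         Irp->IoStatus.Information = 0;
 *         IoCompleteRequest(Irp, IO_NO_INCREMENT);
 *     }
 */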
1140
1141 /*
1142 * @implemented
1143 */
1144 VOID
1145 NTAPI
1146 IoCancelThreadIo(IN PETHREAD Thread)
1147 {
1148 KIRQL OldIrql;
1149 ULONG Retries = 3000;
1150 LARGE_INTEGER Interval;
1151 PLIST_ENTRY ListHead, NextEntry;
1152 PIRP Irp;
1153 PAGED_CODE();
1154
1155 /* Windows ignores the given thread and always uses the current one. */
1156 Thread = PsGetCurrentThread();
1157
1158 IOTRACE(IO_IRP_DEBUG,
1159 "%s - Canceling IRPs for Thread %p\n",
1160 __FUNCTION__,
1161 Thread);
1162
1163 /* Raise to APC to protect the IrpList */
1164 KeRaiseIrql(APC_LEVEL, &OldIrql);
1165
1166 /* Start by cancelling all the IRPs in the current thread queue. */
1167 ListHead = &Thread->IrpList;
1168 NextEntry = ListHead->Flink;
1169 while (ListHead != NextEntry)
1170 {
1171 /* Get the IRP */
1172 Irp = CONTAINING_RECORD(NextEntry, IRP, ThreadListEntry);
1173
1174 /* Cancel it */
1175 IoCancelIrp(Irp);
1176
1177 /* Move to the next entry */
1178 NextEntry = NextEntry->Flink;
1179 }
1180
1181 /* Wait 100 milliseconds */
1182 Interval.QuadPart = -1000000;
1183
1184 /* Wait till all the IRPs are completed or cancelled. */
1185 while (!IsListEmpty(&Thread->IrpList))
1186 {
1187 /* Now we can lower */
1188 KeLowerIrql(OldIrql);
1189
1190 /* Wait a short while and then look if all our IRPs were completed. */
1191 KeDelayExecutionThread(KernelMode, FALSE, &Interval);
1192
1193 /*
1194 * Don't stay here forever if some broken driver doesn't complete
1195 * the IRP.
1196 */
1197 if (!(Retries--))
1198 {
1199 /* Print out a message and remove the IRP */
1200 DPRINT1("Broken driver did not complete!\n");
1201 IopDisassociateThreadIrp();
1202 }
1203
1204 /* Raise the IRQL Again */
1205 KeRaiseIrql(APC_LEVEL, &OldIrql);
1206 }
1207
1208 /* We're done, lower the IRQL */
1209 KeLowerIrql(OldIrql);
1210 }
1211
1212 /*
1213 * @implemented
1214 */
1215 #undef IoCallDriver
1216 NTSTATUS
1217 NTAPI
1218 IoCallDriver(IN PDEVICE_OBJECT DeviceObject,
1219 IN PIRP Irp)
1220 {
1221 /* Call fastcall */
1222 return IofCallDriver(DeviceObject, Irp);
1223 }
1224
1225 #define IoCallDriver IofCallDriver
1226
1227 /*
1228 * @implemented
1229 */
1230 #undef IoCompleteRequest
1231 VOID
1232 NTAPI
1233 IoCompleteRequest(IN PIRP Irp,
1234 IN CCHAR PriorityBoost)
1235 {
1236 /* Call the fastcall */
1237 IofCompleteRequest(Irp, PriorityBoost);
1238 }
1239
1240 #define IoCompleteRequest IofCompleteRequest
1241
1242 /*
1243 * @implemented
1244 */
1245 VOID
1246 NTAPI
1247 IoEnqueueIrp(IN PIRP Irp)
1248 {
1249 /* This is the same as calling IoQueueThreadIrp */
1250 IoQueueThreadIrp(Irp);
1251 }
1252
1253 /*
1254 * @implemented
1255 */
1256 NTSTATUS
1257 FASTCALL
1258 IofCallDriver(IN PDEVICE_OBJECT DeviceObject,
1259 IN PIRP Irp)
1260 {
1261 PDRIVER_OBJECT DriverObject;
1262 PIO_STACK_LOCATION StackPtr;
1263
1264 /* Make sure this is a valid IRP */
1265 ASSERT(Irp->Type == IO_TYPE_IRP);
1266
1267 /* Get the Driver Object */
1268 DriverObject = DeviceObject->DriverObject;
1269
1270 /* Decrease the current location and check if we ran out of stack locations */
1271 Irp->CurrentLocation--;
1272 if (Irp->CurrentLocation <= 0)
1273 {
1274 /* This IRP ran out of stack, bugcheck */
1275 KeBugCheckEx(NO_MORE_IRP_STACK_LOCATIONS, (ULONG_PTR)Irp, 0, 0, 0);
1276 }
1277
1278 /* Now update the stack location */
1279 StackPtr = IoGetNextIrpStackLocation(Irp);
1280 Irp->Tail.Overlay.CurrentStackLocation = StackPtr;
1281
1282 /* Set the Device Object in the new stack location */
1283 StackPtr->DeviceObject = DeviceObject;
1284
1285 /* Call it */
1286 return DriverObject->MajorFunction[StackPtr->MajorFunction](DeviceObject,
1287 Irp);
1288 }
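
/*
 * Filter-driver sketch (illustrative only; "LowerDeviceObject" and
 * "MyCompletion" are hypothetical names): the two usual ways of passing an
 * IRP down before calling Io(f)CallDriver.
 *
 *     // Pass the IRP straight down when no completion processing is needed
 *     IoSkipCurrentIrpStackLocation(Irp);
 *     return IoCallDriver(LowerDeviceObject, Irp);
 *
 *     // ...or copy the stack location so our completion routine runs
 *     // once the lower driver has finished
 *     IoCopyCurrentIrpStackLocationToNext(Irp);
 *     IoSetCompletionRoutine(Irp, MyCompletion, NULL, TRUE, TRUE, TRUE);
 *     return IoCallDriver(LowerDeviceObject, Irp);
 */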
1289
1290 FORCEINLINE
1291 VOID
1292 IopClearStackLocation(IN PIO_STACK_LOCATION IoStackLocation)
1293 {
1294 IoStackLocation->MinorFunction = 0;
1295 IoStackLocation->Flags = 0;
1296 IoStackLocation->Control &= SL_ERROR_RETURNED;
1297 IoStackLocation->Parameters.Others.Argument1 = 0;
1298 IoStackLocation->Parameters.Others.Argument2 = 0;
1299 IoStackLocation->Parameters.Others.Argument3 = 0;
1300 IoStackLocation->FileObject = NULL;
1301 }
1302
1303 /*
1304 * @implemented
1305 */
1306 VOID
1307 FASTCALL
1308 IofCompleteRequest(IN PIRP Irp,
1309 IN CCHAR PriorityBoost)
1310 {
1311 PIO_STACK_LOCATION StackPtr, LastStackPtr;
1312 PDEVICE_OBJECT DeviceObject;
1313 PFILE_OBJECT FileObject;
1314 PETHREAD Thread;
1315 NTSTATUS Status;
1316 PMDL Mdl, NextMdl;
1317 ULONG MasterCount;
1318 PIRP MasterIrp;
1319 ULONG Flags;
1320 NTSTATUS ErrorCode = STATUS_SUCCESS;
1321 PREPARSE_DATA_BUFFER DataBuffer = NULL;
1322 IOTRACE(IO_IRP_DEBUG,
1323 "%s - Completing IRP %p\n",
1324 __FUNCTION__,
1325 Irp);
1326
1327 /* Make sure this IRP isn't getting completed twice or is invalid */
1328 if ((Irp->CurrentLocation) > (Irp->StackCount + 1))
1329 {
1330 /* Bugcheck */
1331 KeBugCheckEx(MULTIPLE_IRP_COMPLETE_REQUESTS, (ULONG_PTR)Irp, 0, 0, 0);
1332 }
1333
1334 /* Some sanity checks */
1335 ASSERT(Irp->Type == IO_TYPE_IRP);
1336 ASSERT(!Irp->CancelRoutine);
1337 ASSERT(Irp->IoStatus.Status != STATUS_PENDING);
1338 ASSERT(Irp->IoStatus.Status != (NTSTATUS)0xFFFFFFFF);
1339
1340 /* Get the last stack */
1341 LastStackPtr = (PIO_STACK_LOCATION)(Irp + 1);
1342 if (LastStackPtr->Control & SL_ERROR_RETURNED)
1343 {
1344 /* Get the error code */
1345 ErrorCode = PtrToUlong(LastStackPtr->Parameters.Others.Argument4);
1346 }
1347
1348 /*
1349 * Start the loop with the current stack and point the IRP to the next stack
1350 * and then keep incrementing the stack as we loop through. The IRP should
1351 * always point to the next stack location w.r.t the one currently being
1352 * analyzed, so completion routine code will see the appropriate value.
1353 * Because of this, we must loop until the current stack location is +1 of
1354 * the stack count, because when StackPtr is at the end, CurrentLocation is +1.
1355 */
1356 for (StackPtr = IoGetCurrentIrpStackLocation(Irp),
1357 Irp->CurrentLocation++,
1358 Irp->Tail.Overlay.CurrentStackLocation++;
1359 Irp->CurrentLocation <= (Irp->StackCount + 1);
1360 StackPtr++,
1361 Irp->CurrentLocation++,
1362 Irp->Tail.Overlay.CurrentStackLocation++)
1363 {
1364 /* Set Pending Returned */
1365 Irp->PendingReturned = StackPtr->Control & SL_PENDING_RETURNED;
1366
1367 /* Check if we failed */
1368 if (!NT_SUCCESS(Irp->IoStatus.Status))
1369 {
1370 /* Check if it was changed by a completion routine */
1371 if (Irp->IoStatus.Status != ErrorCode)
1372 {
1373 /* Update the error for the current stack */
1374 ErrorCode = Irp->IoStatus.Status;
1375 StackPtr->Control |= SL_ERROR_RETURNED;
1376 LastStackPtr->Parameters.Others.Argument4 = UlongToPtr(ErrorCode);
1377 LastStackPtr->Control |= SL_ERROR_RETURNED;
1378 }
1379 }
1380
1381 /* Check if there is a Completion Routine to Call */
1382 if ((NT_SUCCESS(Irp->IoStatus.Status) &&
1383 (StackPtr->Control & SL_INVOKE_ON_SUCCESS)) ||
1384 (!NT_SUCCESS(Irp->IoStatus.Status) &&
1385 (StackPtr->Control & SL_INVOKE_ON_ERROR)) ||
1386 (Irp->Cancel &&
1387 (StackPtr->Control & SL_INVOKE_ON_CANCEL)))
1388 {
1389 /* Clear the stack location */
1390 IopClearStackLocation(StackPtr);
1391
1392 /* Check for highest-level device completion routines */
1393 if (Irp->CurrentLocation == (Irp->StackCount + 1))
1394 {
1395 /* Clear the DO, since the current stack location is invalid */
1396 DeviceObject = NULL;
1397 }
1398 else
1399 {
1400 /* Otherwise, return the real one */
1401 DeviceObject = IoGetCurrentIrpStackLocation(Irp)->DeviceObject;
1402 }
1403
1404 /* Call the completion routine */
1405 Status = StackPtr->CompletionRoutine(DeviceObject,
1406 Irp,
1407 StackPtr->Context);
1408
1409 /* Don't touch the Packet in this case, since it might be gone! */
1410 if (Status == STATUS_MORE_PROCESSING_REQUIRED) return;
1411 }
1412 else
1413 {
1414 /* Otherwise, check if this is a completed IRP */
1415 if ((Irp->CurrentLocation <= Irp->StackCount) &&
1416 (Irp->PendingReturned))
1417 {
1418 /* Mark it as pending */
1419 IoMarkIrpPending(Irp);
1420 }
1421
1422 /* Clear the stack location */
1423 IopClearStackLocation(StackPtr);
1424 }
1425 }
1426
1427 /* Check if the IRP is an associated IRP */
1428 if (Irp->Flags & IRP_ASSOCIATED_IRP)
1429 {
1430 /* Get the master IRP and count */
1431 MasterIrp = Irp->AssociatedIrp.MasterIrp;
1432 MasterCount = InterlockedDecrement(&MasterIrp->AssociatedIrp.IrpCount);
1433
1434 /* Free the MDLs */
1435 for (Mdl = Irp->MdlAddress; Mdl; Mdl = NextMdl)
1436 {
1437 /* Go to the next one */
1438 NextMdl = Mdl->Next;
1439 IoFreeMdl(Mdl);
1440 }
1441
1442 /* Free the IRP itself */
1443 IoFreeIrp(Irp);
1444
1445 /* Complete the Master IRP */
1446 if (!MasterCount) IofCompleteRequest(MasterIrp, PriorityBoost);
1447 return;
1448 }
1449
1450 /* Check whether we have to reparse */
1451 if (Irp->IoStatus.Status == STATUS_REPARSE)
1452 {
1453 if (Irp->IoStatus.Information > IO_REMOUNT)
1454 {
1455 /* If that's a reparse tag we understand, save the buffer from deletion */
1456 if (Irp->IoStatus.Information == IO_REPARSE_TAG_MOUNT_POINT)
1457 {
1458 ASSERT(Irp->Tail.Overlay.AuxiliaryBuffer != NULL);
1459 DataBuffer = (PREPARSE_DATA_BUFFER)Irp->Tail.Overlay.AuxiliaryBuffer;
1460 Irp->Tail.Overlay.AuxiliaryBuffer = NULL;
1461 }
1462 else
1463 {
1464 Irp->IoStatus.Status = STATUS_IO_REPARSE_TAG_NOT_HANDLED;
1465 }
1466 }
1467 }
1468
1469 /* Check if we have an auxiliary buffer */
1470 if (Irp->Tail.Overlay.AuxiliaryBuffer)
1471 {
1472 /* Free it */
1473 ExFreePool(Irp->Tail.Overlay.AuxiliaryBuffer);
1474 Irp->Tail.Overlay.AuxiliaryBuffer = NULL;
1475 }
1476
1477 /* Check if this is a Paging I/O or Close Operation */
1478 if (Irp->Flags & (IRP_PAGING_IO | IRP_CLOSE_OPERATION))
1479 {
1480 /* Handle a Close Operation or Sync Paging I/O */
1481 if (Irp->Flags & (IRP_SYNCHRONOUS_PAGING_IO | IRP_CLOSE_OPERATION))
1482 {
1483 /* Set the I/O Status and Signal the Event */
1484 Flags = Irp->Flags & (IRP_SYNCHRONOUS_PAGING_IO | IRP_PAGING_IO);
1485 *Irp->UserIosb = Irp->IoStatus;
1486 KeSetEvent(Irp->UserEvent, PriorityBoost, FALSE);
1487
1488 /* Free the IRP for a Paging I/O Only, Close is handled by us */
1489 if (Flags)
1490 {
1491 /* If we were using the reserve IRP, then call the appropriate
1492 * free function (to make the IRP available again)
1493 */
1494 if (Irp == IopReserveIrpAllocator.ReserveIrp)
1495 {
1496 IopFreeReserveIrp(PriorityBoost);
1497 }
1498 /* Otherwise, free for real! */
1499 else
1500 {
1501 IoFreeIrp(Irp);
1502 }
1503 }
1504 }
1505 else
1506 {
1507 #if 0
1508 /* Page 166 */
1509 KeInitializeApc(&Irp->Tail.Apc,
1510 &Irp->Tail.Overlay.Thread->Tcb,
1511 Irp->ApcEnvironment,
1512 IopCompletePageWrite,
1513 NULL,
1514 NULL,
1515 KernelMode,
1516 NULL);
1517 KeInsertQueueApc(&Irp->Tail.Apc,
1518 NULL,
1519 NULL,
1520 PriorityBoost);
1521 #else
1522 /* Not implemented yet. */
1523 UNIMPLEMENTED_DBGBREAK("Not supported!\n");
1524 #endif
1525 }
1526
1527 /* Get out of here */
1528 return;
1529 }
1530
1531 /* Unlock MDL Pages, page 167. */
1532 Mdl = Irp->MdlAddress;
1533 while (Mdl)
1534 {
1535 MmUnlockPages(Mdl);
1536 Mdl = Mdl->Next;
1537 }
1538
1539 /* Check if we should exit because of a Deferred I/O (page 168) */
1540 if ((Irp->Flags & IRP_DEFER_IO_COMPLETION) && !(Irp->PendingReturned))
1541 {
1542 /* Restore the saved reparse buffer for the caller */
1543 if (Irp->IoStatus.Status == STATUS_REPARSE &&
1544 Irp->IoStatus.Information == IO_REPARSE_TAG_MOUNT_POINT)
1545 {
1546 Irp->Tail.Overlay.AuxiliaryBuffer = (PCHAR)DataBuffer;
1547 }
1548
1549 /*
1550 * Return without queuing the completion APC, since the caller will
1551 * take care of doing its own optimized completion at PASSIVE_LEVEL.
1552 */
1553 return;
1554 }
1555
1556 /* Get the thread and file object */
1557 Thread = Irp->Tail.Overlay.Thread;
1558 FileObject = Irp->Tail.Overlay.OriginalFileObject;
1559
1560 /* Make sure the IRP isn't canceled */
1561 if (!Irp->Cancel)
1562 {
1563 /* Initialize the APC */
1564 KeInitializeApc(&Irp->Tail.Apc,
1565 &Thread->Tcb,
1566 Irp->ApcEnvironment,
1567 IopCompleteRequest,
1568 NULL,
1569 NULL,
1570 KernelMode,
1571 NULL);
1572
1573 /* Queue it */
1574 KeInsertQueueApc(&Irp->Tail.Apc,
1575 FileObject,
1576 DataBuffer,
1577 PriorityBoost);
1578 }
1579 else
1580 {
1581 /* The IRP just got canceled... does a thread still own it? */
1582 if (Thread)
1583 {
1584 /* Yes! There is still hope! Initialize the APC */
1585 KeInitializeApc(&Irp->Tail.Apc,
1586 &Thread->Tcb,
1587 Irp->ApcEnvironment,
1588 IopCompleteRequest,
1589 NULL,
1590 NULL,
1591 KernelMode,
1592 NULL);
1593
1594 /* Queue it */
1595 KeInsertQueueApc(&Irp->Tail.Apc,
1596 FileObject,
1597 DataBuffer,
1598 PriorityBoost);
1599 }
1600 else
1601 {
1602 /* Nothing left for us to do, kill it */
1603 ASSERT(Irp->Cancel);
1604 IopCleanupIrp(Irp, FileObject);
1605 }
1606 }
1607 }
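
/*
 * Driver-side sketch (illustrative only; "BytesTransferred" is a
 * hypothetical variable): how a dispatch routine hands an IRP back to
 * Io(f)CompleteRequest. A completion routine that wants to keep the IRP
 * alive instead returns STATUS_MORE_PROCESSING_REQUIRED.
 *
 *     Irp->IoStatus.Status = STATUS_SUCCESS;
 *     Irp->IoStatus.Information = BytesTransferred;
 *     IoCompleteRequest(Irp, IO_NO_INCREMENT);
 *     return STATUS_SUCCESS;
 */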
1608
1609 NTSTATUS
1610 NTAPI
1611 IopSynchronousCompletion(IN PDEVICE_OBJECT DeviceObject,
1612 IN PIRP Irp,
1613 IN PVOID Context)
1614 {
1615 if (Irp->PendingReturned)
1616 KeSetEvent((PKEVENT)Context, IO_NO_INCREMENT, FALSE);
1617 return STATUS_MORE_PROCESSING_REQUIRED;
1618 }
1619
1620 /*
1621 * @implemented
1622 */
1623 BOOLEAN
1624 NTAPI
1625 IoForwardIrpSynchronously(IN PDEVICE_OBJECT DeviceObject,
1626 IN PIRP Irp)
1627 {
1628 KEVENT Event;
1629 NTSTATUS Status;
1630
1631 /* Check if next stack location is available */
1632 if (Irp->CurrentLocation > Irp->StackCount || Irp->CurrentLocation <= 1)
1633 {
1634 /* No more stack location */
1635 return FALSE;
1636 }
1637
1638 /* Initialize event */
1639 KeInitializeEvent(&Event, NotificationEvent, FALSE);
1640
1641 /* Copy stack location for next driver */
1642 IoCopyCurrentIrpStackLocationToNext(Irp);
1643
1644 /* Set a completion routine, which will signal the event */
1645 IoSetCompletionRoutine(Irp, IopSynchronousCompletion, &Event, TRUE, TRUE, TRUE);
1646
1647 /* Call next driver */
1648 Status = IoCallDriver(DeviceObject, Irp);
1649
1650 /* Check if irp is pending */
1651 if (Status == STATUS_PENDING)
1652 {
1653 /* Yes, wait for its completion */
1654 KeWaitForSingleObject(&Event, Suspended, KernelMode, FALSE, NULL);
1655 }
1656
1657 /* Return success */
1658 return TRUE;
1659 }
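
/*
 * Usage sketch (illustrative only; "LowerDeviceObject" is a hypothetical
 * name): because the completion routine above returns
 * STATUS_MORE_PROCESSING_REQUIRED, the caller still owns the IRP after the
 * lower driver has finished and completes it itself.
 *
 *     NTSTATUS Status = STATUS_UNSUCCESSFUL;
 *
 *     if (IoForwardIrpSynchronously(LowerDeviceObject, Irp))
 *     {
 *         // The lower driver is done; post-process the result here
 *         Status = Irp->IoStatus.Status;
 *     }
 *
 *     IoCompleteRequest(Irp, IO_NO_INCREMENT);
 *     return Status;
 */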
1660
1661 /*
1662 * @implemented
1663 */
1664 VOID
1665 NTAPI
1666 IoFreeIrp(IN PIRP Irp)
1667 {
1668 PNPAGED_LOOKASIDE_LIST List;
1669 PP_NPAGED_LOOKASIDE_NUMBER ListType = LookasideSmallIrpList;
1670 PKPRCB Prcb;
1671 IOTRACE(IO_IRP_DEBUG,
1672 "%s - Freeing IRP %p\n",
1673 __FUNCTION__,
1674 Irp);
1675
1676 /* Make sure the Thread IRP list is empty and that it is OK to free it */
1677 ASSERT(Irp->Type == IO_TYPE_IRP);
1678 ASSERT(IsListEmpty(&Irp->ThreadListEntry));
1679 ASSERT(Irp->CurrentLocation >= Irp->StackCount);
1680
1681 /* Get the PRCB */
1682 Prcb = KeGetCurrentPrcb();
1683
1684 /* If this was a lookaside alloc, increment lookaside float */
1685 if (Irp->AllocationFlags & IRP_LOOKASIDE_ALLOCATION)
1686 {
1687 Irp->AllocationFlags &= ~IRP_LOOKASIDE_ALLOCATION;
1688 InterlockedIncrement(&Prcb->LookasideIrpFloat);
1689 }
1690
1691 /* If this was a pool alloc, free it with the pool */
1692 if (!(Irp->AllocationFlags & IRP_ALLOCATED_FIXED_SIZE))
1693 {
1694 /* Free it */
1695 ExFreePoolWithTag(Irp, TAG_IRP);
1696 }
1697 else
1698 {
1699 /* Check if this was a Big IRP */
1700 if (Irp->StackCount != 1) ListType = LookasideLargeIrpList;
1701
1702 /* Use the P List */
1703 List = (PNPAGED_LOOKASIDE_LIST)Prcb->PPLookasideList[ListType].P;
1704 List->L.TotalFrees++;
1705
1706 /* Check if the Free was within the Depth or not */
1707 if (ExQueryDepthSList(&List->L.ListHead) >= List->L.Depth)
1708 {
1709 /* Let the balancer know */
1710 List->L.FreeMisses++;
1711
1712 /* Use the L List */
1713 List = (PNPAGED_LOOKASIDE_LIST)Prcb->PPLookasideList[ListType].L;
1714 List->L.TotalFrees++;
1715
1716 /* Check if the Free was within the Depth or not */
1717 if (ExQueryDepthSList(&List->L.ListHead) >= List->L.Depth)
1718 {
1719 /* All lists failed, use the pool */
1720 List->L.FreeMisses++;
1721 ExFreePoolWithTag(Irp, TAG_IRP);
1722 Irp = NULL;
1723 }
1724 }
1725
1726 /* The free was within the Depth */
1727 if (Irp)
1728 {
1729 /* Remove the association with the process */
1730 if (Irp->AllocationFlags & IRP_QUOTA_CHARGED)
1731 {
1732 ExReturnPoolQuota(Irp);
1733 Irp->AllocationFlags &= ~IRP_QUOTA_CHARGED;
1734 }
1735
1736 /* Add it to the lookaside list */
1737 InterlockedPushEntrySList(&List->L.ListHead,
1738 (PSLIST_ENTRY)Irp);
1739 }
1740 }
1741 }
1742
1743 /*
1744 * @implemented
1745 */
1746 IO_PAGING_PRIORITY
1747 FASTCALL
1748 IoGetPagingIoPriority(IN PIRP Irp)
1749 {
1750 IO_PAGING_PRIORITY Priority;
1751 ULONG Flags;
1752
1753 /* Get the flags */
1754 Flags = Irp->Flags;
1755
1756 /* Check what priority it has */
1757 if (Flags & IRP_CLASS_CACHE_OPERATION)
1758 {
1759 /* High priority */
1760 Priority = IoPagingPriorityHigh;
1761 }
1762 else if (Flags & IRP_PAGING_IO)
1763 {
1764 /* Normal priority */
1765 Priority = IoPagingPriorityNormal;
1766 }
1767 else
1768 {
1769 /* Invalid -- not a paging IRP */
1770 Priority = IoPagingPriorityInvalid;
1771 }
1772
1773 /* Return the priority */
1774 return Priority;
1775 }
1776
1777 /*
1778 * @implemented
1779 */
1780 PEPROCESS
1781 NTAPI
1782 IoGetRequestorProcess(IN PIRP Irp)
1783 {
1784 /* Return the requestor process */
1785 if (Irp->Tail.Overlay.Thread)
1786 {
1787 if (Irp->ApcEnvironment == OriginalApcEnvironment)
1788 {
1789 return Irp->Tail.Overlay.Thread->ThreadsProcess;
1790 }
1791 else if (Irp->ApcEnvironment == AttachedApcEnvironment)
1792 {
1793 return (PEPROCESS)Irp->Tail.Overlay.Thread->Tcb.ApcState.Process;
1794 }
1795 }
1796
1797 return NULL;
1798 }
1799
1800 /*
1801 * @implemented
1802 */
1803 ULONG
1804 NTAPI
1805 IoGetRequestorProcessId(IN PIRP Irp)
1806 {
1807 PEPROCESS Process;
1808
1809 /* Return the requestor process' id */
1810 Process = IoGetRequestorProcess(Irp);
1811 if (Process) return PtrToUlong(Process->UniqueProcessId);
1812
1813 return 0;
1814 }
1815
1816 /*
1817 * @implemented
1818 */
1819 NTSTATUS
1820 NTAPI
1821 IoGetRequestorSessionId(IN PIRP Irp,
1822 OUT PULONG pSessionId)
1823 {
1824 PEPROCESS Process;
1825
1826 /* Return the session */
1827 if (Irp->Tail.Overlay.Thread)
1828 {
1829 Process = Irp->Tail.Overlay.Thread->ThreadsProcess;
1830 *pSessionId = MmGetSessionId(Process);
1831 return STATUS_SUCCESS;
1832 }
1833
1834 *pSessionId = (ULONG)-1;
1835 return STATUS_UNSUCCESSFUL;
1836 }
1837
1838 /*
1839 * @implemented
1840 */
1841 PIRP
1842 NTAPI
1843 IoGetTopLevelIrp(VOID)
1844 {
1845 /* Return the IRP */
1846 return (PIRP)PsGetCurrentThread()->TopLevelIrp;
1847 }
1848
1849 /*
1850 * @implemented
1851 */
1852 VOID
1853 NTAPI
1854 IoInitializeIrp(IN PIRP Irp,
1855 IN USHORT PacketSize,
1856 IN CCHAR StackSize)
1857 {
1858 /* Clear it */
1859 IOTRACE(IO_IRP_DEBUG,
1860 "%s - Initializing IRP %p\n",
1861 __FUNCTION__,
1862 Irp);
1863 RtlZeroMemory(Irp, PacketSize);
1864
1865 /* Set the Header and other data */
1866 Irp->Type = IO_TYPE_IRP;
1867 Irp->Size = PacketSize;
1868 Irp->StackCount = StackSize;
1869 Irp->CurrentLocation = StackSize + 1;
1870 Irp->ApcEnvironment = KeGetCurrentThread()->ApcStateIndex;
1871 Irp->Tail.Overlay.CurrentStackLocation = (PIO_STACK_LOCATION)(Irp + 1) + StackSize;
1872
1873 /* Initialize the Thread List */
1874 InitializeListHead(&Irp->ThreadListEntry);
1875 }
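
/*
 * Usage sketch (illustrative only; "LowerDeviceObject" and the 'xIrP' pool
 * tag are hypothetical): IoInitializeIrp is meant for IRPs that a driver
 * carves out of its own pool allocation and manages itself; such an IRP is
 * later returned to pool by the driver rather than passed to IoFreeIrp.
 *
 *     USHORT Size;
 *     PIRP Irp;
 *
 *     Size = IoSizeOfIrp(LowerDeviceObject->StackSize);
 *     Irp = ExAllocatePoolWithTag(NonPagedPool, Size, 'xIrP');
 *     if (Irp == NULL) return STATUS_INSUFFICIENT_RESOURCES;
 *
 *     IoInitializeIrp(Irp, Size, LowerDeviceObject->StackSize);
 */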
1876
1877 /*
1878 * @implemented
1879 */
1880 BOOLEAN
1881 NTAPI
1882 IoIsOperationSynchronous(IN PIRP Irp)
1883 {
1884 BOOLEAN SynchIO;
1885 BOOLEAN ForceAsync;
1886
1887 /* If the IRP requests synchronous paging I/O, or the file object was opened
1888 for synchronous I/O, or the IRP_SYNCHRONOUS_API flag is set in the IRP,
1889 the operation is synchronous */
1890 SynchIO = (IoGetCurrentIrpStackLocation(Irp)->FileObject->Flags & FO_SYNCHRONOUS_IO) ||
1891 (Irp->Flags & IRP_SYNCHRONOUS_API) || (Irp->Flags & IRP_SYNCHRONOUS_PAGING_IO);
1892
1893 /* If the IRP requests asynchronous paging I/O, the operation is asynchronous,
1894 even if one of the above conditions is true */
1895 ForceAsync = (Irp->Flags & IRP_PAGING_IO) && !(Irp->Flags & IRP_SYNCHRONOUS_PAGING_IO);
1896
1897 /* Check the flags */
1898 if (SynchIO && !ForceAsync)
1899 {
1900 /* Synch API or Paging I/O is OK, as is Sync File I/O */
1901 return TRUE;
1902 }
1903
1904 /* Otherwise, it is an asynchronous operation. */
1905 return FALSE;
1906 }
1907
1908 /*
1909 * @unimplemented
1910 */
1911 BOOLEAN
1912 NTAPI
1913 IoIsValidNameGraftingBuffer(IN PIRP Irp,
1914 IN PREPARSE_DATA_BUFFER ReparseBuffer)
1915 {
1916 UNIMPLEMENTED;
1917 return FALSE;
1918 }
1919
1920 /*
1921 * @implemented
1922 */
1923 PIRP
1924 NTAPI
1925 IoMakeAssociatedIrp(IN PIRP Irp,
1926 IN CCHAR StackSize)
1927 {
1928 PIRP AssocIrp;
1929 IOTRACE(IO_IRP_DEBUG,
1930 "%s - Associating IRP %p\n",
1931 __FUNCTION__,
1932 Irp);
1933
1934 /* Allocate the IRP */
1935 AssocIrp = IoAllocateIrp(StackSize, FALSE);
1936 if (!AssocIrp) return NULL;
1937
1938 /* Set the Flags */
1939 AssocIrp->Flags |= IRP_ASSOCIATED_IRP;
1940
1941 /* Set the Thread */
1942 AssocIrp->Tail.Overlay.Thread = Irp->Tail.Overlay.Thread;
1943
1944 /* Associate them */
1945 AssocIrp->AssociatedIrp.MasterIrp = Irp;
1946 return AssocIrp;
1947 }
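
/*
 * Highly simplified sketch (illustrative only; "MasterIrp", "AssocIrp",
 * "Count" and "LowerDeviceObject" are hypothetical, and error handling is
 * omitted): the caller sets the master's IrpCount before sending the
 * associated IRPs; the IRP_ASSOCIATED_IRP handling in IofCompleteRequest
 * above then completes the master once the count reaches zero.
 *
 *     MasterIrp->AssociatedIrp.IrpCount = Count;
 *
 *     for (i = 0; i < Count; i++)
 *     {
 *         AssocIrp = IoMakeAssociatedIrp(MasterIrp, LowerDeviceObject->StackSize);
 *
 *         // ...fill the next stack location of AssocIrp (not shown)...
 *
 *         IoCallDriver(LowerDeviceObject, AssocIrp);
 *     }
 */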
1948
1949 /*
1950 * @implemented
1951 */
1952 VOID
1953 NTAPI
1954 IoQueueThreadIrp(IN PIRP Irp)
1955 {
1956 IOTRACE(IO_IRP_DEBUG,
1957 "%s - Queueing IRP %p\n",
1958 __FUNCTION__,
1959 Irp);
1960
1961 /* Use our inlined routine */
1962 IopQueueIrpToThread(Irp);
1963 }
1964
1965 /*
1966 * @implemented
1967 * Reference: Chris Cant's "Writing WDM Device Drivers"
1968 */
1969 VOID
1970 NTAPI
1971 IoReuseIrp(IN OUT PIRP Irp,
1972 IN NTSTATUS Status)
1973 {
1974 UCHAR AllocationFlags;
1975 IOTRACE(IO_IRP_DEBUG,
1976 "%s - Reusing IRP %p\n",
1977 __FUNCTION__,
1978 Irp);
1979
1980 /* Make sure it's OK to reuse it */
1981 ASSERT(!Irp->CancelRoutine);
1982 ASSERT(IsListEmpty(&Irp->ThreadListEntry));
1983
1984 /* Get the old flags */
1985 AllocationFlags = Irp->AllocationFlags;
1986
1987 /* Reinitialize the IRP */
1988 IoInitializeIrp(Irp, Irp->Size, Irp->StackCount);
1989
1990 /* Duplicate the data */
1991 Irp->IoStatus.Status = Status;
1992 Irp->AllocationFlags = AllocationFlags;
1993 }
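
/*
 * Usage sketch (illustrative only; "LowerDeviceObject" and "MyCompletion"
 * are hypothetical names): reinitializing an IRP that was obtained from
 * IoAllocateIrp and kept by the driver, before sending it down again.
 *
 *     IoReuseIrp(Irp, STATUS_SUCCESS);
 *
 *     IoStack = IoGetNextIrpStackLocation(Irp);
 *     IoStack->MajorFunction = IRP_MJ_FLUSH_BUFFERS;
 *
 *     IoSetCompletionRoutine(Irp, MyCompletion, NULL, TRUE, TRUE, TRUE);
 *     Status = IoCallDriver(LowerDeviceObject, Irp);
 */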
1994
1995 /*
1996 * @implemented
1997 */
1998 VOID
1999 NTAPI
2000 IoSetTopLevelIrp(IN PIRP Irp)
2001 {
2002 /* Set the IRP */
2003 PsGetCurrentThread()->TopLevelIrp = (ULONG_PTR)Irp;
2004 }
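
/*
 * File-system sketch (illustrative only): the usual save/restore pattern a
 * file system uses to detect recursive I/O on the current thread.
 *
 *     PIRP TopLevel = IoGetTopLevelIrp();
 *
 *     if (TopLevel == NULL)
 *     {
 *         // We are the top-level request on this thread
 *         IoSetTopLevelIrp(Irp);
 *     }
 *
 *     // ...perform the operation; I/O issued recursively on this thread
 *     // can detect the nesting through IoGetTopLevelIrp()...
 *
 *     if (TopLevel == NULL)
 *     {
 *         IoSetTopLevelIrp(NULL);
 *     }
 */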
2005
2006 #if defined (_WIN64)
2007 BOOLEAN
2008 NTAPI
2009 IoIs32bitProcess(
2010 IN PIRP Irp OPTIONAL)
2011 {
2012 UNIMPLEMENTED;
2013 return FALSE;
2014 }
2015 #endif