Sync up with trunk r61578.
[reactos.git] / drivers / network / afd / afd / lock.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: drivers/network/afd/afd/lock.c
5 * PURPOSE: Ancillary functions driver
6 * PROGRAMMER: Art Yerkes (ayerkes@speakeasy.net)
7 * UPDATE HISTORY:
8 * 20040708 Created
9 */
10 #include "afd.h"
11
12 PVOID GetLockedData(PIRP Irp, PIO_STACK_LOCATION IrpSp)
13 {
14 ASSERT(Irp->MdlAddress);
15 ASSERT(Irp->Tail.Overlay.DriverContext[0]);
16
17 UNREFERENCED_PARAMETER(IrpSp);
18
19 return Irp->Tail.Overlay.DriverContext[0];
20 }
21
22 /* Lock a method_neither request so it'll be available from DISPATCH_LEVEL */
23 PVOID LockRequest( PIRP Irp,
24 PIO_STACK_LOCATION IrpSp,
25 BOOLEAN Output,
26 KPROCESSOR_MODE *LockMode) {
27 BOOLEAN LockFailed = FALSE;
28
29 ASSERT(!Irp->MdlAddress);
30
31 switch (IrpSp->MajorFunction)
32 {
33 case IRP_MJ_DEVICE_CONTROL:
34 case IRP_MJ_INTERNAL_DEVICE_CONTROL:
35 ASSERT(IrpSp->Parameters.DeviceIoControl.Type3InputBuffer);
36 ASSERT(IrpSp->Parameters.DeviceIoControl.InputBufferLength);
37
38
39 Irp->MdlAddress =
40 IoAllocateMdl( IrpSp->Parameters.DeviceIoControl.Type3InputBuffer,
41 IrpSp->Parameters.DeviceIoControl.InputBufferLength,
42 FALSE,
43 FALSE,
44 NULL );
45 if( Irp->MdlAddress ) {
46 _SEH2_TRY {
47 MmProbeAndLockPages( Irp->MdlAddress, Irp->RequestorMode, IoModifyAccess );
48 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
49 LockFailed = TRUE;
50 } _SEH2_END;
51
52 if( LockFailed ) {
53 AFD_DbgPrint(MIN_TRACE,("Failed to lock pages\n"));
54 IoFreeMdl( Irp->MdlAddress );
55 Irp->MdlAddress = NULL;
56 return NULL;
57 }
58
59 /* The mapped address goes in index 1 */
60 Irp->Tail.Overlay.DriverContext[1] = MmGetSystemAddressForMdlSafe(Irp->MdlAddress, NormalPagePriority);
61 if (!Irp->Tail.Overlay.DriverContext[1])
62 {
63 AFD_DbgPrint(MIN_TRACE,("Failed to get mapped address\n"));
64 MmUnlockPages(Irp->MdlAddress);
65 IoFreeMdl( Irp->MdlAddress );
66 Irp->MdlAddress = NULL;
67 return NULL;
68 }
69
70 /* The allocated address goes in index 0 */
71 Irp->Tail.Overlay.DriverContext[0] = ExAllocatePool(NonPagedPool, MmGetMdlByteCount(Irp->MdlAddress));
72 if (!Irp->Tail.Overlay.DriverContext[0])
73 {
74 AFD_DbgPrint(MIN_TRACE,("Failed to allocate memory\n"));
75 MmUnlockPages(Irp->MdlAddress);
76 IoFreeMdl( Irp->MdlAddress );
77 Irp->MdlAddress = NULL;
78 return NULL;
79 }
80
81 RtlCopyMemory(Irp->Tail.Overlay.DriverContext[0],
82 Irp->Tail.Overlay.DriverContext[1],
83 MmGetMdlByteCount(Irp->MdlAddress));
84
85 /* If we don't want a copy back, we zero the mapped address pointer */
86 if (!Output)
87 {
88 Irp->Tail.Overlay.DriverContext[1] = NULL;
89 }
90
91 /* We're using a user-mode buffer directly */
92 if (LockMode != NULL)
93 {
94 *LockMode = UserMode;
95 }
96 }
97 else return NULL;
98 break;
99
100 case IRP_MJ_READ:
101 case IRP_MJ_WRITE:
102 ASSERT(Irp->UserBuffer);
103
104 Irp->MdlAddress =
105 IoAllocateMdl(Irp->UserBuffer,
106 (IrpSp->MajorFunction == IRP_MJ_READ) ?
107 IrpSp->Parameters.Read.Length : IrpSp->Parameters.Write.Length,
108 FALSE,
109 FALSE,
110 NULL );
111 if( Irp->MdlAddress ) {
112 PAFD_RECV_INFO AfdInfo;
113
114 _SEH2_TRY {
115 MmProbeAndLockPages( Irp->MdlAddress, Irp->RequestorMode, IoModifyAccess );
116 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
117 LockFailed = TRUE;
118 } _SEH2_END;
119
120 if( LockFailed ) {
121 AFD_DbgPrint(MIN_TRACE,("Failed to lock pages\n"));
122 IoFreeMdl( Irp->MdlAddress );
123 Irp->MdlAddress = NULL;
124 return NULL;
125 }
126
127 /* We need to create the info struct that AFD expects for all send/recv requests */
128 AfdInfo = ExAllocatePool(NonPagedPool, sizeof(AFD_RECV_INFO) + sizeof(AFD_WSABUF));
129 if (!AfdInfo)
130 {
131 AFD_DbgPrint(MIN_TRACE,("Failed to allocate memory\n"));
132 MmUnlockPages(Irp->MdlAddress);
133 IoFreeMdl( Irp->MdlAddress );
134 Irp->MdlAddress = NULL;
135 return NULL;
136 }
137
138 /* We'll append the buffer array to this struct */
139 AfdInfo->BufferArray = (PAFD_WSABUF)(AfdInfo + 1);
140 AfdInfo->BufferCount = 1;
141
142 /* Setup the default flags values */
143 AfdInfo->AfdFlags = 0;
144 AfdInfo->TdiFlags = 0;
145
146 /* Now build the buffer array */
147 AfdInfo->BufferArray[0].buf = MmGetSystemAddressForMdl(Irp->MdlAddress);
148 AfdInfo->BufferArray[0].len = MmGetMdlByteCount(Irp->MdlAddress);
149
150 /* Store the struct where AFD expects */
151 Irp->Tail.Overlay.DriverContext[0] = AfdInfo;
152
153 /* Don't copy anything out */
154 Irp->Tail.Overlay.DriverContext[1] = NULL;
155
156 /* We're using a placeholder buffer that we allocated */
157 if (LockMode != NULL)
158 {
159 *LockMode = KernelMode;
160 }
161 }
162 else return NULL;
163 break;
164
165 default:
166 ASSERT(FALSE);
167 return NULL;
168 }
169
170 return GetLockedData(Irp, IrpSp);
171 }
172
173 VOID UnlockRequest( PIRP Irp, PIO_STACK_LOCATION IrpSp )
174 {
175 ASSERT(Irp->MdlAddress);
176 ASSERT(Irp->Tail.Overlay.DriverContext[0]);
177
178 UNREFERENCED_PARAMETER(IrpSp);
179
180 /* Check if we need to copy stuff back */
181 if (Irp->Tail.Overlay.DriverContext[1] != NULL)
182 {
183 RtlCopyMemory(Irp->Tail.Overlay.DriverContext[1],
184 Irp->Tail.Overlay.DriverContext[0],
185 MmGetMdlByteCount(Irp->MdlAddress));
186 }
187
188 ExFreePool(Irp->Tail.Overlay.DriverContext[0]);
189 MmUnlockPages( Irp->MdlAddress );
190 IoFreeMdl( Irp->MdlAddress );
191 Irp->MdlAddress = NULL;
192 }
193
194 /* Note: We add an extra buffer if LockAddress is true. This allows us to
195 * treat the address buffer as an ordinary client buffer. It's only used
196 * for datagrams. */
197
198 PAFD_WSABUF LockBuffers( PAFD_WSABUF Buf, UINT Count,
199 PVOID AddressBuf, PINT AddressLen,
200 BOOLEAN Write, BOOLEAN LockAddress,
201 KPROCESSOR_MODE LockMode) {
202 UINT i;
203 /* Copy the buffer array so we don't lose it */
204 UINT Lock = LockAddress ? 2 : 0;
205 UINT Size = (sizeof(AFD_WSABUF) + sizeof(AFD_MAPBUF)) * (Count + Lock);
206 PAFD_WSABUF NewBuf = ExAllocatePool( PagedPool, Size );
207 BOOLEAN LockFailed = FALSE;
208 PAFD_MAPBUF MapBuf;
209
210 AFD_DbgPrint(MID_TRACE,("Called(%p)\n", NewBuf));
211
212 if( NewBuf ) {
213 RtlZeroMemory(NewBuf, Size);
214
215 MapBuf = (PAFD_MAPBUF)(NewBuf + Count + Lock);
216
217 _SEH2_TRY {
218 RtlCopyMemory( NewBuf, Buf, sizeof(AFD_WSABUF) * Count );
219 if( LockAddress ) {
220 if (AddressBuf && AddressLen) {
221 NewBuf[Count].buf = AddressBuf;
222 NewBuf[Count].len = *AddressLen;
223 NewBuf[Count + 1].buf = (PVOID)AddressLen;
224 NewBuf[Count + 1].len = sizeof(*AddressLen);
225 }
226 Count += 2;
227 }
228 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
229 AFD_DbgPrint(MIN_TRACE,("Access violation copying buffer info "
230 "from userland (%p %p)\n",
231 Buf, AddressLen));
232 ExFreePool( NewBuf );
233 _SEH2_YIELD(return NULL);
234 } _SEH2_END;
235
236 for( i = 0; i < Count; i++ ) {
237 AFD_DbgPrint(MID_TRACE,("Locking buffer %u (%p:%u)\n",
238 i, NewBuf[i].buf, NewBuf[i].len));
239
240 if( NewBuf[i].buf && NewBuf[i].len ) {
241 MapBuf[i].Mdl = IoAllocateMdl( NewBuf[i].buf,
242 NewBuf[i].len,
243 FALSE,
244 FALSE,
245 NULL );
246 } else {
247 MapBuf[i].Mdl = NULL;
248 continue;
249 }
250
251 AFD_DbgPrint(MID_TRACE,("NewMdl @ %p\n", MapBuf[i].Mdl));
252
253 if( MapBuf[i].Mdl ) {
254 AFD_DbgPrint(MID_TRACE,("Probe and lock pages\n"));
255 _SEH2_TRY {
256 MmProbeAndLockPages( MapBuf[i].Mdl, LockMode,
257 Write ? IoModifyAccess : IoReadAccess );
258 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
259 LockFailed = TRUE;
260 } _SEH2_END;
261 AFD_DbgPrint(MID_TRACE,("MmProbeAndLock finished\n"));
262
263 if( LockFailed ) {
264 AFD_DbgPrint(MIN_TRACE,("Failed to lock pages\n"));
265 IoFreeMdl( MapBuf[i].Mdl );
266 MapBuf[i].Mdl = NULL;
267 ExFreePool( NewBuf );
268 return NULL;
269 }
270 } else {
271 ExFreePool( NewBuf );
272 return NULL;
273 }
274 }
275 }
276
277 AFD_DbgPrint(MID_TRACE,("Leaving %p\n", NewBuf));
278
279 return NewBuf;
280 }
281
282 VOID UnlockBuffers( PAFD_WSABUF Buf, UINT Count, BOOL Address ) {
283 UINT Lock = Address ? 2 : 0;
284 PAFD_MAPBUF Map = (PAFD_MAPBUF)(Buf + Count + Lock);
285 UINT i;
286
287 if( !Buf ) return;
288
289 for( i = 0; i < Count + Lock; i++ ) {
290 if( Map[i].Mdl ) {
291 MmUnlockPages( Map[i].Mdl );
292 IoFreeMdl( Map[i].Mdl );
293 Map[i].Mdl = NULL;
294 }
295 }
296
297 ExFreePool( Buf );
298 Buf = NULL;
299 }
300
301 /* Produce a kernel-land handle array with handles replaced by object
302 * pointers. This will allow the system to do proper alerting */
303 PAFD_HANDLE LockHandles( PAFD_HANDLE HandleArray, UINT HandleCount ) {
304 UINT i;
305 NTSTATUS Status = STATUS_SUCCESS;
306
307 PAFD_HANDLE FileObjects = ExAllocatePool
308 ( NonPagedPool, HandleCount * sizeof(AFD_HANDLE) );
309
310 for( i = 0; FileObjects && i < HandleCount; i++ ) {
311 FileObjects[i].Status = 0;
312 FileObjects[i].Events = HandleArray[i].Events;
313 FileObjects[i].Handle = 0;
314 if( !HandleArray[i].Handle ) continue;
315 if( NT_SUCCESS(Status) ) {
316 Status = ObReferenceObjectByHandle
317 ( (PVOID)HandleArray[i].Handle,
318 FILE_ALL_ACCESS,
319 NULL,
320 KernelMode,
321 (PVOID*)&FileObjects[i].Handle,
322 NULL );
323 }
324
325 if( !NT_SUCCESS(Status) )
326 {
327 AFD_DbgPrint(MIN_TRACE,("Failed to reference handles (0x%x)\n", Status));
328 FileObjects[i].Handle = 0;
329 }
330 }
331
332 if( !NT_SUCCESS(Status) ) {
333 UnlockHandles( FileObjects, HandleCount );
334 return NULL;
335 }
336
337 return FileObjects;
338 }
339
340 VOID UnlockHandles( PAFD_HANDLE HandleArray, UINT HandleCount ) {
341 UINT i;
342
343 for( i = 0; i < HandleCount; i++ ) {
344 if( HandleArray[i].Handle )
345 ObDereferenceObject( (PVOID)HandleArray[i].Handle );
346 }
347
348 ExFreePool( HandleArray );
349 HandleArray = NULL;
350 }
351
352 BOOLEAN SocketAcquireStateLock( PAFD_FCB FCB ) {
353 if( !FCB ) return FALSE;
354
355 return !KeWaitForMutexObject(&FCB->Mutex,
356 Executive,
357 KernelMode,
358 FALSE,
359 NULL);
360 }
361
/* Release the FCB state mutex taken by SocketAcquireStateLock. */
VOID SocketStateUnlock( PAFD_FCB FCB ) {
    KeReleaseMutex(&FCB->Mutex, FALSE);
}
365
/* Finish a request: record Status and the byte count in the IRP, release
 * any buffers locked via LockRequest, clear the cancel routine, drop the
 * FCB state lock and complete the IRP.  Returns Status so callers can write
 * "return UnlockAndMaybeComplete(...)".  The order matters: the cancel
 * routine is cleared before completion so AfdCancelHandler cannot run on a
 * completed IRP. */
NTSTATUS NTAPI UnlockAndMaybeComplete
( PAFD_FCB FCB, NTSTATUS Status, PIRP Irp,
  UINT Information ) {
    Irp->IoStatus.Status = Status;
    Irp->IoStatus.Information = Information;
    /* An MDL is present only if LockRequest was used on this IRP */
    if ( Irp->MdlAddress ) UnlockRequest( Irp, IoGetCurrentIrpStackLocation( Irp ) );
    (void)IoSetCancelRoutine(Irp, NULL);
    SocketStateUnlock( FCB );
    IoCompleteRequest( Irp, IO_NETWORK_INCREMENT );
    return Status;
}
377
378
379 NTSTATUS LostSocket( PIRP Irp ) {
380 NTSTATUS Status = STATUS_FILE_CLOSED;
381 AFD_DbgPrint(MIN_TRACE,("Called.\n"));
382 Irp->IoStatus.Information = 0;
383 Irp->IoStatus.Status = Status;
384 if ( Irp->MdlAddress ) UnlockRequest( Irp, IoGetCurrentIrpStackLocation( Irp ) );
385 IoCompleteRequest( Irp, IO_NO_INCREMENT );
386 return Status;
387 }
388
/* Park a user-mode IRP on FCB->PendingIrpList[Function] with proper cancel
 * support.  Returns STATUS_PENDING when the IRP was queued (cancel routine
 * set, IRP marked pending) or STATUS_CANCELLED when the IRP was already
 * cancelled, in which case AfdCancelHandler has been invoked here and has
 * completed the IRP.  The IRP is enqueued BEFORE the cancel check so that
 * AfdCancelHandler can always find it on the list. */
NTSTATUS QueueUserModeIrp(PAFD_FCB FCB, PIRP Irp, UINT Function)
{
    NTSTATUS Status;

    /* Add the IRP to the queue in all cases (so AfdCancelHandler will work properly) */
    InsertTailList( &FCB->PendingIrpList[Function],
                    &Irp->Tail.Overlay.ListEntry );

    /* Acquire the cancel spin lock and check the cancel bit */
    IoAcquireCancelSpinLock(&Irp->CancelIrql);
    if (!Irp->Cancel)
    {
        /* We are not cancelled; we're good to go so
         * set the cancel routine, release the cancel spin lock,
         * mark the IRP as pending, and
         * return STATUS_PENDING to the caller
         */
        (void)IoSetCancelRoutine(Irp, AfdCancelHandler);
        IoReleaseCancelSpinLock(Irp->CancelIrql);
        IoMarkIrpPending(Irp);
        Status = STATUS_PENDING;
    }
    else
    {
        /* We were already cancelled before we were able to register our cancel routine
         * so we are to call the cancel routine ourselves right here to cancel the IRP
         * (which handles all the stuff we do above) and return STATUS_CANCELLED to the caller
         */
        AfdCancelHandler(IoGetCurrentIrpStackLocation(Irp)->DeviceObject,
                         Irp);
        Status = STATUS_CANCELLED;
    }

    return Status;
}
424
425 NTSTATUS LeaveIrpUntilLater( PAFD_FCB FCB, PIRP Irp, UINT Function ) {
426 NTSTATUS Status;
427
428 Status = QueueUserModeIrp(FCB, Irp, Function);
429
430 SocketStateUnlock( FCB );
431
432 return Status;
433 }