/* $Id$
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        drivers/network/afd/afd/lock.c
 * PURPOSE:     Ancillary functions driver
 * PROGRAMMER:  Art Yerkes (ayerkes@speakeasy.net)
 * UPDATE HISTORY:
 * 20040708 Created
 */

#include "afd.h"

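/* Return the nonpaged copy of the request data that LockRequest stored in
 * DriverContext[0]; only valid between LockRequest and UnlockRequest. */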
PVOID GetLockedData(PIRP Irp, PIO_STACK_LOCATION IrpSp)
{
    ASSERT(Irp->MdlAddress);
    ASSERT(Irp->Tail.Overlay.DriverContext[0]);

    return Irp->Tail.Overlay.DriverContext[0];
}

/* Lock a METHOD_NEITHER request so its buffers remain accessible at DISPATCH_LEVEL */
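/* On success the IRP carries two pieces of context:
 *   DriverContext[0] - a nonpaged copy of (or descriptor for) the caller's
 *                      data, later retrieved with GetLockedData()
 *   DriverContext[1] - the mapped user buffer to copy results back into on
 *                      UnlockRequest, or NULL when no copy-back is wanted.
 * Callers normally release everything through UnlockRequest, either directly
 * or via UnlockAndMaybeComplete()/LostSocket(). */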
PVOID LockRequest( PIRP Irp,
                   PIO_STACK_LOCATION IrpSp,
                   BOOLEAN Output,
                   KPROCESSOR_MODE *LockMode) {
    BOOLEAN LockFailed = FALSE;

    ASSERT(!Irp->MdlAddress);

    switch (IrpSp->MajorFunction)
    {
        case IRP_MJ_DEVICE_CONTROL:
        case IRP_MJ_INTERNAL_DEVICE_CONTROL:
            ASSERT(IrpSp->Parameters.DeviceIoControl.Type3InputBuffer);
            ASSERT(IrpSp->Parameters.DeviceIoControl.InputBufferLength);

            Irp->MdlAddress =
                IoAllocateMdl( IrpSp->Parameters.DeviceIoControl.Type3InputBuffer,
                               IrpSp->Parameters.DeviceIoControl.InputBufferLength,
                               FALSE,
                               FALSE,
                               NULL );
            if( Irp->MdlAddress ) {
                _SEH2_TRY {
                    MmProbeAndLockPages( Irp->MdlAddress, Irp->RequestorMode, IoModifyAccess );
                } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
                    LockFailed = TRUE;
                } _SEH2_END;

                if( LockFailed ) {
                    AFD_DbgPrint(MIN_TRACE,("Failed to lock pages\n"));
                    IoFreeMdl( Irp->MdlAddress );
                    Irp->MdlAddress = NULL;
                    return NULL;
                }

                /* The mapped address goes in index 1 */
                Irp->Tail.Overlay.DriverContext[1] = MmGetSystemAddressForMdlSafe(Irp->MdlAddress, NormalPagePriority);
                if (!Irp->Tail.Overlay.DriverContext[1])
                {
                    AFD_DbgPrint(MIN_TRACE,("Failed to get mapped address\n"));
                    MmUnlockPages(Irp->MdlAddress);
                    IoFreeMdl( Irp->MdlAddress );
                    Irp->MdlAddress = NULL;
                    return NULL;
                }

                /* The allocated address goes in index 0 */
                Irp->Tail.Overlay.DriverContext[0] = ExAllocatePool(NonPagedPool, MmGetMdlByteCount(Irp->MdlAddress));
                if (!Irp->Tail.Overlay.DriverContext[0])
                {
                    AFD_DbgPrint(MIN_TRACE,("Failed to allocate memory\n"));
                    MmUnlockPages(Irp->MdlAddress);
                    IoFreeMdl( Irp->MdlAddress );
                    Irp->MdlAddress = NULL;
                    return NULL;
                }

                RtlCopyMemory(Irp->Tail.Overlay.DriverContext[0],
                              Irp->Tail.Overlay.DriverContext[1],
                              MmGetMdlByteCount(Irp->MdlAddress));

                /* If we don't want a copy back, we zero the mapped address pointer */
                if (!Output)
                {
                    Irp->Tail.Overlay.DriverContext[1] = NULL;
                }

                /* We're using a user-mode buffer directly */
                if (LockMode != NULL)
                {
                    *LockMode = UserMode;
                }
            }
            else return NULL;
            break;

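        /* Raw read/write IRPs carry no AFD_*_INFO block from userland, so a
         * kernel-owned descriptor is synthesized below; that is why this path
         * reports KernelMode as the lock mode. */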
        case IRP_MJ_READ:
        case IRP_MJ_WRITE:
            ASSERT(Irp->UserBuffer);

            Irp->MdlAddress =
                IoAllocateMdl(Irp->UserBuffer,
                              (IrpSp->MajorFunction == IRP_MJ_READ) ?
                                  IrpSp->Parameters.Read.Length : IrpSp->Parameters.Write.Length,
                              FALSE,
                              FALSE,
                              NULL );
            if( Irp->MdlAddress ) {
                PAFD_RECV_INFO AfdInfo;

                _SEH2_TRY {
                    MmProbeAndLockPages( Irp->MdlAddress, Irp->RequestorMode, IoModifyAccess );
                } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
                    LockFailed = TRUE;
                } _SEH2_END;

                if( LockFailed ) {
                    AFD_DbgPrint(MIN_TRACE,("Failed to lock pages\n"));
                    IoFreeMdl( Irp->MdlAddress );
                    Irp->MdlAddress = NULL;
                    return NULL;
                }

                /* We need to create the info struct that AFD expects for all send/recv requests */
                AfdInfo = ExAllocatePool(NonPagedPool, sizeof(AFD_RECV_INFO) + sizeof(AFD_WSABUF));
                if (!AfdInfo)
                {
                    AFD_DbgPrint(MIN_TRACE,("Failed to allocate memory\n"));
                    MmUnlockPages(Irp->MdlAddress);
                    IoFreeMdl( Irp->MdlAddress );
                    Irp->MdlAddress = NULL;
                    return NULL;
                }

                /* We'll append the buffer array to this struct */
                AfdInfo->BufferArray = (PAFD_WSABUF)(AfdInfo + 1);
                AfdInfo->BufferCount = 1;

                /* Setup the default flags values */
                AfdInfo->AfdFlags = 0;
                AfdInfo->TdiFlags = 0;

                /* Now build the buffer array (use the safe mapping call so a
                 * mapping failure can be handled instead of faulting) */
                AfdInfo->BufferArray[0].buf = MmGetSystemAddressForMdlSafe(Irp->MdlAddress, NormalPagePriority);
                if (!AfdInfo->BufferArray[0].buf)
                {
                    AFD_DbgPrint(MIN_TRACE,("Failed to get mapped address\n"));
                    ExFreePool(AfdInfo);
                    MmUnlockPages(Irp->MdlAddress);
                    IoFreeMdl( Irp->MdlAddress );
                    Irp->MdlAddress = NULL;
                    return NULL;
                }
                AfdInfo->BufferArray[0].len = MmGetMdlByteCount(Irp->MdlAddress);

                /* Store the struct where AFD expects */
                Irp->Tail.Overlay.DriverContext[0] = AfdInfo;

                /* Don't copy anything out */
                Irp->Tail.Overlay.DriverContext[1] = NULL;

                /* We're using a placeholder buffer that we allocated */
                if (LockMode != NULL)
                {
                    *LockMode = KernelMode;
                }
            }
            else return NULL;
            break;

        default:
            ASSERT(FALSE);
            return NULL;
    }

    return GetLockedData(Irp, IrpSp);
}

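/* Undo LockRequest: copy results back to the mapped user buffer if one was
 * kept in DriverContext[1], then release the nonpaged copy, the page lock
 * and the MDL. */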
VOID UnlockRequest( PIRP Irp, PIO_STACK_LOCATION IrpSp )
{
    ASSERT(Irp->MdlAddress);
    ASSERT(Irp->Tail.Overlay.DriverContext[0]);

    /* Check if we need to copy stuff back */
    if (Irp->Tail.Overlay.DriverContext[1] != NULL)
    {
        RtlCopyMemory(Irp->Tail.Overlay.DriverContext[1],
                      Irp->Tail.Overlay.DriverContext[0],
                      MmGetMdlByteCount(Irp->MdlAddress));
    }

    ExFreePool(Irp->Tail.Overlay.DriverContext[0]);
    MmUnlockPages( Irp->MdlAddress );
    IoFreeMdl( Irp->MdlAddress );
    Irp->MdlAddress = NULL;
}

/* Note: We add an extra buffer if LockAddress is true. This allows us to
 * treat the address buffer as an ordinary client buffer. It's only used
 * for datagrams. */
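/* The returned array holds Count (+2) AFD_WSABUF entries followed by a
 * parallel array of AFD_MAPBUF entries carrying the MDL that locks each
 * buffer; UnlockBuffers relies on this layout to find the MDLs again. */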

PAFD_WSABUF LockBuffers( PAFD_WSABUF Buf, UINT Count,
                         PVOID AddressBuf, PINT AddressLen,
                         BOOLEAN Write, BOOLEAN LockAddress,
                         KPROCESSOR_MODE LockMode) {
    UINT i;
    /* Copy the buffer array so we don't lose it */
    UINT Lock = LockAddress ? 2 : 0;
    UINT Size = (sizeof(AFD_WSABUF) + sizeof(AFD_MAPBUF)) * (Count + Lock);
    PAFD_WSABUF NewBuf = ExAllocatePool( PagedPool, Size );
    BOOLEAN LockFailed = FALSE;
    PAFD_MAPBUF MapBuf;

    AFD_DbgPrint(MID_TRACE,("Called(%08x)\n", NewBuf));

    if( NewBuf ) {
        RtlZeroMemory(NewBuf, Size);

        MapBuf = (PAFD_MAPBUF)(NewBuf + Count + Lock);

        _SEH2_TRY {
            RtlCopyMemory( NewBuf, Buf, sizeof(AFD_WSABUF) * Count );
            if( LockAddress ) {
                if (AddressBuf && AddressLen) {
                    NewBuf[Count].buf = AddressBuf;
                    NewBuf[Count].len = *AddressLen;
                    NewBuf[Count + 1].buf = (PVOID)AddressLen;
                    NewBuf[Count + 1].len = sizeof(*AddressLen);
                }
                Count += 2;
            }
        } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
            AFD_DbgPrint(MIN_TRACE,("Access violation copying buffer info "
                                    "from userland (%x %x)\n",
                                    Buf, AddressLen));
            ExFreePool( NewBuf );
            _SEH2_YIELD(return NULL);
        } _SEH2_END;

        for( i = 0; i < Count; i++ ) {
            AFD_DbgPrint(MID_TRACE,("Locking buffer %d (%x:%d)\n",
                                    i, NewBuf[i].buf, NewBuf[i].len));

            if( NewBuf[i].buf && NewBuf[i].len ) {
                MapBuf[i].Mdl = IoAllocateMdl( NewBuf[i].buf,
                                               NewBuf[i].len,
                                               FALSE,
                                               FALSE,
                                               NULL );
            } else {
                MapBuf[i].Mdl = NULL;
                continue;
            }

            AFD_DbgPrint(MID_TRACE,("NewMdl @ %x\n", MapBuf[i].Mdl));

            if( MapBuf[i].Mdl ) {
                AFD_DbgPrint(MID_TRACE,("Probe and lock pages\n"));
                _SEH2_TRY {
                    MmProbeAndLockPages( MapBuf[i].Mdl, LockMode,
                                         Write ? IoModifyAccess : IoReadAccess );
                } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
                    LockFailed = TRUE;
                } _SEH2_END;
                AFD_DbgPrint(MID_TRACE,("MmProbeAndLock finished\n"));

                if( LockFailed ) {
                    AFD_DbgPrint(MIN_TRACE,("Failed to lock pages\n"));
                    IoFreeMdl( MapBuf[i].Mdl );
                    MapBuf[i].Mdl = NULL;
                    /* Unwind the MDLs we already locked before bailing out */
                    while( i-- > 0 ) {
                        if( MapBuf[i].Mdl ) {
                            MmUnlockPages( MapBuf[i].Mdl );
                            IoFreeMdl( MapBuf[i].Mdl );
                        }
                    }
                    ExFreePool( NewBuf );
                    return NULL;
                }
            } else {
                /* MDL allocation failed; unwind the MDLs locked so far */
                while( i-- > 0 ) {
                    if( MapBuf[i].Mdl ) {
                        MmUnlockPages( MapBuf[i].Mdl );
                        IoFreeMdl( MapBuf[i].Mdl );
                    }
                }
                ExFreePool( NewBuf );
                return NULL;
            }
        }
    }

    AFD_DbgPrint(MID_TRACE,("Leaving %x\n", NewBuf));

    return NewBuf;
}

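/* Release an array produced by LockBuffers: unlock and free every MDL in
 * the trailing AFD_MAPBUF area, then free the array itself. */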
VOID UnlockBuffers( PAFD_WSABUF Buf, UINT Count, BOOL Address ) {
    UINT Lock = Address ? 2 : 0;
    PAFD_MAPBUF Map = (PAFD_MAPBUF)(Buf + Count + Lock);
    UINT i;

    if( !Buf ) return;

    for( i = 0; i < Count + Lock; i++ ) {
        if( Map[i].Mdl ) {
            MmUnlockPages( Map[i].Mdl );
            IoFreeMdl( Map[i].Mdl );
            Map[i].Mdl = NULL;
        }
    }

    ExFreePool( Buf );
    Buf = NULL;
}

/* Produce a kernel-land handle array with handles replaced by object
 * pointers. This will allow the system to do proper alerting */
PAFD_HANDLE LockHandles( PAFD_HANDLE HandleArray, UINT HandleCount ) {
    UINT i;
    NTSTATUS Status = STATUS_SUCCESS;

    PAFD_HANDLE FileObjects = ExAllocatePool
        ( NonPagedPool, HandleCount * sizeof(AFD_HANDLE) );

    for( i = 0; FileObjects && i < HandleCount; i++ ) {
        FileObjects[i].Status = 0;
        FileObjects[i].Events = HandleArray[i].Events;
        FileObjects[i].Handle = 0;
        if( !HandleArray[i].Handle ) continue;
        if( NT_SUCCESS(Status) ) {
            Status = ObReferenceObjectByHandle
                ( (PVOID)HandleArray[i].Handle,
                  FILE_ALL_ACCESS,
                  NULL,
                  KernelMode,
                  (PVOID*)&FileObjects[i].Handle,
                  NULL );
        }

        if( !NT_SUCCESS(Status) )
        {
            AFD_DbgPrint(MIN_TRACE,("Failed to reference handles (0x%x)\n", Status));
            FileObjects[i].Handle = 0;
        }
    }

    if( !NT_SUCCESS(Status) ) {
        UnlockHandles( FileObjects, HandleCount );
        return NULL;
    }

    return FileObjects;
}

VOID UnlockHandles( PAFD_HANDLE HandleArray, UINT HandleCount ) {
    UINT i;

    for( i = 0; i < HandleCount; i++ ) {
        if( HandleArray[i].Handle )
            ObDereferenceObject( (PVOID)HandleArray[i].Handle );
    }

    ExFreePool( HandleArray );
    HandleArray = NULL;
}

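/* Serialize access to the FCB: SocketAcquireStateLock waits on the per-socket
 * mutex and returns TRUE once it is held; every successful call must be
 * balanced by SocketStateUnlock. */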
BOOLEAN SocketAcquireStateLock( PAFD_FCB FCB ) {
    if( !FCB ) return FALSE;

    return !KeWaitForMutexObject(&FCB->Mutex,
                                 Executive,
                                 KernelMode,
                                 FALSE,
                                 NULL);
}

VOID SocketStateUnlock( PAFD_FCB FCB ) {
    KeReleaseMutex(&FCB->Mutex, FALSE);
}

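/* Finish a request under the state lock: fill in the IoStatus block, undo any
 * LockRequest buffers, clear the cancel routine, drop the FCB lock and
 * complete the IRP with a network boost. */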
NTSTATUS NTAPI UnlockAndMaybeComplete
( PAFD_FCB FCB, NTSTATUS Status, PIRP Irp,
  UINT Information ) {
    Irp->IoStatus.Status = Status;
    Irp->IoStatus.Information = Information;
    if ( Irp->MdlAddress ) UnlockRequest( Irp, IoGetCurrentIrpStackLocation( Irp ) );
    (void)IoSetCancelRoutine(Irp, NULL);
    SocketStateUnlock( FCB );
    IoCompleteRequest( Irp, IO_NETWORK_INCREMENT );
    return Status;
}

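/* Complete an IRP with STATUS_FILE_CLOSED when the underlying socket has
 * already gone away; any locked request buffers are released first. */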
NTSTATUS LostSocket( PIRP Irp ) {
    NTSTATUS Status = STATUS_FILE_CLOSED;
    AFD_DbgPrint(MIN_TRACE,("Called.\n"));
    Irp->IoStatus.Information = 0;
    Irp->IoStatus.Status = Status;
    if ( Irp->MdlAddress ) UnlockRequest( Irp, IoGetCurrentIrpStackLocation( Irp ) );
    IoCompleteRequest( Irp, IO_NO_INCREMENT );
    return Status;
}

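/* Park a user-mode IRP on the FCB's per-function pending queue and register
 * AfdCancelHandler in a cancel-safe way: if the IRP was already cancelled
 * before the routine could be set, the cancel handler is invoked directly. */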
NTSTATUS QueueUserModeIrp(PAFD_FCB FCB, PIRP Irp, UINT Function)
{
    NTSTATUS Status;

    /* Add the IRP to the queue in all cases (so AfdCancelHandler will work properly) */
    InsertTailList( &FCB->PendingIrpList[Function],
                    &Irp->Tail.Overlay.ListEntry );

    /* Acquire the cancel spin lock and check the cancel bit */
    IoAcquireCancelSpinLock(&Irp->CancelIrql);
    if (!Irp->Cancel)
    {
        /* We are not cancelled; we're good to go, so
         * set the cancel routine, release the cancel spin lock,
         * mark the IRP as pending, and
         * return STATUS_PENDING to the caller
         */
        (void)IoSetCancelRoutine(Irp, AfdCancelHandler);
        IoReleaseCancelSpinLock(Irp->CancelIrql);
        IoMarkIrpPending(Irp);
        Status = STATUS_PENDING;
    }
    else
    {
        /* We were already cancelled before we were able to register our cancel routine,
         * so we call the cancel routine ourselves right here to cancel the IRP
         * (which handles all the stuff we do above) and return STATUS_CANCELLED to the caller
         */
        AfdCancelHandler(IoGetCurrentIrpStackLocation(Irp)->DeviceObject,
                         Irp);
        Status = STATUS_CANCELLED;
    }

    return Status;
}

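/* Queue the IRP for later completion and drop the FCB state lock that the
 * caller acquired via SocketAcquireStateLock. */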
NTSTATUS LeaveIrpUntilLater( PAFD_FCB FCB, PIRP Irp, UINT Function ) {
    NTSTATUS Status;

    Status = QueueUserModeIrp(FCB, Irp, Function);

    SocketStateUnlock( FCB );

    return Status;
}