/* reactos.git — drivers/network/afd/afd/lock.c */
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
 * FILE: drivers/network/afd/afd/lock.c
5 * PURPOSE: Ancillary functions driver
6 * PROGRAMMER: Art Yerkes (ayerkes@speakeasy.net)
7 * UPDATE HISTORY:
8 * 20040708 Created
9 */
10
11 #include "afd.h"
12
13 PVOID GetLockedData(PIRP Irp, PIO_STACK_LOCATION IrpSp)
14 {
15 ASSERT(Irp->MdlAddress);
16 ASSERT(Irp->Tail.Overlay.DriverContext[0]);
17
18 UNREFERENCED_PARAMETER(IrpSp);
19
20 return Irp->Tail.Overlay.DriverContext[0];
21 }
22
23 /* Lock a method_neither request so it'll be available from DISPATCH_LEVEL */
24 PVOID LockRequest( PIRP Irp,
25 PIO_STACK_LOCATION IrpSp,
26 BOOLEAN Output,
27 KPROCESSOR_MODE *LockMode) {
28 BOOLEAN LockFailed = FALSE;
29
30 ASSERT(!Irp->MdlAddress);
31
32 switch (IrpSp->MajorFunction)
33 {
34 case IRP_MJ_DEVICE_CONTROL:
35 case IRP_MJ_INTERNAL_DEVICE_CONTROL:
36 ASSERT(IrpSp->Parameters.DeviceIoControl.Type3InputBuffer);
37 ASSERT(IrpSp->Parameters.DeviceIoControl.InputBufferLength);
38
39
40 Irp->MdlAddress =
41 IoAllocateMdl( IrpSp->Parameters.DeviceIoControl.Type3InputBuffer,
42 IrpSp->Parameters.DeviceIoControl.InputBufferLength,
43 FALSE,
44 FALSE,
45 NULL );
46 if( Irp->MdlAddress ) {
47 _SEH2_TRY {
48 MmProbeAndLockPages( Irp->MdlAddress, Irp->RequestorMode, IoModifyAccess );
49 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
50 LockFailed = TRUE;
51 } _SEH2_END;
52
53 if( LockFailed ) {
54 AFD_DbgPrint(MIN_TRACE,("Failed to lock pages\n"));
55 IoFreeMdl( Irp->MdlAddress );
56 Irp->MdlAddress = NULL;
57 return NULL;
58 }
59
60 /* The mapped address goes in index 1 */
61 Irp->Tail.Overlay.DriverContext[1] = MmGetSystemAddressForMdlSafe(Irp->MdlAddress, NormalPagePriority);
62 if (!Irp->Tail.Overlay.DriverContext[1])
63 {
64 AFD_DbgPrint(MIN_TRACE,("Failed to get mapped address\n"));
65 MmUnlockPages(Irp->MdlAddress);
66 IoFreeMdl( Irp->MdlAddress );
67 Irp->MdlAddress = NULL;
68 return NULL;
69 }
70
71 /* The allocated address goes in index 0 */
72 Irp->Tail.Overlay.DriverContext[0] = ExAllocatePool(NonPagedPool, MmGetMdlByteCount(Irp->MdlAddress));
73 if (!Irp->Tail.Overlay.DriverContext[0])
74 {
75 AFD_DbgPrint(MIN_TRACE,("Failed to allocate memory\n"));
76 MmUnlockPages(Irp->MdlAddress);
77 IoFreeMdl( Irp->MdlAddress );
78 Irp->MdlAddress = NULL;
79 return NULL;
80 }
81
82 RtlCopyMemory(Irp->Tail.Overlay.DriverContext[0],
83 Irp->Tail.Overlay.DriverContext[1],
84 MmGetMdlByteCount(Irp->MdlAddress));
85
86 /* If we don't want a copy back, we zero the mapped address pointer */
87 if (!Output)
88 {
89 Irp->Tail.Overlay.DriverContext[1] = NULL;
90 }
91
92 /* We're using a user-mode buffer directly */
93 if (LockMode != NULL)
94 {
95 *LockMode = UserMode;
96 }
97 }
98 else return NULL;
99 break;
100
101 case IRP_MJ_READ:
102 case IRP_MJ_WRITE:
103 ASSERT(Irp->UserBuffer);
104
105 Irp->MdlAddress =
106 IoAllocateMdl(Irp->UserBuffer,
107 (IrpSp->MajorFunction == IRP_MJ_READ) ?
108 IrpSp->Parameters.Read.Length : IrpSp->Parameters.Write.Length,
109 FALSE,
110 FALSE,
111 NULL );
112 if( Irp->MdlAddress ) {
113 PAFD_RECV_INFO AfdInfo;
114
115 _SEH2_TRY {
116 MmProbeAndLockPages( Irp->MdlAddress, Irp->RequestorMode, IoModifyAccess );
117 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
118 LockFailed = TRUE;
119 } _SEH2_END;
120
121 if( LockFailed ) {
122 AFD_DbgPrint(MIN_TRACE,("Failed to lock pages\n"));
123 IoFreeMdl( Irp->MdlAddress );
124 Irp->MdlAddress = NULL;
125 return NULL;
126 }
127
128 /* We need to create the info struct that AFD expects for all send/recv requests */
129 AfdInfo = ExAllocatePool(NonPagedPool, sizeof(AFD_RECV_INFO) + sizeof(AFD_WSABUF));
130 if (!AfdInfo)
131 {
132 AFD_DbgPrint(MIN_TRACE,("Failed to allocate memory\n"));
133 MmUnlockPages(Irp->MdlAddress);
134 IoFreeMdl( Irp->MdlAddress );
135 Irp->MdlAddress = NULL;
136 return NULL;
137 }
138
139 /* We'll append the buffer array to this struct */
140 AfdInfo->BufferArray = (PAFD_WSABUF)(AfdInfo + 1);
141 AfdInfo->BufferCount = 1;
142
143 /* Setup the default flags values */
144 AfdInfo->AfdFlags = 0;
145 AfdInfo->TdiFlags = 0;
146
147 /* Now build the buffer array */
148 AfdInfo->BufferArray[0].buf = MmGetSystemAddressForMdl(Irp->MdlAddress);
149 AfdInfo->BufferArray[0].len = MmGetMdlByteCount(Irp->MdlAddress);
150
151 /* Store the struct where AFD expects */
152 Irp->Tail.Overlay.DriverContext[0] = AfdInfo;
153
154 /* Don't copy anything out */
155 Irp->Tail.Overlay.DriverContext[1] = NULL;
156
157 /* We're using a placeholder buffer that we allocated */
158 if (LockMode != NULL)
159 {
160 *LockMode = KernelMode;
161 }
162 }
163 else return NULL;
164 break;
165
166 default:
167 ASSERT(FALSE);
168 return NULL;
169 }
170
171 return GetLockedData(Irp, IrpSp);
172 }
173
174 VOID UnlockRequest( PIRP Irp, PIO_STACK_LOCATION IrpSp )
175 {
176 ASSERT(Irp->MdlAddress);
177 ASSERT(Irp->Tail.Overlay.DriverContext[0]);
178
179 UNREFERENCED_PARAMETER(IrpSp);
180
181 /* Check if we need to copy stuff back */
182 if (Irp->Tail.Overlay.DriverContext[1] != NULL)
183 {
184 RtlCopyMemory(Irp->Tail.Overlay.DriverContext[1],
185 Irp->Tail.Overlay.DriverContext[0],
186 MmGetMdlByteCount(Irp->MdlAddress));
187 }
188
189 ExFreePool(Irp->Tail.Overlay.DriverContext[0]);
190 MmUnlockPages( Irp->MdlAddress );
191 IoFreeMdl( Irp->MdlAddress );
192 Irp->MdlAddress = NULL;
193 }
194
195 /* Note: We add an extra buffer if LockAddress is true. This allows us to
196 * treat the address buffer as an ordinary client buffer. It's only used
197 * for datagrams. */
198
199 PAFD_WSABUF LockBuffers( PAFD_WSABUF Buf, UINT Count,
200 PVOID AddressBuf, PINT AddressLen,
201 BOOLEAN Write, BOOLEAN LockAddress,
202 KPROCESSOR_MODE LockMode) {
203 UINT i;
204 /* Copy the buffer array so we don't lose it */
205 UINT Lock = LockAddress ? 2 : 0;
206 UINT Size = (sizeof(AFD_WSABUF) + sizeof(AFD_MAPBUF)) * (Count + Lock);
207 PAFD_WSABUF NewBuf = ExAllocatePool( PagedPool, Size );
208 BOOLEAN LockFailed = FALSE;
209 PAFD_MAPBUF MapBuf;
210
211 AFD_DbgPrint(MID_TRACE,("Called(%p)\n", NewBuf));
212
213 if( NewBuf ) {
214 RtlZeroMemory(NewBuf, Size);
215
216 MapBuf = (PAFD_MAPBUF)(NewBuf + Count + Lock);
217
218 _SEH2_TRY {
219 RtlCopyMemory( NewBuf, Buf, sizeof(AFD_WSABUF) * Count );
220 if( LockAddress ) {
221 if (AddressBuf && AddressLen) {
222 NewBuf[Count].buf = AddressBuf;
223 NewBuf[Count].len = *AddressLen;
224 NewBuf[Count + 1].buf = (PVOID)AddressLen;
225 NewBuf[Count + 1].len = sizeof(*AddressLen);
226 }
227 Count += 2;
228 }
229 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
230 AFD_DbgPrint(MIN_TRACE,("Access violation copying buffer info "
231 "from userland (%p %p)\n",
232 Buf, AddressLen));
233 ExFreePool( NewBuf );
234 _SEH2_YIELD(return NULL);
235 } _SEH2_END;
236
237 for( i = 0; i < Count; i++ ) {
238 AFD_DbgPrint(MID_TRACE,("Locking buffer %u (%p:%u)\n",
239 i, NewBuf[i].buf, NewBuf[i].len));
240
241 if( NewBuf[i].buf && NewBuf[i].len ) {
242 MapBuf[i].Mdl = IoAllocateMdl( NewBuf[i].buf,
243 NewBuf[i].len,
244 FALSE,
245 FALSE,
246 NULL );
247 } else {
248 MapBuf[i].Mdl = NULL;
249 continue;
250 }
251
252 AFD_DbgPrint(MID_TRACE,("NewMdl @ %p\n", MapBuf[i].Mdl));
253
254 if( MapBuf[i].Mdl ) {
255 AFD_DbgPrint(MID_TRACE,("Probe and lock pages\n"));
256 _SEH2_TRY {
257 MmProbeAndLockPages( MapBuf[i].Mdl, LockMode,
258 Write ? IoModifyAccess : IoReadAccess );
259 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
260 LockFailed = TRUE;
261 } _SEH2_END;
262 AFD_DbgPrint(MID_TRACE,("MmProbeAndLock finished\n"));
263
264 if( LockFailed ) {
265 AFD_DbgPrint(MIN_TRACE,("Failed to lock pages\n"));
266 IoFreeMdl( MapBuf[i].Mdl );
267 MapBuf[i].Mdl = NULL;
268 ExFreePool( NewBuf );
269 return NULL;
270 }
271 } else {
272 ExFreePool( NewBuf );
273 return NULL;
274 }
275 }
276 }
277
278 AFD_DbgPrint(MID_TRACE,("Leaving %p\n", NewBuf));
279
280 return NewBuf;
281 }
282
283 VOID UnlockBuffers( PAFD_WSABUF Buf, UINT Count, BOOL Address ) {
284 UINT Lock = Address ? 2 : 0;
285 PAFD_MAPBUF Map = (PAFD_MAPBUF)(Buf + Count + Lock);
286 UINT i;
287
288 if( !Buf ) return;
289
290 for( i = 0; i < Count + Lock; i++ ) {
291 if( Map[i].Mdl ) {
292 MmUnlockPages( Map[i].Mdl );
293 IoFreeMdl( Map[i].Mdl );
294 Map[i].Mdl = NULL;
295 }
296 }
297
298 ExFreePool( Buf );
299 Buf = NULL;
300 }
301
/* Produce a kernel-land handle array with handles replaced by object
 * pointers. This will allow the system to do proper alerting.
 *
 * Each AFD_HANDLE entry's Handle field is replaced with a referenced
 * file object pointer obtained via ObReferenceObjectByHandle.  On any
 * reference failure the references taken so far are dropped and NULL is
 * returned.  Release the result with UnlockHandles. */
PAFD_HANDLE LockHandles( PAFD_HANDLE HandleArray, UINT HandleCount ) {
    UINT i;
    NTSTATUS Status = STATUS_SUCCESS;

    PAFD_HANDLE FileObjects = ExAllocatePool
        ( NonPagedPool, HandleCount * sizeof(AFD_HANDLE) );

    /* If allocation failed the loop body never runs and NULL falls through
     * below (Status is still success). */
    for( i = 0; FileObjects && i < HandleCount; i++ ) {
        /* Initialize every entry before attempting the reference, so a
         * partially-filled array is always safe to hand to UnlockHandles */
        FileObjects[i].Status = 0;
        FileObjects[i].Events = HandleArray[i].Events;
        FileObjects[i].Handle = 0;
        if( !HandleArray[i].Handle ) continue;
        /* Once Status goes bad, stop referencing but keep initializing
         * the remaining entries */
        if( NT_SUCCESS(Status) ) {
            Status = ObReferenceObjectByHandle
                ( (PVOID)HandleArray[i].Handle,
                  FILE_ALL_ACCESS,
                  NULL,
                  KernelMode,
                  (PVOID*)&FileObjects[i].Handle,
                  NULL );
        }

        if( !NT_SUCCESS(Status) )
        {
            AFD_DbgPrint(MIN_TRACE,("Failed to reference handles (0x%x)\n", Status));
            FileObjects[i].Handle = 0;
        }
    }

    /* Partial failure: drop every reference taken so far and free the array */
    if( !NT_SUCCESS(Status) ) {
        UnlockHandles( FileObjects, HandleCount );
        return NULL;
    }

    return FileObjects;
}
340
341 VOID UnlockHandles( PAFD_HANDLE HandleArray, UINT HandleCount ) {
342 UINT i;
343
344 for( i = 0; i < HandleCount; i++ ) {
345 if( HandleArray[i].Handle )
346 ObDereferenceObject( (PVOID)HandleArray[i].Handle );
347 }
348
349 ExFreePool( HandleArray );
350 HandleArray = NULL;
351 }
352
353 BOOLEAN SocketAcquireStateLock( PAFD_FCB FCB ) {
354 if( !FCB ) return FALSE;
355
356 return !KeWaitForMutexObject(&FCB->Mutex,
357 Executive,
358 KernelMode,
359 FALSE,
360 NULL);
361 }
362
/* Release the FCB state mutex taken by SocketAcquireStateLock. */
VOID SocketStateUnlock( PAFD_FCB FCB ) {
    KeReleaseMutex(&FCB->Mutex, FALSE);
}
366
/* Finish a request while holding the FCB lock: record the completion
 * status and byte count, release any buffer captured by LockRequest,
 * clear the cancel routine, drop the FCB lock, and complete the IRP.
 * Returns Status so callers can `return UnlockAndMaybeComplete(...)`. */
NTSTATUS NTAPI UnlockAndMaybeComplete
( PAFD_FCB FCB, NTSTATUS Status, PIRP Irp,
  UINT Information ) {
    Irp->IoStatus.Status = Status;
    Irp->IoStatus.Information = Information;
    /* Only requests that went through LockRequest own an MDL here */
    if ( Irp->MdlAddress ) UnlockRequest( Irp, IoGetCurrentIrpStackLocation( Irp ) );
    /* Remove our cancel routine before completing the IRP */
    (void)IoSetCancelRoutine(Irp, NULL);
    SocketStateUnlock( FCB );
    IoCompleteRequest( Irp, IO_NETWORK_INCREMENT );
    return Status;
}
378
379
380 NTSTATUS LostSocket( PIRP Irp ) {
381 NTSTATUS Status = STATUS_FILE_CLOSED;
382 AFD_DbgPrint(MIN_TRACE,("Called.\n"));
383 Irp->IoStatus.Information = 0;
384 Irp->IoStatus.Status = Status;
385 if ( Irp->MdlAddress ) UnlockRequest( Irp, IoGetCurrentIrpStackLocation( Irp ) );
386 IoCompleteRequest( Irp, IO_NO_INCREMENT );
387 return Status;
388 }
389
/* Park a user-mode IRP on the FCB's pending list for the given Function
 * index and arm AfdCancelHandler as its cancel routine.  Returns
 * STATUS_PENDING on success, or STATUS_CANCELLED if the IRP was already
 * cancelled before the cancel routine could be registered (in which case
 * the cancel handler has been invoked here directly). */
NTSTATUS QueueUserModeIrp(PAFD_FCB FCB, PIRP Irp, UINT Function)
{
    NTSTATUS Status;

    /* Add the IRP to the queue in all cases (so AfdCancelHandler will work properly) */
    InsertTailList( &FCB->PendingIrpList[Function],
                    &Irp->Tail.Overlay.ListEntry );

    /* Acquire the cancel spin lock and check the cancel bit */
    IoAcquireCancelSpinLock(&Irp->CancelIrql);
    if (!Irp->Cancel)
    {
        /* We are not cancelled; we're good to go so
         * set the cancel routine, release the cancel spin lock,
         * mark the IRP as pending, and
         * return STATUS_PENDING to the caller
         */
        (void)IoSetCancelRoutine(Irp, AfdCancelHandler);
        IoReleaseCancelSpinLock(Irp->CancelIrql);
        IoMarkIrpPending(Irp);
        Status = STATUS_PENDING;
    }
    else
    {
        /* We were already cancelled before we were able to register our cancel routine
         * so we are to call the cancel routine ourselves right here to cancel the IRP
         * (which handles all the stuff we do above) and return STATUS_CANCELLED to the caller
         */
        AfdCancelHandler(IoGetCurrentIrpStackLocation(Irp)->DeviceObject,
                         Irp);
        Status = STATUS_CANCELLED;
    }

    return Status;
}
425
426 NTSTATUS LeaveIrpUntilLater( PAFD_FCB FCB, PIRP Irp, UINT Function ) {
427 NTSTATUS Status;
428
429 Status = QueueUserModeIrp(FCB, Irp, Function);
430
431 SocketStateUnlock( FCB );
432
433 return Status;
434 }