#include "../usb_wrapper.h"
#include "hcd.h"

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function. Only use this if you allocate the
 * space for a struct urb on your own. If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void STDCALL usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		urb->count = (atomic_t)ATOMIC_INIT(1);
		spin_lock_init(&urb->lock);
	}
}
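
/* Illustrative sketch (not part of this driver): usb_init_urb() is only
 * needed when the driver provides its own storage for a struct urb, for
 * example one embedded in a private device structure. The names below
 * (my_device, my_setup) are hypothetical.
 */
#if 0
struct my_device {
	struct urb	int_urb;	/* embedded, not from usb_alloc_urb() */
	/* ... */
};

static void my_setup(struct my_device *md)
{
	/* zero the urb and set its reference count to 1 */
	usb_init_urb(&md->int_urb);
}
#endif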

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If no memory is available, NULL is returned.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 */
struct urb STDCALL *usb_alloc_urb(int iso_packets, int mem_flags)
{
	struct urb *urb;

	urb = (struct urb *)kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb) {
		err("alloc_urb: kmalloc failed");
		return NULL;
	}
	usb_init_urb(urb);
	return urb;
}
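
/* Illustrative sketch (not part of this driver): pass 0 iso packets when the
 * urb will be used for control, bulk or interrupt transfers; for isochronous
 * transfers the descriptor array is allocated together with the urb. The
 * variable names are hypothetical.
 */
#if 0
struct urb *bulk_urb, *iso_urb;

bulk_urb = usb_alloc_urb(0, GFP_KERNEL);	/* no iso descriptors */
iso_urb = usb_alloc_urb(8, GFP_KERNEL);		/* room for 8 iso packets */
if (!bulk_urb || !iso_urb) {
	/* handle -ENOMEM: free whichever urb was allocated */
}
#endif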

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free
 *
 * Must be called when a user of a urb is finished with it. When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed, that must be
 * done elsewhere.
 */
void STDCALL usb_free_urb(struct urb *urb)
{
	if (urb)
		if (atomic_dec_and_test(&urb->count))
			kfree(urb);
}
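
/* Illustrative sketch (not part of this driver): usb_free_urb() only drops
 * one reference; the urb memory is released when the last reference goes
 * away, so a driver may drop its reference even while the host controller
 * driver still holds one of its own.
 */
#if 0
struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);	/* count == 1 */

if (urb)
	usb_free_urb(urb);	/* count drops to 0, memory is freed */
#endif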

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver. This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb STDCALL *usb_get_urb(struct urb *urb)
{
	if (urb) {
		atomic_inc(&urb->count);
		return urb;
	} else
		return NULL;
}
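
/* Illustrative sketch (not part of this driver): how a host controller
 * driver would take its own reference when a urb is handed to it, and drop
 * that reference when it gives the urb back. The function names are
 * hypothetical.
 */
#if 0
static int my_hcd_enqueue(struct urb *urb)
{
	urb = usb_get_urb(urb);		/* count: 1 -> 2, HCD co-owns the urb */
	/* ... queue the transfer on the controller ... */
	return 0;
}

static void my_hcd_giveback(struct urb *urb)
{
	/* ... call the submitter's completion handler ... */
	usb_free_urb(urb);		/* drop the HCD's reference */
}
#endif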


/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem. Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (also called "request cancellation").
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number. If the submission is successful, the complete()
 * callback from the urb will be called exactly once, when the USB core and
 * host controller driver are finished with the urb. When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request. The completion handler may then
 * immediately free or reuse that URB.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification. For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput. This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queueing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier requests.
 *
 * Bulk and Isochronous URBs may always be queued. At this writing, all
 * mainstream host controller drivers support queueing for control and
 * interrupt transfer requests.
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb. Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks). When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled. This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc. There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply.
 *
 */
int STDCALL usb_submit_urb(struct urb *urb, int mem_flags)
{
	int pipe, temp, max;
	struct usb_device *dev;
	struct usb_operations *op;
	int is_out;
	//printk("sub dev %p bus %p num %i op %p sub %p\n",
	//	urb->dev, urb->dev->bus, urb->dev->devnum, urb->dev->bus->op, urb->dev->bus->op->submit_urb);
	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	if (!(dev = urb->dev) ||
	    (dev->state < USB_STATE_DEFAULT) ||
	    (!dev->bus) || (dev->devnum <= 0))
		return -ENODEV;
	if (!(op = dev->bus->op) || !op->submit_urb)
		return -ENODEV;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->bandwidth = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	pipe = urb->pipe;
	temp = usb_pipetype (pipe);
	is_out = usb_pipeout (pipe);

	if (!usb_pipecontrol (pipe) && dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	/* (actually HCDs may need to duplicate this, endpoint might yet
	 * stall due to queued bulk/intr transactions that complete after
	 * we check)
	 */
	if (usb_endpoint_halted (dev, usb_pipeendpoint (pipe), is_out))
		return -EPIPE;

	/* FIXME there should be a sharable lock protecting us against
	 * config/altsetting changes and disconnects, kicking in here.
	 * (here == before maxpacket, and eventually endpoint type,
	 * checks get made.)
	 */

	max = usb_maxpacket (dev, pipe, is_out);
	if (max <= 0) {
		dbg ("%s: bogus endpoint %d-%s on usb-%s-%s (bad maxpacket %d)",
			__FUNCTION__,
			usb_pipeendpoint (pipe), is_out ? "OUT" : "IN",
			dev->bus->bus_name, dev->devpath,
			max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (temp == PIPE_ISOCHRONOUS) {
		int n, len;

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc [n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc [n].status = -EXDEV;
			urb->iso_frame_desc [n].actual_length = 0;
		}
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length < 0)
		return -EMSGSIZE;

#ifdef DEBUG
	/* stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */
	{
		unsigned int orig_flags = urb->transfer_flags;
		unsigned int allowed;

		/* enforce simple/standard policy */
		allowed = URB_ASYNC_UNLINK;	// affects later unlinks
		allowed |= URB_NO_DMA_MAP;
		allowed |= URB_NO_INTERRUPT;
		switch (temp) {
		case PIPE_BULK:
			if (is_out)
				allowed |= URB_ZERO_PACKET;
			/* FALLTHROUGH */
		case PIPE_CONTROL:
			allowed |= URB_NO_FSBR;	/* only affects UHCI */
			/* FALLTHROUGH */
		default:			/* all non-iso endpoints */
			if (!is_out)
				allowed |= URB_SHORT_NOT_OK;
			break;
		case PIPE_ISOCHRONOUS:
			allowed |= URB_ISO_ASAP;
			break;
		}
		urb->transfer_flags &= allowed;

		/* fail if submitter gave bogus flags */
		if (urb->transfer_flags != orig_flags) {
			err ("BOGUS urb flags, %x --> %x",
				orig_flags, urb->transfer_flags);
			return -EINVAL;
		}
	}
#endif
	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (temp) {
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		/* too small? */
		if (urb->interval <= 0)
			return -EINVAL;
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_HIGH:	/* units are microframes */
			// NOTE usb handles 2^15
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			temp = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (temp == PIPE_INTERRUPT) {
				if (urb->interval > 255)
					return -EINVAL;
				// NOTE ohci only handles up to 32
				temp = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				// NOTE usb and ohci handle up to 2^15
				temp = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		/* power of two? */
		while (temp > urb->interval)
			temp >>= 1;
		urb->interval = temp;
	}

	return op->submit_urb (urb, mem_flags);
}
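
/* Illustrative sketch (not part of this driver): keeping a periodic
 * (interrupt) endpoint's bandwidth reserved by resubmitting the urb from
 * its completion handler, as described in the usb_submit_urb() notes above.
 * GFP_ATOMIC must be used inside the handler. The handler name and its
 * prototype are assumptions following the Linux 2.5-era API this code is
 * derived from.
 */
#if 0
static void my_int_complete(struct urb *urb, struct pt_regs *regs)
{
	if (urb->status == 0) {
		/* ... consume urb->transfer_buffer / urb->actual_length ... */
	}

	/* keep some urb queued so the endpoint's bandwidth
	 * reservation is not dropped
	 */
	if (usb_submit_urb(urb, GFP_ATOMIC))
		err("resubmit failed");
}
#endif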

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request
 *
 * This routine cancels an in-progress request. The request's
 * completion handler will be called with a status code indicating
 * that the request has been canceled, and that control of the URB
 * has been returned to that device driver.
 *
 * When the URB_ASYNC_UNLINK transfer flag for the URB is clear, this
 * request is synchronous. Success is indicated by returning zero,
 * at which time the urb will have been unlinked,
 * and the completion function will see status -ENOENT. Failure is
 * indicated by any other return value. This mode may not be used
 * when unlinking an urb from an interrupt context, such as a bottom
 * half or a completion handler.
 *
 * When the URB_ASYNC_UNLINK transfer flag for the URB is set, this
 * request is asynchronous. Success is indicated by returning -EINPROGRESS,
 * at which time the urb will normally not have been unlinked,
 * and the completion function will see status -ECONNRESET. Failure is
 * indicated by any other return value.
 */
int STDCALL usb_unlink_urb(struct urb *urb)
{
	if (urb && urb->dev && urb->dev->bus && urb->dev->bus->op)
		return urb->dev->bus->op->unlink_urb(urb);
	else
		return -ENODEV;
}
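
/* Illustrative sketch (not part of this driver): cancelling an outstanding
 * transfer, e.g. at disconnect time. With URB_ASYNC_UNLINK clear the call is
 * synchronous and the completion handler sees -ENOENT; with it set the call
 * returns -EINPROGRESS and the handler sees -ECONNRESET. The function name
 * below is hypothetical.
 */
#if 0
static void my_disconnect(struct urb *urb)
{
	/* synchronous unlink: must not be called from interrupt context */
	urb->transfer_flags &= ~URB_ASYNC_UNLINK;
	usb_unlink_urb(urb);
	usb_free_urb(urb);
}
#endif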