2 * OHCI HCD (Host Controller Driver) for USB.
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
7 * This file is licenced under the GPL.
/* urb_free_priv -- release the per-URB private state.
 * Walks urb_priv->td[0 .. length-1] picking up each TD pointer.
 * NOTE(review): this extract is garbled/truncated -- the loop body
 * (presumably a td_free call) and the final free of urb_priv itself
 * are not visible here; confirm against the pristine ohci-q.c.
 */
10 static void urb_free_priv (struct ohci_hcd
*hc
, urb_priv_t
*urb_priv
)
12 int last
= urb_priv
->length
- 1;
18 for (i
= 0; i
<= last
; i
++) {
19 td
= urb_priv
->td
[i
];
28 /*-------------------------------------------------------------------------*/
31 * URB goes back to driver, and isn't reissued.
32 * It's completely gone from HC data structures.
33 * PRECONDITION: no locks held, irqs blocked (Giveback can call into HCD.)
/* finish_urb -- hand a completed URB back to its driver.
 * Frees the urb's private TD bookkeeping, latches a final status under
 * urb->lock if it is still -EINPROGRESS, decrements the bus bandwidth
 * request counters for periodic (iso/interrupt) pipes, then calls
 * usb_hcd_giveback_urb().  Per the comment above the definition:
 * called with no locks held and irqs blocked, since giveback can
 * re-enter the HCD.
 * NOTE(review): extract is truncated (status assignment between the
 * likely() test and spin_unlock, case labels/breaks in the switch,
 * and the #endif are missing from this view).
 */
36 finish_urb (struct ohci_hcd
*ohci
, struct urb
*urb
, struct pt_regs
*regs
)
38 // ASSERT (urb->hcpriv != 0);
40 urb_free_priv (ohci
, urb
->hcpriv
);
43 spin_lock (&urb
->lock
);
44 if (likely (urb
->status
== -EINPROGRESS
))
46 spin_unlock (&urb
->lock
);
48 // what lock protects these?
49 switch (usb_pipetype (urb
->pipe
)) {
50 case PIPE_ISOCHRONOUS
:
51 hcd_to_bus (&ohci
->hcd
)->bandwidth_isoc_reqs
--;
54 hcd_to_bus (&ohci
->hcd
)->bandwidth_int_reqs
--;
58 #ifdef OHCI_VERBOSE_DEBUG
59 urb_print (urb
, "RET", usb_pipeout (urb
->pipe
));
61 usb_hcd_giveback_urb (&ohci
->hcd
, urb
, regs
);
65 /*-------------------------------------------------------------------------*
66 * ED handling functions
67 *-------------------------------------------------------------------------*/
69 /* search for the right schedule branch to use for a periodic ed.
70 * does some load balancing; returns the branch, or negative errno.
/* balance -- pick a periodic-schedule branch for an ED.
 * Scans branches 0..interval-1 for the least-loaded one whose every
 * slot (stepping by 'interval' through NUM_INTS) would stay within
 * 900 us (usb 1.1's 90% of a 1 ms frame) after adding 'load'.
 * Returns the chosen branch index, or a negative errno; 'branch'
 * starts at -ENOSPC so failure to find room falls through naturally.
 * NOTE(review): truncated -- the interval clamp after the NUM_INTS
 * test, the success assignment to 'branch', and the return are not
 * visible in this extract.
 */
72 static int balance (struct ohci_hcd
*ohci
, int interval
, int load
)
74 int i
, branch
= -ENOSPC
;
76 /* iso periods can be huge; iso tds specify frame numbers */
77 if (interval
> NUM_INTS
)
80 /* search for the least loaded schedule branch of that period
81 * that has enough bandwidth left unreserved.
83 for (i
= 0; i
< interval
; i
++) {
84 if (branch
< 0 || ohci
->load
[branch
] > ohci
->load
[i
]) {
85 #if 1 /* CONFIG_USB_BANDWIDTH */
88 /* usb 1.1 says 90% of one frame */
89 for (j
= i
; j
< NUM_INTS
; j
+= interval
) {
90 if ((ohci
->load
[j
] + load
) > 900)
102 /*-------------------------------------------------------------------------*/
104 /* both iso and interrupt requests have periods; this routine puts them
105 * into the schedule tree in the apppropriate place. most iso devices use
106 * 1msec periods, but that's not required.
/* periodic_link -- insert an interrupt/iso ED into the periodic tree.
 * For every schedule slot i = ed->branch, +interval, ... < NUM_INTS:
 * walks that branch keeping it sorted slow-period-first (so faster
 * EDs can share the tail of the tree), then splices the ED into both
 * the software list (ed_next) and the hardware list (hwNextED /
 * hcca->int_table, little-endian DMA addresses via cpu_to_le32p).
 * Also charges ed->load to ohci->load[i] and updates the bus's
 * bandwidth_allocated average (load / interval).
 * NOTE(review): truncated -- the 'break' ending the sort walk and the
 * *prev software-list splice are missing from this view.
 */
108 static void periodic_link (struct ohci_hcd
*ohci
, struct ed
*ed
)
112 ohci_vdbg (ohci
, "link %sed %p branch %d [%dus.], interval %d\n",
113 (ed
->hwINFO
& ED_ISO
) ? "iso " : "",
114 ed
, ed
->branch
, ed
->load
, ed
->interval
);
116 for (i
= ed
->branch
; i
< NUM_INTS
; i
+= ed
->interval
) {
117 struct ed
**prev
= &ohci
->periodic
[i
];
118 u32
*prev_p
= &ohci
->hcca
->int_table
[i
];
119 struct ed
*here
= *prev
;
121 /* sorting each branch by period (slow before fast)
122 * lets us share the faster parts of the tree.
123 * (plus maybe: put interrupt eds before iso)
125 while (here
&& ed
!= here
) {
126 if (ed
->interval
> here
->interval
)
128 prev
= &here
->ed_next
;
129 prev_p
= &here
->hwNextED
;
135 ed
->hwNextED
= *prev_p
;
138 *prev_p
= cpu_to_le32p (&ed
->dma
);
140 ohci
->load
[i
] += ed
->load
;
142 hcd_to_bus (&ohci
->hcd
)->bandwidth_allocated
+= ed
->load
/ ed
->interval
;
145 /* link an ed into one of the HC chains */
/* ed_schedule -- link an ED into the HC's control, bulk, or periodic
 * schedule (the switch arms for PIPE_CONTROL / PIPE_BULK are implied
 * by the parallel controltail/bulktail code below; interrupt and iso
 * go through balance() + periodic_link()).
 * Control/bulk: append to the doubly linked software list and patch
 * hwNextED; if the list was empty AND no unlink is pending on
 * ed_rm_list, (re)enable CLE/BLE in hc_control -- the rm_list check
 * exists because finish_unlinks() restarts the lists itself (see the
 * comment inside).  Periodic: balance() chooses a branch, then
 * periodic_link() splices the ED in; a negative branch is logged and
 * returned.
 * NOTE(review): truncated -- the switch statement itself, the else
 * arms, ed->state update and return are not visible here.
 */
147 static int ed_schedule (struct ohci_hcd
*ohci
, struct ed
*ed
)
157 /* we care about rm_list when setting CLE/BLE in case the HC was at
158 * work on some TD when CLE/BLE was turned off, and isn't quiesced
159 * yet. finish_unlinks() restarts as needed, some upcoming INTR_SF.
161 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
162 * periodic ones are singly linked (ed_next). that's because the
163 * periodic schedule encodes a tree like figure 3-5 in the ohci
164 * spec: each qh can have several "previous" nodes, and the tree
165 * doesn't have unused/idle descriptors.
169 if (ohci
->ed_controltail
== NULL
) {
170 writel (ed
->dma
, &ohci
->regs
->ed_controlhead
);
172 ohci
->ed_controltail
->ed_next
= ed
;
173 ohci
->ed_controltail
->hwNextED
= cpu_to_le32 (ed
->dma
);
175 ed
->ed_prev
= ohci
->ed_controltail
;
176 if (!ohci
->ed_controltail
&& !ohci
->ed_rm_list
) {
177 ohci
->hc_control
|= OHCI_CTRL_CLE
;
178 writel (0, &ohci
->regs
->ed_controlcurrent
);
179 writel (ohci
->hc_control
, &ohci
->regs
->control
);
181 ohci
->ed_controltail
= ed
;
185 if (ohci
->ed_bulktail
== NULL
) {
186 writel (ed
->dma
, &ohci
->regs
->ed_bulkhead
);
188 ohci
->ed_bulktail
->ed_next
= ed
;
189 ohci
->ed_bulktail
->hwNextED
= cpu_to_le32 (ed
->dma
);
191 ed
->ed_prev
= ohci
->ed_bulktail
;
192 if (!ohci
->ed_bulktail
&& !ohci
->ed_rm_list
) {
193 ohci
->hc_control
|= OHCI_CTRL_BLE
;
194 writel (0, &ohci
->regs
->ed_bulkcurrent
);
195 writel (ohci
->hc_control
, &ohci
->regs
->control
);
197 ohci
->ed_bulktail
= ed
;
200 // case PIPE_INTERRUPT:
201 // case PIPE_ISOCHRONOUS:
203 branch
= balance (ohci
, ed
->interval
, ed
->load
);
206 "ERR %d, interval %d msecs, load %d\n",
207 branch
, ed
->interval
, ed
->load
);
208 // FIXME if there are TDs queued, fail them!
212 periodic_link (ohci
, ed
);
215 /* the HC may not see the schedule updates yet, but if it does
216 * then they'll be properly ordered.
221 /*-------------------------------------------------------------------------*/
223 /* scan the periodic table to find and unlink this ED */
/* periodic_unlink -- remove an ED from every periodic-tree branch it
 * occupies (mirror of periodic_link).  For each slot i stepping by
 * ed->interval: walk the software list to find the link pointing at
 * this ED, then bypass it in both the software (ed_next) and hardware
 * (hwNextED) chains, and refund ed->load from ohci->load[i].
 * Finally refunds the averaged load/interval from the bus's
 * bandwidth_allocated counter and logs the unlink.
 * NOTE(review): truncated -- the software-list '*prev = ...' bypass
 * that pairs with the '*prev_p = ed->hwNextED;' store is not visible
 * in this extract.
 */
224 static void periodic_unlink (struct ohci_hcd
*ohci
, struct ed
*ed
)
228 for (i
= ed
->branch
; i
< NUM_INTS
; i
+= ed
->interval
) {
230 struct ed
**prev
= &ohci
->periodic
[i
];
231 u32
*prev_p
= &ohci
->hcca
->int_table
[i
];
233 while (*prev
&& (temp
= *prev
) != ed
) {
234 prev_p
= &temp
->hwNextED
;
235 prev
= &temp
->ed_next
;
238 *prev_p
= ed
->hwNextED
;
241 ohci
->load
[i
] -= ed
->load
;
243 hcd_to_bus (&ohci
->hcd
)->bandwidth_allocated
-= ed
->load
/ ed
->interval
;
245 ohci_vdbg (ohci
, "unlink %sed %p branch %d [%dus.], interval %d\n",
246 (ed
->hwINFO
& ED_ISO
) ? "iso " : "",
247 ed
, ed
->branch
, ed
->load
, ed
->interval
);
250 /* unlink an ed from one of the HC chains.
251 * just the link to the ed is unlinked.
252 * the link from the ed still points to another operational ed or 0
253 * so the HC can eventually finish the processing of the unlinked ed
/* ed_deschedule -- unlink an ED from the HC's schedule.
 * Per the comment strip above the definition: only the link TO the ED
 * is removed; the ED's own hwNextED keeps pointing onward so the HC
 * can finish any TD it is already processing.
 * Sets ED_SKIP first so the HC stops starting new TDs on this ED.
 * Control/bulk cases are symmetric: if this ED is the list head
 * (ed_prev == NULL) the CLE/BLE enable bit is dropped, *current is
 * cleared, the writes are posted via a readl of ->control, and the
 * hardware head register is repointed at the ED's successor;
 * otherwise the predecessor's ed_next/hwNextED are patched around it.
 * Tail pointers and ed_prev back-links are fixed up after.
 * Periodic EDs go through periodic_unlink().  The trailing ED_OPER
 * branch un-skips and un-halts an ED in the exceptionally-clean case
 * described by the NOTE comment (HC provably not caching it).
 * NOTE(review): truncated -- the enclosing switch and some closing
 * braces / state assignments are not visible in this extract.
 */
255 static void ed_deschedule (struct ohci_hcd
*ohci
, struct ed
*ed
)
257 ed
->hwINFO
|= ED_SKIP
;
261 if (ed
->ed_prev
== NULL
) {
263 ohci
->hc_control
&= ~OHCI_CTRL_CLE
;
264 writel (ohci
->hc_control
, &ohci
->regs
->control
);
265 writel (0, &ohci
->regs
->ed_controlcurrent
);
266 // post those pci writes
267 (void) readl (&ohci
->regs
->control
);
269 writel (le32_to_cpup (&ed
->hwNextED
),
270 &ohci
->regs
->ed_controlhead
);
272 ed
->ed_prev
->ed_next
= ed
->ed_next
;
273 ed
->ed_prev
->hwNextED
= ed
->hwNextED
;
275 if (ohci
->ed_controltail
== ed
) {
276 ohci
->ed_controltail
= ed
->ed_prev
;
277 if (ohci
->ed_controltail
)
278 ohci
->ed_controltail
->ed_next
= 0;
279 } else if (ed
->ed_next
) {
280 ed
->ed_next
->ed_prev
= ed
->ed_prev
;
285 if (ed
->ed_prev
== NULL
) {
287 ohci
->hc_control
&= ~OHCI_CTRL_BLE
;
288 writel (ohci
->hc_control
, &ohci
->regs
->control
);
289 writel (0, &ohci
->regs
->ed_bulkcurrent
);
290 // post those pci writes
291 (void) readl (&ohci
->regs
->control
);
293 writel (le32_to_cpup (&ed
->hwNextED
),
294 &ohci
->regs
->ed_bulkhead
);
296 ed
->ed_prev
->ed_next
= ed
->ed_next
;
297 ed
->ed_prev
->hwNextED
= ed
->hwNextED
;
299 if (ohci
->ed_bulktail
== ed
) {
300 ohci
->ed_bulktail
= ed
->ed_prev
;
301 if (ohci
->ed_bulktail
)
302 ohci
->ed_bulktail
->ed_next
= 0;
303 } else if (ed
->ed_next
) {
304 ed
->ed_next
->ed_prev
= ed
->ed_prev
;
308 // case PIPE_INTERRUPT:
309 // case PIPE_ISOCHRONOUS:
311 periodic_unlink (ohci
, ed
);
315 /* NOTE: Except for a couple of exceptionally clean unlink cases
316 * (like unlinking the only c/b ED, with no TDs) HCs may still be
317 * caching this operational ED (or its address). Safe unlinking
318 * involves not marking it ED_IDLE till INTR_SF; we always do that
319 * if td_list isn't empty. Otherwise the race is small; but ...
321 if (ed
->state
== ED_OPER
) {
323 ed
->hwINFO
&= ~(ED_SKIP
| ED_DEQUEUE
);
324 ed
->hwHeadP
&= ~ED_H
;
330 /*-------------------------------------------------------------------------*/
332 /* get and maybe (re)init an endpoint. init _should_ be done only as part
333 * of usb_set_configuration() or usb_set_interface() ... but the USB stack
334 * isn't very stateful, so we re-init whenever the HC isn't looking.
/* ed_get -- look up, or allocate and (re)initialize, the ED for a
 * given device endpoint.  Endpoint slot index 'ep' is the endpoint
 * number << 1, with bit 0 set for non-control OUT pipes (the visible
 * 'if (type != PIPE_CONTROL && is_out)' presumably sets it -- the
 * statement itself is truncated here; confirm).  Runs under
 * ohci->lock (irqsave).  First use allocates the ED plus a dummy TD
 * (atomic allocations) and points hwTailP == hwHeadP at it, which
 * also zeroes the ED_C/ED_H bits.  When the ED is idle the hwINFO
 * word is rebuilt from the pipe: function address, endpoint number
 * (bits 7..10), maxpacket (bits 16..), low-speed flag, and -- for
 * non-control pipes -- the ED_OUT/ED_IN direction, since only control
 * transfers carry PIDs in their TDs.  Periodic (non-bulk) endpoints
 * also get ed->interval and a usb_calc_bus_time()-based ed->load.
 * NOTE(review): truncated -- missing parameters (pipe, interval),
 * error paths for failed allocations, the interval clamps, and the
 * return of 'ed'.
 */
336 static struct ed
*ed_get (
337 struct ohci_hcd
*ohci
,
338 struct usb_device
*udev
,
342 int is_out
= !usb_pipein (pipe
);
343 int type
= usb_pipetype (pipe
);
344 struct hcd_dev
*dev
= (struct hcd_dev
*) udev
->hcpriv
;
349 ep
= usb_pipeendpoint (pipe
) << 1;
350 if (type
!= PIPE_CONTROL
&& is_out
)
353 spin_lock_irqsave (&ohci
->lock
, flags
);
355 if (!(ed
= dev
->ep
[ep
])) {
358 ed
= ed_alloc (ohci
, SLAB_ATOMIC
);
365 /* dummy td; end of td list for ed */
366 td
= td_alloc (ohci
, SLAB_ATOMIC
);
374 ed
->hwTailP
= cpu_to_le32 (td
->td_dma
);
375 ed
->hwHeadP
= ed
->hwTailP
; /* ED_C, ED_H zeroed */
380 /* NOTE: only ep0 currently needs this "re"init logic, during
381 * enumeration (after set_address, or if ep0 maxpacket >8).
383 if (ed
->state
== ED_IDLE
) {
386 info
= usb_pipedevice (pipe
);
387 info
|= (ep
>> 1) << 7;
388 info
|= usb_maxpacket (udev
, pipe
, is_out
) << 16;
389 info
= cpu_to_le32 (info
);
390 if (udev
->speed
== USB_SPEED_LOW
)
392 /* only control transfers store pids in tds */
393 if (type
!= PIPE_CONTROL
) {
394 info
|= is_out
? ED_OUT
: ED_IN
;
395 if (type
!= PIPE_BULK
) {
396 /* periodic transfers... */
397 if (type
== PIPE_ISOCHRONOUS
)
399 else if (interval
> 32) /* iso can be bigger */
401 ed
->interval
= interval
;
402 ed
->load
= usb_calc_bus_time (
403 udev
->speed
, !is_out
,
404 type
== PIPE_ISOCHRONOUS
,
405 usb_maxpacket (udev
, pipe
, is_out
))
413 spin_unlock_irqrestore (&ohci
->lock
, flags
);
417 /*-------------------------------------------------------------------------*/
419 /* request unlinking of an endpoint from an operational HC.
420 * put the ep on the rm_list
421 * real work is done at the next start frame (SF) hardware interrupt
/* start_urb_unlink -- begin unlinking an ED from an operational HC.
 * Marks the ED for dequeue (ED_DEQUEUE + ED_UNLINK state), removes it
 * from the schedule via ed_deschedule(), records hcca->frame_no + 1
 * in ed->tick (the frame after which the HC provably isn't touching
 * the ED -- frame_no wraps every 2^16 ms, handled by tick_before()),
 * and pushes the ED onto the singly linked ed_rm_list.  The real
 * teardown happens in finish_unlinks() at the next start-of-frame
 * interrupt, which is enabled here (ack INTR_SF in intrstatus, then
 * set it in intrenable, then read ->control to flush the posted PCI
 * writes) unless the controller is sleeping.
 */
423 static void start_urb_unlink (struct ohci_hcd
*ohci
, struct ed
*ed
)
425 ed
->hwINFO
|= ED_DEQUEUE
;
426 ed
->state
= ED_UNLINK
;
427 ed_deschedule (ohci
, ed
);
429 /* SF interrupt might get delayed; record the frame counter value that
430 * indicates when the HC isn't looking at it, so concurrent unlinks
431 * behave. frame_no wraps every 2^16 msec, and changes right before
434 ed
->tick
= le16_to_cpu (ohci
->hcca
->frame_no
) + 1;
436 /* rm_list is just singly linked, for simplicity */
437 ed
->ed_next
= ohci
->ed_rm_list
;
439 ohci
->ed_rm_list
= ed
;
441 /* enable SOF interrupt */
442 if (!ohci
->sleeping
) {
443 writel (OHCI_INTR_SF
, &ohci
->regs
->intrstatus
);
444 writel (OHCI_INTR_SF
, &ohci
->regs
->intrenable
);
445 // flush those pci writes
446 (void) readl (&ohci
->regs
->control
);
450 /*-------------------------------------------------------------------------*
451 * TD handling functions
452 *-------------------------------------------------------------------------*/
454 /* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
/* td_fill -- queue the next TD of an URB onto its ED (OHCI 5.2.8.2).
 * Uses the classic OHCI dummy-TD trick: the ED's current dummy TD is
 * filled in as the real TD for this transfer chunk, and the
 * pre-allocated urb_priv->td[index] becomes the ED's new dummy, so
 * the HC never sees a half-written queue tail.  Interrupt delay DI=6
 * is set on every TD except the URB's last (or on all of them when
 * URB_NO_INTERRUPT is set) so that normally only the final TD raises
 * an IRQ.  ISO TDs (info & TD_ISO) encode the buffer page in hwCBP
 * and the offset in hwPSW[0] (0xE000 = CC "not accessed"), and stash
 * the starting frame (low 16 bits of info) in ed->last_iso; other TDs
 * use hwCBP/hwBE as a plain start/end buffer pair (hwBE is inclusive,
 * hence data + len - 1).  The TD is appended to ed->td_list, hashed
 * by DMA address into ohci->td_hash for done-list reverse lookup, and
 * finally published to the HC by advancing ed->hwTailP.
 * NOTE(review): truncated -- the is_iso branch structure, td->urb /
 * td->index / td->data_dma assignments and zero-length handling are
 * not visible in this extract.
 */
457 td_fill (struct ohci_hcd
*ohci
, u32 info
,
458 dma_addr_t data
, int len
,
459 struct urb
*urb
, int index
)
461 struct td
*td
, *td_pt
;
462 struct urb_priv
*urb_priv
= urb
->hcpriv
;
463 int is_iso
= info
& TD_ISO
;
466 // ASSERT (index < urb_priv->length);
468 /* aim for only one interrupt per urb. mostly applies to control
469 * and iso; other urbs rarely need more than one TD per urb.
470 * this way, only final tds (or ones with an error) cause IRQs.
471 * at least immediately; use DI=6 in case any control request is
472 * tempted to die part way through.
474 * NOTE: could delay interrupts even for the last TD, and get fewer
475 * interrupts ... increasing per-urb latency by sharing interrupts.
476 * Drivers that queue bulk urbs may request that behavior.
478 if (index
!= (urb_priv
->length
- 1)
479 || (urb
->transfer_flags
& URB_NO_INTERRUPT
))
480 info
|= TD_DI_SET (6);
482 /* use this td as the next dummy */
483 td_pt
= urb_priv
->td
[index
];
485 /* fill the old dummy TD */
486 td
= urb_priv
->td
[index
] = urb_priv
->ed
->dummy
;
487 urb_priv
->ed
->dummy
= td_pt
;
489 td
->ed
= urb_priv
->ed
;
490 td
->next_dl_td
= NULL
;
497 td
->hwINFO
= cpu_to_le32 (info
);
499 td
->hwCBP
= cpu_to_le32 (data
& 0xFFFFF000);
500 td
->hwPSW
[0] = cpu_to_le16 ((data
& 0x0FFF) | 0xE000);
501 td
->ed
->last_iso
= info
& 0xffff;
503 td
->hwCBP
= cpu_to_le32 (data
);
506 td
->hwBE
= cpu_to_le32 (data
+ len
- 1);
509 td
->hwNextTD
= cpu_to_le32 (td_pt
->td_dma
);
511 /* append to queue */
512 list_add_tail (&td
->td_list
, &td
->ed
->td_list
);
514 /* hash it for later reverse mapping */
515 hash
= TD_HASH_FUNC (td
->td_dma
);
516 td
->td_hash
= ohci
->td_hash
[hash
];
517 ohci
->td_hash
[hash
] = td
;
519 /* HC might read the TD (or cachelines) right away ... */
521 td
->ed
->hwTailP
= td
->hwNextTD
;
524 /*-------------------------------------------------------------------------*/
526 /* Prepare all TDs of a transfer, and queue them onto the ED.
527 * Caller guarantees HC is active.
528 * Usually the ED is already on the schedule, so TDs might be
529 * processed as soon as they're queued.
/* td_submit_urb -- build and queue all TDs for one URB.
 * Caller guarantees the HC is active; the ED is usually already on
 * the schedule, so TDs may start executing as soon as they're queued.
 * Toggle handling: OHCI tracks bulk/interrupt data toggles in the ED,
 * so the device-side toggle bit is only used for reset detection --
 * when it reads clear, it is set again and ED_C is cleared in hwHeadP.
 * Dispatches on urb_priv->ed->type:
 *  - bulk/interrupt: TD_T_TOGGLE|TD_CC plus DP_IN or DP_OUT; data is
 *    chopped into 4096-byte TDs, TD_R is implied off unless
 *    URB_SHORT_NOT_OK is clear (short-read branch), an extra
 *    zero-length TD is added for URB_ZERO_PACKET, and the bulk list
 *    is kickstarted with OHCI_BLF;
 *  - control: SETUP TD (TD_DP_SETUP|TD_T_DATA0, 8 bytes from
 *    setup_dma), optional DATA TD (DATA1, direction from is_out;
 *    NOTE in code: >8K transfers are mishandled), STATUS TD in the
 *    opposite direction with DATA1, then OHCI_CLF kickstart;
 *  - iso: one TD per packet, TD_CC|TD_ISO with the absolute frame
 *    number (start_frame + cnt * interval), per-packet offset/length
 *    from iso_frame_desc[], plus bandwidth_isoc_reqs accounting.
 * NOTE(review): truncated -- the urb parameter, case/break labels,
 * 'info' initialization for bulk/int, and several closing braces are
 * not visible in this extract.
 */
531 static void td_submit_urb (
532 struct ohci_hcd
*ohci
,
535 struct urb_priv
*urb_priv
= urb
->hcpriv
;
537 int data_len
= urb
->transfer_buffer_length
;
540 int is_out
= usb_pipeout (urb
->pipe
);
542 /* OHCI handles the bulk/interrupt data toggles itself. We just
543 * use the device toggle bits for resetting, and rely on the fact
544 * that resetting toggle is meaningless if the endpoint is active.
546 if (!usb_gettoggle (urb
->dev
, usb_pipeendpoint (urb
->pipe
), is_out
)) {
547 usb_settoggle (urb
->dev
, usb_pipeendpoint (urb
->pipe
),
549 urb_priv
->ed
->hwHeadP
&= ~ED_C
;
552 urb_priv
->td_cnt
= 0;
555 data
= urb
->transfer_dma
;
559 /* NOTE: TD_CC is set so we can tell which TDs the HC processed by
560 * using TD_CC_GET, as well as by seeing them on the done list.
561 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
563 switch (urb_priv
->ed
->type
) {
565 /* Bulk and interrupt are identical except for where in the schedule
569 /* ... and periodic urbs have extra accounting */
570 hcd_to_bus (&ohci
->hcd
)->bandwidth_int_reqs
++;
574 ? TD_T_TOGGLE
| TD_CC
| TD_DP_OUT
575 : TD_T_TOGGLE
| TD_CC
| TD_DP_IN
;
576 /* TDs _could_ transfer up to 8K each */
577 while (data_len
> 4096) {
578 td_fill (ohci
, info
, data
, 4096, urb
, cnt
);
583 /* maybe avoid ED halt on final TD short read */
584 if (!(urb
->transfer_flags
& URB_SHORT_NOT_OK
))
586 td_fill (ohci
, info
, data
, data_len
, urb
, cnt
);
588 if ((urb
->transfer_flags
& URB_ZERO_PACKET
)
589 && cnt
< urb_priv
->length
) {
590 td_fill (ohci
, info
, 0, 0, urb
, cnt
);
593 /* maybe kickstart bulk list */
594 if (urb_priv
->ed
->type
== PIPE_BULK
) {
596 writel (OHCI_BLF
, &ohci
->regs
->cmdstatus
);
600 /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
601 * any DATA phase works normally, and the STATUS ack is special.
604 info
= TD_CC
| TD_DP_SETUP
| TD_T_DATA0
;
605 td_fill (ohci
, info
, urb
->setup_dma
, 8, urb
, cnt
++);
607 info
= TD_CC
| TD_R
| TD_T_DATA1
;
608 info
|= is_out
? TD_DP_OUT
: TD_DP_IN
;
609 /* NOTE: mishandles transfers >8K, some >4K */
610 td_fill (ohci
, info
, data
, data_len
, urb
, cnt
++);
613 ? TD_CC
| TD_DP_IN
| TD_T_DATA1
614 : TD_CC
| TD_DP_OUT
| TD_T_DATA1
;
615 td_fill (ohci
, info
, data
, 0, urb
, cnt
++);
616 /* maybe kickstart control list */
618 writel (OHCI_CLF
, &ohci
->regs
->cmdstatus
);
621 /* ISO has no retransmit, so no toggle; and it uses special TDs.
622 * Each TD could handle multiple consecutive frames (interval 1);
623 * we could often reduce the number of TDs here.
625 case PIPE_ISOCHRONOUS
:
626 for (cnt
= 0; cnt
< urb
->number_of_packets
; cnt
++) {
627 int frame
= urb
->start_frame
;
629 // FIXME scheduling should handle frame counter
630 // roll-around ... exotic case (and OHCI has
631 // a 2^16 iso range, vs other HCs max of 2^10)
632 frame
+= cnt
* urb
->interval
;
634 td_fill (ohci
, TD_CC
| TD_ISO
| frame
,
635 data
+ urb
->iso_frame_desc
[cnt
].offset
,
636 urb
->iso_frame_desc
[cnt
].length
, urb
, cnt
);
638 hcd_to_bus (&ohci
->hcd
)->bandwidth_isoc_reqs
++;
641 // ASSERT (urb_priv->length == cnt);
644 /*-------------------------------------------------------------------------*
645 * Done List handling functions
646 *-------------------------------------------------------------------------*/
648 /* calculate transfer length/status and update the urb
649 * PRECONDITION: irqsafe (only for urb->status locking)
/* td_done -- fold one completed TD's length/status into its URB.
 * Per the comment strip above: must be irq-safe, for urb->status
 * locking.  Removes the TD from the ED's td_list, then:
 *  - ISO TDs: condition code and transferred length come from the
 *    per-packet PSW (cc in bits 15..12; assumes FC==0, MAXPSW==1).
 *    If TD_CC is still set in hwINFO the HC never touched the TD.
 *    OUT length is taken from iso_frame_desc[].length; short reads
 *    (TD_DATAUNDERRUN) are normal for ISO; actual_length and the
 *    per-frame actual_length/status (via cc_to_error[]) are updated,
 *    with a debug print on any error cc.
 *  - Bulk/int/control: cc from TD_CC_GET(hwINFO).  A STALL on a
 *    non-control pipe marks the endpoint halted.  TD_DATAUNDERRUN
 *    without URB_SHORT_NOT_OK is downgraded (statement truncated);
 *    a real error (cc < 0x0E, i.e. HC actually processed the TD)
 *    sets urb->status under urb->lock if still -EINPROGRESS.
 *    actual_length then accumulates the bytes covered by this TD:
 *    full span (tdBE - data_dma + 1) when hwCBP is zero, otherwise
 *    the partial count from hwCBP (expression truncated) -- the
 *    control SETUP packet (index 0) is never counted.
 * NOTE(review): truncated -- several if/else bodies and the closing
 * debug print's length argument are missing from this extract.
 */
651 static void td_done (struct ohci_hcd
*ohci
, struct urb
*urb
, struct td
*td
)
653 u32 tdINFO
= le32_to_cpup (&td
->hwINFO
);
656 list_del (&td
->td_list
);
658 /* ISO ... drivers see per-TD length/status */
659 if (tdINFO
& TD_ISO
) {
660 u16 tdPSW
= le16_to_cpu (td
->hwPSW
[0]);
663 /* NOTE: assumes FC in tdINFO == 0 (and MAXPSW == 1) */
665 cc
= (tdPSW
>> 12) & 0xF;
666 if (tdINFO
& TD_CC
) /* hc didn't touch? */
669 if (usb_pipeout (urb
->pipe
))
670 dlen
= urb
->iso_frame_desc
[td
->index
].length
;
672 /* short reads are always OK for ISO */
673 if (cc
== TD_DATAUNDERRUN
)
675 dlen
= tdPSW
& 0x3ff;
677 urb
->actual_length
+= dlen
;
678 urb
->iso_frame_desc
[td
->index
].actual_length
= dlen
;
679 urb
->iso_frame_desc
[td
->index
].status
= cc_to_error
[cc
];
681 if (cc
!= TD_CC_NOERROR
)
683 "urb %p iso td %p (%d) len %d cc %d\n",
684 urb
, td
, 1 + td
->index
, dlen
, cc
);
686 /* BULK, INT, CONTROL ... drivers see aggregate length/status,
687 * except that "setup" bytes aren't counted and "short" transfers
688 * might not be reported as errors.
691 int type
= usb_pipetype (urb
->pipe
);
692 u32 tdBE
= le32_to_cpup (&td
->hwBE
);
694 cc
= TD_CC_GET (tdINFO
);
696 /* control endpoints only have soft stalls */
697 if (type
!= PIPE_CONTROL
&& cc
== TD_CC_STALL
)
698 usb_endpoint_halt (urb
->dev
,
699 usb_pipeendpoint (urb
->pipe
),
700 usb_pipeout (urb
->pipe
));
702 /* update packet status if needed (short is normally ok) */
703 if (cc
== TD_DATAUNDERRUN
704 && !(urb
->transfer_flags
& URB_SHORT_NOT_OK
))
706 if (cc
!= TD_CC_NOERROR
&& cc
< 0x0E) {
707 spin_lock (&urb
->lock
);
708 if (urb
->status
== -EINPROGRESS
)
709 urb
->status
= cc_to_error
[cc
];
710 spin_unlock (&urb
->lock
);
713 /* count all non-empty packets except control SETUP packet */
714 if ((type
!= PIPE_CONTROL
|| td
->index
!= 0) && tdBE
!= 0) {
716 urb
->actual_length
+= tdBE
- td
->data_dma
+ 1;
718 urb
->actual_length
+=
719 le32_to_cpup (&td
->hwCBP
)
723 if (cc
!= TD_CC_NOERROR
&& cc
< 0x0E)
725 "urb %p td %p (%d) cc %d, len=%d/%d\n",
726 urb
, td
, 1 + td
->index
, cc
,
728 urb
->transfer_buffer_length
);
732 /*-------------------------------------------------------------------------*/
/* ed_halted -- recover from an ED halt caused by TD 'td' (cc is its
 * condition code), returning the new head of the software done list
 * being built (prepended onto 'rev').
 * Clears the halt: sets ED_SKIP so the ED stays inactive until the
 * urb->complete() callback has cleaned up, and clears ED_H in
 * hwHeadP (preserving the data-toggle bit captured in 'toggle').
 * Then walks the remaining TDs on ed->td_list that belong to the
 * same URB: each is marked TD_DONE with its CC field cleared (so it
 * reads as completed-normally), chained onto the done list via
 * next_dl_td, and patched out of the ED's hardware queue -- hwTailP
 * is advanced if it pointed at the TD, and hwHeadP is repointed past
 * it with the saved toggle OR'd back in.  TDs of other URBs stop the
 * walk (see the multi-TD control DATA NOTE).  Anything other than a
 * control-pipe STALL is logged for troubleshooting.
 * NOTE(review): truncated -- the 'break' for foreign URBs, the read
 * of next->hwINFO into 'info' and its write-back, and the return of
 * the updated list head are not visible in this extract.
 */
734 static inline struct td
*
735 ed_halted (struct ohci_hcd
*ohci
, struct td
*td
, int cc
, struct td
*rev
)
737 struct urb
*urb
= td
->urb
;
738 struct ed
*ed
= td
->ed
;
739 struct list_head
*tmp
= td
->td_list
.next
;
740 u32 toggle
= ed
->hwHeadP
& ED_C
;
742 /* clear ed halt; this is the td that caused it, but keep it inactive
743 * until its urb->complete() has a chance to clean up.
745 ed
->hwINFO
|= ED_SKIP
;
747 ed
->hwHeadP
&= ~ED_H
;
749 /* put any later tds from this urb onto the donelist, after 'td',
750 * order won't matter here: no errors, and nothing was transferred.
751 * also patch the ed so it looks as if those tds completed normally.
753 while (tmp
!= &ed
->td_list
) {
757 next
= list_entry (tmp
, struct td
, td_list
);
758 tmp
= next
->td_list
.next
;
760 if (next
->urb
!= urb
)
763 /* NOTE: if multi-td control DATA segments get supported,
764 * this urb had one of them, this td wasn't the last td
765 * in that segment (TD_R clear), this ed halted because
766 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
767 * then we need to leave the control STATUS packet queued
771 info
|= cpu_to_le32 (TD_DONE
);
772 info
&= ~cpu_to_le32 (TD_CC
);
775 next
->next_dl_td
= rev
;
778 if (ed
->hwTailP
== cpu_to_le32 (next
->td_dma
))
779 ed
->hwTailP
= next
->hwNextTD
;
780 ed
->hwHeadP
= next
->hwNextTD
| toggle
;
783 /* help for troubleshooting: report anything that
784 * looks odd ... that doesn't include protocol stalls
785 * (or maybe some other things)
787 if (cc
!= TD_CC_STALL
|| !usb_pipecontrol (urb
->pipe
))
789 "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
790 urb
, urb
->dev
->devpath
,
791 usb_pipeendpoint (urb
->pipe
),
792 usb_pipein (urb
->pipe
) ? "in" : "out",
793 le32_to_cpu (td
->hwINFO
),
794 cc
, cc_to_error
[cc
]);
799 /* replies to the request have to be on a FIFO basis so
800 * we unreverse the hc-reversed done-list
/* dl_reverse_done_list -- fetch the HC's done queue and un-reverse it.
 * The controller writes completed TDs to hcca->done_head as a LIFO
 * (most recent first); drivers expect FIFO completion order, so this
 * walks the hardware list via each TD's hwNextTD, prepending onto a
 * software list through next_dl_td, and returns the (now oldest-
 * first) head.  Runs under ohci->lock (irqsave); done_head is zeroed
 * after reading so the HC can post a fresh list.  Each DMA address is
 * mapped back to its struct td via dma_to_td() (bad entries are
 * logged and the walk stops -- truncated here).  Every TD gets
 * TD_DONE set; if its cc indicates an error and the ED is halted
 * (ED_H in hwHeadP), ed_halted() un-halts the ED and splices that
 * URB's remaining TDs into the list first.
 * NOTE(review): truncated -- the while-loop header over td_dma, the
 * error-path break, and the 'return td' are not visible here.
 */
802 static struct td
*dl_reverse_done_list (struct ohci_hcd
*ohci
)
805 struct td
*td_rev
= NULL
;
806 struct td
*td
= NULL
;
809 spin_lock_irqsave (&ohci
->lock
, flags
);
810 td_dma
= le32_to_cpup (&ohci
->hcca
->done_head
);
811 ohci
->hcca
->done_head
= 0;
813 /* get TD from hc's singly linked list, and
814 * prepend to ours. ed->td_list changes later.
819 td
= dma_to_td (ohci
, td_dma
);
821 ohci_err (ohci
, "bad entry %8x\n", td_dma
);
825 td
->hwINFO
|= cpu_to_le32 (TD_DONE
);
826 cc
= TD_CC_GET (le32_to_cpup (&td
->hwINFO
));
828 /* Non-iso endpoints can halt on error; un-halt,
829 * and dequeue any other TDs from this urb.
830 * No other TD could have caused the halt.
832 if (cc
!= TD_CC_NOERROR
&& (td
->ed
->hwHeadP
& ED_H
))
833 td_rev
= ed_halted (ohci
, td
, cc
, td_rev
);
835 td
->next_dl_td
= td_rev
;
837 td_dma
= le32_to_cpup (&td
->hwNextTD
);
839 spin_unlock_irqrestore (&ohci
->lock
, flags
);
843 /*-------------------------------------------------------------------------*/
845 /* wrap-aware logic stolen from <linux/jiffies.h> */
/* tick_before(t1,t2): true iff frame tick t1 precedes t2, correct
 * across 16-bit frame-counter wraparound (signed-difference trick,
 * same idea as time_before() in <linux/jiffies.h>). */
846 #define tick_before(t1,t2) ((((s16)(t1))-((s16)(t2))) < 0)
848 /* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
/* finish_unlinks -- process ed_rm_list at the SF interrupt.
 * Per the comment above the definition: called in_irq() with the HCD
 * locked, 'tick' is the current frame number.  Walks ed_rm_list with
 * a pointer-to-pointer 'last'; EDs whose ed->tick hasn't been reached
 * yet (tick_before, wrap-safe) are skipped unless the controller is
 * disabled.  For each ED taken off the list, every TD whose urb_priv
 * state is URB_DEL is unlinked: hwTailP is advanced if it pointed at
 * the TD, and the previous hwNextTD link is patched around it while
 * preserving its non-address flag bits (savebits / ~TD_MASK).
 * td_done() accounts whatever the HC already transferred, and once
 * td_cnt reaches length the URB is given back via finish_urb() --
 * dropping ohci->lock around the callback, which is why 'modified'
 * forces a rescan of td_list afterwards ('completed' alone triggers
 * the restart when TDs remain).  A finished ED has ED_SKIP/ED_DEQUEUE
 * and ED_H cleared; if work is still queued and the HC is usable it
 * is immediately rescheduled via ed_schedule().  When the rm_list
 * drains, CLE/BLE are re-enabled (clearing the *current registers
 * first) and CLF/BLF kickstarts are issued through 'command'.
 * NOTE(review): truncated -- loop-restart labels, the URB_DEL 'continue',
 * 'urb' assignment, td_cnt increment, CLF/BLF command setup, and
 * several closing braces are not visible in this extract.
 */
850 finish_unlinks (struct ohci_hcd
*ohci
, u16 tick
, struct pt_regs
*regs
)
852 struct ed
*ed
, **last
;
855 for (last
= &ohci
->ed_rm_list
, ed
= *last
; ed
!= NULL
; ed
= *last
) {
856 struct list_head
*entry
, *tmp
;
857 int completed
, modified
;
860 /* only take off EDs that the HC isn't using, accounting for
861 * frame counter wraps.
863 if (tick_before (tick
, ed
->tick
) && !ohci
->disabled
) {
868 /* reentrancy: if we drop the schedule lock, someone might
869 * have modified this list. normally it's just prepending
870 * entries (which we'd ignore), but paranoia won't hurt.
876 /* unlink urbs as requested, but rescan the list after
877 * we call a completion since it might have unlinked
878 * another (earlier) urb
883 list_for_each_safe (entry
, tmp
, &ed
->td_list
) {
886 urb_priv_t
*urb_priv
;
889 td
= list_entry (entry
, struct td
, td_list
);
891 urb_priv
= td
->urb
->hcpriv
;
893 if (urb_priv
->state
!= URB_DEL
) {
894 prev
= &td
->hwNextTD
;
898 /* patch pointers hc uses ... tail, if we're removing
899 * an otherwise active td, and whatever td pointer
902 if (ed
->hwTailP
== cpu_to_le32 (td
->td_dma
))
903 ed
->hwTailP
= td
->hwNextTD
;
904 savebits
= *prev
& ~cpu_to_le32 (TD_MASK
);
905 *prev
= td
->hwNextTD
| savebits
;
907 /* HC may have partly processed this TD */
908 td_done (ohci
, urb
, td
);
911 /* if URB is done, clean up */
912 if (urb_priv
->td_cnt
== urb_priv
->length
) {
913 modified
= completed
= 1;
914 spin_unlock (&ohci
->lock
);
915 finish_urb (ohci
, urb
, regs
);
916 spin_lock (&ohci
->lock
);
919 if (completed
&& !list_empty (&ed
->td_list
))
922 /* ED's now officially unlinked, hc doesn't see */
924 ed
->hwINFO
&= ~(ED_SKIP
| ED_DEQUEUE
);
925 ed
->hwHeadP
&= ~ED_H
;
928 /* but if there's work queued, reschedule */
929 if (!list_empty (&ed
->td_list
)) {
930 if (!ohci
->disabled
&& !ohci
->sleeping
)
931 ed_schedule (ohci
, ed
);
938 /* maybe reenable control and bulk lists */
939 if (!ohci
->disabled
&& !ohci
->ed_rm_list
) {
940 u32 command
= 0, control
= 0;
942 if (ohci
->ed_controltail
) {
944 if (!(ohci
->hc_control
& OHCI_CTRL_CLE
)) {
945 control
|= OHCI_CTRL_CLE
;
946 writel (0, &ohci
->regs
->ed_controlcurrent
);
949 if (ohci
->ed_bulktail
) {
951 if (!(ohci
->hc_control
& OHCI_CTRL_BLE
)) {
952 control
|= OHCI_CTRL_BLE
;
953 writel (0, &ohci
->regs
->ed_bulkcurrent
);
957 /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
959 ohci
->hc_control
|= control
;
960 writel (ohci
->hc_control
, &ohci
->regs
->control
);
963 writel (command
, &ohci
->regs
->cmdstatus
);
969 /*-------------------------------------------------------------------------*/
972 * Process normal completions (error or success) and clean the schedules.
974 * This is the main path for handing urbs back to drivers. The only other
975 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
976 * scanning the (re-reversed) donelist as this does.
/* dl_done_list -- main completion path: walk the (already un-reversed)
 * done list from dl_reverse_done_list() and hand finished URBs back.
 * Per the comment strip above: this handles normal completions; the
 * only other giveback path is finish_unlinks().  For each TD (the
 * next_dl_td link is saved first since td_done unlinks the TD):
 * td_done() folds length/status into the URB; when td_cnt reaches
 * length, ohci->lock is dropped around finish_urb() (giveback may
 * re-enter the HCD) and retaken.  Schedule hygiene afterwards: an ED
 * with an empty td_list is descheduled; otherwise, if it isn't
 * pending dequeue (ED_DEQUEUE clear), ED_SKIP is cleared as soon as
 * the head TD is one the HC hasn't completed (TD_DONE clear) --
 * i.e. halted EDs are only re-enabled after fault cleanup.
 * NOTE(review): truncated -- the while-loop header over td, the
 * advance to td_next, and closing braces are not visible here.
 */
979 dl_done_list (struct ohci_hcd
*ohci
, struct td
*td
, struct pt_regs
*regs
)
983 spin_lock_irqsave (&ohci
->lock
, flags
);
985 struct td
*td_next
= td
->next_dl_td
;
986 struct urb
*urb
= td
->urb
;
987 urb_priv_t
*urb_priv
= urb
->hcpriv
;
988 struct ed
*ed
= td
->ed
;
990 /* update URB's length and status from TD */
991 td_done (ohci
, urb
, td
);
994 /* If all this urb's TDs are done, call complete() */
995 if (urb_priv
->td_cnt
== urb_priv
->length
) {
996 spin_unlock (&ohci
->lock
);
997 finish_urb (ohci
, urb
, regs
);
998 spin_lock (&ohci
->lock
);
1001 /* clean schedule: unlink EDs that are no longer busy */
1002 if (list_empty (&ed
->td_list
))
1003 ed_deschedule (ohci
, ed
);
1004 /* ... reenabling halted EDs only after fault cleanup */
1005 else if (!(ed
->hwINFO
& ED_DEQUEUE
)) {
1006 td
= list_entry (ed
->td_list
.next
, struct td
, td_list
);
1007 if (!(td
->hwINFO
& TD_DONE
))
1008 ed
->hwINFO
&= ~ED_SKIP
;
1013 spin_unlock_irqrestore (&ohci
->lock
, flags
);