SVN maintenance:
[reactos.git] / reactos / drivers / usb / cromwell / host / ohci-q.c
index 71e6350..eae584a 100644 (file)
-/*\r
- * OHCI HCD (Host Controller Driver) for USB.\r
- * \r
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>\r
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>\r
- * \r
- * This file is licenced under the GPL.\r
- */\r
-\r
-static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)\r
-{\r
-       int             last = urb_priv->length - 1;\r
-\r
-       if (last >= 0) {\r
-               int             i;\r
-               struct td       *td;\r
-\r
-               for (i = 0; i <= last; i++) {\r
-                       td = urb_priv->td [i];\r
-                       if (td)\r
-                               td_free (hc, td);\r
-               }\r
-       }\r
-\r
-       kfree (urb_priv);\r
-}\r
-\r
-/*-------------------------------------------------------------------------*/\r
-\r
/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  no locks held, irqs blocked  (Giveback can call into HCD.)
 */
static void
finish_urb (struct ohci_hcd *ohci, struct urb *urb, struct pt_regs *regs)
{
	// ASSERT (urb->hcpriv != 0);

	/* all TDs for this urb are off the hardware now; release them */
	urb_free_priv (ohci, urb->hcpriv);
	urb->hcpriv = NULL;

	/* -EINPROGRESS here means the transfer finished without error;
	 * any other status was set earlier (unlink, TD error) and sticks.
	 */
	spin_lock (&urb->lock);
	if (likely (urb->status == -EINPROGRESS))
		urb->status = 0;
	spin_unlock (&urb->lock);

	/* undo the periodic-request accounting done at submit time */
	// what lock protects these?
	switch (usb_pipetype (urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs--;
		break;
	}

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "RET", usb_pipeout (urb->pipe));
#endif
	/* hand the urb back to usbcore / the device driver */
	usb_hcd_giveback_urb (&ohci->hcd, urb, regs);
}
-\r
-\r
-/*-------------------------------------------------------------------------*\r
- * ED handling functions\r
- *-------------------------------------------------------------------------*/  \r
-\r
-/* search for the right schedule branch to use for a periodic ed.\r
- * does some load balancing; returns the branch, or negative errno.\r
- */\r
-static int balance (struct ohci_hcd *ohci, int interval, int load)\r
-{\r
-       int     i, branch = -ENOSPC;\r
-\r
-       /* iso periods can be huge; iso tds specify frame numbers */\r
-       if (interval > NUM_INTS)\r
-               interval = NUM_INTS;\r
-\r
-       /* search for the least loaded schedule branch of that period\r
-        * that has enough bandwidth left unreserved.\r
-        */\r
-       for (i = 0; i < interval ; i++) {\r
-               if (branch < 0 || ohci->load [branch] > ohci->load [i]) {\r
-#if 1  /* CONFIG_USB_BANDWIDTH */\r
-                       int     j;\r
-\r
-                       /* usb 1.1 says 90% of one frame */\r
-                       for (j = i; j < NUM_INTS; j += interval) {\r
-                               if ((ohci->load [j] + load) > 900)\r
-                                       break;\r
-                       }\r
-                       if (j < NUM_INTS)\r
-                               continue;\r
-#endif\r
-                       branch = i; \r
-               }\r
-       }\r
-       return branch;\r
-}\r
-\r
-/*-------------------------------------------------------------------------*/\r
-\r
/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
	unsigned	i;

	ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & ED_ISO) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	/* link into every branch this ED's interval touches, starting
	 * from the branch chosen by balance()
	 */
	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	**prev = &ohci->periodic [i];
		u32		*prev_p = &ohci->hcca->int_table [i];
		struct ed	*here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		/* branches of the tree can share tails, so the ED may
		 * already be reachable here; splice it in only once
		 */
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			/* order: the ED's own links must be visible in
			 * memory before the HC can reach the ED via the
			 * interrupt table entry written below
			 */
			wmb ();
			*prev = ed;
			*prev_p = cpu_to_le32p (&ed->dma);
		}
		ohci->load [i] += ed->load;
	}
	/* usbcore bandwidth bookkeeping (us per frame) */
	hcd_to_bus (&ohci->hcd)->bandwidth_allocated += ed->load / ed->interval;
}
-\r
/* link an ed into one of the HC chains (control, bulk, or the periodic
 * tree).  returns 0, or a negative errno if no periodic branch had
 * enough bandwidth.  caller holds the HCD lock.
 */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{	
	int	branch;

	ed->state = ED_OPER;
	ed->ed_prev = 0;
	ed->ed_next = 0;
	ed->hwNextED = 0;
	/* make the cleared links visible before the HC can see the ED */
	wmb ();

	/* we care about rm_list when setting CLE/BLE in case the HC was at
	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
	 * yet.  finish_unlinks() restarts as needed, some upcoming INTR_SF.
	 *
	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
	 * periodic ones are singly linked (ed_next). that's because the
	 * periodic schedule encodes a tree like figure 3-5 in the ohci
	 * spec:  each qh can have several "previous" nodes, and the tree
	 * doesn't have unused/idle descriptors.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* append to the control chain; empty chain means we also
		 * point the HC's head register at this ED
		 */
		if (ohci->ed_controltail == NULL) {
			writel (ed->dma, &ohci->regs->ed_controlhead);
		} else {
			ohci->ed_controltail->ed_next = ed;
			ohci->ed_controltail->hwNextED = cpu_to_le32 (ed->dma);
		}
		ed->ed_prev = ohci->ed_controltail;
		/* (re)enable control list processing, but only when no
		 * unlink is pending (see comment above)
		 */
		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
			ohci->hc_control |= OHCI_CTRL_CLE;
			writel (0, &ohci->regs->ed_controlcurrent);
			writel (ohci->hc_control, &ohci->regs->control);
		}
		ohci->ed_controltail = ed;
		break;

	case PIPE_BULK:
		/* same scheme as control, on the bulk chain */
		if (ohci->ed_bulktail == NULL) {
			writel (ed->dma, &ohci->regs->ed_bulkhead);
		} else {
			ohci->ed_bulktail->ed_next = ed;
			ohci->ed_bulktail->hwNextED = cpu_to_le32 (ed->dma);
		}
		ed->ed_prev = ohci->ed_bulktail;
		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
			ohci->hc_control |= OHCI_CTRL_BLE;
			writel (0, &ohci->regs->ed_bulkcurrent);
			writel (ohci->hc_control, &ohci->regs->control);
		}
		ohci->ed_bulktail = ed;
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		/* periodic EDs live in the interrupt schedule tree */
		branch = balance (ohci, ed->interval, ed->load);
		if (branch < 0) {
			ohci_dbg (ohci,
				"ERR %d, interval %d msecs, load %d\n",
				branch, ed->interval, ed->load);
			// FIXME if there are TDs queued, fail them!
			return branch;
		}
		ed->branch = branch;
		periodic_link (ohci, ed);
	}		

	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */
	return 0;
}
-\r
-/*-------------------------------------------------------------------------*/\r
-\r
/* scan the periodic table to find and unlink this ED; the ED may be
 * linked into several branches (one per interval slot), so each one
 * is walked and patched.
 */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	int	i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	*temp;
		struct ed	**prev = &ohci->periodic [i];
		u32		*prev_p = &ohci->hcca->int_table [i];

		/* advance to the link that points at ed, if present */
		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			/* patch both the HC-visible (hcca/hwNextED) and
			 * the CPU-side (ed_next) links past this ED
			 */
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ohci->load [i] -= ed->load;
	}	
	/* usbcore bandwidth bookkeeping (us per frame) */
	hcd_to_bus (&ohci->hcd)->bandwidth_allocated -= ed->load / ed->interval;

	ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & ED_ISO) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}
-\r
/* unlink an ed from one of the HC chains. 
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed) 
{
	/* tell the HC to skip this ED while it's being taken off */
	ed->hwINFO |= ED_SKIP;

	switch (ed->type) {
	case PIPE_CONTROL:
		/* no predecessor: this ED is the list head */
		if (ed->ed_prev == NULL) {
			/* last ED on the list: stop control processing
			 * before touching the head register
			 */
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_CLE;
				writel (ohci->hc_control, &ohci->regs->control);
				writel (0, &ohci->regs->ed_controlcurrent);
				// post those pci writes
				(void) readl (&ohci->regs->control);
			}
			writel (le32_to_cpup (&ed->hwNextED),
				&ohci->regs->ed_controlhead);
		} else {
			/* mid-list: patch the predecessor past this ED */
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* fix up tail pointer / successor back-link */
		if (ohci->ed_controltail == ed) {
			ohci->ed_controltail = ed->ed_prev;
			if (ohci->ed_controltail)
				ohci->ed_controltail->ed_next = 0;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	case PIPE_BULK:
		/* mirror image of the control case, on the bulk list */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_BLE;
				writel (ohci->hc_control, &ohci->regs->control);
				writel (0, &ohci->regs->ed_bulkcurrent);
				// post those pci writes
				(void) readl (&ohci->regs->control);
			}
			writel (le32_to_cpup (&ed->hwNextED),
				&ohci->regs->ed_bulkhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		if (ohci->ed_bulktail == ed) {
			ohci->ed_bulktail = ed->ed_prev;
			if (ohci->ed_bulktail)
				ohci->ed_bulktail->ed_next = 0;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		periodic_unlink (ohci, ed);
		break;
	}

	/* NOTE: Except for a couple of exceptionally clean unlink cases
	 * (like unlinking the only c/b ED, with no TDs) HCs may still be
	 * caching this operational ED (or its address).  Safe unlinking
	 * involves not marking it ED_IDLE till INTR_SF; we always do that
	 * if td_list isn't empty.  Otherwise the race is small; but ...
	 */
	if (ed->state == ED_OPER) {
		ed->state = ED_IDLE;
		ed->hwINFO &= ~(ED_SKIP | ED_DEQUEUE);
		ed->hwHeadP &= ~ED_H;
		wmb ();
	}
}
-\r
-\r
-/*-------------------------------------------------------------------------*/\r
-\r
/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of usb_set_configuration() or usb_set_interface() ... but the USB stack
 * isn't very stateful, so we re-init whenever the HC isn't looking.
 *
 * returns the ED for this device/pipe, allocating it (plus its dummy
 * TD) on first use; returns NULL on allocation failure.
 */
static struct ed *ed_get (
	struct ohci_hcd		*ohci,
	struct usb_device	*udev,
	unsigned int		pipe,
	int			interval
) {
	int			is_out = !usb_pipein (pipe);
	int			type = usb_pipetype (pipe);
	struct hcd_dev		*dev = (struct hcd_dev *) udev->hcpriv;
	struct ed		*ed; 
	unsigned		ep;
	unsigned long		flags;

	/* per-device ep[] slot: endpoint number * 2, +1 for non-control
	 * OUT endpoints (control endpoints share one slot per number)
	 */
	ep = usb_pipeendpoint (pipe) << 1;
	if (type != PIPE_CONTROL && is_out)
		ep |= 1;

	spin_lock_irqsave (&ohci->lock, flags);

	/* first use of this endpoint: allocate ED plus its dummy TD */
	if (!(ed = dev->ep [ep])) {
		struct td	*td;

		ed = ed_alloc (ohci, SLAB_ATOMIC);
		if (!ed) {
			/* out of memory */
			goto done;
		}
		dev->ep [ep] = ed;

		/* dummy td; end of td list for ed */
		td = td_alloc (ohci, SLAB_ATOMIC);
		if (!td) {
			/* out of memory */
			ed_free (ohci, ed);
			ed = 0;
			goto done;
		}
		ed->dummy = td;
		ed->hwTailP = cpu_to_le32 (td->td_dma);
		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
		ed->state = ED_IDLE;
		ed->type = type;
	}

	/* NOTE: only ep0 currently needs this "re"init logic, during
	 * enumeration (after set_address, or if ep0 maxpacket >8).
	 */
	if (ed->state == ED_IDLE) {
		u32	info;

		/* build hwINFO: device address, EP number, max packet;
		 * NOTE the flag constants OR'd in below are expected to
		 * be pre-byteswapped (cpu_to_le32) per ohci.h
		 */
		info = usb_pipedevice (pipe);
		info |= (ep >> 1) << 7;
		info |= usb_maxpacket (udev, pipe, is_out) << 16;
		info = cpu_to_le32 (info);
		if (udev->speed == USB_SPEED_LOW)
			info |= ED_LOWSPEED;
		/* only control transfers store pids in tds */
		if (type != PIPE_CONTROL) {
			info |= is_out ? ED_OUT : ED_IN;
			if (type != PIPE_BULK) {
				/* periodic transfers... */
				if (type == PIPE_ISOCHRONOUS)
					info |= ED_ISO;
				else if (interval > 32)	/* iso can be bigger */
					interval = 32;
				ed->interval = interval;
				ed->load = usb_calc_bus_time (
					udev->speed, !is_out,
					type == PIPE_ISOCHRONOUS,
					usb_maxpacket (udev, pipe, is_out))
						/ 1000;
			}
		}
		ed->hwINFO = info;
	}

done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return ed; 
}
-\r
-/*-------------------------------------------------------------------------*/\r
-\r
/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 */
static void start_urb_unlink (struct ohci_hcd *ohci, struct ed *ed)
{    
	/* mark the ED so completed TDs on it get dequeue handling,
	 * and take it off the hardware schedule
	 */
	ed->hwINFO |= ED_DEQUEUE;
	ed->state = ED_UNLINK;
	ed_deschedule (ohci, ed);

	/* SF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave.  frame_no wraps every 2^16 msec, and changes right before
	 * SF is triggered.
	 */
	ed->tick = le16_to_cpu (ohci->hcca->frame_no) + 1;

	/* rm_list is just singly linked, for simplicity */
	ed->ed_next = ohci->ed_rm_list;
	ed->ed_prev = 0;
	ohci->ed_rm_list = ed;

	/* enable SOF interrupt */
	if (!ohci->sleeping) {
		/* ack any stale SF status first, then enable it */
		writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
		writel (OHCI_INTR_SF, &ohci->regs->intrenable);
		// flush those pci writes
		(void) readl (&ohci->regs->control);
	}
}
-\r
-/*-------------------------------------------------------------------------*\r
- * TD handling functions\r
- *-------------------------------------------------------------------------*/\r
-\r
/* enqueue next TD for this URB (OHCI spec 5.2.8.2)
 *
 * Uses the standard OHCI "dummy TD" trick:  the ED's current dummy TD
 * is filled in with this transfer, and the pre-allocated TD at
 * urb_priv->td[index] becomes the new dummy, so the HC never sees a
 * half-built queue.
 */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td		*td, *td_pt;
	struct urb_priv		*urb_priv = urb->hcpriv;
	int			is_iso = info & TD_ISO;
	int			hash;

	// ASSERT (index < urb_priv->length);

	/* aim for only one interrupt per urb.  mostly applies to control
	 * and iso; other urbs rarely need more than one TD per urb.
	 * this way, only final tds (or ones with an error) cause IRQs.
	 * at least immediately; use DI=6 in case any control request is
	 * tempted to die part way through.
	 *
	 * NOTE: could delay interrupts even for the last TD, and get fewer
	 * interrupts ... increasing per-urb latency by sharing interrupts.
	 * Drivers that queue bulk urbs may request that behavior.
	 */
	if (index != (urb_priv->length - 1)
			|| (urb->transfer_flags & URB_NO_INTERRUPT))
		info |= TD_DI_SET (6);

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];

	/* fill the old dummy TD */
	td = urb_priv->td [index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb; 
	td->data_dma = data;
	/* zero-length transfers carry a zero buffer pointer */
	if (!len)
		data = 0;

	td->hwINFO = cpu_to_le32 (info);
	if (is_iso) {
		/* iso TDs split the buffer address: page in hwCBP,
		 * page offset in the first packet status word
		 */
		td->hwCBP = cpu_to_le32 (data & 0xFFFFF000);
		td->hwPSW [0] = cpu_to_le16 ((data & 0x0FFF) | 0xE000);
		td->ed->last_iso = info & 0xffff;
	} else {
		td->hwCBP = cpu_to_le32 (data); 
	}			
	if (data)
		td->hwBE = cpu_to_le32 (data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_le32 (td_pt->td_dma);

	/* append to queue */
	list_add_tail (&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC (td->td_dma);
	td->td_hash = ohci->td_hash [hash];
	ohci->td_hash [hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb ();
	td->ed->hwTailP = td->hwNextTD;
}
-\r
-/*-------------------------------------------------------------------------*/\r
-\r
/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
	struct ohci_hcd	*ohci,
	struct urb	*urb
) {
	struct urb_priv	*urb_priv = urb->hcpriv;
	dma_addr_t	data;
	int		data_len = urb->transfer_buffer_length;
	int		cnt = 0;
	u32		info = 0;
	int		is_out = usb_pipeout (urb->pipe);

	/* OHCI handles the bulk/interrupt data toggles itself.  We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
		/* clear toggle carry in the ED: restart at DATA0 */
		urb_priv->ed->hwHeadP &= ~ED_C;
	}

	urb_priv->td_cnt = 0;

	/* zero-length requests get a zero buffer pointer */
	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {

	/* Bulk and interrupt are identical except for where in the schedule
	 * their EDs live.
	 */
	case PIPE_INTERRUPT:
		/* ... and periodic urbs have extra accounting */
		hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs++;
		/* FALLTHROUGH */
	case PIPE_BULK:
		info = is_out
			? TD_T_TOGGLE | TD_CC | TD_DP_OUT
			: TD_T_TOGGLE | TD_CC | TD_DP_IN;
		/* TDs _could_ transfer up to 8K each */
		while (data_len > 4096) {
			td_fill (ohci, info, data, 4096, urb, cnt);
			data += 4096;
			data_len -= 4096;
			cnt++;
		}
		/* maybe avoid ED halt on final TD short read */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			info |= TD_R;
		td_fill (ohci, info, data, data_len, urb, cnt);
		cnt++;
		/* driver asked for a trailing zero-length packet, and
		 * urb_priv was sized to hold the extra TD
		 */
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& cnt < urb_priv->length) {
			td_fill (ohci, info, 0, 0, urb, cnt);
			cnt++;
		}
		/* maybe kickstart bulk list */
		if (urb_priv->ed->type == PIPE_BULK) {
			wmb ();
			writel (OHCI_BLF, &ohci->regs->cmdstatus);
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		/* SETUP stage: 8-byte setup packet, always DATA0 */
		info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
		td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
		if (data_len > 0) {
			/* optional DATA stage, starts at DATA1 */
			info = TD_CC | TD_R | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE:  mishandles transfers >8K, some >4K */
			td_fill (ohci, info, data, data_len, urb, cnt++);
		}
		/* STATUS stage: zero-length, opposite direction, DATA1 */
		info = is_out
			? TD_CC | TD_DP_IN | TD_T_DATA1
			: TD_CC | TD_DP_OUT | TD_T_DATA1;
		td_fill (ohci, info, data, 0, urb, cnt++);
		/* maybe kickstart control list */
		wmb ();
		writel (OHCI_CLF, &ohci->regs->cmdstatus);
		break;

	/* ISO has no retransmit, so no toggle; and it uses special TDs.
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			int	frame = urb->start_frame;

			// FIXME scheduling should handle frame counter
			// roll-around ... exotic case (and OHCI has
			// a 2^16 iso range, vs other HCs max of 2^10)
			frame += cnt * urb->interval;
			frame &= 0xffff;
			td_fill (ohci, TD_CC | TD_ISO | frame,
				data + urb->iso_frame_desc [cnt].offset,
				urb->iso_frame_desc [cnt].length, urb, cnt);
		}
		hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs++;
		break;
	}
	// ASSERT (urb_priv->length == cnt);
}
-\r
-/*-------------------------------------------------------------------------*\r
- * Done List handling functions\r
- *-------------------------------------------------------------------------*/\r
-\r
/* calculate transfer length/status and update the urb
 * PRECONDITION:  irqsafe (only for urb->status locking)
 *
 * removes the TD from its ED's queue; ISO TDs update per-frame
 * status, other types accumulate into the urb's aggregate status
 * and actual_length.
 */
static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
	u32	tdINFO = le32_to_cpup (&td->hwINFO);
	int	cc = 0;

	list_del (&td->td_list);

	/* ISO ... drivers see per-TD length/status */
	if (tdINFO & TD_ISO) {
		u16	tdPSW = le16_to_cpu (td->hwPSW [0]);
		int	dlen = 0;

		/* NOTE:  assumes FC in tdINFO == 0 (and MAXPSW == 1) */

		cc = (tdPSW >> 12) & 0xF;
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc [td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}
		urb->actual_length += dlen;
		urb->iso_frame_desc [td->index].actual_length = dlen;
		urb->iso_frame_desc [td->index].status = cc_to_error [cc];

		if (cc != TD_CC_NOERROR)
			ohci_vdbg (ohci,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int	type = usb_pipetype (urb->pipe);
		u32	tdBE = le32_to_cpup (&td->hwBE);

		cc = TD_CC_GET (tdINFO);

		/* control endpoints only have soft stalls */
		if (type != PIPE_CONTROL && cc == TD_CC_STALL)
			usb_endpoint_halt (urb->dev,
				usb_pipeendpoint (urb->pipe),
				usb_pipeout (urb->pipe));

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;
		/* record the first real error; cc >= 0x0E means the HC
		 * never accessed this TD
		 */
		if (cc != TD_CC_NOERROR && cc < 0x0E) {
			spin_lock (&urb->lock);
			if (urb->status == -EINPROGRESS)
				urb->status = cc_to_error [cc];
			spin_unlock (&urb->lock);
		}

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			/* hwCBP == 0 means the whole buffer was used;
			 * otherwise it points past the last byte written
			 */
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  le32_to_cpup (&td->hwCBP)
					- td->data_dma;
		}

		if (cc != TD_CC_NOERROR && cc < 0x0E)
			ohci_vdbg (ohci,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, 1 + td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}
}
-\r
-/*-------------------------------------------------------------------------*/\r
-\r
/* handle an ED halted by a TD error:  un-halt the ED, move the
 * failed urb's remaining queued TDs onto the (reversed) done list
 * "rev", and return the new list head.
 */
static inline struct td *
ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
{
	struct urb		*urb = td->urb;
	struct ed		*ed = td->ed;
	struct list_head	*tmp = td->td_list.next;
	u32			toggle = ed->hwHeadP & ED_C;

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= ED_SKIP;
	wmb ();
	ed->hwHeadP &= ~ED_H; 

	/* put any later tds from this urb onto the donelist, after 'td',
	 * order won't matter here: no errors, and nothing was transferred.
	 * also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td	*next;
		u32		info;

		next = list_entry (tmp, struct td, td_list);
		tmp = next->td_list.next;

		/* stop at the first TD belonging to a different urb */
		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */
		/* mark as done with no-error CC, as if the HC retired it */
		info = next->hwINFO;
		info |= cpu_to_le32 (TD_DONE);
		info &= ~cpu_to_le32 (TD_CC);
		next->hwINFO = info;

		/* prepend to the reversed done list */
		next->next_dl_td = rev; 
		rev = next;

		/* advance the ED's head past this TD, preserving the
		 * data toggle carry bit
		 */
		if (ed->hwTailP == cpu_to_le32 (next->td_dma))
			ed->hwTailP = next->hwNextTD;
		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting:  report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	if (cc != TD_CC_STALL || !usb_pipecontrol (urb->pipe))
		ohci_dbg (ohci,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			le32_to_cpu (td->hwINFO),
			cc, cc_to_error [cc]);

	return rev;
}
-\r
-/* replies to the request have to be on a FIFO basis so\r
- * we unreverse the hc-reversed done-list\r
- */\r
/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 *
 * Takes the HC's done-head out of the HCCA (clearing it so the HC can
 * post more), walks the singly linked DMA chain, and returns a software
 * list (via td->next_dl_td) in FIFO order.  Runs with ohci->lock held
 * and irqs off for the duration of the walk.
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
	u32		td_dma;
	struct td	*td_rev = NULL;
	struct td	*td = NULL;
	unsigned long	flags;

	spin_lock_irqsave (&ohci->lock, flags);
	td_dma = le32_to_cpup (&ohci->hcca->done_head);
	ohci->hcca->done_head = 0;	/* hand the list back to the HC */

	/* get TD from hc's singly linked list, and
	 * prepend to ours.  ed->td_list changes later.
	 */
	while (td_dma) {
		int		cc;

		/* map the DMA address back to the driver's TD */
		td = dma_to_td (ohci, td_dma);
		if (!td) {
			ohci_err (ohci, "bad entry %8x\n", td_dma);
			break;
		}

		td->hwINFO |= cpu_to_le32 (TD_DONE);
		cc = TD_CC_GET (le32_to_cpup (&td->hwINFO));

		/* Non-iso endpoints can halt on error; un-halt,
		 * and dequeue any other TDs from this urb.
		 * No other TD could have caused the halt.
		 */
		if (cc != TD_CC_NOERROR && (td->ed->hwHeadP & ED_H))
			td_rev = ed_halted (ohci, td, cc, td_rev);

		td->next_dl_td = td_rev;
		td_rev = td;
		td_dma = le32_to_cpup (&td->hwNextTD);
	}
	spin_unlock_irqrestore (&ohci->lock, flags);
	return td_rev;
}
-\r
-/*-------------------------------------------------------------------------*/\r
-\r
-/* wrap-aware logic stolen from <linux/jiffies.h> */\r
-#define tick_before(t1,t2) ((((s16)(t1))-((s16)(t2))) < 0)\r
-\r
-/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */\r
/* there are some urbs/eds to unlink; called in_irq(), with HCD locked.
 *
 * Walks ohci->ed_rm_list, completes URBs marked URB_DEL on EDs the HC
 * has verifiably stopped using (frame-tick comparison), reschedules EDs
 * that still have queued work, and re-enables the control/bulk lists
 * once the rm_list drains.  The lock is dropped around finish_urb(),
 * which is why both rescan labels exist.
 */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
{
	struct ed	*ed, **last;

rescan_all:
	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head	*entry, *tmp;
		int			completed, modified;
		u32			*prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps.
		 */
		if (tick_before (tick, ed->tick) && !ohci->disabled) {
			last = &ed->ed_next;
			continue;
		}

		/* reentrancy:  if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_next;
		ed->ed_next = 0;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe (entry, tmp, &ed->td_list) {
			struct td	*td;
			struct urb	*urb;
			urb_priv_t	*urb_priv;
			u32		savebits;

			td = list_entry (entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			/* leave TDs of urbs not being deleted in place */
			if (urb_priv->state != URB_DEL) {
				prev = &td->hwNextTD;
				continue;
			}

			/* patch pointers hc uses ... tail, if we're removing
			 * an otherwise active td, and whatever td pointer
			 * points to this td
			 */
			if (ed->hwTailP == cpu_to_le32 (td->td_dma))
				ed->hwTailP = td->hwNextTD;
			/* keep the non-address bits (toggle/halt) of *prev */
			savebits = *prev & ~cpu_to_le32 (TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* HC may have partly processed this TD */
			td_done (ohci, urb, td);
			urb_priv->td_cnt++;

			/* if URB is done, clean up */
			if (urb_priv->td_cnt == urb_priv->length) {
				modified = completed = 1;
				/* giveback may reenter the HCD; drop lock */
				spin_unlock (&ohci->lock);
				finish_urb (ohci, urb, regs);
				spin_lock (&ohci->lock);
			}
		}
		if (completed && !list_empty (&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		ed->hwINFO &= ~(ED_SKIP | ED_DEQUEUE);
		ed->hwHeadP &= ~ED_H;
		ed->hwNextED = 0;

		/* but if there's work queued, reschedule */
		if (!list_empty (&ed->td_list)) {
			if (!ohci->disabled && !ohci->sleeping)
				ed_schedule (ohci, ed);
		}

		if (modified)
			goto rescan_all;
	}

	/* maybe reenable control and bulk lists */
	if (!ohci->disabled && !ohci->ed_rm_list) {
		u32	command = 0, control = 0;

		if (ohci->ed_controltail) {
			command |= OHCI_CLF;
			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
				control |= OHCI_CTRL_CLE;
				writel (0, &ohci->regs->ed_controlcurrent);
			}
		}
		if (ohci->ed_bulktail) {
			command |= OHCI_BLF;
			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
				control |= OHCI_CTRL_BLE;
				writel (0, &ohci->regs->ed_bulkcurrent);
			}
		}

		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
		if (control) {
			ohci->hc_control |= control;
			writel (ohci->hc_control, &ohci->regs->control);
		}
		if (command)
			writel (command, &ohci->regs->cmdstatus);
	}
}
-\r
-\r
-\r
-/*-------------------------------------------------------------------------*/\r
-\r
-/*\r
- * Process normal completions (error or success) and clean the schedules.\r
- *\r
- * This is the main path for handing urbs back to drivers.  The only other\r
- * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of\r
- * scanning the (re-reversed) donelist as this does.\r
- */\r
/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 *
 * 'td' is the FIFO-ordered list produced by dl_reverse_done_list().
 * Holds ohci->lock, dropping it only around each finish_urb() call.
 */
static void
dl_done_list (struct ohci_hcd *ohci, struct td *td, struct pt_regs *regs)
{
	unsigned long	flags;

	spin_lock_irqsave (&ohci->lock, flags);
	while (td) {
		struct td	*td_next = td->next_dl_td;
		struct urb	*urb = td->urb;
		urb_priv_t	*urb_priv = urb->hcpriv;
		struct ed	*ed = td->ed;

		/* update URB's length and status from TD */
		td_done (ohci, urb, td);
		urb_priv->td_cnt++;

		/* If all this urb's TDs are done, call complete() */
		if (urb_priv->td_cnt == urb_priv->length) {
			/* giveback may reenter the HCD; drop the lock */
			spin_unlock (&ohci->lock);
			finish_urb (ohci, urb, regs);
			spin_lock (&ohci->lock);
		}

		/* clean schedule:  unlink EDs that are no longer busy */
		if (list_empty (&ed->td_list))
			ed_deschedule (ohci, ed);
		/* ... reenabling halted EDs only after fault cleanup */
		else if (!(ed->hwINFO & ED_DEQUEUE)) {
			td = list_entry (ed->td_list.next, struct td, td_list);
			if (!(td->hwINFO & TD_DONE))
				ed->hwINFO &= ~ED_SKIP;
		}

		td = td_next;
	}
	spin_unlock_irqrestore (&ohci->lock, flags);
}
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ * 
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * 
+ * This file is licenced under the GPL.
+ */
+
+static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
+{
+       int             last = urb_priv->length - 1;
+
+       if (last >= 0) {
+               int             i;
+               struct td       *td;
+
+               for (i = 0; i <= last; i++) {
+                       td = urb_priv->td [i];
+                       if (td)
+                               td_free (hc, td);
+               }
+       }
+
+       kfree (urb_priv);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * URB goes back to driver, and isn't reissued.
+ * It's completely gone from HC data structures.
+ * PRECONDITION:  no locks held, irqs blocked  (Giveback can call into HCD.)
+ */
/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  no locks held, irqs blocked  (Giveback can call into HCD.)
 */
static void
finish_urb (struct ohci_hcd *ohci, struct urb *urb, struct pt_regs *regs)
{
	// ASSERT (urb->hcpriv != 0);

	urb_free_priv (ohci, urb->hcpriv);
	urb->hcpriv = NULL;

	/* a still-in-progress status means it completed successfully;
	 * any other value (e.g. an unlink/error code) is preserved.
	 */
	spin_lock (&urb->lock);
	if (likely (urb->status == -EINPROGRESS))
		urb->status = 0;
	spin_unlock (&urb->lock);

	// what lock protects these?
	switch (usb_pipetype (urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs--;
		break;
	}

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "RET", usb_pipeout (urb->pipe));
#endif
	usb_hcd_giveback_urb (&ohci->hcd, urb, regs);
}
+
+
+/*-------------------------------------------------------------------------*
+ * ED handling functions
+ *-------------------------------------------------------------------------*/  
+
+/* search for the right schedule branch to use for a periodic ed.
+ * does some load balancing; returns the branch, or negative errno.
+ */
+static int balance (struct ohci_hcd *ohci, int interval, int load)
+{
+       int     i, branch = -ENOSPC;
+
+       /* iso periods can be huge; iso tds specify frame numbers */
+       if (interval > NUM_INTS)
+               interval = NUM_INTS;
+
+       /* search for the least loaded schedule branch of that period
+        * that has enough bandwidth left unreserved.
+        */
+       for (i = 0; i < interval ; i++) {
+               if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
+#if 1  /* CONFIG_USB_BANDWIDTH */
+                       int     j;
+
+                       /* usb 1.1 says 90% of one frame */
+                       for (j = i; j < NUM_INTS; j += interval) {
+                               if ((ohci->load [j] + load) > 900)
+                                       break;
+                       }
+                       if (j < NUM_INTS)
+                               continue;
+#endif
+                       branch = i; 
+               }
+       }
+       return branch;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* both iso and interrupt requests have periods; this routine puts them
+ * into the schedule tree in the apppropriate place.  most iso devices use
+ * 1msec periods, but that's not required.
+ */
/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the apppropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 *
 * Inserts 'ed' into every interrupt-table branch it belongs to
 * (ed->branch, stepping by ed->interval) and accounts its load.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
	unsigned	i;

	ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & ED_ISO) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	**prev = &ohci->periodic [i];
		u32		*prev_p = &ohci->hcca->int_table [i];
		struct ed	*here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		/* ed may already be linked into this branch (shared node) */
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			/* make the ED's own links visible before the HC
			 * can reach it through the table */
			wmb ();
			*prev = ed;
			*prev_p = cpu_to_le32p (&ed->dma);
		}
		ohci->load [i] += ed->load;
	}
	hcd_to_bus (&ohci->hcd)->bandwidth_allocated += ed->load / ed->interval;
}
+
+/* link an ed into one of the HC chains */
+
/* link an ed into one of the HC chains.
 * Control/bulk EDs are appended to their doubly linked list (enabling
 * CLE/BLE if the list was empty and nothing is pending removal);
 * periodic EDs get a schedule branch via balance()/periodic_link().
 * Returns 0, or a negative errno if no periodic branch has bandwidth.
 */
static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
	int	branch;

	ed->state = ED_OPER;
	ed->ed_prev = 0;
	ed->ed_next = 0;
	ed->hwNextED = 0;
	wmb ();

	/* we care about rm_list when setting CLE/BLE in case the HC was at
	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
	 * yet.  finish_unlinks() restarts as needed, some upcoming INTR_SF.
	 *
	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
	 * periodic ones are singly linked (ed_next). that's because the
	 * periodic schedule encodes a tree like figure 3-5 in the ohci
	 * spec:  each qh can have several "previous" nodes, and the tree
	 * doesn't have unused/idle descriptors.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		if (ohci->ed_controltail == NULL) {
			writel (ed->dma, &ohci->regs->ed_controlhead);
		} else {
			ohci->ed_controltail->ed_next = ed;
			ohci->ed_controltail->hwNextED = cpu_to_le32 (ed->dma);
		}
		ed->ed_prev = ohci->ed_controltail;
		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
			/* first control ED: enable the list and restart
			 * processing from its head */
			ohci->hc_control |= OHCI_CTRL_CLE;
			writel (0, &ohci->regs->ed_controlcurrent);
			writel (ohci->hc_control, &ohci->regs->control);
		}
		ohci->ed_controltail = ed;
		break;

	case PIPE_BULK:
		if (ohci->ed_bulktail == NULL) {
			writel (ed->dma, &ohci->regs->ed_bulkhead);
		} else {
			ohci->ed_bulktail->ed_next = ed;
			ohci->ed_bulktail->hwNextED = cpu_to_le32 (ed->dma);
		}
		ed->ed_prev = ohci->ed_bulktail;
		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
			/* first bulk ED: enable the list likewise */
			ohci->hc_control |= OHCI_CTRL_BLE;
			writel (0, &ohci->regs->ed_bulkcurrent);
			writel (ohci->hc_control, &ohci->regs->control);
		}
		ohci->ed_bulktail = ed;
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		branch = balance (ohci, ed->interval, ed->load);
		if (branch < 0) {
			ohci_dbg (ohci,
				"ERR %d, interval %d msecs, load %d\n",
				branch, ed->interval, ed->load);
			// FIXME if there are TDs queued, fail them!
			return branch;
		}
		ed->branch = branch;
		periodic_link (ohci, ed);
	}

	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */
	return 0;
}
+
+/*-------------------------------------------------------------------------*/
+
+/* scan the periodic table to find and unlink this ED */
/* scan the periodic table to find and unlink this ED from every branch
 * it was placed on, subtracting its load from each branch.
 */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	int	i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	*temp;
		struct ed	**prev = &ohci->periodic [i];
		u32		*prev_p = &ohci->hcca->int_table [i];

		/* walk software and hardware links in lockstep */
		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ohci->load [i] -= ed->load;
	}
	hcd_to_bus (&ohci->hcd)->bandwidth_allocated -= ed->load / ed->interval;

	ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & ED_ISO) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}
+
+/* unlink an ed from one of the HC chains. 
+ * just the link to the ed is unlinked.
+ * the link from the ed still points to another operational ed or 0
+ * so the HC can eventually finish the processing of the unlinked ed
+ */
/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
	/* tell the HC to skip this ED while we take it off the schedule */
	ed->hwINFO |= ED_SKIP;

	switch (ed->type) {
	case PIPE_CONTROL:
		if (ed->ed_prev == NULL) {
			/* unlinking the list head */
			if (!ed->hwNextED) {
				/* list becomes empty: disable it first */
				ohci->hc_control &= ~OHCI_CTRL_CLE;
				writel (ohci->hc_control, &ohci->regs->control);
				writel (0, &ohci->regs->ed_controlcurrent);
				// post those pci writes
				(void) readl (&ohci->regs->control);
			}
			writel (le32_to_cpup (&ed->hwNextED),
				&ohci->regs->ed_controlhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		if (ohci->ed_controltail == ed) {
			ohci->ed_controltail = ed->ed_prev;
			if (ohci->ed_controltail)
				ohci->ed_controltail->ed_next = 0;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	case PIPE_BULK:
		if (ed->ed_prev == NULL) {
			/* unlinking the list head */
			if (!ed->hwNextED) {
				/* list becomes empty: disable it first */
				ohci->hc_control &= ~OHCI_CTRL_BLE;
				writel (ohci->hc_control, &ohci->regs->control);
				writel (0, &ohci->regs->ed_bulkcurrent);
				// post those pci writes
				(void) readl (&ohci->regs->control);
			}
			writel (le32_to_cpup (&ed->hwNextED),
				&ohci->regs->ed_bulkhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		if (ohci->ed_bulktail == ed) {
			ohci->ed_bulktail = ed->ed_prev;
			if (ohci->ed_bulktail)
				ohci->ed_bulktail->ed_next = 0;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		periodic_unlink (ohci, ed);
		break;
	}

	/* NOTE: Except for a couple of exceptionally clean unlink cases
	 * (like unlinking the only c/b ED, with no TDs) HCs may still be
	 * caching this operational ED (or its address).  Safe unlinking
	 * involves not marking it ED_IDLE till INTR_SF; we always do that
	 * if td_list isn't empty.  Otherwise the race is small; but ...
	 */
	if (ed->state == ED_OPER) {
		ed->state = ED_IDLE;
		ed->hwINFO &= ~(ED_SKIP | ED_DEQUEUE);
		ed->hwHeadP &= ~ED_H;
		wmb ();
	}
}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* get and maybe (re)init an endpoint. init _should_ be done only as part
+ * of usb_set_configuration() or usb_set_interface() ... but the USB stack
+ * isn't very stateful, so we re-init whenever the HC isn't looking.
+ */
/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of usb_set_configuration() or usb_set_interface() ... but the USB stack
 * isn't very stateful, so we re-init whenever the HC isn't looking.
 *
 * Returns the cached or freshly allocated ED for this device endpoint,
 * or NULL on allocation failure.  Allocates a dummy TD so the ED's
 * head/tail pointers are valid before any real TD is queued.
 */
static struct ed *ed_get (
	struct ohci_hcd		*ohci,
	struct usb_device	*udev,
	unsigned int		pipe,
	int			interval
) {
	int			is_out = !usb_pipein (pipe);
	int			type = usb_pipetype (pipe);
	struct hcd_dev		*dev = (struct hcd_dev *) udev->hcpriv;
	struct ed		*ed;
	unsigned		ep;
	unsigned long		flags;

	/* per-device ep[] slot: endpoint number << 1, low bit set for
	 * non-control OUT (control shares one slot for both directions) */
	ep = usb_pipeendpoint (pipe) << 1;
	if (type != PIPE_CONTROL && is_out)
		ep |= 1;

	spin_lock_irqsave (&ohci->lock, flags);

	if (!(ed = dev->ep [ep])) {
		struct td	*td;

		ed = ed_alloc (ohci, SLAB_ATOMIC);
		if (!ed) {
			/* out of memory */
			goto done;
		}
		dev->ep [ep] = ed;

		/* dummy td; end of td list for ed */
		td = td_alloc (ohci, SLAB_ATOMIC);
		if (!td) {
			/* out of memory */
			ed_free (ohci, ed);
			ed = 0;
			goto done;
		}
		ed->dummy = td;
		ed->hwTailP = cpu_to_le32 (td->td_dma);
		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
		ed->state = ED_IDLE;
		ed->type = type;
	}

	/* NOTE: only ep0 currently needs this "re"init logic, during
	 * enumeration (after set_address, or if ep0 maxpacket >8).
	 */
	if (ed->state == ED_IDLE) {
		u32	info;

		/* build the ED control dword: FA, EN, MPS fields */
		info = usb_pipedevice (pipe);
		info |= (ep >> 1) << 7;
		info |= usb_maxpacket (udev, pipe, is_out) << 16;
		info = cpu_to_le32 (info);
		if (udev->speed == USB_SPEED_LOW)
			info |= ED_LOWSPEED;
		/* only control transfers store pids in tds */
		if (type != PIPE_CONTROL) {
			info |= is_out ? ED_OUT : ED_IN;
			if (type != PIPE_BULK) {
				/* periodic transfers... */
				if (type == PIPE_ISOCHRONOUS)
					info |= ED_ISO;
				else if (interval > 32) /* iso can be bigger */
					interval = 32;
				ed->interval = interval;
				ed->load = usb_calc_bus_time (
					udev->speed, !is_out,
					type == PIPE_ISOCHRONOUS,
					usb_maxpacket (udev, pipe, is_out))
						/ 1000;
			}
		}
		ed->hwINFO = info;
	}

done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return ed;
}
+
+/*-------------------------------------------------------------------------*/
+
+/* request unlinking of an endpoint from an operational HC.
+ * put the ep on the rm_list
+ * real work is done at the next start frame (SF) hardware interrupt
+ */
/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 */
static void start_urb_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= ED_DEQUEUE;
	ed->state = ED_UNLINK;
	ed_deschedule (ohci, ed);

	/* SF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave.  frame_no wraps every 2^16 msec, and changes right before
	 * SF is triggered.
	 */
	ed->tick = le16_to_cpu (ohci->hcca->frame_no) + 1;

	/* rm_list is just singly linked, for simplicity */
	ed->ed_next = ohci->ed_rm_list;
	ed->ed_prev = 0;
	ohci->ed_rm_list = ed;

	/* enable SOF interrupt */
	if (!ohci->sleeping) {
		/* clear any stale SF status, then enable the interrupt */
		writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
		writel (OHCI_INTR_SF, &ohci->regs->intrenable);
		// flush those pci writes
		(void) readl (&ohci->regs->control);
	}
}
+
+/*-------------------------------------------------------------------------*
+ * TD handling functions
+ *-------------------------------------------------------------------------*/
+
+/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
+
/* enqueue next TD for this URB (OHCI spec 5.2.8.2)
 *
 * Fills the ED's current dummy TD with this transfer segment and makes
 * urb_priv->td[index] the new dummy, so the HC always sees a valid
 * (head != tail) queue.  'info' carries the TD control word flags;
 * 'data'/'len' describe the DMA buffer segment (len may be 0).
 */
static void
td_fill (struct ohci_hcd *ohci, u32 info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td		*td, *td_pt;
	struct urb_priv		*urb_priv = urb->hcpriv;
	int			is_iso = info & TD_ISO;
	int			hash;

	// ASSERT (index < urb_priv->length);

	/* aim for only one interrupt per urb.  mostly applies to control
	 * and iso; other urbs rarely need more than one TD per urb.
	 * this way, only final tds (or ones with an error) cause IRQs.
	 * at least immediately; use DI=6 in case any control request is
	 * tempted to die part way through.
	 *
	 * NOTE: could delay interrupts even for the last TD, and get fewer
	 * interrupts ... increasing per-urb latency by sharing interrupts.
	 * Drivers that queue bulk urbs may request that behavior.
	 */
	if (index != (urb_priv->length - 1)
			|| (urb->transfer_flags & URB_NO_INTERRUPT))
		info |= TD_DI_SET (6);

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];

	/* fill the old dummy TD */
	td = urb_priv->td [index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;	/* zero-length: NULL current buffer pointer */

	td->hwINFO = cpu_to_le32 (info);
	if (is_iso) {
		/* iso TDs split the address into page + offset PSW */
		td->hwCBP = cpu_to_le32 (data & 0xFFFFF000);
		td->hwPSW [0] = cpu_to_le16 ((data & 0x0FFF) | 0xE000);
		td->ed->last_iso = info & 0xffff;
	} else {
		td->hwCBP = cpu_to_le32 (data);
	}
	if (data)
		td->hwBE = cpu_to_le32 (data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_le32 (td_pt->td_dma);

	/* append to queue */
	list_add_tail (&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC (td->td_dma);
	td->td_hash = ohci->td_hash [hash];
	ohci->td_hash [hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb ();
	td->ed->hwTailP = td->hwNextTD;
}
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare all TDs of a transfer, and queue them onto the ED.
+ * Caller guarantees HC is active.
+ * Usually the ED is already on the schedule, so TDs might be
+ * processed as soon as they're queued.
+ */
+static void td_submit_urb (
+       struct ohci_hcd *ohci,
+       struct urb      *urb
+) {
+       struct urb_priv *urb_priv = urb->hcpriv;
+       dma_addr_t      data;
+       int             data_len = urb->transfer_buffer_length;
+       int             cnt = 0;
+       u32             info = 0;
+       int             is_out = usb_pipeout (urb->pipe);
+
+       /* OHCI handles the bulk/interrupt data toggles itself.  We just
+        * use the device toggle bits for resetting, and rely on the fact
+        * that resetting toggle is meaningless if the endpoint is active.
+        */
+       if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
+               usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
+                       is_out, 1);
+               urb_priv->ed->hwHeadP &= ~ED_C;
+       }
+
+       urb_priv->td_cnt = 0;
+
+       if (data_len)
+               data = urb->transfer_dma;
+       else
+               data = 0;
+
+       /* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
+        * using TD_CC_GET, as well as by seeing them on the done list.
+        * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
+        */
+       switch (urb_priv->ed->type) {
+
+       /* Bulk and interrupt are identical except for where in the schedule
+        * their EDs live.
+        */
+       case PIPE_INTERRUPT:
+               /* ... and periodic urbs have extra accounting */
+               hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs++;
+               /* FALLTHROUGH */
+       case PIPE_BULK:
+               info = is_out
+                       ? TD_T_TOGGLE | TD_CC | TD_DP_OUT
+                       : TD_T_TOGGLE | TD_CC | TD_DP_IN;
+               /* TDs _could_ transfer up to 8K each */
+               while (data_len > 4096) {
+                       td_fill (ohci, info, data, 4096, urb, cnt);
+                       data += 4096;
+                       data_len -= 4096;
+                       cnt++;
+               }
+               /* maybe avoid ED halt on final TD short read */
+               if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
+                       info |= TD_R;
+               td_fill (ohci, info, data, data_len, urb, cnt);
+               cnt++;
+               if ((urb->transfer_flags & URB_ZERO_PACKET)
+                               && cnt < urb_priv->length) {
+                       td_fill (ohci, info, 0, 0, urb, cnt);
+                       cnt++;
+               }
+               /* maybe kickstart bulk list */
+               if (urb_priv->ed->type == PIPE_BULK) {
+                       wmb ();
+                       writel (OHCI_BLF, &ohci->regs->cmdstatus);
+               }
+               break;
+
+       /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
+        * any DATA phase works normally, and the STATUS ack is special.
+        */
+       case PIPE_CONTROL:
+               info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
+               td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
+               if (data_len > 0) {
+                       info = TD_CC | TD_R | TD_T_DATA1;
+                       info |= is_out ? TD_DP_OUT : TD_DP_IN;
+                       /* NOTE:  mishandles transfers >8K, some >4K */
+                       td_fill (ohci, info, data, data_len, urb, cnt++);
+               }
+               info = is_out
+                       ? TD_CC | TD_DP_IN | TD_T_DATA1
+                       : TD_CC | TD_DP_OUT | TD_T_DATA1;
+               td_fill (ohci, info, data, 0, urb, cnt++);
+               /* maybe kickstart control list */
+               wmb ();
+               writel (OHCI_CLF, &ohci->regs->cmdstatus);
+               break;
+
+       /* ISO has no retransmit, so no toggle; and it uses special TDs.
+        * Each TD could handle multiple consecutive frames (interval 1);
+        * we could often reduce the number of TDs here.
+        */
+       case PIPE_ISOCHRONOUS:
+               for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
+                       int     frame = urb->start_frame;
+
+                       // FIXME scheduling should handle frame counter
+                       // roll-around ... exotic case (and OHCI has
+                       // a 2^16 iso range, vs other HCs max of 2^10)
+                       frame += cnt * urb->interval;
+                       frame &= 0xffff;
+                       td_fill (ohci, TD_CC | TD_ISO | frame,
+                               data + urb->iso_frame_desc [cnt].offset,
+                               urb->iso_frame_desc [cnt].length, urb, cnt);
+               }
+               hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs++;
+               break;
+       }
+       // ASSERT (urb_priv->length == cnt);
+}
+
+/*-------------------------------------------------------------------------*
+ * Done List handling functions
+ *-------------------------------------------------------------------------*/
+
+/* calculate transfer length/status and update the urb
+ * PRECONDITION:  irqsafe (only for urb->status locking)
+ */
+/* calculate transfer length/status and update the urb
+ * PRECONDITION:  irqsafe (only for urb->status locking)
+ */
+static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
+{
+       u32     tdINFO = le32_to_cpup (&td->hwINFO);
+       int     cc = 0;
+
+       /* unlink the td from its endpoint's pending-td list; the
+        * caller owns it from here on.
+        */
+       list_del (&td->td_list);
+
+       /* ISO ... drivers see per-TD length/status */
+       if (tdINFO & TD_ISO) {
+               u16     tdPSW = le16_to_cpu (td->hwPSW [0]);
+               int     dlen = 0;
+
+               /* NOTE:  assumes FC in tdINFO == 0 (and MAXPSW == 1) */
+
+               /* condition code is the top nibble of the packet
+                * status word
+                */
+               cc = (tdPSW >> 12) & 0xF;
+               if (tdINFO & TD_CC)     /* hc didn't touch? */
+                       return;
+
+               if (usb_pipeout (urb->pipe))
+                       dlen = urb->iso_frame_desc [td->index].length;
+               else {
+                       /* short reads are always OK for ISO */
+                       if (cc == TD_DATAUNDERRUN)
+                               cc = TD_CC_NOERROR;
+                       /* PSW low bits hold the received byte count */
+                       dlen = tdPSW & 0x3ff;
+               }
+               urb->actual_length += dlen;
+               urb->iso_frame_desc [td->index].actual_length = dlen;
+               urb->iso_frame_desc [td->index].status = cc_to_error [cc];
+
+               if (cc != TD_CC_NOERROR)
+                       ohci_vdbg (ohci,
+                               "urb %p iso td %p (%d) len %d cc %d\n",
+                               urb, td, 1 + td->index, dlen, cc);
+
+       /* BULK, INT, CONTROL ... drivers see aggregate length/status,
+        * except that "setup" bytes aren't counted and "short" transfers
+        * might not be reported as errors.
+        */
+       } else {
+               int     type = usb_pipetype (urb->pipe);
+               u32     tdBE = le32_to_cpup (&td->hwBE);
+
+               cc = TD_CC_GET (tdINFO);
+
+               /* control endpoints only have soft stalls */
+               if (type != PIPE_CONTROL && cc == TD_CC_STALL)
+                       usb_endpoint_halt (urb->dev,
+                               usb_pipeendpoint (urb->pipe),
+                               usb_pipeout (urb->pipe));
+
+               /* update packet status if needed (short is normally ok) */
+               if (cc == TD_DATAUNDERRUN
+                               && !(urb->transfer_flags & URB_SHORT_NOT_OK))
+                       cc = TD_CC_NOERROR;
+               /* cc >= 0x0E means "not accessed" by the hc, not a
+                * real error; don't report those to the driver
+                */
+               if (cc != TD_CC_NOERROR && cc < 0x0E) {
+                       spin_lock (&urb->lock);
+                       if (urb->status == -EINPROGRESS)
+                               urb->status = cc_to_error [cc];
+                       spin_unlock (&urb->lock);
+               }
+
+               /* count all non-empty packets except control SETUP packet */
+               if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
+                       /* hwCBP == 0 means the whole buffer (up through
+                        * the BE address) was transferred; otherwise CBP
+                        * is the next untransferred buffer address, so
+                        * bytes done = CBP - buffer start.
+                        */
+                       if (td->hwCBP == 0)
+                               urb->actual_length += tdBE - td->data_dma + 1;
+                       else
+                               urb->actual_length +=
+                                         le32_to_cpup (&td->hwCBP)
+                                       - td->data_dma;
+               }
+
+               if (cc != TD_CC_NOERROR && cc < 0x0E)
+                       ohci_vdbg (ohci,
+                               "urb %p td %p (%d) cc %d, len=%d/%d\n",
+                               urb, td, 1 + td->index, cc,
+                               urb->actual_length,
+                               urb->transfer_buffer_length);
+       }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* A fatal condition code halted this td's endpoint (ED_H set).  Un-halt
+ * the ED, move any later TDs belonging to the same urb onto the software
+ * done list (prepended to 'rev'), and return the new done-list head.
+ * Called from dl_reverse_done_list() with ohci->lock held.
+ */
+static inline struct td *
+ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
+{
+       struct urb              *urb = td->urb;
+       struct ed               *ed = td->ed;
+       struct list_head        *tmp = td->td_list.next;
+       /* remember the ED's data-toggle carry bit so it survives the
+        * hwHeadP rewrites below
+        */
+       u32                     toggle = ed->hwHeadP & ED_C;
+
+       /* clear ed halt; this is the td that caused it, but keep it inactive
+        * until its urb->complete() has a chance to clean up.
+        */
+       ed->hwINFO |= ED_SKIP;
+       wmb ();
+       ed->hwHeadP &= ~ED_H; 
+
+       /* put any later tds from this urb onto the donelist, after 'td',
+        * order won't matter here: no errors, and nothing was transferred.
+        * also patch the ed so it looks as if those tds completed normally.
+        */
+       while (tmp != &ed->td_list) {
+               struct td       *next;
+               u32             info;
+
+               next = list_entry (tmp, struct td, td_list);
+               tmp = next->td_list.next;
+
+               /* stop at the first td of a different urb */
+               if (next->urb != urb)
+                       break;
+
+               /* NOTE: if multi-td control DATA segments get supported,
+                * this urb had one of them, this td wasn't the last td
+                * in that segment (TD_R clear), this ed halted because
+                * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
+                * then we need to leave the control STATUS packet queued
+                * and clear ED_SKIP.
+                */
+               /* mark it done with a "no error" condition code */
+               info = next->hwINFO;
+               info |= cpu_to_le32 (TD_DONE);
+               info &= ~cpu_to_le32 (TD_CC);
+               next->hwINFO = info;
+
+               /* prepend to the (reversed) software done list */
+               next->next_dl_td = rev; 
+               rev = next;
+
+               /* advance the ED head past this td, preserving the
+                * toggle carry; fix the tail too if it pointed here
+                */
+               if (ed->hwTailP == cpu_to_le32 (next->td_dma))
+                       ed->hwTailP = next->hwNextTD;
+               ed->hwHeadP = next->hwNextTD | toggle;
+       }
+
+       /* help for troubleshooting:  report anything that
+        * looks odd ... that doesn't include protocol stalls
+        * (or maybe some other things)
+        */
+       if (cc != TD_CC_STALL || !usb_pipecontrol (urb->pipe))
+               ohci_dbg (ohci,
+                       "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
+                       urb, urb->dev->devpath,
+                       usb_pipeendpoint (urb->pipe),
+                       usb_pipein (urb->pipe) ? "in" : "out",
+                       le32_to_cpu (td->hwINFO),
+                       cc, cc_to_error [cc]);
+
+       return rev;
+}
+
+/* replies to the request have to be on a FIFO basis so
+ * we unreverse the hc-reversed done-list
+ */
+static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
+{
+       u32             td_dma;
+       struct td       *td_rev = NULL;
+       struct td       *td = NULL;
+       unsigned long   flags;
+
+       /* claim the hc's done queue:  read the dma address of its head
+        * from the HCCA, then zero it so the hc can post a new list.
+        */
+       spin_lock_irqsave (&ohci->lock, flags);
+       td_dma = le32_to_cpup (&ohci->hcca->done_head);
+       ohci->hcca->done_head = 0;
+
+       /* get TD from hc's singly linked list, and
+        * prepend to ours.  ed->td_list changes later.
+        */
+       while (td_dma) {                
+               int             cc;
+
+               /* map the dma address back to our td; a failure here
+                * means the hc handed us an address we never gave it.
+                */
+               td = dma_to_td (ohci, td_dma);
+               if (!td) {
+                       ohci_err (ohci, "bad entry %8x\n", td_dma);
+                       break;
+               }
+
+               td->hwINFO |= cpu_to_le32 (TD_DONE);
+               cc = TD_CC_GET (le32_to_cpup (&td->hwINFO));
+
+               /* Non-iso endpoints can halt on error; un-halt,
+                * and dequeue any other TDs from this urb.
+                * No other TD could have caused the halt.
+                */
+               if (cc != TD_CC_NOERROR && (td->ed->hwHeadP & ED_H))
+                       td_rev = ed_halted (ohci, td, cc, td_rev);
+
+               /* prepending undoes the hc's LIFO ordering */
+               td->next_dl_td = td_rev;        
+               td_rev = td;
+               td_dma = le32_to_cpup (&td->hwNextTD);
+       }       
+       spin_unlock_irqrestore (&ohci->lock, flags);
+       return td_rev;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* wrap-aware logic stolen from <linux/jiffies.h>:
+ * true iff tick t1 precedes t2, modulo 16-bit frame-counter wraparound
+ */
+#define tick_before(t1,t2) ((((s16)(t1))-((s16)(t2))) < 0)
+
+/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
+static void
+finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
+{
+       struct ed       *ed, **last;
+
+rescan_all:
+       for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
+               struct list_head        *entry, *tmp;
+               int                     completed, modified;
+               u32                     *prev;
+
+               /* only take off EDs that the HC isn't using, accounting for
+                * frame counter wraps.
+                */
+               if (tick_before (tick, ed->tick) && !ohci->disabled) {
+                       last = &ed->ed_next;
+                       continue;
+               }
+
+               /* reentrancy:  if we drop the schedule lock, someone might
+                * have modified this list.  normally it's just prepending
+                * entries (which we'd ignore), but paranoia won't hurt.
+                */
+               *last = ed->ed_next;
+               ed->ed_next = 0;
+               modified = 0;
+
+               /* unlink urbs as requested, but rescan the list after
+                * we call a completion since it might have unlinked
+                * another (earlier) urb
+                */
+rescan_this:
+               completed = 0;
+               /* prev tracks the hardware "next td" pointer that leads
+                * to the current td, starting at the ED's head pointer
+                */
+               prev = &ed->hwHeadP;
+               list_for_each_safe (entry, tmp, &ed->td_list) {
+                       struct td       *td;
+                       struct urb      *urb;
+                       urb_priv_t      *urb_priv;
+                       u32             savebits;
+
+                       td = list_entry (entry, struct td, td_list);
+                       urb = td->urb;
+                       urb_priv = td->urb->hcpriv;
+
+                       /* keep tds of urbs that weren't marked for delete */
+                       if (urb_priv->state != URB_DEL) {
+                               prev = &td->hwNextTD;
+                               continue;
+                       }
+
+                       /* patch pointers hc uses ... tail, if we're removing
+                        * an otherwise active td, and whatever td pointer
+                        * points to this td
+                        */
+                       if (ed->hwTailP == cpu_to_le32 (td->td_dma))
+                               ed->hwTailP = td->hwNextTD;
+                       /* the low bits of hwHeadP carry flags (not part of
+                        * the td address, per TD_MASK); preserve them
+                        */
+                       savebits = *prev & ~cpu_to_le32 (TD_MASK);
+                       *prev = td->hwNextTD | savebits;
+
+                       /* HC may have partly processed this TD */
+                       td_done (ohci, urb, td);
+                       urb_priv->td_cnt++;
+
+                       /* if URB is done, clean up */
+                       if (urb_priv->td_cnt == urb_priv->length) {
+                               modified = completed = 1;
+                               /* drop the lock: finish_urb gives the urb
+                                * back, which can re-enter the hcd
+                                */
+                               spin_unlock (&ohci->lock);
+                               finish_urb (ohci, urb, regs);
+                               spin_lock (&ohci->lock);
+                       }
+               }
+               if (completed && !list_empty (&ed->td_list))
+                       goto rescan_this;
+
+               /* ED's now officially unlinked, hc doesn't see */
+               ed->state = ED_IDLE;
+               ed->hwINFO &= ~(ED_SKIP | ED_DEQUEUE);
+               ed->hwHeadP &= ~ED_H;
+               ed->hwNextED = 0;
+
+               /* but if there's work queued, reschedule */
+               if (!list_empty (&ed->td_list)) {
+                       if (!ohci->disabled && !ohci->sleeping)
+                               ed_schedule (ohci, ed);
+               }
+
+               /* a completion may have changed ed_rm_list; start over */
+               if (modified)
+                       goto rescan_all;
+       }
+
+       /* maybe reenable control and bulk lists */ 
+       if (!ohci->disabled && !ohci->ed_rm_list) {
+               u32     command = 0, control = 0;
+
+               if (ohci->ed_controltail) {
+                       command |= OHCI_CLF;
+                       if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
+                               control |= OHCI_CTRL_CLE;
+                               writel (0, &ohci->regs->ed_controlcurrent);
+                       }
+               }
+               if (ohci->ed_bulktail) {
+                       command |= OHCI_BLF;
+                       if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
+                               control |= OHCI_CTRL_BLE;
+                               writel (0, &ohci->regs->ed_bulkcurrent);
+                       }
+               }
+               
+               /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
+               if (control) {
+                       ohci->hc_control |= control;
+                       writel (ohci->hc_control, &ohci->regs->control);   
+               }
+               if (command)
+                       writel (command, &ohci->regs->cmdstatus);   
+       }
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Process normal completions (error or success) and clean the schedules.
+ *
+ * This is the main path for handing urbs back to drivers.  The only other
+ * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
+ * scanning the (re-reversed) donelist as this does.
+ */
+static void
+dl_done_list (struct ohci_hcd *ohci, struct td *td, struct pt_regs *regs)
+{
+       unsigned long   flags;
+
+       spin_lock_irqsave (&ohci->lock, flags);
+       /* walk the (already un-reversed) done list; 'td' is its head */
+       while (td) {
+               struct td       *td_next = td->next_dl_td;
+               struct urb      *urb = td->urb;
+               urb_priv_t      *urb_priv = urb->hcpriv;
+               struct ed       *ed = td->ed;
+
+               /* update URB's length and status from TD */
+               td_done (ohci, urb, td);
+               urb_priv->td_cnt++;
+
+               /* If all this urb's TDs are done, call complete() */
+               if (urb_priv->td_cnt == urb_priv->length) {
+                       /* drop the lock: finish_urb gives the urb back
+                        * and may re-enter the hcd
+                        */
+                       spin_unlock (&ohci->lock);
+                       finish_urb (ohci, urb, regs);
+                       spin_lock (&ohci->lock);
+               }
+
+               /* clean schedule:  unlink EDs that are no longer busy */
+               if (list_empty (&ed->td_list))
+                       ed_deschedule (ohci, ed);
+               /* ... reenabling halted EDs only after fault cleanup */
+               else if (!(ed->hwINFO & ED_DEQUEUE)) {
+                       /* clear ED_SKIP once the next pending td is one
+                        * the hc hasn't retired yet
+                        */
+                       td = list_entry (ed->td_list.next, struct td, td_list);
+                       if (!(td->hwINFO & TD_DONE))
+                               ed->hwINFO &= ~ED_SKIP;
+               }
+
+               td = td_next;
+       }  
+       spin_unlock_irqrestore (&ohci->lock, flags);
+}