Diffstat (limited to 'recipes/kexecboot/linux-kexecboot-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch')
-rw-r--r-- | recipes/kexecboot/linux-kexecboot-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch | 218 |
1 files changed, 218 insertions, 0 deletions
diff --git a/recipes/kexecboot/linux-kexecboot-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch b/recipes/kexecboot/linux-kexecboot-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch
new file mode 100644
index 0000000000..fadad9e44a
--- /dev/null
+++ b/recipes/kexecboot/linux-kexecboot-2.6.29/musb/0003-USB-musb-NAK-timeout-scheme-on-bulk-RX-endpoint.patch
@@ -0,0 +1,218 @@
+From ba7b26e69f4bb41f10be444c5fded853330f82b5 Mon Sep 17 00:00:00 2001
+From: Ajay Kumar Gupta <ajay.gupta-l0cyMroinI0@public.gmane.org>
+Date: Tue, 24 Mar 2009 17:22:51 -0700
+Subject: [PATCH] USB: musb: NAK timeout scheme on bulk RX endpoint
+
+Fixes endpoint starvation issue when more than one bulk QH is
+multiplexed on the reserved bulk RX endpoint, which is normal
+for cases like serial and ethernet adapters.
+
+This patch sets the NAK timeout interval for such QHs, and when
+a timeout triggers the next QH will be scheduled. (This resembles
+the bulk scheduling done in hardware by EHCI, OHCI, and UHCI.)
+
+This scheme doesn't work for devices which are connected to a
+high to full speed tree (transaction translator) as there is
+no NAK timeout interrupt from the musb controller from such
+devices.
+
+Tested with PIO, Inventra DMA, CPPI DMA.
+
+[ dbrownell-Rn4VEauK+AKRv+LV9MX5uipxlwaOVQ5f@public.gmane.org: fold in start_urb() update;
+ clarify only for bulk RX; don't accidentally clear WZC bits ]
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta-l0cyMroinI0@public.gmane.org>
+Cc: Felipe Balbi <felipe.balbi-xNZwKgViW5gAvxtiuMwx3w@public.gmane.org>
+Signed-off-by: David Brownell <dbrownell-Rn4VEauK+AKRv+LV9MX5uipxlwaOVQ5f@public.gmane.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh-l3A5Bk7waGM@public.gmane.org>
+---
+ drivers/usb/musb/musb_host.c | 112 ++++++++++++++++++++++++++++++++----------
+ 1 files changed, 85 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 6dbbd07..bd1d5ae 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -64,11 +64,8 @@
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+- * - Still no traffic scheduling code to make NAKing for bulk or control
+- * transfers unable to starve other requests; or to make efficient use
+- * of hardware with periodic transfers. (Note that network drivers
+- * commonly post bulk reads that stay pending for a long time; these
+- * would make very visible trouble.)
++ * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
++ * starvation ... nothing yet for TX, interrupt, or bulk.
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+@@ -88,11 +85,8 @@
+ *
+ * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+- *
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+- * benefit from it ... one remote device may easily be NAKing while others
+- * need to perform transfers in that same direction. The same thing could
+- * be done in software though, assuming dma cooperates.)
++ * benefit from it.)
+ *
+ * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic: the endpoint will be
+@@ -201,8 +195,9 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+ len = urb->iso_frame_desc[0].length;
+ break;
+ default: /* bulk, interrupt */
+- buf = urb->transfer_buffer;
+- len = urb->transfer_buffer_length;
++ /* actual_length may be nonzero on retry paths */
++ buf = urb->transfer_buffer + urb->actual_length;
++ len = urb->transfer_buffer_length - urb->actual_length;
+ }
+
+ DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+@@ -1045,7 +1040,8 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * control transfer, if another one is queued, so that
+- * ep0 is more likely to stay busy.
++ * ep0 is more likely to stay busy. That's already done
++ * for bulk RX transfers.
+ *
+ * if (qh->ring.next != &musb->control), then
+ * we have a candidate... NAKing is *NOT* an error
+@@ -1197,6 +1193,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
+ /* NOTE: this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) tx urb
+ * that could use this fifo. (dma complicates it...)
++ * That's already done for bulk RX transfers.
+ *
+ * if (bulk && qh->ring.next != &musb->out_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+@@ -1358,6 +1355,50 @@ finish:
+
+ #endif
+
++/* Schedule next QH from musb->in_bulk and move the current qh to
++ * the end; avoids starvation for other endpoints.
++ */
++static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
++{
++ struct dma_channel *dma;
++ struct urb *urb;
++ void __iomem *mbase = musb->mregs;
++ void __iomem *epio = ep->regs;
++ struct musb_qh *cur_qh, *next_qh;
++ u16 rx_csr;
++
++ musb_ep_select(mbase, ep->epnum);
++ dma = is_dma_capable() ? ep->rx_channel : NULL;
++
++ /* clear nak timeout bit */
++ rx_csr = musb_readw(epio, MUSB_RXCSR);
++ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
++ rx_csr &= ~MUSB_RXCSR_DATAERROR;
++ musb_writew(epio, MUSB_RXCSR, rx_csr);
++
++ cur_qh = first_qh(&musb->in_bulk);
++ if (cur_qh) {
++ urb = next_urb(cur_qh);
++ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
++ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
++ musb->dma_controller->channel_abort(dma);
++ urb->actual_length += dma->actual_len;
++ dma->actual_len = 0L;
++ }
++ musb_save_toggle(ep, 1, urb);
++
++ /* move cur_qh to end of queue */
++ list_move_tail(&cur_qh->ring, &musb->in_bulk);
++
++ /* get the next qh from musb->in_bulk */
++ next_qh = first_qh(&musb->in_bulk);
++
++ /* set rx_reinit and schedule the next qh */
++ ep->rx_reinit = 1;
++ musb_start_urb(musb, 1, next_qh);
++ }
++}
++
+ /*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+@@ -1421,18 +1462,26 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+ } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
+
+ if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+- /* NOTE this code path would be a good place to PAUSE a
+- * transfer, if there's some other (nonperiodic) rx urb
+- * that could use this fifo. (dma complicates it...)
++ DBG(6, "RX end %d NAK timeout\n", epnum);
++
++ /* NOTE: NAKing is *NOT* an error, so we want to
++ * continue. Except ... if there's a request for
++ * another QH, use that instead of starving it.
+ *
+- * if (bulk && qh->ring.next != &musb->in_bulk), then
+- * we have a candidate... NAKing is *NOT* an error
++ * Devices like Ethernet and serial adapters keep
++ * reads posted at all times, which will starve
++ * other devices without this logic.
+ */
+- DBG(6, "RX end %d NAK timeout\n", epnum);
++ if (usb_pipebulk(urb->pipe)
++ && qh->mux == 1
++ && !list_is_singular(&musb->in_bulk)) {
++ musb_bulk_rx_nak_timeout(musb, hw_ep);
++ return;
++ }
+ musb_ep_select(mbase, epnum);
+- musb_writew(epio, MUSB_RXCSR,
+- MUSB_RXCSR_H_WZC_BITS
+- | MUSB_RXCSR_H_REQPKT);
++ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
++ rx_csr &= ~MUSB_RXCSR_DATAERROR;
++ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+ goto finish;
+ } else {
+@@ -1756,6 +1805,17 @@ static int musb_schedule(
+ head = &musb->in_bulk;
+ else
+ head = &musb->out_bulk;
++
++ /* Enable bulk RX NAK timeout scheme when bulk requests are
++ * multiplexed. This scheme doen't work in high speed to full
++ * speed scenario as NAK interrupts are not coming from a
++ * full speed device connected to a high speed device.
++ * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
++ * 4 (8 frame or 8ms) for FS device.
++ */
++ if (is_in && qh->dev)
++ qh->intv_reg =
++ (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
+ goto success;
+ } else if (best_end < 0) {
+ return -ENOSPC;
+@@ -1888,13 +1948,11 @@ static int musb_urb_enqueue(
+ *
+ * The downside of disabling this is that transfer scheduling
+ * gets VERY unfair for nonperiodic transfers; a misbehaving
+- * peripheral could make that hurt. Or for reads, one that's
+- * perfectly normal: network and other drivers keep reads
+- * posted at all times, having one pending for a week should
+- * be perfectly safe.
++ * peripheral could make that hurt. That's perfectly normal
++ * for reads from network or serial adapters ... so we have
++ * partial NAKlimit support for bulk RX.
+ *
+- * The upside of disabling it is avoidng transfer scheduling
+- * code to put this aside for while.
++ * The upside of disabling it is simpler transfer scheduling.
+ */
+ interval = 0;
+ }
+--
+1.6.0.4
+
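
Editor's note: the scheduling change above boils down to a round-robin rotation of musb->in_bulk whenever the QH that currently owns the shared bulk RX endpoint hits its NAK limit. The standalone sketch below only illustrates that rotation idea; it is ordinary user-space C, not kernel code, and every name in it (demo_qh, nak_timeout, the device labels) is invented for the demonstration rather than taken from the patch.

#include <stdio.h>

#define NQH 3

struct demo_qh { const char *name; };

/* stands in for musb->in_bulk: index 0 currently owns the shared endpoint */
static struct demo_qh *queue[NQH];

/* rotate the head to the tail, as list_move_tail() does in the patch */
static void nak_timeout(void)
{
	struct demo_qh *head = queue[0];
	for (int i = 0; i < NQH - 1; i++)
		queue[i] = queue[i + 1];
	queue[NQH - 1] = head;
}

int main(void)
{
	static struct demo_qh eth = { "usb-ethernet" };
	static struct demo_qh ser = { "usb-serial" };
	static struct demo_qh other = { "other-bulk-in" };

	queue[0] = &eth;
	queue[1] = &ser;
	queue[2] = &other;

	/* each simulated NAK timeout hands the endpoint to the next QH,
	 * so a device that only NAKs can no longer starve the others */
	for (int round = 0; round < 5; round++) {
		printf("endpoint now serves: %s\n", queue[0]->name);
		nak_timeout();
	}
	return 0;
}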