From d408894fa4263440ed8a9e68566bacea7e6f6bed Mon Sep 17 00:00:00 2001
From: Sergei Shtylyov <sshtylyov-hkdhdckH98+B+jHODAdFcQ@public.gmane.org>
Date: Fri, 27 Mar 2009 12:57:50 -0700
Subject: [PATCH] musb_host: streamline musb_cleanup_urb() calls

The argument for the 'is_in' parameter of musb_cleanup_urb()
is always extracted from an URB that's passed to the function.
So that parameter is superfluous; remove it.

Signed-off-by: Sergei Shtylyov <sshtylyov-hkdhdckH98+B+jHODAdFcQ@public.gmane.org>
Signed-off-by: David Brownell <dbrownell-Rn4VEauK+AKRv+LV9MX5uipxlwaOVQ5f@public.gmane.org>
---
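[ Note below the '---' line, not for the commit log: for anyone wondering
why the dropped argument is redundant, usb_pipein() is simply a test of
the USB_DIR_IN bit (bit 7) of the pipe word, i.e. the very expression the
old call sites computed by hand.  The standalone sketch below demonstrates
the equivalence; the two #defines mirror the kernel's <linux/usb.h> and
ch9.h definitions, everything else is illustrative only and not part of
the patch.

/* Sketch only: usb_pipein(pipe) and the open-coded (pipe & USB_DIR_IN)
 * that the old call sites used are the same test.
 */
#include <stdio.h>

#define USB_DIR_IN		0x80			/* device-to-host, per ch9.h */
#define usb_pipein(pipe)	((pipe) & USB_DIR_IN)	/* as in <linux/usb.h> */

int main(void)
{
	unsigned int pipe;

	/* The direction bit lives in the low byte of the pipe word;
	 * check every possible value of that byte.
	 */
	for (pipe = 0; pipe < 0x100; pipe++)
		if ((pipe & USB_DIR_IN) != usb_pipein(pipe))
			return 1;	/* never reached: the two always agree */

	printf("usb_pipein(pipe) == (pipe & USB_DIR_IN) for all pipes\n");
	return 0;
}

Since both forms read only urb->pipe, letting musb_cleanup_urb() derive
the direction itself cannot change behavior at any call site. ]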
 drivers/usb/musb/musb_host.c |    9 +++++----
 1 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index e121e0e..71e835e 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1950,14 +1950,15 @@ done:
  * called with controller locked, irqs blocked
  * that hardware queue advances to the next transfer, unless prevented
  */
-static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
 {
 	struct musb_hw_ep	*ep = qh->hw_ep;
 	void __iomem		*epio = ep->regs;
 	unsigned		hw_end = ep->epnum;
 	void __iomem		*regs = ep->musb->mregs;
-	u16			csr;
+	int			is_in = usb_pipein(urb->pipe);
 	int			status = 0;
+	u16			csr;
 
 	musb_ep_select(regs, hw_end);
 
@@ -2056,7 +2057,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 			kfree(qh);
 		}
 	} else
-		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+		ret = musb_cleanup_urb(urb, qh);
 done:
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return ret;
@@ -2090,7 +2091,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 		urb->status = -ESHUTDOWN;
 
 		/* cleanup */
-		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+		musb_cleanup_urb(urb, qh);
 
 		/* Then nuke all the others ... and advance the
 		 * queue on hw_ep (e.g. bulk ring) when we're done.
--
1.6.0.4