summaryrefslogtreecommitdiff
path: root/packages/linux/linux-2.6.18/atmel-husb2-udc-driver.patch
diff options
context:
space:
mode:
Diffstat (limited to 'packages/linux/linux-2.6.18/atmel-husb2-udc-driver.patch')
-rw-r--r--packages/linux/linux-2.6.18/atmel-husb2-udc-driver.patch2488
1 files changed, 2488 insertions, 0 deletions
diff --git a/packages/linux/linux-2.6.18/atmel-husb2-udc-driver.patch b/packages/linux/linux-2.6.18/atmel-husb2-udc-driver.patch
new file mode 100644
index 0000000000..f46a8f1dd8
--- /dev/null
+++ b/packages/linux/linux-2.6.18/atmel-husb2-udc-driver.patch
@@ -0,0 +1,2488 @@
+From nobody Mon Sep 17 00:00:00 2001
+From: Håvard Skinnemoen <hskinnemoen@atmel.com>
+Date: Fri Nov 18 18:13:25 2005 +0100
+Subject: [PATCH] Driver for the Atmel HUSB2 Device Controller
+
+This adds the driver for the Atmel HUSB2 Device Controller.
+
+---
+
+ drivers/usb/gadget/Kconfig | 10
+ drivers/usb/gadget/Makefile | 1
+ drivers/usb/gadget/gadget_chips.h | 8
+ drivers/usb/gadget/husb2_udc.c | 1998 ++++++++++++++++++++++++++++++++++++++
+ drivers/usb/gadget/husb2_udc.h | 406 +++++++
+ 5 files changed, 2423 insertions(+)
+
+Index: linux-2.6.18-avr32/drivers/usb/gadget/Kconfig
+===================================================================
+--- linux-2.6.18-avr32.orig/drivers/usb/gadget/Kconfig 2006-11-02 15:54:18.000000000 +0100
++++ linux-2.6.18-avr32/drivers/usb/gadget/Kconfig 2006-11-02 15:56:20.000000000 +0100
+@@ -154,6 +154,16 @@ config USB_LH7A40X
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
++config USB_GADGET_HUSB2DEV
++ boolean "Atmel HUSB2DEVICE"
++ select USB_GADGET_DUALSPEED
++ depends on AVR32
++
++config USB_HUSB2DEV
++ tristate
++ depends on USB_GADGET_HUSB2DEV
++ default USB_GADGET
++ select USB_GADGET_SELECTED
+
+ config USB_GADGET_OMAP
+ boolean "OMAP USB Device Controller"
+Index: linux-2.6.18-avr32/drivers/usb/gadget/Makefile
+===================================================================
+--- linux-2.6.18-avr32.orig/drivers/usb/gadget/Makefile 2006-11-02 15:54:18.000000000 +0100
++++ linux-2.6.18-avr32/drivers/usb/gadget/Makefile 2006-11-02 15:56:20.000000000 +0100
+@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_GOKU) += goku_udc.o
+ obj-$(CONFIG_USB_OMAP) += omap_udc.o
+ obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o
+ obj-$(CONFIG_USB_AT91) += at91_udc.o
++obj-$(CONFIG_USB_HUSB2DEV) += husb2_udc.o
+
+ #
+ # USB gadget drivers
+Index: linux-2.6.18-avr32/drivers/usb/gadget/gadget_chips.h
+===================================================================
+--- linux-2.6.18-avr32.orig/drivers/usb/gadget/gadget_chips.h 2006-11-02 15:54:18.000000000 +0100
++++ linux-2.6.18-avr32/drivers/usb/gadget/gadget_chips.h 2006-11-02 15:56:20.000000000 +0100
+@@ -75,6 +75,12 @@
+ #define gadget_is_pxa27x(g) 0
+ #endif
+
++#ifdef CONFIG_USB_GADGET_HUSB2DEV
++#define gadget_is_husb2dev(g) !strcmp("husb2_udc", (g)->name)
++#else
++#define gadget_is_husb2dev(g) 0
++#endif
++
+ #ifdef CONFIG_USB_GADGET_S3C2410
+ #define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name)
+ #else
+@@ -169,5 +175,7 @@ static inline int usb_gadget_controller_
+ return 0x16;
+ else if (gadget_is_mpc8272(gadget))
+ return 0x17;
++ else if (gadget_is_husb2dev(gadget))
++ return 0x80;
+ return -ENOENT;
+ }
+Index: linux-2.6.18-avr32/drivers/usb/gadget/husb2_udc.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.18-avr32/drivers/usb/gadget/husb2_udc.c 2006-11-02 16:06:40.000000000 +0100
+@@ -0,0 +1,1998 @@
++/*
++ * Driver for the Atmel HUSB2device high speed USB device controller
++ *
++ * Copyright (C) 2005-2006 Atmel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#undef DEBUG
++
++#include <linux/config.h>
++#include <linux/clk.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/list.h>
++#include <linux/platform_device.h>
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++#include <linux/dmapool.h>
++#include <linux/delay.h>
++
++#include <asm/io.h>
++
++#include "husb2_udc.h"
++
++#define DRIVER_VERSION "0.9"
++
++#define DMA_ADDR_INVALID (~(dma_addr_t)0)
++
++#define FIFO_IOMEM_ID 0
++#define CTRL_IOMEM_ID 1
++
++#ifdef DEBUG
++#define DBG_ERR 0x0001 /* report all error returns */
++#define DBG_HW 0x0002 /* debug hardware initialization */
++#define DBG_GADGET 0x0004 /* calls to/from gadget driver */
++#define DBG_INT 0x0008 /* interrupts */
++#define DBG_BUS 0x0010 /* report changes in bus state */
++#define DBG_QUEUE 0x0020 /* debug request queue processing */
++#define DBG_FIFO 0x0040 /* debug FIFO contents */
++#define DBG_DMA 0x0080 /* debug DMA handling */
++#define DBG_REQ 0x0100 /* print out queued request length */
++#define DBG_ALL 0xffff
++#define DBG_NONE 0x0000
++
++#define DEBUG_LEVEL (DBG_ERR|DBG_REQ)
++#define DBG(level, fmt, ...) \
++ do { \
++ if ((level) & DEBUG_LEVEL) \
++ printk(KERN_DEBUG "udc: " fmt, ## __VA_ARGS__); \
++ } while (0)
++#else
++#define DBG(level, fmt...)
++#endif
++
++static struct husb2_udc the_udc;
++
++#ifdef CONFIG_DEBUG_FS
++#include <linux/debugfs.h>
++#include <asm/uaccess.h>
++
++static int queue_dbg_open(struct inode *inode, struct file *file)
++{
++ struct husb2_ep *ep = inode->u.generic_ip;
++ struct husb2_request *req, *req_copy;
++ struct list_head *queue_data;
++
++ queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
++ if (!queue_data)
++ return -ENOMEM;
++ INIT_LIST_HEAD(queue_data);
++
++ spin_lock_irq(&ep->udc->lock);
++ list_for_each_entry(req, &ep->queue, queue) {
++ req_copy = kmalloc(sizeof(*req_copy), GFP_ATOMIC);
++ if (!req_copy)
++ goto fail;
++ memcpy(req_copy, req, sizeof(*req_copy));
++ list_add_tail(&req_copy->queue, queue_data);
++ }
++ spin_unlock_irq(&ep->udc->lock);
++
++ file->private_data = queue_data;
++ return 0;
++
++fail:
++ spin_unlock_irq(&ep->udc->lock);
++ list_for_each_entry_safe(req, req_copy, queue_data, queue) {
++ list_del(&req->queue);
++ kfree(req);
++ }
++ kfree(queue_data);
++ return -ENOMEM;
++}
++
++/*
++ * bbbbbbbb llllllll IZS sssss nnnn FDL\n\0
++ *
++ * b: buffer address
++ * l: buffer length
++ * I/i: interrupt/no interrupt
++ * Z/z: zero/no zero
++ * S/s: short ok/short not ok
++ * s: status
++ * n: nr_packets
++ * F/f: submitted/not submitted to FIFO
++ * D/d: using/not using DMA
++ * L/l: last transaction/not last transaction
++ */
++static ssize_t queue_dbg_read(struct file *file, char __user *buf,
++ size_t nbytes, loff_t *ppos)
++{
++ struct list_head *queue = file->private_data;
++ struct husb2_request *req, *tmp_req;
++ size_t len, remaining, actual = 0;
++ char tmpbuf[38];
++
++ if (!access_ok(VERIFY_WRITE, buf, nbytes))
++ return -EFAULT;
++
++ mutex_lock(&file->f_dentry->d_inode->i_mutex);
++ list_for_each_entry_safe(req, tmp_req, queue, queue) {
++ len = snprintf(tmpbuf, sizeof(tmpbuf),
++ "%8p %08x %c%c%c %5d %4u %c%c%c\n",
++ req->req.buf, req->req.length,
++ req->req.no_interrupt ? 'i' : 'I',
++ req->req.zero ? 'Z' : 'z',
++ req->req.short_not_ok ? 's' : 'S',
++ req->req.status,
++ req->nr_pkts,
++ req->submitted ? 'F' : 'f',
++ req->using_dma ? 'D' : 'd',
++ req->last_transaction ? 'L' : 'l');
++ len = min(len, sizeof(tmpbuf));
++ if (len > nbytes)
++ break;
++
++ list_del(&req->queue);
++ kfree(req);
++
++ remaining = __copy_to_user(buf, tmpbuf, len);
++ actual += len - remaining;
++ if (remaining)
++ break;
++
++ nbytes -= len;
++ buf += len;
++ }
++ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
++
++ return actual;
++}
++
++static int queue_dbg_release(struct inode *inode, struct file *file)
++{
++ struct list_head *queue_data = file->private_data;
++ struct husb2_request *req, *tmp_req;
++
++ list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
++ list_del(&req->queue);
++ kfree(req);
++ }
++ kfree(queue_data);
++ return 0;
++}
++
++static int regs_dbg_open(struct inode *inode, struct file *file)
++{
++ struct husb2_udc *udc;
++ unsigned int i;
++ u32 *data;
++ int ret = -ENOMEM;
++
++ mutex_lock(&inode->i_mutex);
++ udc = inode->u.generic_ip;
++ data = kmalloc(inode->i_size, GFP_KERNEL);
++ if (!data)
++ goto out;
++
++ spin_lock_irq(&udc->lock);
++ for (i = 0; i < inode->i_size / 4; i++)
++ data[i] = __raw_readl(udc->regs + i * 4);
++ spin_unlock_irq(&udc->lock);
++
++ file->private_data = data;
++ ret = 0;
++
++out:
++ mutex_unlock(&inode->i_mutex);
++
++ return ret;
++}
++
++static ssize_t regs_dbg_read(struct file *file, char __user *buf,
++ size_t nbytes, loff_t *ppos)
++{
++ struct inode *inode = file->f_dentry->d_inode;
++ int ret;
++
++ mutex_lock(&inode->i_mutex);
++ ret = simple_read_from_buffer(buf, nbytes, ppos,
++ file->private_data,
++ file->f_dentry->d_inode->i_size);
++ mutex_unlock(&inode->i_mutex);
++
++ return ret;
++}
++
++static int regs_dbg_release(struct inode *inode, struct file *file)
++{
++ kfree(file->private_data);
++ return 0;
++}
++
++const struct file_operations queue_dbg_fops = {
++ .owner = THIS_MODULE,
++ .open = queue_dbg_open,
++ .llseek = no_llseek,
++ .read = queue_dbg_read,
++ .release = queue_dbg_release,
++};
++
++const struct file_operations regs_dbg_fops = {
++ .owner = THIS_MODULE,
++ .open = regs_dbg_open,
++ .llseek = generic_file_llseek,
++ .read = regs_dbg_read,
++ .release = regs_dbg_release,
++};
++
++static void husb2_ep_init_debugfs(struct husb2_udc *udc,
++ struct husb2_ep *ep)
++{
++ struct dentry *ep_root;
++
++ ep_root = debugfs_create_dir(ep_name(ep), udc->debugfs_root);
++ if (!ep_root)
++ goto err_root;
++ ep->debugfs_dir = ep_root;
++
++ ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
++ ep, &queue_dbg_fops);
++ if (!ep->debugfs_queue)
++ goto err_queue;
++
++ if (ep_can_dma(ep)) {
++ ep->debugfs_dma_status
++ = debugfs_create_u32("dma_status", 0400, ep_root,
++ &ep->last_dma_status);
++ if (!ep->debugfs_dma_status)
++ goto err_dma_status;
++ }
++
++ return;
++
++err_dma_status:
++ debugfs_remove(ep->debugfs_queue);
++err_queue:
++ debugfs_remove(ep_root);
++err_root:
++ dev_err(&ep->udc->pdev->dev,
++ "failed to create debugfs directory for %s\n", ep_name(ep));
++}
++
++static void husb2_ep_cleanup_debugfs(struct husb2_ep *ep)
++{
++ debugfs_remove(ep->debugfs_queue);
++ debugfs_remove(ep->debugfs_dma_status);
++ debugfs_remove(ep->debugfs_dir);
++ ep->debugfs_dma_status = NULL;
++ ep->debugfs_dir = NULL;
++}
++
++static void husb2_init_debugfs(struct husb2_udc *udc)
++{
++ struct dentry *root, *regs;
++ struct resource *regs_resource;
++
++ root = debugfs_create_dir(udc->gadget.name, NULL);
++ if (IS_ERR(root) || !root)
++ goto err_root;
++ udc->debugfs_root = root;
++
++ regs = debugfs_create_file("regs", 0400, root, udc, &regs_dbg_fops);
++ if (!regs)
++ goto err_regs;
++
++ regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
++ CTRL_IOMEM_ID);
++ regs->d_inode->i_size = regs_resource->end - regs_resource->start + 1;
++ udc->debugfs_regs = regs;
++
++ husb2_ep_init_debugfs(udc, to_husb2_ep(udc->gadget.ep0));
++
++ return;
++
++err_regs:
++ debugfs_remove(root);
++err_root:
++ udc->debugfs_root = NULL;
++ dev_err(&udc->pdev->dev, "debugfs is not available\n");
++}
++
++static void husb2_cleanup_debugfs(struct husb2_udc *udc)
++{
++ husb2_ep_cleanup_debugfs(to_husb2_ep(udc->gadget.ep0));
++ debugfs_remove(udc->debugfs_regs);
++ debugfs_remove(udc->debugfs_root);
++ udc->debugfs_regs = NULL;
++ udc->debugfs_root = NULL;
++}
++#else
++static inline void husb2_ep_init_debugfs(struct husb2_udc *udc,
++ struct husb2_ep *ep)
++{
++
++}
++
++static inline void husb2_ep_cleanup_debugfs(struct husb2_ep *ep)
++{
++
++}
++
++static inline void husb2_init_debugfs(struct husb2_udc *udc)
++{
++
++}
++
++static inline void husb2_cleanup_debugfs(struct husb2_udc *udc)
++{
++
++}
++#endif
++
++static void copy_to_fifo(void __iomem *fifo, void *buf, int len)
++{
++ unsigned long tmp;
++
++ DBG(DBG_FIFO, "copy to FIFO (len %d):\n", len);
++ for (; len > 0; len -= 4, buf += 4, fifo += 4) {
++ tmp = *(unsigned long *)buf;
++ if (len >= 4) {
++ DBG(DBG_FIFO, " -> %08lx\n", tmp);
++ __raw_writel(tmp, fifo);
++ } else {
++ do {
++ DBG(DBG_FIFO, " -> %02lx\n", tmp >> 24);
++ __raw_writeb(tmp >> 24, fifo);
++ fifo++;
++ tmp <<= 8;
++ } while (--len);
++ break;
++ }
++ }
++}
++
++static void copy_from_fifo(void *buf, void __iomem *fifo, int len)
++{
++ union {
++ unsigned long *w;
++ unsigned char *b;
++ } p;
++ unsigned long tmp;
++
++ DBG(DBG_FIFO, "copy from FIFO (len %d):\n", len);
++ for (p.w = buf; len > 0; len -= 4, p.w++, fifo += 4) {
++ if (len >= 4) {
++ tmp = __raw_readl(fifo);
++ *p.w = tmp;
++ DBG(DBG_FIFO, " -> %08lx\n", tmp);
++ } else {
++ do {
++ tmp = __raw_readb(fifo);
++ *p.b = tmp;
++ DBG(DBG_FIFO, " -> %02lx\n", tmp);
++ fifo++, p.b++;
++ } while (--len);
++ }
++ }
++}
++
++static void next_fifo_transaction(struct husb2_ep *ep,
++ struct husb2_request *req)
++{
++ unsigned int transaction_len;
++
++ transaction_len = req->req.length - req->req.actual;
++ req->last_transaction = 1;
++ if (transaction_len > ep->ep.maxpacket) {
++ transaction_len = ep->ep.maxpacket;
++ req->last_transaction = 0;
++ } else if (transaction_len == ep->ep.maxpacket
++ && req->req.zero) {
++ req->last_transaction = 0;
++ }
++ DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
++ ep_name(ep), req, transaction_len,
++ req->last_transaction ? ", done" : "");
++
++ copy_to_fifo(ep->fifo, req->req.buf + req->req.actual, transaction_len);
++ husb2_ep_writel(ep, SET_STA, HUSB2_BIT(TX_PK_RDY));
++ req->req.actual += transaction_len;
++}
++
++static void submit_request(struct husb2_ep *ep, struct husb2_request *req)
++{
++ DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
++ ep_name(ep), req, req->req.length);
++
++ req->req.actual = 0;
++ req->submitted = 1;
++
++ if (req->using_dma) {
++ if (req->req.length == 0) {
++ husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_PK_RDY));
++ } else {
++ husb2_ep_writel(ep, CTL_DIS, HUSB2_BIT(TX_PK_RDY));
++ husb2_dma_writel(ep, NXT_DSC,
++ req->packet[0].desc_dma);
++ husb2_dma_writel(ep, CONTROL, HUSB2_BIT(DMA_LINK));
++ }
++ } else {
++ next_fifo_transaction(ep, req);
++ if (req->last_transaction)
++ husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_COMPLETE));
++ else
++ husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_PK_RDY));
++ }
++}
++
++static void submit_next_request(struct husb2_ep *ep)
++{
++ struct husb2_request *req;
++
++ if (list_empty(&ep->queue)) {
++ husb2_ep_writel(ep, CTL_DIS, (HUSB2_BIT(TX_PK_RDY)
++ | HUSB2_BIT(RX_BK_RDY)));
++ return;
++ }
++
++ req = list_entry(ep->queue.next, struct husb2_request, queue);
++ if (!req->submitted)
++ submit_request(ep, req);
++}
++
++static void send_status(struct husb2_udc *udc, struct husb2_ep *ep)
++{
++ ep->state = STATUS_STAGE_IN;
++ husb2_ep_writel(ep, SET_STA, HUSB2_BIT(TX_PK_RDY));
++ husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_COMPLETE));
++}
++
++static void receive_data(struct husb2_ep *ep)
++{
++ struct husb2_udc *udc = ep->udc;
++ struct husb2_request *req;
++ unsigned long status;
++ unsigned int bytecount, nr_busy;
++ int is_complete = 0;
++
++ status = husb2_ep_readl(ep, STA);
++ nr_busy = HUSB2_BFEXT(BUSY_BANKS, status);
++
++ DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
++
++ while (nr_busy > 0) {
++ if (list_empty(&ep->queue)) {
++ husb2_ep_writel(ep, CTL_DIS, HUSB2_BIT(RX_BK_RDY));
++ break;
++ }
++ req = list_entry(ep->queue.next,
++ struct husb2_request, queue);
++
++ bytecount = HUSB2_BFEXT(BYTE_COUNT, status);
++
++ if (status & (1 << 31))
++ is_complete = 1;
++ if (req->req.actual + bytecount >= req->req.length) {
++ is_complete = 1;
++ bytecount = req->req.length - req->req.actual;
++ }
++
++ copy_from_fifo(req->req.buf + req->req.actual,
++ ep->fifo, bytecount);
++ req->req.actual += bytecount;
++
++ husb2_ep_writel(ep, CLR_STA, HUSB2_BIT(RX_BK_RDY));
++
++ if (is_complete) {
++ DBG(DBG_QUEUE, "%s: request done\n", ep_name(ep));
++ req->req.status = 0;
++ list_del_init(&req->queue);
++ req->req.complete(&ep->ep, &req->req);
++ }
++
++ status = husb2_ep_readl(ep, STA);
++ nr_busy = HUSB2_BFEXT(BUSY_BANKS, status);
++
++ if (is_complete && ep_is_control(ep)) {
++ BUG_ON(nr_busy != 0);
++ send_status(udc, ep);
++ break;
++ }
++ }
++}
++
++static void request_complete(struct husb2_ep *ep,
++ struct husb2_request *req,
++ int status)
++{
++ struct husb2_udc *udc = ep->udc;
++ int i;
++
++ BUG_ON(!list_empty(&req->queue));
++
++ if (req->req.status == -EINPROGRESS)
++ req->req.status = status;
++
++ if (req->packet) {
++ for (i = 0; i < req->nr_pkts; i++)
++ dma_pool_free(udc->desc_pool, req->packet[i].desc,
++ req->packet[i].desc_dma);
++ kfree(req->packet);
++ req->packet = NULL;
++ dma_unmap_single(&udc->pdev->dev,
++ req->req.dma, req->req.length,
++ (ep_is_in(ep)
++ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
++ req->req.dma = DMA_ADDR_INVALID;
++ }
++
++ DBG(DBG_GADGET | DBG_REQ,
++ "%s: req %p complete: status %d, actual %u\n",
++ ep_name(ep), req, req->req.status, req->req.actual);
++ req->req.complete(&ep->ep, &req->req);
++}
++
++static void request_complete_list(struct husb2_ep *ep,
++ struct list_head *list,
++ int status)
++{
++ struct husb2_request *req, *tmp_req;
++
++ list_for_each_entry_safe(req, tmp_req, list, queue) {
++ list_del_init(&req->queue);
++ request_complete(ep, req, status);
++ }
++}
++
++static int husb2_ep_enable(struct usb_ep *_ep,
++ const struct usb_endpoint_descriptor *desc)
++{
++ struct husb2_ep *ep = to_husb2_ep(_ep);
++ struct husb2_udc *udc = ep->udc;
++ unsigned long flags, ept_cfg, maxpacket;
++
++ DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep_name(ep), desc);
++
++ maxpacket = le16_to_cpu(desc->wMaxPacketSize);
++
++ if (ep->index == 0
++ || desc->bDescriptorType != USB_DT_ENDPOINT
++ || ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
++ != ep->index)
++ || maxpacket == 0
++ || maxpacket > ep->fifo_size) {
++ DBG(DBG_ERR, "ep_enable: Invalid argument");
++ return -EINVAL;
++ }
++
++ if (((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
++ == USB_ENDPOINT_XFER_ISOC)
++ && !(ep->capabilities & HUSB2_EP_CAP_ISOC)) {
++ DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
++ ep_name(ep));
++ return -EINVAL;
++ }
++
++ if (maxpacket <= 8)
++ ept_cfg = HUSB2_BF(EPT_SIZE, HUSB2_EPT_SIZE_8);
++ else
++ /* LSB is bit 1, not 0 */
++ ept_cfg = HUSB2_BF(EPT_SIZE, fls(maxpacket - 1) - 3);
++ DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
++ ep_name(ep), ept_cfg, maxpacket);
++
++ if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
++ ept_cfg |= HUSB2_BIT(EPT_DIR);
++
++ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
++ case USB_ENDPOINT_XFER_CONTROL:
++ ept_cfg |= HUSB2_BF(EPT_TYPE, HUSB2_EPT_TYPE_CONTROL);
++ break;
++ case USB_ENDPOINT_XFER_ISOC:
++ ept_cfg |= HUSB2_BF(EPT_TYPE, HUSB2_EPT_TYPE_ISO);
++ break;
++ case USB_ENDPOINT_XFER_BULK:
++ ept_cfg |= HUSB2_BF(EPT_TYPE, HUSB2_EPT_TYPE_BULK);
++ break;
++ case USB_ENDPOINT_XFER_INT:
++ ept_cfg |= HUSB2_BF(EPT_TYPE, HUSB2_EPT_TYPE_INT);
++ break;
++ }
++ ept_cfg |= HUSB2_BF(BK_NUMBER, ep->nr_banks);
++
++ spin_lock_irqsave(&ep->udc->lock, flags);
++
++ if (ep->desc) {
++ spin_unlock_irqrestore(&ep->udc->lock, flags);
++ DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
++ return -EBUSY;
++ }
++
++ ep->desc = desc;
++ ep->ep.maxpacket = maxpacket;
++
++ husb2_ep_writel(ep, CFG, ept_cfg);
++ husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(EPT_ENABLE));
++
++ if (ep_can_dma(ep)) {
++ husb2_writel(udc, INT_ENB,
++ (husb2_readl(udc, INT_ENB)
++ | HUSB2_BF(EPT_INT, 1 << ep->index)
++ | HUSB2_BF(DMA_INT, 1 << ep->index)));
++ husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(AUTO_VALID));
++ } else {
++ husb2_writel(udc, INT_ENB,
++ (husb2_readl(udc, INT_ENB)
++ | HUSB2_BF(EPT_INT, 1 << ep->index)));
++ }
++
++ spin_unlock_irqrestore(&udc->lock, flags);
++
++ DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
++ (unsigned long)husb2_ep_readl(ep, CFG));
++ DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
++ (unsigned long)husb2_readl(udc, INT_ENB));
++
++ husb2_ep_init_debugfs(udc, ep);
++
++ return 0;
++}
++
++static int husb2_ep_disable(struct usb_ep *_ep)
++{
++ struct husb2_ep *ep = to_husb2_ep(_ep);
++ struct husb2_udc *udc = ep->udc;
++ LIST_HEAD(req_list);
++ unsigned long flags;
++
++ DBG(DBG_GADGET, "ep_disable: %s\n", ep_name(ep));
++
++ husb2_ep_cleanup_debugfs(ep);
++
++ spin_lock_irqsave(&udc->lock, flags);
++
++ if (!ep->desc) {
++ spin_unlock_irqrestore(&udc->lock, flags);
++ DBG(DBG_ERR, "ep_disable: %s not enabled\n",
++ ep_name(ep));
++ return -EINVAL;
++ }
++ ep->desc = NULL;
++
++ list_splice_init(&ep->queue, &req_list);
++ if (ep_can_dma(ep)) {
++ husb2_dma_writel(ep, CONTROL, 0);
++ husb2_dma_writel(ep, ADDRESS, 0);
++ husb2_dma_readl(ep, STATUS);
++ }
++ husb2_ep_writel(ep, CTL_DIS, HUSB2_BIT(EPT_ENABLE));
++ husb2_writel(udc, INT_ENB, (husb2_readl(udc, INT_ENB)
++ & ~HUSB2_BF(EPT_INT, 1 << ep->index)));
++
++ spin_unlock_irqrestore(&udc->lock, flags);
++
++ request_complete_list(ep, &req_list, -ESHUTDOWN);
++
++ return 0;
++}
++
++static struct usb_request *
++husb2_ep_alloc_request(struct usb_ep *_ep, unsigned gfp_flags)
++{
++ struct husb2_request *req;
++
++ DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
++
++ req = kzalloc(sizeof(*req), gfp_flags);
++ if (!req)
++ return NULL;
++
++ INIT_LIST_HEAD(&req->queue);
++ req->req.dma = DMA_ADDR_INVALID;
++
++ return &req->req;
++}
++
++static void
++husb2_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct husb2_request *req = to_husb2_req(_req);
++
++ DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
++
++ kfree(req);
++}
++
++static void *husb2_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
++ dma_addr_t *dma, unsigned gfp_flags)
++{
++ struct husb2_ep *ep = to_husb2_ep(_ep);
++ void *buf;
++
++ /*
++ * We depend on kmalloc() returning cache-aligned memory. This
++ * is normally guaranteed as long as we allocate a whole
++ * cacheline or more.
++ *
++ * When CONFIG_DEBUG_SLAB is enabled, however, the slab
++ * allocator inserts red zones and ownership information,
++ * causing the slab objects to be misaligned.
++ *
++ * One alternative would be to use dma_alloc_coherent, but
++ * that would make us unable to allocate anything less than a
++ * page at a time.
++ */
++#ifdef CONFIG_DEBUG_SLAB
++# error The HUSB2 UDC driver breaks with SLAB debugging enabled
++#endif
++
++ if (bytes < L1_CACHE_BYTES)
++ bytes = L1_CACHE_BYTES;
++
++ buf = kmalloc(bytes, gfp_flags);
++
++ /*
++ * Seems like we have to map the buffer any chance we get.
++ * ether.c wants us to initialize the dma member of a
++ * different request than the one receiving the buffer, so one
++ * never knows...
++ *
++ * Ah, screw it. The ether driver is probably wrong, and this
++ * is not the right place to do the mapping. The driver
++ * shouldn't mess with our DMA mappings anyway.
++ */
++ *dma = DMA_ADDR_INVALID;
++
++ DBG(DBG_GADGET, "ep_alloc_buffer: %s, %u, 0x%x -> %p\n",
++ ep_name(ep), bytes, gfp_flags, buf);
++
++ return buf;
++}
++
++static void husb2_ep_free_buffer(struct usb_ep *_ep, void *buf,
++ dma_addr_t dma, unsigned bytes)
++{
++ DBG(DBG_GADGET, "ep_free_buffer: %s, buf %p (size %u)\n",
++ _ep->name, buf, bytes);
++ kfree(buf);
++}
++
++static int queue_dma(struct husb2_udc *udc, struct husb2_ep *ep,
++ struct husb2_request *req, unsigned int direction,
++ gfp_t gfp_flags)
++{
++ struct husb2_packet *pkt, *prev_pkt;
++ unsigned int pkt_size, nr_pkts, i;
++ unsigned int residue;
++ dma_addr_t addr;
++ unsigned long flags;
++ u32 ctrl;
++
++ req->using_dma = 1;
++
++ if (req->req.length == 0) {
++ if (!req->req.zero)
++ return -EINVAL;
++ req->send_zlp = 1;
++
++ spin_lock_irqsave(&udc->lock, flags);
++ husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_PK_RDY));
++ list_add_tail(&req->queue, &ep->queue);
++ spin_unlock_irqrestore(&udc->lock, flags);
++
++ return 0;
++ }
++
++ if (req->req.dma == DMA_ADDR_INVALID)
++ req->req.dma = dma_map_single(&udc->pdev->dev,
++ req->req.buf,
++ req->req.length,
++ direction);
++ else
++ dma_sync_single_for_device(&udc->pdev->dev,
++ req->req.dma,
++ req->req.length,
++ direction);
++
++ pkt_size = ep->ep.maxpacket;
++ nr_pkts = req->req.length / pkt_size;
++ residue = req->req.length % pkt_size;
++ if (residue != 0)
++ nr_pkts++;
++ else if (req->req.zero && ep_is_in(ep))
++ /* ensure last packet is short */
++ req->send_zlp = 1;
++
++ req->nr_pkts = nr_pkts;
++
++ req->packet = kzalloc(sizeof(*req->packet) * nr_pkts, gfp_flags);
++ if (!req->packet)
++ goto out_of_memory;
++
++ addr = req->req.dma;
++ ctrl = (HUSB2_BF(DMA_BUF_LEN, pkt_size)
++ | HUSB2_BIT(DMA_CH_EN) | HUSB2_BIT(DMA_LINK)
++ | HUSB2_BIT(DMA_END_TR_EN) | HUSB2_BIT(DMA_END_TR_IE));
++ prev_pkt = NULL;
++ pkt = NULL;
++ DBG(DBG_DMA, "DMA descriptors:\n");
++ for (i = 0; i < nr_pkts; i++) {
++ pkt = &req->packet[i];
++ pkt->desc = dma_pool_alloc(udc->desc_pool, gfp_flags,
++ &pkt->desc_dma);
++ if (!pkt->desc)
++ goto out_of_memory;
++
++ if (prev_pkt) {
++ prev_pkt->desc->next = pkt->desc_dma;
++ DBG(DBG_DMA, "[%d] n%08x a%08x c%08x\n",
++ i - 1, prev_pkt->desc->next, prev_pkt->desc->addr,
++ prev_pkt->desc->ctrl);
++ }
++ prev_pkt = pkt;
++
++ pkt->desc->addr = addr;
++ pkt->desc->ctrl = ctrl;
++ addr += pkt_size;
++ }
++
++ /* special care is needed for the last packet... */
++ ctrl = (HUSB2_BIT(DMA_CH_EN)
++ | HUSB2_BIT(DMA_END_TR_EN) | HUSB2_BIT(DMA_END_TR_IE)
++ | HUSB2_BIT(DMA_END_BUF_IE));
++ if (ep_is_in(ep))
++ ctrl |= HUSB2_BIT(DMA_END_BUF_EN);
++ if (req->req.zero || residue)
++ ctrl |= HUSB2_BF(DMA_BUF_LEN, residue);
++ else
++ ctrl |= HUSB2_BF(DMA_BUF_LEN, pkt_size);
++ pkt->desc->ctrl = ctrl;
++
++ DBG(DBG_DMA, "[%d] n%08x a%08x c%08x\n",
++ i - 1, prev_pkt->desc->next, prev_pkt->desc->addr,
++ prev_pkt->desc->ctrl);
++
++ /* Add this request to the queue and try to chain the DMA descriptors */
++ spin_lock_irqsave(&udc->lock, flags);
++
++ /* If the DMA controller is idle, start it */
++ if (list_empty(&ep->queue)) {
++ husb2_dma_writel(ep, NXT_DSC, req->packet[0].desc_dma);
++ husb2_dma_writel(ep, CONTROL, HUSB2_BIT(DMA_LINK));
++ }
++
++ list_add_tail(&req->queue, &ep->queue);
++
++ spin_unlock_irqrestore(&udc->lock, flags);
++
++ return 0;
++
++out_of_memory:
++ printk(KERN_ERR "ERROR: Could not allocate DMA memory for endpoint %s\n",
++ ep_name(ep));
++ if (req->packet) {
++ for (i = 0; i < nr_pkts; i++)
++ if (req->packet[i].desc)
++ dma_pool_free(udc->desc_pool,
++ req->packet[i].desc,
++ req->packet[i].desc_dma);
++ kfree(req->packet);
++ }
++
++ return -ENOMEM;
++}
++
++static int husb2_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
++ gfp_t gfp_flags)
++{
++ struct husb2_request *req = to_husb2_req(_req);
++ struct husb2_ep *ep = to_husb2_ep(_ep);
++ struct husb2_udc *udc = ep->udc;
++ unsigned long flags;
++ int direction_in = 0;
++
++ DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ,
++ "%s: queue req %p, len %u\n", ep_name(ep), req, _req->length);
++
++ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
++ return -ESHUTDOWN;
++ if (!ep->desc)
++ return -ENODEV;
++
++ req->nr_pkts = 0;
++ req->submitted = 0;
++ req->using_dma = 0;
++ req->last_transaction = 0;
++ req->send_zlp = 0;
++
++ BUG_ON(req->packet);
++
++ if (ep_is_in(ep)
++ || (ep_is_control(ep) && (ep->state == DATA_STAGE_IN
++ || ep->state == STATUS_STAGE_IN)))
++ direction_in = 1;
++
++ _req->status = -EINPROGRESS;
++ _req->actual = 0;
++
++ if (ep_can_dma(ep)) {
++ return queue_dma(udc, ep, req, (direction_in
++ ? DMA_TO_DEVICE
++ : DMA_FROM_DEVICE),
++ gfp_flags);
++ } else {
++ spin_lock_irqsave(&udc->lock, flags);
++ list_add_tail(&req->queue, &ep->queue);
++
++ if (direction_in)
++ husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_PK_RDY));
++ else
++ husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(RX_BK_RDY));
++ spin_unlock_irqrestore(&udc->lock, flags);
++ }
++
++ return 0;
++}
++
++static void husb2_update_req(struct husb2_ep *ep, struct husb2_request *req,
++ u32 status)
++{
++ struct husb2_dma_desc *desc;
++ dma_addr_t from;
++ dma_addr_t addr;
++ size_t size;
++ unsigned int i;
++
++ addr = husb2_dma_readl(ep, ADDRESS);
++ req->req.actual = 0;
++
++ for (i = 0; i < req->nr_pkts; i++) {
++ desc = req->packet[i].desc;
++ from = desc->addr;
++ size = HUSB2_BFEXT(DMA_BUF_LEN, desc->ctrl);
++
++ req->req.actual += size;
++
++ DBG(DBG_DMA, " from=%#08x, size=%#zx\n", from, size);
++
++ if (from <= addr && (from + size) >= addr)
++ break;
++ }
++
++ req->req.actual -= HUSB2_BFEXT(DMA_BUF_LEN, status);
++}
++
++static int stop_dma(struct husb2_ep *ep, u32 *pstatus)
++{
++ unsigned int timeout;
++ u32 status;
++
++ /*
++ * Stop the DMA controller. When writing both CH_EN
++ * and LINK to 0, the other bits are not affected.
++ */
++ husb2_dma_writel(ep, CONTROL, 0);
++
++ /* Wait for the FIFO to empty */
++ for (timeout = 40; timeout; --timeout) {
++ status = husb2_dma_readl(ep, STATUS);
++ if (!(status & HUSB2_BIT(DMA_CH_EN)))
++ break;
++ udelay(1);
++ }
++
++ if (pstatus)
++ *pstatus = status;
++
++ if (timeout == 0) {
++ dev_err(&ep->udc->pdev->dev,
++ "%s: timed out waiting for DMA FIFO to empty\n",
++ ep_name(ep));
++ return -ETIMEDOUT;
++ }
++
++ return 0;
++}
++
++static int husb2_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct husb2_ep *ep = to_husb2_ep(_ep);
++ struct husb2_udc *udc = ep->udc;
++ struct husb2_request *req = to_husb2_req(_req);
++ unsigned long flags;
++ u32 status;
++
++ DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", ep_name(ep), req);
++
++ spin_lock_irqsave(&udc->lock, flags);
++
++ if (req->using_dma) {
++ /*
++ * If this request is currently being transferred,
++ * stop the DMA controller and reset the FIFO.
++ */
++ if (ep->queue.next == &req->queue) {
++ status = husb2_dma_readl(ep, STATUS);
++ if (status & HUSB2_BIT(DMA_CH_EN))
++ stop_dma(ep, &status);
++
++#ifdef CONFIG_DEBUG_FS
++ ep->last_dma_status = status;
++#endif
++
++ husb2_writel(udc, EPT_RST,
++ 1 << ep_index(ep));
++
++ husb2_update_req(ep, req, status);
++ }
++ }
++
++ /*
++ * Errors should stop the queue from advancing until the
++ * completion function returns.
++ */
++ list_del_init(&req->queue);
++ spin_unlock_irqrestore(&udc->lock, flags);
++
++ request_complete(ep, req, -ECONNRESET);
++
++ /* Process the next request if any */
++ spin_lock_irqsave(&udc->lock, flags);
++ submit_next_request(ep);
++ spin_unlock_irqrestore(&udc->lock, flags);
++
++ return 0;
++}
++
++static int husb2_ep_set_halt(struct usb_ep *_ep, int value)
++{
++ struct husb2_ep *ep = to_husb2_ep(_ep);
++ struct husb2_udc *udc = ep->udc;
++ unsigned long flags;
++ int ret = 0;
++
++ DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep_name(ep),
++ value ? "set" : "clear");
++
++ if (!ep->desc) {
++ DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
++ ep_name(ep));
++ return -ENODEV;
++ }
++ if (ep_is_isochronous(ep)) {
++ DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
++ ep_name(ep));
++ return -ENOTTY;
++ }
++
++ spin_lock_irqsave(&udc->lock, flags);
++
++ /*
++ * We can't halt IN endpoints while there are still data to be
++