summaryrefslogtreecommitdiff
path: root/packages/linux/files
diff options
context:
space:
mode:
Diffstat (limited to 'packages/linux/files')
-rw-r--r--packages/linux/files/.mtn2git_empty0
-rw-r--r--packages/linux/files/ipaq-hal.init14
-rw-r--r--packages/linux/files/ir240_sys_max_tx-2.diff110
-rw-r--r--packages/linux/files/ir241_qos_param-2.diff164
-rw-r--r--packages/linux/files/iw240_we15-6.diff399
-rw-r--r--packages/linux/files/iw241_we16-6.diff667
-rw-r--r--packages/linux/files/iw249_we17-13.diff768
-rw-r--r--packages/linux/files/iw_handlers.w13-5.diff1513
-rw-r--r--packages/linux/files/iw_handlers.w14-5.diff838
-rw-r--r--packages/linux/files/linux-2.4-cpufreq.patch20
-rw-r--r--packages/linux/files/linux-2.4-mmc-debugging.patch15
-rw-r--r--packages/linux/files/linux-2.4-no-short-loads.patch18
-rw-r--r--packages/linux/files/linux-2.4-usb-gadget.patch29506
-rw-r--r--packages/linux/files/linux-2.4.18-list_move.patch32
-rw-r--r--packages/linux/files/mipv6-1.1-v2.4.25.patch19832
-rw-r--r--packages/linux/files/usb-gadget-ether-compat.patch30
16 files changed, 53926 insertions, 0 deletions
diff --git a/packages/linux/files/.mtn2git_empty b/packages/linux/files/.mtn2git_empty
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/packages/linux/files/.mtn2git_empty
diff --git a/packages/linux/files/ipaq-hal.init b/packages/linux/files/ipaq-hal.init
index e69de29bb2..4efb52ec97 100644
--- a/packages/linux/files/ipaq-hal.init
+++ b/packages/linux/files/ipaq-hal.init
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# make sure update-modules has been run
+# since the calls below depend on aliases...
+if [ ! -f /etc/modules.conf ]; then
+ update-modules || true
+fi
+
+modprobe ipaq_hal || exit 0
+
+if [ -d /proc/hal ]; then
+ model=`cat /proc/hal/model`
+ modprobe ipaq_hal_$model
+fi
diff --git a/packages/linux/files/ir240_sys_max_tx-2.diff b/packages/linux/files/ir240_sys_max_tx-2.diff
index e69de29bb2..5f1307d7dc 100644
--- a/packages/linux/files/ir240_sys_max_tx-2.diff
+++ b/packages/linux/files/ir240_sys_max_tx-2.diff
@@ -0,0 +1,110 @@
+--- linux/net/irda/irsysctl.c.orig 2003-05-13 11:20:16.000000000 +0200
++++ linux/net/irda/irsysctl.c 2005-01-22 18:39:40.496001712 +0100
+@@ -40,7 +40,8 @@
+
+ enum { DISCOVERY=1, DEVNAME, DEBUG, FAST_POLL, DISCOVERY_SLOTS,
+ DISCOVERY_TIMEOUT, SLOT_TIMEOUT, MAX_BAUD_RATE, MIN_TX_TURN_TIME,
+- MAX_NOREPLY_TIME, WARN_NOREPLY_TIME, LAP_KEEPALIVE_TIME, SPECIFIC_DEV };
++ MAX_TX_DATA_SIZE, MAX_NOREPLY_TIME, WARN_NOREPLY_TIME, LAP_KEEPALIVE_TIME,
++ SPECIFIC_DEV };
+
+ extern int sysctl_discovery;
+ extern int sysctl_discovery_slots;
+@@ -51,6 +52,7 @@
+ extern char sysctl_devname[];
+ extern int sysctl_max_baud_rate;
+ extern int sysctl_min_tx_turn_time;
++extern int sysctl_max_tx_data_size;
+ extern int sysctl_max_noreply_time;
+ extern int sysctl_warn_noreply_time;
+ extern int sysctl_lap_keepalive_time;
+@@ -71,6 +73,8 @@
+ static int min_max_baud_rate = 2400;
+ static int max_min_tx_turn_time = 10000; /* See qos.c - IrLAP spec */
+ static int min_min_tx_turn_time = 0;
++static int max_max_tx_data_size = 2048; /* See qos.c - IrLAP spec */
++static int min_max_tx_data_size = 64;
+ static int max_max_noreply_time = 40; /* See qos.c - IrLAP spec */
+ static int min_max_noreply_time = 3;
+ static int max_warn_noreply_time = 3; /* 3s == standard */
+@@ -128,6 +132,9 @@
+ { MIN_TX_TURN_TIME, "min_tx_turn_time", &sysctl_min_tx_turn_time,
+ sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
+ NULL, &min_min_tx_turn_time, &max_min_tx_turn_time },
++ { MAX_TX_DATA_SIZE, "max_tx_data_size", &sysctl_max_tx_data_size,
++ sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
++ NULL, &min_max_tx_data_size, &max_max_tx_data_size },
+ { MAX_NOREPLY_TIME, "max_noreply_time", &sysctl_max_noreply_time,
+ sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
+ NULL, &min_max_noreply_time, &max_max_noreply_time },
+--- linux/net/irda/qos.c.orig 2003-05-13 11:20:16.000000000 +0200
++++ linux/net/irda/qos.c 2005-01-22 18:36:46.759413688 +0100
+@@ -60,10 +60,26 @@
+ * Nonzero values (usec) are used as lower limit to the per-connection
+ * mtt value which was announced by the other end during negotiation.
+ * Might be helpful if the peer device provides too short mtt.
+- * Default is 10 which means using the unmodified value given by the peer
+- * except if it's 0 (0 is likely a bug in the other stack).
++ * Default is 10us which means using the unmodified value given by the
++ * peer except if it's 0 (0 is likely a bug in the other stack).
+ */
+ unsigned sysctl_min_tx_turn_time = 10;
++/*
++ * Maximum data size to be used in transmission in payload of LAP frame.
++ * There is a bit of confusion in the IrDA spec :
++ * The LAP spec defines the payload of a LAP frame (I field) to be
++ * 2048 bytes max (IrLAP 1.1, chapt 6.6.5, p40).
++ * On the other hand, the PHY mention frames of 2048 bytes max (IrPHY
++ * 1.2, chapt 5.3.2.1, p41). But, this number includes the LAP header
++ * (2 bytes), and CRC (32 bits at 4 Mb/s). So, for the I field (LAP
++ * payload), that's only 2042 bytes. Oups !
++ * I've had trouble trouble transmitting 2048 bytes frames with USB
++ * dongles and nsc-ircc at 4 Mb/s, so adjust to 2042... I don't know
++ * if this bug applies only for 2048 bytes frames or all negociated
++ * frame sizes, but all hardware seem to support "2048 bytes" frames.
++ * You can use the sysctl to play with this value anyway.
++ * Jean II */
++unsigned sysctl_max_tx_data_size = 2042;
+
+ /*
+ * Specific device list limits some negotiation parameters at the connection
+@@ -398,10 +414,10 @@
+ while ((qos->data_size.value > line_capacity) && (index > 0)) {
+ qos->data_size.value = data_sizes[index--];
+ IRDA_DEBUG(2, __FUNCTION__
+- "(), redusing data size to %d\n",
++ "(), reducing data size to %d\n",
+ qos->data_size.value);
+ }
+-#else /* Use method descibed in section 6.6.11 of IrLAP */
++#else /* Use method described in section 6.6.11 of IrLAP */
+ while (irlap_requested_line_capacity(qos) > line_capacity) {
+ ASSERT(index != 0, return;);
+
+@@ -409,18 +425,24 @@
+ if (qos->window_size.value > 1) {
+ qos->window_size.value--;
+ IRDA_DEBUG(2, __FUNCTION__
+- "(), redusing window size to %d\n",
++ "(), reducing window size to %d\n",
+ qos->window_size.value);
+ } else if (index > 1) {
+ qos->data_size.value = data_sizes[index--];
+ IRDA_DEBUG(2, __FUNCTION__
+- "(), redusing data size to %d\n",
++ "(), reducing data size to %d\n",
+ qos->data_size.value);
+ } else {
+ WARNING(__FUNCTION__ "(), nothing more we can do!\n");
+ }
+ }
+ #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
++ /*
++ * Fix tx data size according to user limits - Jean II
++ */
++ if (qos->data_size.value > sysctl_max_tx_data_size)
++ /* Allow non discrete adjustement to avoid loosing capacity */
++ qos->data_size.value = sysctl_max_tx_data_size;
+ }
+
+ /*
diff --git a/packages/linux/files/ir241_qos_param-2.diff b/packages/linux/files/ir241_qos_param-2.diff
index e69de29bb2..dfe77c52b0 100644
--- a/packages/linux/files/ir241_qos_param-2.diff
+++ b/packages/linux/files/ir241_qos_param-2.diff
@@ -0,0 +1,164 @@
+--- linux/net/irda/qos.c.orig 2005-01-22 19:19:56.013787192 +0100
++++ linux/net/irda/qos.c 2005-01-22 19:21:13.493008560 +0100
+@@ -73,13 +73,18 @@
+ * 1.2, chapt 5.3.2.1, p41). But, this number includes the LAP header
+ * (2 bytes), and CRC (32 bits at 4 Mb/s). So, for the I field (LAP
+ * payload), that's only 2042 bytes. Oups !
+- * I've had trouble trouble transmitting 2048 bytes frames with USB
+- * dongles and nsc-ircc at 4 Mb/s, so adjust to 2042... I don't know
+- * if this bug applies only for 2048 bytes frames or all negociated
+- * frame sizes, but all hardware seem to support "2048 bytes" frames.
+- * You can use the sysctl to play with this value anyway.
++ * My nsc-ircc hardware has troubles receiving 2048 bytes frames at 4 Mb/s,
++ * so adjust to 2042... I don't know if this bug applies only for 2048
++ * bytes frames or all negociated frame sizes, but you can use the sysctl
++ * to play with this value anyway.
+ * Jean II */
+ unsigned sysctl_max_tx_data_size = 2042;
++/*
++ * Maximum transmit window, i.e. number of LAP frames between turn-around.
++ * This allow to override what the peer told us. Some peers are buggy and
++ * don't always support what they tell us.
++ * Jean II */
++unsigned sysctl_max_tx_window = 7;
+
+ /*
+ * Specific device list limits some negotiation parameters at the connection
+@@ -227,7 +232,19 @@
+ {
+ __u16 msb = 0x8000;
+ int index = 15; /* Current MSB */
+-
++
++ /* Check for buggy peers.
++ * Note : there is a small probability that it could be us, but I
++ * would expect driver authors to catch that pretty early and be
++ * able to check precisely what's going on. If a end user sees this,
++ * it's very likely the peer. - Jean II */
++ if (word == 0) {
++ WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n",
++ __FUNCTION__);
++ /* The only safe choice (we don't know the array size) */
++ word = 0x1;
++ }
++
+ while (msb) {
+ if (word & msb)
+ break; /* Found it! */
+@@ -378,10 +395,14 @@
+
+ /*
+ * Make sure the mintt is sensible.
++ * Main culprit : Ericsson T39. - Jean II
+ */
+ if (sysctl_min_tx_turn_time > qos->min_turn_time.value) {
+ int i;
+
++ WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n",
++ __FUNCTION__, sysctl_min_tx_turn_time);
++
+ /* We don't really need bits, but easier this way */
+ i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times,
+ 8, &qos->min_turn_time.bits);
+@@ -443,6 +464,11 @@
+ if (qos->data_size.value > sysctl_max_tx_data_size)
+ /* Allow non discrete adjustement to avoid loosing capacity */
+ qos->data_size.value = sysctl_max_tx_data_size;
++ /*
++ * Override Tx window if user request it. - Jean II
++ */
++ if (qos->window_size.value > sysctl_max_tx_window)
++ qos->window_size.value = sysctl_max_tx_window;
+ }
+
+ /*
+--- linux/net/irda/irsysctl.c.orig 2005-01-22 19:19:56.006788256 +0100
++++ linux/net/irda/irsysctl.c 2005-01-22 19:24:31.273941288 +0100
+@@ -40,8 +40,8 @@
+
+ enum { DISCOVERY=1, DEVNAME, DEBUG, FAST_POLL, DISCOVERY_SLOTS,
+ DISCOVERY_TIMEOUT, SLOT_TIMEOUT, MAX_BAUD_RATE, MIN_TX_TURN_TIME,
+- MAX_TX_DATA_SIZE, MAX_NOREPLY_TIME, WARN_NOREPLY_TIME, LAP_KEEPALIVE_TIME,
+- SPECIFIC_DEV };
++ MAX_TX_DATA_SIZE, MAX_TX_WINDOW, MAX_NOREPLY_TIME, WARN_NOREPLY_TIME,
++ LAP_KEEPALIVE_TIME, SPECIFIC_DEV };
+
+ extern int sysctl_discovery;
+ extern int sysctl_discovery_slots;
+@@ -53,6 +53,7 @@
+ extern int sysctl_max_baud_rate;
+ extern int sysctl_min_tx_turn_time;
+ extern int sysctl_max_tx_data_size;
++extern int sysctl_max_tx_window;
+ extern int sysctl_max_noreply_time;
+ extern int sysctl_warn_noreply_time;
+ extern int sysctl_lap_keepalive_time;
+@@ -75,6 +76,8 @@
+ static int min_min_tx_turn_time = 0;
+ static int max_max_tx_data_size = 2048; /* See qos.c - IrLAP spec */
+ static int min_max_tx_data_size = 64;
++static int max_max_tx_window = 7; /* See qos.c - IrLAP spec */
++static int min_max_tx_window = 1;
+ static int max_max_noreply_time = 40; /* See qos.c - IrLAP spec */
+ static int min_max_noreply_time = 3;
+ static int max_warn_noreply_time = 3; /* 3s == standard */
+@@ -135,6 +138,9 @@
+ { MAX_TX_DATA_SIZE, "max_tx_data_size", &sysctl_max_tx_data_size,
+ sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
+ NULL, &min_max_tx_data_size, &max_max_tx_data_size },
++ { MAX_TX_WINDOW, "max_tx_window", &sysctl_max_tx_window,
++ sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
++ NULL, &min_max_tx_window, &max_max_tx_window },
+ { MAX_NOREPLY_TIME, "max_noreply_time", &sysctl_max_noreply_time,
+ sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
+ NULL, &min_max_noreply_time, &max_max_noreply_time },
+--- linux/net/irda/parameters.c.orig 2003-05-13 11:20:16.000000000 +0200
++++ linux/net/irda/parameters.c 2005-01-22 19:21:13.527003392 +0100
+@@ -204,11 +204,13 @@
+ {
+ irda_param_t p;
+ int n = 0;
++ int extract_len; /* Real lenght we extract */
+ int err;
+
+ p.pi = pi; /* In case handler needs to know */
+ p.pl = buf[1]; /* Extract lenght of value */
+ p.pv.i = 0; /* Clear value */
++ extract_len = p.pl; /* Default : extract all */
+
+ /* Check if buffer is long enough for parsing */
+ if (len < (2+p.pl)) {
+@@ -220,18 +222,30 @@
+ /*
+ * Check that the integer length is what we expect it to be. If the
+ * handler want a 16 bits integer then a 32 bits is not good enough
++ * PV_INTEGER means that the handler is flexible.
+ */
+ if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) {
+ ERROR(__FUNCTION__ "(), invalid parameter length! "
+ "Expected %d bytes, but value had %d bytes!\n",
+ type & PV_MASK, p.pl);
+
+- /* Skip parameter */
+- return p.pl+2;
++ /* Most parameters are bit/byte fields or little endian,
++ * so it's ok to only extract a subset of it (the subset
++ * that the handler expect). This is necessary, as some
++ * broken implementations seems to add extra undefined bits.
++ * If the parameter is shorter than we expect or is big
++ * endian, we can't play those tricks. Jean II */
++ if((p.pl < (type & PV_MASK)) || (type & PV_BIG_ENDIAN)) {
++ /* Skip parameter */
++ return p.pl+2;
++ } else {
++ /* Extract subset of it, fallthrough */
++ extract_len = type & PV_MASK;
++ }
+ }
+
+
+- switch (p.pl) {
++ switch (extract_len) {
+ case 1:
+ n += irda_param_unpack(buf+2, "b", &p.pv.i);
+ break;
diff --git a/packages/linux/files/iw240_we15-6.diff b/packages/linux/files/iw240_we15-6.diff
index e69de29bb2..2ebfd8ec12 100644
--- a/packages/linux/files/iw240_we15-6.diff
+++ b/packages/linux/files/iw240_we15-6.diff
@@ -0,0 +1,399 @@
+diff -u -p linux/include/linux/wireless.14.h linux/include/linux/wireless.h
+--- linux/include/linux/wireless.14.h Mon Dec 2 18:51:00 2002
++++ linux/include/linux/wireless.h Mon Dec 2 18:53:35 2002
+@@ -1,7 +1,7 @@
+ /*
+ * This file define a set of standard wireless extensions
+ *
+- * Version : 14 25.1.02
++ * Version : 15 12.7.02
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ * Copyright (c) 1997-2002 Jean Tourrilhes, All Rights Reserved.
+@@ -80,7 +80,7 @@
+ * (there is some stuff that will be added in the future...)
+ * I just plan to increment with each new version.
+ */
+-#define WIRELESS_EXT 14
++#define WIRELESS_EXT 15
+
+ /*
+ * Changes :
+@@ -153,17 +153,32 @@
+ * - Define additional specific event numbers
+ * - Add "addr" and "param" fields in union iwreq_data
+ * - AP scanning stuff (SIOCSIWSCAN and friends)
++ *
++ * V14 to V15
++ * ----------
++ * - Add IW_PRIV_TYPE_ADDR for struct sockaddr private arg
++ * - Make struct iw_freq signed (both m & e), add explicit padding
++ * - Add IWEVCUSTOM for driver specific event/scanning token
++ * - Add IW_MAX_GET_SPY for driver returning a lot of addresses
++ * - Add IW_TXPOW_RANGE for range of Tx Powers
++ * - Add IWEVREGISTERED & IWEVEXPIRED events for Access Points
++ * - Add IW_MODE_MONITOR for passive monitor
+ */
+
+ /**************************** CONSTANTS ****************************/
+
+ /* -------------------------- IOCTL LIST -------------------------- */
+
+-/* Basic operations */
++/* Wireless Identification */
+ #define SIOCSIWCOMMIT 0x8B00 /* Commit pending changes to driver */
+ #define SIOCGIWNAME 0x8B01 /* get name == wireless protocol */
+-#define SIOCSIWNWID 0x8B02 /* set network id (the cell) */
+-#define SIOCGIWNWID 0x8B03 /* get network id */
++/* SIOCGIWNAME is used to verify the presence of Wireless Extensions.
++ * Common values : "IEEE 802.11-DS", "IEEE 802.11-FH", "IEEE 802.11b"...
++ * Don't put the name of your driver there, it's useless. */
++
++/* Basic operations */
++#define SIOCSIWNWID 0x8B02 /* set network id (pre-802.11) */
++#define SIOCGIWNWID 0x8B03 /* get network id (the cell) */
+ #define SIOCSIWFREQ 0x8B04 /* set channel/frequency (Hz) */
+ #define SIOCGIWFREQ 0x8B05 /* get channel/frequency (Hz) */
+ #define SIOCSIWMODE 0x8B06 /* set operation mode */
+@@ -178,16 +193,18 @@
+ #define SIOCGIWPRIV 0x8B0D /* get private ioctl interface info */
+ #define SIOCSIWSTATS 0x8B0E /* Unused */
+ #define SIOCGIWSTATS 0x8B0F /* Get /proc/net/wireless stats */
++/* SIOCGIWSTATS is strictly used between user space and the kernel, and
++ * is never passed to the driver (i.e. the driver will never see it). */
+
+-/* Mobile IP support */
++/* Mobile IP support (statistics per MAC address) */
+ #define SIOCSIWSPY 0x8B10 /* set spy addresses */
+ #define SIOCGIWSPY 0x8B11 /* get spy info (quality of link) */
+
+ /* Access Point manipulation */
+ #define SIOCSIWAP 0x8B14 /* set access point MAC addresses */
+ #define SIOCGIWAP 0x8B15 /* get access point MAC addresses */
+-#define SIOCGIWAPLIST 0x8B17 /* get list of access point in range */
+-#define SIOCSIWSCAN 0x8B18 /* trigger scanning */
++#define SIOCGIWAPLIST 0x8B17 /* Deprecated in favor of scanning */
++#define SIOCSIWSCAN 0x8B18 /* trigger scanning (list cells) */
+ #define SIOCGIWSCAN 0x8B19 /* get scanning results */
+
+ /* 802.11 specific support */
+@@ -197,9 +214,7 @@
+ #define SIOCGIWNICKN 0x8B1D /* get node name/nickname */
+ /* As the ESSID and NICKN are strings up to 32 bytes long, it doesn't fit
+ * within the 'iwreq' structure, so we need to use the 'data' member to
+- * point to a string in user space, like it is done for RANGE...
+- * The "flags" member indicate if the ESSID is active or not (promiscuous).
+- */
++ * point to a string in user space, like it is done for RANGE... */
+
+ /* Other parameters useful in 802.11 and some other devices */
+ #define SIOCSIWRATE 0x8B20 /* set default bit rate (bps) */
+@@ -257,7 +272,10 @@
+ /* Most events use the same identifier as ioctl requests */
+
+ #define IWEVTXDROP 0x8C00 /* Packet dropped to excessive retry */
+-#define IWEVQUAL 0x8C01 /* Quality part of statistics */
++#define IWEVQUAL 0x8C01 /* Quality part of statistics (scan) */
++#define IWEVCUSTOM 0x8C02 /* Driver specific ascii string */
++#define IWEVREGISTERED 0x8C03 /* Discovered a new node (AP mode) */
++#define IWEVEXPIRED 0x8C04 /* Expired a node (AP mode) */
+
+ #define IWEVFIRST 0x8C00
+
+@@ -273,7 +291,8 @@
+ #define IW_PRIV_TYPE_BYTE 0x1000 /* Char as number */
+ #define IW_PRIV_TYPE_CHAR 0x2000 /* Char as character */
+ #define IW_PRIV_TYPE_INT 0x4000 /* 32 bits int */
+-#define IW_PRIV_TYPE_FLOAT 0x5000
++#define IW_PRIV_TYPE_FLOAT 0x5000 /* struct iw_freq */
++#define IW_PRIV_TYPE_ADDR 0x6000 /* struct sockaddr */
+
+ #define IW_PRIV_SIZE_FIXED 0x0800 /* Variable or fixed nuber of args */
+
+@@ -297,13 +316,16 @@
+
+ /* Maximum tx powers in the range struct */
+ #define IW_MAX_TXPOWER 8
++/* Note : if you more than 8 TXPowers, just set the max and min or
++ * a few of them in the struct iw_range. */
+
+ /* Maximum of address that you may set with SPY */
+-#define IW_MAX_SPY 8
++#define IW_MAX_SPY 8 /* set */
++#define IW_MAX_GET_SPY 64 /* get */
+
+ /* Maximum of address that you may get in the
+ list of access points in range */
+-#define IW_MAX_AP 8
++#define IW_MAX_AP 64
+
+ /* Maximum size of the ESSID and NICKN strings */
+ #define IW_ESSID_MAX_SIZE 32
+@@ -315,6 +337,7 @@
+ #define IW_MODE_MASTER 3 /* Synchronisation master or Access Point */
+ #define IW_MODE_REPEAT 4 /* Wireless Repeater (forwarder) */
+ #define IW_MODE_SECOND 5 /* Secondary master/repeater (backup) */
++#define IW_MODE_MONITOR 6 /* Passive monitor (listen only) */
+
+ /* Maximum number of size of encoding token available
+ * they are listed in the range structure */
+@@ -350,8 +373,10 @@
+ #define IW_POWER_RELATIVE 0x0004 /* Value is not in seconds/ms/us */
+
+ /* Transmit Power flags available */
++#define IW_TXPOW_TYPE 0x00FF /* Type of value */
+ #define IW_TXPOW_DBM 0x0000 /* Value is in dBm */
+ #define IW_TXPOW_MWATT 0x0001 /* Value is in mW */
++#define IW_TXPOW_RANGE 0x1000 /* Range of value between min/max */
+
+ /* Retry limits and lifetime flags available */
+ #define IW_RETRY_ON 0x0000 /* No details... */
+@@ -376,6 +401,9 @@
+ /* Maximum size of returned data */
+ #define IW_SCAN_MAX_DATA 4096 /* In bytes */
+
++/* Max number of char in custom event - use multiple of them if needed */
++#define IW_CUSTOM_MAX 256 /* In bytes */
++
+ /****************************** TYPES ******************************/
+
+ /* --------------------------- SUBTYPES --------------------------- */
+@@ -411,9 +439,10 @@ struct iw_point
+ */
+ struct iw_freq
+ {
+- __u32 m; /* Mantissa */
+- __u16 e; /* Exponent */
++ __s32 m; /* Mantissa */
++ __s16 e; /* Exponent */
+ __u8 i; /* List index (when in range struct) */
++ __u8 pad; /* Unused - just for alignement */
+ };
+
+ /*
+diff -u -p linux/include/net/iw_handler.14.h linux/include/net/iw_handler.h
+--- linux/include/net/iw_handler.14.h Mon Dec 2 18:51:17 2002
++++ linux/include/net/iw_handler.h Mon Dec 2 18:54:51 2002
+@@ -1,7 +1,7 @@
+ /*
+ * This file define the new driver API for Wireless Extensions
+ *
+- * Version : 3 17.1.02
++ * Version : 4 21.6.02
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ * Copyright (c) 2001-2002 Jean Tourrilhes, All Rights Reserved.
+@@ -206,7 +206,7 @@
+ * will be needed...
+ * I just plan to increment with each new version.
+ */
+-#define IW_HANDLER_VERSION 3
++#define IW_HANDLER_VERSION 4
+
+ /*
+ * Changes :
+@@ -217,6 +217,9 @@
+ * - Add Wireless Event support :
+ * o wireless_send_event() prototype
+ * o iwe_stream_add_event/point() inline functions
++ * V3 to V4
++ * --------
++ * - Reshuffle IW_HEADER_TYPE_XXX to map IW_PRIV_TYPE_XXX changes
+ */
+
+ /**************************** CONSTANTS ****************************/
+@@ -233,10 +236,10 @@
+ #define IW_HEADER_TYPE_CHAR 2 /* char [IFNAMSIZ] */
+ #define IW_HEADER_TYPE_UINT 4 /* __u32 */
+ #define IW_HEADER_TYPE_FREQ 5 /* struct iw_freq */
+-#define IW_HEADER_TYPE_POINT 6 /* struct iw_point */
+-#define IW_HEADER_TYPE_PARAM 7 /* struct iw_param */
+-#define IW_HEADER_TYPE_ADDR 8 /* struct sockaddr */
+-#define IW_HEADER_TYPE_QUAL 9 /* struct iw_quality */
++#define IW_HEADER_TYPE_ADDR 6 /* struct sockaddr */
++#define IW_HEADER_TYPE_POINT 8 /* struct iw_point */
++#define IW_HEADER_TYPE_PARAM 9 /* struct iw_param */
++#define IW_HEADER_TYPE_QUAL 10 /* struct iw_quality */
+
+ /* Handling flags */
+ /* Most are not implemented. I just use them as a reminder of some
+diff -u -p linux/net/core/wireless.14.c linux/net/core/wireless.c
+--- linux/net/core/wireless.14.c Mon Dec 2 18:51:35 2002
++++ linux/net/core/wireless.c Mon Dec 2 18:53:10 2002
+@@ -33,8 +33,16 @@
+ * o Propagate events as rtnetlink IFLA_WIRELESS option
+ * o Generate event on selected SET requests
+ *
+- * v4 - 18.04.01 - Jean II
++ * v4 - 18.04.02 - Jean II
+ * o Fix stupid off by one in iw_ioctl_description : IW_ESSID_MAX_SIZE + 1
++ *
++ * v5 - 21.06.02 - Jean II
++ * o Add IW_PRIV_TYPE_ADDR in priv_type_size (+cleanup)
++ * o Reshuffle IW_HEADER_TYPE_XXX to map IW_PRIV_TYPE_XXX changes
++ * o Add IWEVCUSTOM for driver specific event/scanning token
++ * o Turn on WE_STRICT_WRITE by default + kernel warning
++ * o Fix WE_STRICT_WRITE in ioctl_export_private() (32 => iw_num)
++ * o Fix off-by-one in test (extra_size <= IFNAMSIZ)
+ */
+
+ /***************************** INCLUDES *****************************/
+@@ -50,8 +58,9 @@
+
+ /**************************** CONSTANTS ****************************/
+
+-/* This will be turned on later on... */
+-#undef WE_STRICT_WRITE /* Check write buffer size */
++/* Enough lenience, let's make sure things are proper... */
++#define WE_STRICT_WRITE /* Check write buffer size */
++/* I'll probably drop both the define and kernel message in the next version */
+
+ /* Debuging stuff */
+ #undef WE_IOCTL_DEBUG /* Debug IOCTL API */
+@@ -106,7 +115,7 @@ static const struct iw_ioctl_description
+ /* SIOCSIWSPY */
+ { IW_HEADER_TYPE_POINT, 0, sizeof(struct sockaddr), 0, IW_MAX_SPY, 0},
+ /* SIOCGIWSPY */
+- { IW_HEADER_TYPE_POINT, 0, (sizeof(struct sockaddr) + sizeof(struct iw_quality)), 0, IW_MAX_SPY, 0},
++ { IW_HEADER_TYPE_POINT, 0, (sizeof(struct sockaddr) + sizeof(struct iw_quality)), 0, IW_MAX_GET_SPY, 0},
+ /* -- hole -- */
+ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
+ /* -- hole -- */
+@@ -176,25 +185,41 @@ static const struct iw_ioctl_description
+ { IW_HEADER_TYPE_ADDR, 0, 0, 0, 0, 0},
+ /* IWEVQUAL */
+ { IW_HEADER_TYPE_QUAL, 0, 0, 0, 0, 0},
++ /* IWEVCUSTOM */
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_CUSTOM_MAX, 0},
++ /* IWEVREGISTERED */
++ { IW_HEADER_TYPE_ADDR, 0, 0, 0, 0, 0},
++ /* IWEVEXPIRED */
++ { IW_HEADER_TYPE_ADDR, 0, 0, 0, 0, 0},
+ };
+ static const int standard_event_num = (sizeof(standard_event) /
+ sizeof(struct iw_ioctl_description));
+
+ /* Size (in bytes) of the various private data types */
+-static const char priv_type_size[] = { 0, 1, 1, 0, 4, 4, 0, 0 };
++static const char priv_type_size[] = {
++ 0, /* IW_PRIV_TYPE_NONE */
++ 1, /* IW_PRIV_TYPE_BYTE */
++ 1, /* IW_PRIV_TYPE_CHAR */
++ 0, /* Not defined */
++ sizeof(__u32), /* IW_PRIV_TYPE_INT */
++ sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */
++ sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */
++ 0, /* Not defined */
++};
+
+ /* Size (in bytes) of various events */
+ static const int event_type_size[] = {
+- IW_EV_LCP_LEN,
++ IW_EV_LCP_LEN, /* IW_HEADER_TYPE_NULL */
++ 0,
++ IW_EV_CHAR_LEN, /* IW_HEADER_TYPE_CHAR */
+ 0,
+- IW_EV_CHAR_LEN,
++ IW_EV_UINT_LEN, /* IW_HEADER_TYPE_UINT */
++ IW_EV_FREQ_LEN, /* IW_HEADER_TYPE_FREQ */
++ IW_EV_ADDR_LEN, /* IW_HEADER_TYPE_ADDR */
+ 0,
+- IW_EV_UINT_LEN,
+- IW_EV_FREQ_LEN,
+ IW_EV_POINT_LEN, /* Without variable payload */
+- IW_EV_PARAM_LEN,
+- IW_EV_ADDR_LEN,
+- IW_EV_QUAL_LEN,
++ IW_EV_PARAM_LEN, /* IW_HEADER_TYPE_PARAM */
++ IW_EV_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */
+ };
+
+ /************************ COMMON SUBROUTINES ************************/
+@@ -440,8 +465,10 @@ static inline int ioctl_export_private(s
+ return -EFAULT;
+ #ifdef WE_STRICT_WRITE
+ /* Check if there is enough buffer up there */
+- if(iwr->u.data.length < (SIOCIWLASTPRIV - SIOCIWFIRSTPRIV + 1))
++ if(iwr->u.data.length < dev->wireless_handlers->num_private_args) {
++ printk(KERN_ERR "%s (WE) : Buffer for request SIOCGIWPRIV too small (%d<%d)\n", dev->name, iwr->u.data.length, dev->wireless_handlers->num_private_args);
+ return -E2BIG;
++ }
+ #endif /* WE_STRICT_WRITE */
+
+ /* Set the number of available ioctls. */
+@@ -471,6 +498,7 @@ static inline int ioctl_standard_call(st
+ const struct iw_ioctl_description * descr;
+ struct iw_request_info info;
+ int ret = -EINVAL;
++ int user_size = 0;
+
+ /* Get the description of the IOCTL */
+ if((cmd - SIOCIWFIRST) >= standard_ioctl_num)
+@@ -518,11 +546,8 @@ static inline int ioctl_standard_call(st
+ /* Check NULL pointer */
+ if(iwr->u.data.pointer == NULL)
+ return -EFAULT;
+-#ifdef WE_STRICT_WRITE
+- /* Check if there is enough buffer up there */
+- if(iwr->u.data.length < descr->max_tokens)
+- return -E2BIG;
+-#endif /* WE_STRICT_WRITE */
++ /* Save user space buffer size for checking */
++ user_size = iwr->u.data.length;
+ }
+
+ #ifdef WE_IOCTL_DEBUG
+@@ -559,6 +584,15 @@ static inline int ioctl_standard_call(st
+
+ /* If we have something to return to the user */
+ if (!ret && IW_IS_GET(cmd)) {
++#ifdef WE_STRICT_WRITE
++ /* Check if there is enough buffer up there */
++ if(user_size < iwr->u.data.length) {
++ printk(KERN_ERR "%s (WE) : Buffer for request %04X too small (%d<%d)\n", dev->name, cmd, user_size, iwr->u.data.length);
++ kfree(extra);
++ return -E2BIG;
++ }
++#endif /* WE_STRICT_WRITE */
++
+ err = copy_to_user(iwr->u.data.pointer, extra,
+ iwr->u.data.length *
+ descr->token_size);
+@@ -646,12 +680,18 @@ static inline int ioctl_private_call(str
+ /* Compute the size of the set/get arguments */
+ if(descr != NULL) {
+ if(IW_IS_SET(cmd)) {
++ int offset = 0; /* For sub-ioctls */
++ /* Check for sub-ioctl handler */
++ if(descr->name[0] == '\0')
++ /* Reserve one int for sub-ioctl index */
++ offset = sizeof(__u32);
++
+ /* Size of set arguments */
+ extra_size = get_priv_size(descr->set_args);
+
+ /* Does it fits in iwr ? */
+ if((descr->set_args & IW_PRIV_SIZE_FIXED) &&
+- (extra_size < IFNAMSIZ))
++ ((extra_size + offset) <= IFNAMSIZ))
+ extra_size = 0;
+ } else {
+ /* Size of set arguments */
+@@ -659,7 +699,7 @@ static inline int ioctl_private_call(str
+
+ /* Does it fits in iwr ? */
+ if((descr->get_args & IW_PRIV_SIZE_FIXED) &&
+- (extra_size < IFNAMSIZ))
++ (extra_size <= IFNAMSIZ))
+ extra_size = 0;
+ }
+ }
+@@ -925,7 +965,7 @@ void wireless_send_event(struct net_devi
+ * The best the driver could do is to log an error message.
+ * We will do it ourselves instead...
+ */
+- printk(KERN_ERR "%s (WE) : Invalid Wireless Event (0x%04X)\n",
++ printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n",
+ dev->name, cmd);
+ return;
+ }
diff --git a/packages/linux/files/iw241_we16-6.diff b/packages/linux/files/iw241_we16-6.diff
index e69de29bb2..71cb4c08f6 100644
--- a/packages/linux/files/iw241_we16-6.diff
+++ b/packages/linux/files/iw241_we16-6.diff
@@ -0,0 +1,667 @@
+diff -u -p linux/include/linux/wireless.15.h linux/include/linux/wireless.h
+--- linux/include/linux/wireless.15.h Fri Jan 10 16:55:07 2003
++++ linux/include/linux/wireless.h Wed Apr 2 16:33:31 2003
+@@ -1,7 +1,7 @@
+ /*
+ * This file define a set of standard wireless extensions
+ *
+- * Version : 15 12.7.02
++ * Version : 16 2.4.03
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ * Copyright (c) 1997-2002 Jean Tourrilhes, All Rights Reserved.
+@@ -69,6 +69,8 @@
+
+ /***************************** INCLUDES *****************************/
+
++/* To minimise problems in user space, I might remove those headers
++ * at some point. Jean II */
+ #include <linux/types.h> /* for "caddr_t" et al */
+ #include <linux/socket.h> /* for "struct sockaddr" et al */
+ #include <linux/if.h> /* for IFNAMSIZ and co... */
+@@ -80,7 +82,7 @@
+ * (there is some stuff that will be added in the future...)
+ * I just plan to increment with each new version.
+ */
+-#define WIRELESS_EXT 15
++#define WIRELESS_EXT 16
+
+ /*
+ * Changes :
+@@ -163,6 +165,16 @@
+ * - Add IW_TXPOW_RANGE for range of Tx Powers
+ * - Add IWEVREGISTERED & IWEVEXPIRED events for Access Points
+ * - Add IW_MODE_MONITOR for passive monitor
++ *
++ * V15 to V16
++ * ----------
++ * - Increase the number of bitrates in iw_range to 32 (for 802.11g)
++ * - Increase the number of frequencies in iw_range to 32 (for 802.11b+a)
++ * - Reshuffle struct iw_range for increases, add filler
++ * - Increase IW_MAX_AP to 64 for driver returning a lot of addresses
++ * - Remove IW_MAX_GET_SPY because conflict with enhanced spy support
++ * - Add SIOCSIWTHRSPY/SIOCGIWTHRSPY and "struct iw_thrspy"
++ * - Add IW_ENCODE_TEMP and iw_range->encoding_login_index
+ */
+
+ /**************************** CONSTANTS ****************************/
+@@ -196,9 +208,11 @@
+ /* SIOCGIWSTATS is strictly used between user space and the kernel, and
+ * is never passed to the driver (i.e. the driver will never see it). */
+
+-/* Mobile IP support (statistics per MAC address) */
++/* Spy support (statistics per MAC address - used for Mobile IP support) */
+ #define SIOCSIWSPY 0x8B10 /* set spy addresses */
+ #define SIOCGIWSPY 0x8B11 /* get spy info (quality of link) */
++#define SIOCSIWTHRSPY 0x8B12 /* set spy threshold (spy event) */
++#define SIOCGIWTHRSPY 0x8B13 /* get spy threshold */
+
+ /* Access Point manipulation */
+ #define SIOCSIWAP 0x8B14 /* set access point MAC addresses */
+@@ -294,7 +308,7 @@
+ #define IW_PRIV_TYPE_FLOAT 0x5000 /* struct iw_freq */
+ #define IW_PRIV_TYPE_ADDR 0x6000 /* struct sockaddr */
+
+-#define IW_PRIV_SIZE_FIXED 0x0800 /* Variable or fixed nuber of args */
++#define IW_PRIV_SIZE_FIXED 0x0800 /* Variable or fixed number of args */
+
+ #define IW_PRIV_SIZE_MASK 0x07FF /* Max number of those args */
+
+@@ -306,13 +320,13 @@
+ /* ----------------------- OTHER CONSTANTS ----------------------- */
+
+ /* Maximum frequencies in the range struct */
+-#define IW_MAX_FREQUENCIES 16
++#define IW_MAX_FREQUENCIES 32
+ /* Note : if you have something like 80 frequencies,
+ * don't increase this constant and don't fill the frequency list.
+ * The user will be able to set by channel anyway... */
+
+ /* Maximum bit rates in the range struct */
+-#define IW_MAX_BITRATES 8
++#define IW_MAX_BITRATES 32
+
+ /* Maximum tx powers in the range struct */
+ #define IW_MAX_TXPOWER 8
+@@ -320,8 +334,7 @@
+ * a few of them in the struct iw_range. */
+
+ /* Maximum of address that you may set with SPY */
+-#define IW_MAX_SPY 8 /* set */
+-#define IW_MAX_GET_SPY 64 /* get */
++#define IW_MAX_SPY 8
+
+ /* Maximum of address that you may get in the
+ list of access points in range */
+@@ -354,7 +367,8 @@
+ #define IW_ENCODE_ENABLED 0x0000 /* Encoding enabled */
+ #define IW_ENCODE_RESTRICTED 0x4000 /* Refuse non-encoded packets */
+ #define IW_ENCODE_OPEN 0x2000 /* Accept non-encoded packets */
+-#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
++#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
++#define IW_ENCODE_TEMP 0x0400 /* Temporary key */
+
+ /* Power management flags available (along with the value, if any) */
+ #define IW_POWER_ON 0x0000 /* No details... */
+@@ -482,6 +496,17 @@ struct iw_missed
+ __u32 beacon; /* Missed beacons/superframe */
+ };
+
++/*
++ * Quality range (for spy threshold)
++ */
++struct iw_thrspy
++{
++ struct sockaddr addr; /* Source address (hw/mac) */
++ struct iw_quality qual; /* Quality of the link */
++ struct iw_quality low; /* Low threshold */
++ struct iw_quality high; /* High threshold */
++};
++
+ /* ------------------------ WIRELESS STATS ------------------------ */
+ /*
+ * Wireless statistics (used for /proc/net/wireless)
+@@ -534,7 +559,7 @@ union iwreq_data
+ struct iw_quality qual; /* Quality part of statistics */
+
+ struct sockaddr ap_addr; /* Access point address */
+- struct sockaddr addr; /* Destination address (hw) */
++ struct sockaddr addr; /* Destination address (hw/mac) */
+
+ struct iw_param param; /* Other small parameters */
+ struct iw_point data; /* Other large parameters */
+@@ -582,17 +607,31 @@ struct iw_range
+ __u32 min_nwid; /* Minimal NWID we are able to set */
+ __u32 max_nwid; /* Maximal NWID we are able to set */
+
+- /* Frequency */
+- __u16 num_channels; /* Number of channels [0; num - 1] */
+- __u8 num_frequency; /* Number of entry in the list */
+- struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */
+- /* Note : this frequency list doesn't need to fit channel numbers */
++ /* Old Frequency (backward compat - moved lower ) */
++ __u16 old_num_channels;
++ __u8 old_num_frequency;
++ /* Filler to keep "version" at the same offset */
++ __s32 old_freq[6];
+
+ /* signal level threshold range */
+ __s32 sensitivity;
+
+ /* Quality of link & SNR stuff */
++ /* Quality range (link, level, noise)
++ * If the quality is absolute, it will be in the range [0 ; max_qual],
++ * if the quality is dBm, it will be in the range [max_qual ; 0].
++ * Don't forget that we use 8 bit arithmetics... */
+ struct iw_quality max_qual; /* Quality of the link */
++ /* This should contain the average/typical values of the quality
++ * indicator. This should be the threshold between a "good" and
++ * a "bad" link (example : monitor going from green to orange).
++ * Currently, user space apps like quality monitors don't have any
++ * way to calibrate the measurement. With this, they can split
++ * the range between 0 and max_qual in different quality level
++ * (using a geometric subdivision centered on the average).
++ * I expect that people doing the user space apps will feedback
++ * us on which value we need to put in each driver... */
++ struct iw_quality avg_qual; /* Quality of the link */
+
+ /* Rates */
+ __u8 num_bitrates; /* Number of entries in the list */
+@@ -619,6 +658,8 @@ struct iw_range
+ __u16 encoding_size[IW_MAX_ENCODING_SIZES]; /* Different token sizes */
+ __u8 num_encoding_sizes; /* Number of entry in the list */
+ __u8 max_encoding_tokens; /* Max number of tokens */
++ /* For drivers that need a "login/passwd" form */
++ __u8 encoding_login_index; /* token index for login token */
+
+ /* Transmit power */
+ __u16 txpower_capa; /* What options are supported */
+@@ -638,18 +679,12 @@ struct iw_range
+ __s32 min_r_time; /* Minimal retry lifetime */
+ __s32 max_r_time; /* Maximal retry lifetime */
+
+- /* Average quality of link & SNR */
+- struct iw_quality avg_qual; /* Quality of the link */
+- /* This should contain the average/typical values of the quality
+- * indicator. This should be the threshold between a "good" and
+- * a "bad" link (example : monitor going from green to orange).
+- * Currently, user space apps like quality monitors don't have any
+- * way to calibrate the measurement. With this, they can split
+- * the range between 0 and max_qual in different quality level
+- * (using a geometric subdivision centered on the average).
+- * I expect that people doing the user space apps will feedback
+- * us on which value we need to put in each driver...
+- */
++ /* Frequency */
++ __u16 num_channels; /* Number of channels [0; num - 1] */
++ __u8 num_frequency; /* Number of entry in the list */
++ struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */
++ /* Note : this frequency list doesn't need to fit channel numbers,
++ * because each entry contain its channel index */
+ };
+
+ /*
+diff -u -p linux/include/net/iw_handler.15.h linux/include/net/iw_handler.h
+--- linux/include/net/iw_handler.15.h Fri Jan 10 16:55:17 2003
++++ linux/include/net/iw_handler.h Fri Jan 10 17:02:13 2003
+@@ -1,7 +1,7 @@
+ /*
+ * This file define the new driver API for Wireless Extensions
+ *
+- * Version : 4 21.6.02
++ * Version : 5 4.12.02
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ * Copyright (c) 2001-2002 Jean Tourrilhes, All Rights Reserved.
+@@ -206,7 +206,7 @@
+ * will be needed...
+ * I just plan to increment with each new version.
+ */
+-#define IW_HANDLER_VERSION 4
++#define IW_HANDLER_VERSION 5
+
+ /*
+ * Changes :
+@@ -220,10 +220,18 @@
+ * V3 to V4
+ * --------
+ * - Reshuffle IW_HEADER_TYPE_XXX to map IW_PRIV_TYPE_XXX changes
++ *
++ * V4 to V5
++ * --------
++ * - Add new spy support : struct iw_spy_data & prototypes
+ */
+
+ /**************************** CONSTANTS ****************************/
+
++/* Enable enhanced spy support. Disable to reduce footprint */
++#define IW_WIRELESS_SPY
++#define IW_WIRELESS_THRSPY
++
+ /* Special error message for the driver to indicate that we
+ * should do a commit after return from the iw_handler */
+ #define EIWCOMMIT EINPROGRESS
+@@ -315,6 +323,9 @@ struct iw_handler_def
+ * We will automatically export that to user space... */
+ struct iw_priv_args * private_args;
+
++ /* Driver enhanced spy support */
++ long spy_offset; /* Spy data offset */
++
+ /* In the long term, get_wireless_stats will move from
+ * 'struct net_device' to here, to minimise bloat. */
+ };
+@@ -350,6 +361,33 @@ struct iw_ioctl_description
+
+ /* Need to think of short header translation table. Later. */
+
++/* --------------------- ENHANCED SPY SUPPORT --------------------- */
++/*
++ * In the old days, the driver was handling spy support all by itself.
++ * Now, the driver can delegate this task to Wireless Extensions.
++ * It needs to include this struct in its private part and use the
++ * standard spy iw_handler.
++ */
++
++/*
++ * Instance specific spy data, i.e. addresses spied and quality for them.
++ */
++struct iw_spy_data
++{
++#ifdef IW_WIRELESS_SPY
++ /* --- Standard spy support --- */
++ int spy_number;
++ u_char spy_address[IW_MAX_SPY][ETH_ALEN];
++ struct iw_quality spy_stat[IW_MAX_SPY];
++#ifdef IW_WIRELESS_THRSPY
++ /* --- Enhanced spy support (event) */
++ struct iw_quality spy_thr_low; /* Low threshold */
++ struct iw_quality spy_thr_high; /* High threshold */
++ u_char spy_thr_under[IW_MAX_SPY];
++#endif /* IW_WIRELESS_THRSPY */
++#endif /* IW_WIRELESS_SPY */
++};
++
+ /**************************** PROTOTYPES ****************************/
+ /*
+ * Functions part of the Wireless Extensions (defined in net/core/wireless.c).
+@@ -375,6 +413,31 @@ extern void wireless_send_event(struct n
+
+ /* We may need a function to send a stream of events to user space.
+ * More on that later... */
++
++/* Standard handler for SIOCSIWSPY */
++extern int iw_handler_set_spy(struct net_device * dev,
++ struct iw_request_info * info,
++ union iwreq_data * wrqu,
++ char * extra);
++/* Standard handler for SIOCGIWSPY */
++extern int iw_handler_get_spy(struct net_device * dev,
++ struct iw_request_info * info,
++ union iwreq_data * wrqu,
++ char * extra);
++/* Standard handler for SIOCSIWTHRSPY */
++extern int iw_handler_set_thrspy(struct net_device * dev,
++ struct iw_request_info *info,
++ union iwreq_data * wrqu,
++ char * extra);
++/* Standard handler for SIOCGIWTHRSPY */
++extern int iw_handler_get_thrspy(struct net_device * dev,
++ struct iw_request_info *info,
++ union iwreq_data * wrqu,
++ char * extra);
++/* Driver call to update spy records */
++extern void wireless_spy_update(struct net_device * dev,
++ unsigned char * address,
++ struct iw_quality * wstats);
+
+ /************************* INLINE FUNTIONS *************************/
+ /*
+diff -u -p linux/net/core/wireless.15.c linux/net/core/wireless.c
+--- linux/net/core/wireless.15.c Fri Jan 10 16:56:16 2003
++++ linux/net/core/wireless.c Fri Jan 10 16:59:55 2003
+@@ -2,7 +2,7 @@
+ * This file implement the Wireless Extensions APIs.
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+- * Copyright (c) 1997-2002 Jean Tourrilhes, All Rights Reserved.
++ * Copyright (c) 1997-2003 Jean Tourrilhes, All Rights Reserved.
+ *
+ * (As all part of the Linux kernel, this file is GPL)
+ */
+@@ -43,6 +43,11 @@
+ * o Turn on WE_STRICT_WRITE by default + kernel warning
+ * o Fix WE_STRICT_WRITE in ioctl_export_private() (32 => iw_num)
+ * o Fix off-by-one in test (extra_size <= IFNAMSIZ)
++ *
++ * v6 - 9.01.03 - Jean II
++ * o Add common spy support : iw_handler_set_spy(), wireless_spy_update()
++ * o Add enhanced spy support : iw_handler_set_thrspy() and event.
++ * o Add WIRELESS_EXT version display in /proc/net/wireless
+ */
+
+ /***************************** INCLUDES *****************************/
+@@ -52,6 +57,7 @@
+ #include <linux/types.h> /* off_t */
+ #include <linux/netdevice.h> /* struct ifreq, dev_get_by_name() */
+ #include <linux/rtnetlink.h> /* rtnetlink stuff */
++#include <linux/if_arp.h> /* ARPHRD_ETHER */
+
+ #include <linux/wireless.h> /* Pretty obvious */
+ #include <net/iw_handler.h> /* New driver API */
+@@ -65,6 +71,7 @@
+ /* Debuging stuff */
+ #undef WE_IOCTL_DEBUG /* Debug IOCTL API */
+ #undef WE_EVENT_DEBUG /* Debug Event dispatcher */
++#undef WE_SPY_DEBUG /* Debug enhanced spy support */
+
+ /* Options */
+ #define WE_EVENT_NETLINK /* Propagate events using rtnetlink */
+@@ -72,7 +79,7 @@
+
+ /************************* GLOBAL VARIABLES *************************/
+ /*
+- * You should not use global variables, because or re-entrancy.
++ * You should not use global variables, because of re-entrancy.
+ * On our case, it's only const, so it's OK...
+ */
+ /*
+@@ -115,11 +122,11 @@ static const struct iw_ioctl_description
+ /* SIOCSIWSPY */
+ { IW_HEADER_TYPE_POINT, 0, sizeof(struct sockaddr), 0, IW_MAX_SPY, 0},
+ /* SIOCGIWSPY */
+- { IW_HEADER_TYPE_POINT, 0, (sizeof(struct sockaddr) + sizeof(struct iw_quality)), 0, IW_MAX_GET_SPY, 0},
+- /* -- hole -- */
+- { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
+- /* -- hole -- */
+- { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ { IW_HEADER_TYPE_POINT, 0, (sizeof(struct sockaddr) + sizeof(struct iw_quality)), 0, IW_MAX_SPY, 0},
++ /* SIOCSIWTHRSPY */
++ { IW_HEADER_TYPE_POINT, 0, sizeof(struct iw_thrspy), 1, 1, 0},
++ /* SIOCGIWTHRSPY */
++ { IW_HEADER_TYPE_POINT, 0, sizeof(struct iw_thrspy), 1, 1, 0},
+ /* SIOCSIWAP */
+ { IW_HEADER_TYPE_ADDR, 0, 0, 0, 0, 0},
+ /* SIOCGIWAP */
+@@ -377,9 +384,9 @@ int dev_get_wireless_info(char * buffer,
+ struct net_device * dev;
+
+ size = sprintf(buffer,
+- "Inter-| sta-| Quality | Discarded packets | Missed\n"
+- " face | tus | link level noise | nwid crypt frag retry misc | beacon\n"
+- );
++ "Inter-| sta-| Quality | Discarded packets | Missed | WE\n"
++ " face | tus | link level noise | nwid crypt frag retry misc | beacon | %d\n",
++ WIRELESS_EXT);
+
+ pos += size;
+ len += size;
+@@ -1023,4 +1030,253 @@ void wireless_send_event(struct net_devi
+ kfree(event);
+
+ return; /* Always success, I guess ;-) */
++}
++
++/********************** ENHANCED IWSPY SUPPORT **********************/
++/*
++ * In the old days, the driver was handling spy support all by itself.
++ * Now, the driver can delegate this task to Wireless Extensions.
++ * It needs to use those standard spy iw_handler in struct iw_handler_def,
++ * push data to us via XXX and include struct iw_spy_data in its
++ * private part.
++ * One of the main advantage of centralising spy support here is that
++ * it becomes much easier to improve and extend it without having to touch
++ * the drivers. One example is the addition of the Spy-Threshold events.
++ * Note : IW_WIRELESS_SPY is defined in iw_handler.h
++ */
++
++/*------------------------------------------------------------------*/
++/*
++ * Standard Wireless Handler : set Spy List
++ */
++int iw_handler_set_spy(struct net_device * dev,
++ struct iw_request_info * info,
++ union iwreq_data * wrqu,
++ char * extra)
++{
++#ifdef IW_WIRELESS_SPY
++ struct iw_spy_data * spydata = (dev->priv +
++ dev->wireless_handlers->spy_offset);
++ struct sockaddr * address = (struct sockaddr *) extra;
++
++ /* Disable spy collection while we copy the addresses.
++ * As we don't disable interrupts, we need to do this to avoid races.
++ * As we are the only writer, this is good enough. */
++ spydata->spy_number = 0;
++
++ /* Are there are addresses to copy? */
++ if(wrqu->data.length > 0) {
++ int i;
++
++ /* Copy addresses */
++ for(i = 0; i < wrqu->data.length; i++)
++ memcpy(spydata->spy_address[i], address[i].sa_data,
++ ETH_ALEN);
++ /* Reset stats */
++ memset(spydata->spy_stat, 0,
++ sizeof(struct iw_quality) * IW_MAX_SPY);
++
++#ifdef WE_SPY_DEBUG
++ printk(KERN_DEBUG "iw_handler_set_spy() : offset %ld, spydata %p, num %d\n", dev->wireless_handlers->spy_offset, spydata, wrqu->data.length);
++ for (i = 0; i < wrqu->data.length; i++)
++ printk(KERN_DEBUG
++ "%02X:%02X:%02X:%02X:%02X:%02X \n",
++ spydata->spy_address[i][0],
++ spydata->spy_address[i][1],
++ spydata->spy_address[i][2],
++ spydata->spy_address[i][3],
++ spydata->spy_address[i][4],
++ spydata->spy_address[i][5]);
++#endif /* WE_SPY_DEBUG */
++ }
++ /* Enable addresses */
++ spydata->spy_number = wrqu->data.length;
++
++ return 0;
++#else /* IW_WIRELESS_SPY */
++ return -EOPNOTSUPP;
++#endif /* IW_WIRELESS_SPY */
++}
++
++/*------------------------------------------------------------------*/
++/*
++ * Standard Wireless Handler : get Spy List
++ */
++int iw_handler_get_spy(struct net_device * dev,
++ struct iw_request_info * info,
++ union iwreq_data * wrqu,
++ char * extra)
++{
++#ifdef IW_WIRELESS_SPY
++ struct iw_spy_data * spydata = (dev->priv +
++ dev->wireless_handlers->spy_offset);
++ struct sockaddr * address = (struct sockaddr *) extra;
++ int i;
++
++ wrqu->data.length = spydata->spy_number;
++
++ /* Copy addresses. */
++ for(i = 0; i < spydata->spy_number; i++) {
++ memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN);
++ address[i].sa_family = AF_UNIX;
++ }
++ /* Copy stats to the user buffer (just after). */
++ if(spydata->spy_number > 0)
++ memcpy(extra + (sizeof(struct sockaddr) *spydata->spy_number),
++ spydata->spy_stat,
++ sizeof(struct iw_quality) * spydata->spy_number);
++ /* Reset updated flags. */
++ for(i = 0; i < spydata->spy_number; i++)
++ spydata->spy_stat[i].updated = 0;
++ return 0;
++#else /* IW_WIRELESS_SPY */
++ return -EOPNOTSUPP;
++#endif /* IW_WIRELESS_SPY */
++}
++
++/*------------------------------------------------------------------*/
++/*
++ * Standard Wireless Handler : set spy threshold
++ */
++int iw_handler_set_thrspy(struct net_device * dev,
++ struct iw_request_info *info,
++ union iwreq_data * wrqu,
++ char * extra)
++{
++#ifdef IW_WIRELESS_THRSPY
++ struct iw_spy_data * spydata = (dev->priv +
++ dev->wireless_handlers->spy_offset);
++ struct iw_thrspy * threshold = (struct iw_thrspy *) extra;
++
++ /* Just do it */
++ memcpy(&(spydata->spy_thr_low), &(threshold->low),
++ 2 * sizeof(struct iw_quality));
++
++ /* Clear flag */
++ memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under));
++
++#ifdef WE_SPY_DEBUG
++ printk(KERN_DEBUG "iw_handler_set_thrspy() : low %d ; high %d\n", spydata->spy_thr_low.level, spydata->spy_thr_high.level);
++#endif /* WE_SPY_DEBUG */
++
++ return 0;
++#else /* IW_WIRELESS_THRSPY */
++ return -EOPNOTSUPP;
++#endif /* IW_WIRELESS_THRSPY */
++}
++
++/*------------------------------------------------------------------*/
++/*
++ * Standard Wireless Handler : get spy threshold
++ */
++int iw_handler_get_thrspy(struct net_device * dev,
++ struct iw_request_info *info,
++ union iwreq_data * wrqu,
++ char * extra)
++{
++#ifdef IW_WIRELESS_THRSPY
++ struct iw_spy_data * spydata = (dev->priv +
++ dev->wireless_handlers->spy_offset);
++ struct iw_thrspy * threshold = (struct iw_thrspy *) extra;
++
++ /* Just do it */
++ memcpy(&(threshold->low), &(spydata->spy_thr_low),
++ 2 * sizeof(struct iw_quality));
++
++ return 0;
++#else /* IW_WIRELESS_THRSPY */
++ return -EOPNOTSUPP;
++#endif /* IW_WIRELESS_THRSPY */
++}
++
++#ifdef IW_WIRELESS_THRSPY
++/*------------------------------------------------------------------*/
++/*
++ * Prepare and send a Spy Threshold event
++ */
++static void iw_send_thrspy_event(struct net_device * dev,
++ struct iw_spy_data * spydata,
++ unsigned char * address,
++ struct iw_quality * wstats)
++{
++ union iwreq_data wrqu;
++ struct iw_thrspy threshold;
++
++ /* Init */
++ wrqu.data.length = 1;
++ wrqu.data.flags = 0;
++ /* Copy address */
++ memcpy(threshold.addr.sa_data, address, ETH_ALEN);
++ threshold.addr.sa_family = ARPHRD_ETHER;
++ /* Copy stats */
++ memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality));
++ /* Copy also thresholds */
++ memcpy(&(threshold.low), &(spydata->spy_thr_low),
++ 2 * sizeof(struct iw_quality));
++
++#ifdef WE_SPY_DEBUG
++ printk(KERN_DEBUG "iw_send_thrspy_event() : address %02X:%02X:%02X:%02X:%02X:%02X, level %d, up = %d\n",
++ threshold.addr.sa_data[0],
++ threshold.addr.sa_data[1],
++ threshold.addr.sa_data[2],
++ threshold.addr.sa_data[3],
++ threshold.addr.sa_data[4],
++ threshold.addr.sa_data[5], threshold.qual.level);
++#endif /* WE_SPY_DEBUG */
++
++ /* Send event to user space */
++ wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold);
++}
++#endif /* IW_WIRELESS_THRSPY */
++
++/* ---------------------------------------------------------------- */
++/*
++ * Call for the driver to update the spy data.
++ * For now, the spy data is a simple array. As the size of the array is
++ * small, this is good enough. If we wanted to support larger number of
++ * spy addresses, we should use something more efficient...
++ */
++void wireless_spy_update(struct net_device * dev,
++ unsigned char * address,
++ struct iw_quality * wstats)
++{
++#ifdef IW_WIRELESS_SPY
++ struct iw_spy_data * spydata = (dev->priv +
++ dev->wireless_handlers->spy_offset);
++ int i;
++ int match = -1;
++
++#ifdef WE_SPY_DEBUG
++ printk(KERN_DEBUG "wireless_spy_update() : offset %ld, spydata %p, address %02X:%02X:%02X:%02X:%02X:%02X\n", dev->wireless_handlers->spy_offset, spydata, address[0], address[1], address[2], address[3], address[4], address[5]);
++#endif /* WE_SPY_DEBUG */
++
++ /* Update all records that match */
++ for(i = 0; i < spydata->spy_number; i++)
++ if(!memcmp(address, spydata->spy_address[i], ETH_ALEN)) {
++ memcpy(&(spydata->spy_stat[i]), wstats,
++ sizeof(struct iw_quality));
++ match = i;
++ }
++#ifdef IW_WIRELESS_THRSPY
++ /* Generate an event if we cross the spy threshold.
++ * To avoid event storms, we have a simple hysteresis : we generate
++ * event only when we go under the low threshold or above the
++ * high threshold. */
++ if(match >= 0) {
++ if(spydata->spy_thr_under[match]) {
++ if(wstats->level > spydata->spy_thr_high.level) {
++ spydata->spy_thr_under[match] = 0;
++ iw_send_thrspy_event(dev, spydata,
++ address, wstats);
++ }
++ } else {
++ if(wstats->level < spydata->spy_thr_low.level) {
++ spydata->spy_thr_under[match] = 1;
++ iw_send_thrspy_event(dev, spydata,
++ address, wstats);
++ }
++ }
++ }
++#endif /* IW_WIRELESS_THRSPY */
++#endif /* IW_WIRELESS_SPY */
+ }
+diff -u -p linux/net/netsyms.15.c linux/net/netsyms.c
+--- linux/net/netsyms.15.c Fri Jan 10 16:56:32 2003
++++ linux/net/netsyms.c Fri Jan 10 17:01:09 2003
+@@ -594,6 +594,11 @@ EXPORT_SYMBOL(softnet_data);
+ #if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
+ #include <net/iw_handler.h>
+ EXPORT_SYMBOL(wireless_send_event);
++EXPORT_SYMBOL(iw_handler_set_spy);
++EXPORT_SYMBOL(iw_handler_get_spy);
++EXPORT_SYMBOL(iw_handler_set_thrspy);
++EXPORT_SYMBOL(iw_handler_get_thrspy);
++EXPORT_SYMBOL(wireless_spy_update);
+ #endif /* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */
+
+ #endif /* CONFIG_NET */
diff --git a/packages/linux/files/iw249_we17-13.diff b/packages/linux/files/iw249_we17-13.diff
index e69de29bb2..674f4ffbc0 100644
--- a/packages/linux/files/iw249_we17-13.diff
+++ b/packages/linux/files/iw249_we17-13.diff
@@ -0,0 +1,768 @@
+diff -u -p linux/include/linux/netdevice.we16.h linux/include/linux/netdevice.h
+--- linux/include/linux/netdevice.we16.h 2005-02-03 14:54:56.000000000 -0800
++++ linux/include/linux/netdevice.h 2005-02-03 15:43:30.000000000 -0800
+@@ -295,7 +295,9 @@ struct net_device
+
+ /* List of functions to handle Wireless Extensions (instead of ioctl).
+ * See <net/iw_handler.h> for details. Jean II */
+- struct iw_handler_def * wireless_handlers;
++ const struct iw_handler_def * wireless_handlers;
++ /* Instance data managed by the core of Wireless Extensions. */
++ struct iw_public_data * wireless_data;
+
+ struct ethtool_ops *ethtool_ops;
+
+diff -u -p linux/include/linux/wireless.we16.h linux/include/linux/wireless.h
+--- linux/include/linux/wireless.we16.h 2005-02-03 14:55:04.000000000 -0800
++++ linux/include/linux/wireless.h 2005-02-03 15:44:48.000000000 -0800
+@@ -1,10 +1,10 @@
+ /*
+ * This file define a set of standard wireless extensions
+ *
+- * Version : 16 2.4.03
++ * Version : 17 21.6.04
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+- * Copyright (c) 1997-2002 Jean Tourrilhes, All Rights Reserved.
++ * Copyright (c) 1997-2004 Jean Tourrilhes, All Rights Reserved.
+ */
+
+ #ifndef _LINUX_WIRELESS_H
+@@ -47,12 +47,12 @@
+ * # include/net/iw_handler.h
+ *
+ * Note as well that /proc/net/wireless implementation has now moved in :
+- * # include/linux/wireless.c
++ * # net/core/wireless.c
+ *
+ * Wireless Events (2002 -> onward) :
+ * --------------------------------
+ * Events are defined at the end of this file, and implemented in :
+- * # include/linux/wireless.c
++ * # net/core/wireless.c
+ *
+ * Other comments :
+ * --------------
+@@ -82,7 +82,7 @@
+ * (there is some stuff that will be added in the future...)
+ * I just plan to increment with each new version.
+ */
+-#define WIRELESS_EXT 16
++#define WIRELESS_EXT 17
+
+ /*
+ * Changes :
+@@ -175,6 +175,13 @@
+ * - Remove IW_MAX_GET_SPY because conflict with enhanced spy support
+ * - Add SIOCSIWTHRSPY/SIOCGIWTHRSPY and "struct iw_thrspy"
+ * - Add IW_ENCODE_TEMP and iw_range->encoding_login_index
++ *
++ * V16 to V17
++ * ----------
++ * - Add flags to frequency -> auto/fixed
++ * - Document (struct iw_quality *)->updated, add new flags (INVALID)
++ * - Wireless Event capability in struct iw_range
++ * - Add support for relative TxPower (yick !)
+ */
+
+ /**************************** CONSTANTS ****************************/
+@@ -251,7 +258,7 @@
+
+ /* -------------------- DEV PRIVATE IOCTL LIST -------------------- */
+
+-/* These 16 ioctl are wireless device private.
++/* These 32 ioctl are wireless device private, for 16 commands.
+ * Each driver is free to use them for whatever purpose it chooses,
+ * however the driver *must* export the description of those ioctls
+ * with SIOCGIWPRIV and *must* use arguments as defined below.
+@@ -266,8 +273,8 @@
+ * We now have 32 commands, so a bit more space ;-).
+ * Also, all 'odd' commands are only usable by root and don't return the
+ * content of ifr/iwr to user (but you are not obliged to use the set/get
+- * convention, just use every other two command).
+- * And I repeat : you are not obliged to use them with iwspy, but you
++ * convention, just use every other two command). More details in iwpriv.c.
++ * And I repeat : you are not forced to use them with iwpriv, but you
+ * must be compliant with it.
+ */
+
+@@ -352,6 +359,18 @@
+ #define IW_MODE_SECOND 5 /* Secondary master/repeater (backup) */
+ #define IW_MODE_MONITOR 6 /* Passive monitor (listen only) */
+
++/* Statistics flags (bitmask in updated) */
++#define IW_QUAL_QUAL_UPDATED 0x1 /* Value was updated since last read */
++#define IW_QUAL_LEVEL_UPDATED 0x2
++#define IW_QUAL_NOISE_UPDATED 0x4
++#define IW_QUAL_QUAL_INVALID 0x10 /* Driver doesn't provide value */
++#define IW_QUAL_LEVEL_INVALID 0x20
++#define IW_QUAL_NOISE_INVALID 0x40
++
++/* Frequency flags */
++#define IW_FREQ_AUTO 0x00 /* Let the driver decides */
++#define IW_FREQ_FIXED 0x01 /* Force a specific value */
++
+ /* Maximum number of size of encoding token available
+ * they are listed in the range structure */
+ #define IW_MAX_ENCODING_SIZES 8
+@@ -390,6 +409,7 @@
+ #define IW_TXPOW_TYPE 0x00FF /* Type of value */
+ #define IW_TXPOW_DBM 0x0000 /* Value is in dBm */
+ #define IW_TXPOW_MWATT 0x0001 /* Value is in mW */
++#define IW_TXPOW_RELATIVE 0x0002 /* Value is in arbitrary units */
+ #define IW_TXPOW_RANGE 0x1000 /* Range of value between min/max */
+
+ /* Retry limits and lifetime flags available */
+@@ -418,6 +438,25 @@
+ /* Max number of char in custom event - use multiple of them if needed */
+ #define IW_CUSTOM_MAX 256 /* In bytes */
+
++/* Event capability macros - in (struct iw_range *)->event_capa
++ * Because we have more than 32 possible events, we use an array of
++ * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */
++#define IW_EVENT_CAPA_BASE(cmd) ((cmd >= SIOCIWFIRSTPRIV) ? \
++ (cmd - SIOCIWFIRSTPRIV + 0x60) : \
++ (cmd - SIOCSIWCOMMIT))
++#define IW_EVENT_CAPA_INDEX(cmd) (IW_EVENT_CAPA_BASE(cmd) >> 5)
++#define IW_EVENT_CAPA_MASK(cmd) (1 << (IW_EVENT_CAPA_BASE(cmd) & 0x1F))
++/* Event capability constants - event autogenerated by the kernel
++ * This list is valid for most 802.11 devices, customise as needed... */
++#define IW_EVENT_CAPA_K_0 (IW_EVENT_CAPA_MASK(0x8B04) | \
++ IW_EVENT_CAPA_MASK(0x8B06) | \
++ IW_EVENT_CAPA_MASK(0x8B1A))
++#define IW_EVENT_CAPA_K_1 (IW_EVENT_CAPA_MASK(0x8B2A))
++/* "Easy" macro to set events in iw_range (less efficient) */
++#define IW_EVENT_CAPA_SET(event_capa, cmd) (event_capa[IW_EVENT_CAPA_INDEX(cmd)] |= IW_EVENT_CAPA_MASK(cmd))
++#define IW_EVENT_CAPA_SET_KERNEL(event_capa) {event_capa[0] |= IW_EVENT_CAPA_K_0; event_capa[1] |= IW_EVENT_CAPA_K_1; }
++
++
+ /****************************** TYPES ******************************/
+
+ /* --------------------------- SUBTYPES --------------------------- */
+@@ -456,7 +495,7 @@ struct iw_freq
+ __s32 m; /* Mantissa */
+ __s16 e; /* Exponent */
+ __u8 i; /* List index (when in range struct) */
+- __u8 pad; /* Unused - just for alignement */
++ __u8 flags; /* Flags (fixed/auto) */
+ };
+
+ /*
+@@ -610,11 +649,12 @@ struct iw_range
+ /* Old Frequency (backward compat - moved lower ) */
+ __u16 old_num_channels;
+ __u8 old_num_frequency;
+- /* Filler to keep "version" at the same offset */
+- __s32 old_freq[6];
++
++ /* Wireless event capability bitmasks */
++ __u32 event_capa[6];
+
+ /* signal level threshold range */
+- __s32 sensitivity;
++ __s32 sensitivity;
+
+ /* Quality of link & SNR stuff */
+ /* Quality range (link, level, noise)
+diff -u -p linux/include/net/iw_handler.we16.h linux/include/net/iw_handler.h
+--- linux/include/net/iw_handler.we16.h 2005-02-03 14:55:26.000000000 -0800
++++ linux/include/net/iw_handler.h 2005-02-03 15:47:04.000000000 -0800
+@@ -1,10 +1,10 @@
+ /*
+ * This file define the new driver API for Wireless Extensions
+ *
+- * Version : 5 4.12.02
++ * Version : 6 21.6.04
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+- * Copyright (c) 2001-2002 Jean Tourrilhes, All Rights Reserved.
++ * Copyright (c) 2001-2004 Jean Tourrilhes, All Rights Reserved.
+ */
+
+ #ifndef _IW_HANDLER_H
+@@ -206,7 +206,7 @@
+ * will be needed...
+ * I just plan to increment with each new version.
+ */
+-#define IW_HANDLER_VERSION 5
++#define IW_HANDLER_VERSION 6
+
+ /*
+ * Changes :
+@@ -224,11 +224,18 @@
+ * V4 to V5
+ * --------
+ * - Add new spy support : struct iw_spy_data & prototypes
++ *
++ * V5 to V6
++ * --------
++ * - Change the way we get to spy_data method for added safety
++ * - Remove spy #ifdef, they are always on -> cleaner code
++ * - Add IW_DESCR_FLAG_NOMAX flag for very large requests
++ * - Start migrating get_wireless_stats to struct iw_handler_def
+ */
+
+ /**************************** CONSTANTS ****************************/
+
+-/* Enable enhanced spy support. Disable to reduce footprint */
++/* Enhanced spy support available */
+ #define IW_WIRELESS_SPY
+ #define IW_WIRELESS_THRSPY
+
+@@ -258,6 +265,7 @@
+ #define IW_DESCR_FLAG_EVENT 0x0002 /* Generate an event on SET */
+ #define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET : request is ROOT only */
+ /* SET : Omit payload from generated iwevent */
++#define IW_DESCR_FLAG_NOMAX 0x0008 /* GET : no limit on request size */
+ /* Driver level flags */
+ #define IW_DESCR_FLAG_WAIT 0x0100 /* Wait for driver event */
+
+@@ -311,23 +319,25 @@ struct iw_handler_def
+ /* Array of handlers for standard ioctls
+ * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWNAME]
+ */
+- iw_handler * standard;
++ const iw_handler * standard;
+
+ /* Array of handlers for private ioctls
+ * Will call dev->wireless_handlers->private[ioctl - SIOCIWFIRSTPRIV]
+ */
+- iw_handler * private;
++ const iw_handler * private;
+
+ /* Arguments of private handler. This one is just a list, so you
+ * can put it in any order you want and should not leave holes...
+ * We will automatically export that to user space... */
+- struct iw_priv_args * private_args;
++ const struct iw_priv_args * private_args;
+
+- /* Driver enhanced spy support */
+- long spy_offset; /* Spy data offset */
++ /* This field will be *removed* in the next version of WE */
++ long spy_offset; /* DO NOT USE */
+
+- /* In the long term, get_wireless_stats will move from
+- * 'struct net_device' to here, to minimise bloat. */
++ /* New location of get_wireless_stats, to de-bloat struct net_device.
++ * The old pointer in struct net_device will be gradually phased
++ * out, and drivers are encouraged to use this one... */
++ struct iw_statistics* (*get_wireless_stats)(struct net_device *dev);
+ };
+
+ /* ---------------------- IOCTL DESCRIPTION ---------------------- */
+@@ -374,18 +384,29 @@ struct iw_ioctl_description
+ */
+ struct iw_spy_data
+ {
+-#ifdef IW_WIRELESS_SPY
+ /* --- Standard spy support --- */
+ int spy_number;
+ u_char spy_address[IW_MAX_SPY][ETH_ALEN];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+-#ifdef IW_WIRELESS_THRSPY
+ /* --- Enhanced spy support (event) */
+ struct iw_quality spy_thr_low; /* Low threshold */
+ struct iw_quality spy_thr_high; /* High threshold */
+ u_char spy_thr_under[IW_MAX_SPY];
+-#endif /* IW_WIRELESS_THRSPY */
+-#endif /* IW_WIRELESS_SPY */
++};
++
++/* --------------------- DEVICE WIRELESS DATA --------------------- */
++/*
++ * This is all the wireless data specific to a device instance that
++ * is managed by the core of Wireless Extensions.
++ * We only keep pointer to those structures, so that a driver is free
++ * to share them between instances.
++ * This structure should be initialised before registering the device.
++ * Access to this data follow the same rules as any other struct net_device
++ * data (i.e. valid as long as struct net_device exist, same locking rules).
++ */
++struct iw_public_data {
++ /* Driver enhanced spy support */
++ struct iw_spy_data * spy_data;
+ };
+
+ /**************************** PROTOTYPES ****************************/
+diff -u -p linux/net/core/dev.we16.c linux/net/core/dev.c
+--- linux/net/core/dev.we16.c 2005-02-03 14:55:56.000000000 -0800
++++ linux/net/core/dev.c 2005-02-03 15:28:48.000000000 -0800
+@@ -2426,7 +2426,7 @@ int dev_ioctl(unsigned int cmd, void *ar
+ /* Follow me in net/core/wireless.c */
+ ret = wireless_process_ioctl(&ifr, cmd);
+ rtnl_unlock();
+- if (!ret && IW_IS_GET(cmd) &&
++ if (IW_IS_GET(cmd) &&
+ copy_to_user(arg, &ifr, sizeof(struct ifreq)))
+ return -EFAULT;
+ return ret;
+diff -u -p linux/net/core/wireless.we16.c linux/net/core/wireless.c
+--- linux/net/core/wireless.we16.c 2005-02-03 14:56:09.000000000 -0800
++++ linux/net/core/wireless.c 2005-02-03 16:33:22.000000000 -0800
+@@ -2,7 +2,7 @@
+ * This file implement the Wireless Extensions APIs.
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+- * Copyright (c) 1997-2003 Jean Tourrilhes, All Rights Reserved.
++ * Copyright (c) 1997-2004 Jean Tourrilhes, All Rights Reserved.
+ *
+ * (As all part of the Linux kernel, this file is GPL)
+ */
+@@ -48,6 +48,16 @@
+ * o Add common spy support : iw_handler_set_spy(), wireless_spy_update()
+ * o Add enhanced spy support : iw_handler_set_thrspy() and event.
+ * o Add WIRELESS_EXT version display in /proc/net/wireless
++ *
++ * v6 - 18.06.04 - Jean II
++ * o Change get_spydata() method for added safety
++ * o Remove spy #ifdef, they are always on -> cleaner code
++ * o Allow any size GET request if user specifies length > max
++ * and if request has IW_DESCR_FLAG_NOMAX flag or is SIOCGIWPRIV
++ * o Start migrating get_wireless_stats to struct iw_handler_def
++ * o Add wmb() in iw_handler_set_spy() for non-coherent archs/cpus
++ * Based on patch from Pavel Roskin <proski@gnu.org> :
++ * o Fix kernel data leak to user space in private handler handling
+ */
+
+ /***************************** INCLUDES *****************************/
+@@ -64,11 +74,7 @@
+
+ /**************************** CONSTANTS ****************************/
+
+-/* Enough lenience, let's make sure things are proper... */
+-#define WE_STRICT_WRITE /* Check write buffer size */
+-/* I'll probably drop both the define and kernel message in the next version */
+-
+-/* Debuging stuff */
++/* Debugging stuff */
+ #undef WE_IOCTL_DEBUG /* Debug IOCTL API */
+ #undef WE_EVENT_DEBUG /* Debug Event dispatcher */
+ #undef WE_SPY_DEBUG /* Debug enhanced spy support */
+@@ -134,11 +140,11 @@ static const struct iw_ioctl_description
+ /* -- hole -- */
+ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
+ /* SIOCGIWAPLIST */
+- { IW_HEADER_TYPE_POINT, 0, (sizeof(struct sockaddr) + sizeof(struct iw_quality)), 0, IW_MAX_AP, 0},
++ { IW_HEADER_TYPE_POINT, 0, (sizeof(struct sockaddr) + sizeof(struct iw_quality)), 0, IW_MAX_AP, IW_DESCR_FLAG_NOMAX},
+ /* SIOCSIWSCAN */
+ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
+ /* SIOCGIWSCAN */
+- { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_SCAN_MAX_DATA, 0},
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_SCAN_MAX_DATA, IW_DESCR_FLAG_NOMAX},
+ /* SIOCSIWESSID */
+ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE + 1, IW_DESCR_FLAG_EVENT},
+ /* SIOCGIWESSID */
+@@ -203,7 +209,7 @@ static const int standard_event_num = (s
+ sizeof(struct iw_ioctl_description));
+
+ /* Size (in bytes) of the various private data types */
+-static const char priv_type_size[] = {
++static const char iw_priv_type_size[] = {
+ 0, /* IW_PRIV_TYPE_NONE */
+ 1, /* IW_PRIV_TYPE_BYTE */
+ 1, /* IW_PRIV_TYPE_CHAR */
+@@ -270,12 +276,15 @@ static inline iw_handler get_handler(str
+ */
+ static inline struct iw_statistics *get_wireless_stats(struct net_device *dev)
+ {
++ /* New location */
++ if((dev->wireless_handlers != NULL) &&
++ (dev->wireless_handlers->get_wireless_stats != NULL))
++ return dev->wireless_handlers->get_wireless_stats(dev);
++
++ /* Old location, will be phased out in next WE */
+ return (dev->get_wireless_stats ?
+ dev->get_wireless_stats(dev) :
+ (struct iw_statistics *) NULL);
+- /* In the future, get_wireless_stats may move from 'struct net_device'
+- * to 'struct iw_handler_def', to de-bloat struct net_device.
+- * Definitely worse a thought... */
+ }
+
+ /* ---------------------------------------------------------------- */
+@@ -310,14 +319,32 @@ static inline int call_commit_handler(st
+
+ /* ---------------------------------------------------------------- */
+ /*
+- * Number of private arguments
++ * Calculate size of private arguments
+ */
+ static inline int get_priv_size(__u16 args)
+ {
+ int num = args & IW_PRIV_SIZE_MASK;
+ int type = (args & IW_PRIV_TYPE_MASK) >> 12;
+
+- return num * priv_type_size[type];
++ return num * iw_priv_type_size[type];
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Re-calculate the size of private arguments
++ */
++static inline int adjust_priv_size(__u16 args,
++ union iwreq_data * wrqu)
++{
++ int num = wrqu->data.length;
++ int max = args & IW_PRIV_SIZE_MASK;
++ int type = (args & IW_PRIV_TYPE_MASK) >> 12;
++
++ /* Make sure the driver doesn't goof up */
++ if (max < num)
++ num = max;
++
++ return num * iw_priv_type_size[type];
+ }
+
+
+@@ -350,11 +377,14 @@ static inline int sprintf_wireless_stats
+ dev->name,
+ stats->status,
+ stats->qual.qual,
+- stats->qual.updated & 1 ? '.' : ' ',
++ stats->qual.updated & IW_QUAL_QUAL_UPDATED
++ ? '.' : ' ',
+ ((__u8) stats->qual.level),
+- stats->qual.updated & 2 ? '.' : ' ',
++ stats->qual.updated & IW_QUAL_LEVEL_UPDATED
++ ? '.' : ' ',
+ ((__u8) stats->qual.noise),
+- stats->qual.updated & 4 ? '.' : ' ',
++ stats->qual.updated & IW_QUAL_NOISE_UPDATED
++ ? '.' : ' ',
+ stats->discard.nwid,
+ stats->discard.code,
+ stats->discard.fragment,
+@@ -470,13 +500,15 @@ static inline int ioctl_export_private(s
+ /* Check NULL pointer */
+ if(iwr->u.data.pointer == NULL)
+ return -EFAULT;
+-#ifdef WE_STRICT_WRITE
++
+ /* Check if there is enough buffer up there */
+ if(iwr->u.data.length < dev->wireless_handlers->num_private_args) {
+- printk(KERN_ERR "%s (WE) : Buffer for request SIOCGIWPRIV too small (%d<%d)\n", dev->name, iwr->u.data.length, dev->wireless_handlers->num_private_args);
++ /* User space can't know in advance how large the buffer
++ * needs to be. Give it a hint, so that we can support
++ * any size buffer we want somewhat efficiently... */
++ iwr->u.data.length = dev->wireless_handlers->num_private_args;
+ return -E2BIG;
+ }
+-#endif /* WE_STRICT_WRITE */
+
+ /* Set the number of available ioctls. */
+ iwr->u.data.length = dev->wireless_handlers->num_private_args;
+@@ -505,7 +537,6 @@ static inline int ioctl_standard_call(st
+ const struct iw_ioctl_description * descr;
+ struct iw_request_info info;
+ int ret = -EINVAL;
+- int user_size = 0;
+
+ /* Get the description of the IOCTL */
+ if((cmd - SIOCIWFIRST) >= standard_ioctl_num)
+@@ -536,8 +567,14 @@ static inline int ioctl_standard_call(st
+ #endif /* WE_SET_EVENT */
+ } else {
+ char * extra;
++ int extra_size;
++ int user_length = 0;
+ int err;
+
++ /* Calculate space needed by arguments. Always allocate
++ * for max space. Easier, and won't last long... */
++ extra_size = descr->max_tokens * descr->token_size;
++
+ /* Check what user space is giving us */
+ if(IW_IS_SET(cmd)) {
+ /* Check NULL pointer */
+@@ -554,18 +591,33 @@ static inline int ioctl_standard_call(st
+ if(iwr->u.data.pointer == NULL)
+ return -EFAULT;
+ /* Save user space buffer size for checking */
+- user_size = iwr->u.data.length;
++ user_length = iwr->u.data.length;
++
++ /* Don't check if user_length > max to allow forward
++ * compatibility. The test user_length < min is
++ * implied by the test at the end. */
++
++ /* Support for very large requests */
++ if((descr->flags & IW_DESCR_FLAG_NOMAX) &&
++ (user_length > descr->max_tokens)) {
++ /* Allow userspace to GET more than max so
++ * we can support any size GET requests.
++ * There is still a limit : -ENOMEM. */
++ extra_size = user_length * descr->token_size;
++ /* Note : user_length is originally a __u16,
++ * and token_size is controlled by us,
++ * so extra_size won't get negative and
++ * won't overflow... */
++ }
+ }
+
+ #ifdef WE_IOCTL_DEBUG
+ printk(KERN_DEBUG "%s (WE) : Malloc %d bytes\n",
+- dev->name, descr->max_tokens * descr->token_size);
++ dev->name, extra_size);
+ #endif /* WE_IOCTL_DEBUG */
+
+- /* Always allocate for max space. Easier, and won't last
+- * long... */
+- extra = kmalloc(descr->max_tokens * descr->token_size,
+- GFP_KERNEL);
++ /* Create the kernel buffer */
++ extra = kmalloc(extra_size, GFP_KERNEL);
+ if (extra == NULL) {
+ return -ENOMEM;
+ }
+@@ -591,14 +643,11 @@ static inline int ioctl_standard_call(st
+
+ /* If we have something to return to the user */
+ if (!ret && IW_IS_GET(cmd)) {
+-#ifdef WE_STRICT_WRITE
+ /* Check if there is enough buffer up there */
+- if(user_size < iwr->u.data.length) {
+- printk(KERN_ERR "%s (WE) : Buffer for request %04X too small (%d<%d)\n", dev->name, cmd, user_size, iwr->u.data.length);
++ if(user_length < iwr->u.data.length) {
+ kfree(extra);
+ return -E2BIG;
+ }
+-#endif /* WE_STRICT_WRITE */
+
+ err = copy_to_user(iwr->u.data.pointer, extra,
+ iwr->u.data.length *
+@@ -661,7 +710,7 @@ static inline int ioctl_private_call(str
+ iw_handler handler)
+ {
+ struct iwreq * iwr = (struct iwreq *) ifr;
+- struct iw_priv_args * descr = NULL;
++ const struct iw_priv_args * descr = NULL;
+ struct iw_request_info info;
+ int extra_size = 0;
+ int i;
+@@ -701,7 +750,7 @@ static inline int ioctl_private_call(str
+ ((extra_size + offset) <= IFNAMSIZ))
+ extra_size = 0;
+ } else {
+- /* Size of set arguments */
++ /* Size of get arguments */
+ extra_size = get_priv_size(descr->get_args);
+
+ /* Does it fits in iwr ? */
+@@ -771,6 +820,14 @@ static inline int ioctl_private_call(str
+
+ /* If we have something to return to the user */
+ if (!ret && IW_IS_GET(cmd)) {
++
++ /* Adjust for the actual length if it's variable,
++ * avoid leaking kernel bits outside. */
++ if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) {
++ extra_size = adjust_priv_size(descr->get_args,
++ &(iwr->u));
++ }
++
+ err = copy_to_user(iwr->u.data.pointer, extra,
+ extra_size);
+ if (err)
+@@ -1042,9 +1099,25 @@ void wireless_send_event(struct net_devi
+ * One of the main advantage of centralising spy support here is that
+ * it becomes much easier to improve and extend it without having to touch
+ * the drivers. One example is the addition of the Spy-Threshold events.
+- * Note : IW_WIRELESS_SPY is defined in iw_handler.h
+ */
+
++/* ---------------------------------------------------------------- */
++/*
++ * Return the pointer to the spy data in the driver.
++ * Because this is called on the Rx path via wireless_spy_update(),
++ * we want it to be efficient...
++ */
++static inline struct iw_spy_data * get_spydata(struct net_device *dev)
++{
++ /* This is the new way */
++ if(dev->wireless_data)
++ return(dev->wireless_data->spy_data);
++
++ /* This is the old way. Doesn't work for multi-headed drivers.
++ * It will be removed in the next version of WE. */
++ return (dev->priv + dev->wireless_handlers->spy_offset);
++}
++
+ /*------------------------------------------------------------------*/
+ /*
+ * Standard Wireless Handler : set Spy List
+@@ -1054,16 +1127,26 @@ int iw_handler_set_spy(struct net_device
+ union iwreq_data * wrqu,
+ char * extra)
+ {
+-#ifdef IW_WIRELESS_SPY
+- struct iw_spy_data * spydata = (dev->priv +
+- dev->wireless_handlers->spy_offset);
++ struct iw_spy_data * spydata = get_spydata(dev);
+ struct sockaddr * address = (struct sockaddr *) extra;
+
++ /* Make sure driver is not buggy or using the old API */
++ if(!spydata)
++ return -EOPNOTSUPP;
++
+ /* Disable spy collection while we copy the addresses.
+- * As we don't disable interrupts, we need to do this to avoid races.
+- * As we are the only writer, this is good enough. */
++ * While we copy addresses, any call to wireless_spy_update()
++ * will NOP. This is OK, as anyway the addresses are changing. */
+ spydata->spy_number = 0;
+
++ /* We want to operate without locking, because wireless_spy_update()
++ * most likely will happen in the interrupt handler, and therefore
++ * have its own locking constraints and needs performance.
++ * The rtnl_lock() make sure we don't race with the other iw_handlers.
++ * This make sure wireless_spy_update() "see" that the spy list
++ * is temporarily disabled. */
++ wmb();
++
+ /* Are there are addresses to copy? */
+ if(wrqu->data.length > 0) {
+ int i;
+@@ -1089,13 +1172,14 @@ int iw_handler_set_spy(struct net_device
+ spydata->spy_address[i][5]);
+ #endif /* WE_SPY_DEBUG */
+ }
++
++ /* Make sure above is updated before re-enabling */
++ wmb();
++
+ /* Enable addresses */
+ spydata->spy_number = wrqu->data.length;
+
+ return 0;
+-#else /* IW_WIRELESS_SPY */
+- return -EOPNOTSUPP;
+-#endif /* IW_WIRELESS_SPY */
+ }
+
+ /*------------------------------------------------------------------*/
+@@ -1107,12 +1191,14 @@ int iw_handler_get_spy(struct net_device
+ union iwreq_data * wrqu,
+ char * extra)
+ {
+-#ifdef IW_WIRELESS_SPY
+- struct iw_spy_data * spydata = (dev->priv +
+- dev->wireless_handlers->spy_offset);
++ struct iw_spy_data * spydata = get_spydata(dev);
+ struct sockaddr * address = (struct sockaddr *) extra;
+ int i;
+
++ /* Make sure driver is not buggy or using the old API */
++ if(!spydata)
++ return -EOPNOTSUPP;
++
+ wrqu->data.length = spydata->spy_number;
+
+ /* Copy addresses. */
+@@ -1129,9 +1215,6 @@ int iw_handler_get_spy(struct net_device
+ for(i = 0; i < spydata->spy_number; i++)
+ spydata->spy_stat[i].updated = 0;
+ return 0;
+-#else /* IW_WIRELESS_SPY */
+- return -EOPNOTSUPP;
+-#endif /* IW_WIRELESS_SPY */
+ }
+
+ /*------------------------------------------------------------------*/
+@@ -1143,11 +1226,13 @@ int iw_handler_set_thrspy(struct net_dev
+ union iwreq_data * wrqu,
+ char * extra)
+ {
+-#ifdef IW_WIRELESS_THRSPY
+- struct iw_spy_data * spydata = (dev->priv +
+- dev->wireless_handlers->spy_offset);
++ struct iw_spy_data * spydata = get_spydata(dev);
+ struct iw_thrspy * threshold = (struct iw_thrspy *) extra;
+
++ /* Make sure driver is not buggy or using the old API */
++ if(!spydata)
++ return -EOPNOTSUPP;
++
+ /* Just do it */
+ memcpy(&(spydata->spy_thr_low), &(threshold->low),
+ 2 * sizeof(struct iw_quality));
+@@ -1160,9 +1245,6 @@ int iw_handler_set_thrspy(struct net_dev
+ #endif /* WE_SPY_DEBUG */
+
+ return 0;
+-#else /* IW_WIRELESS_THRSPY */
+- return -EOPNOTSUPP;
+-#endif /* IW_WIRELESS_THRSPY */
+ }
+
+ /*------------------------------------------------------------------*/
+@@ -1174,22 +1256,20 @@ int iw_handler_get_thrspy(struct net_dev
+ union iwreq_data * wrqu,
+ char * extra)
+ {
+-#ifdef IW_WIRELESS_THRSPY
+- struct iw_spy_data * spydata = (dev->priv +
+- dev->wireless_handlers->spy_offset);
++ struct iw_spy_data * spydata = get_spydata(dev);
+ struct iw_thrspy * threshold = (struct iw_thrspy *) extra;
+
++ /* Make sure driver is not buggy or using the old API */
++ if(!spydata)
++ return -EOPNOTSUPP;
++
+ /* Just do it */
+ memcpy(&(threshold->low), &(spydata->spy_thr_low),
+ 2 * sizeof(struct iw_quality));
+
+ return 0;
+-#else /* IW_WIRELESS_THRSPY */
+- return -EOPNOTSUPP;
+-#endif /* IW_WIRELESS_THRSPY */
+ }
+
+-#ifdef IW_WIRELESS_THRSPY
+ /*------------------------------------------------------------------*/
+ /*
+ * Prepare and send a Spy Threshold event
+@@ -1227,7 +1307,6 @@ static void iw_send_thrspy_event(struct
+ /* Send event to user space */
+ wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold);
+ }
+-#endif /* IW_WIRELESS_THRSPY */
+
+ /* ---------------------------------------------------------------- */
+ /*
+@@ -1240,12 +1319,14 @@ void wireless_spy_update(struct net_devi
+ unsigned char * address,
+ struct iw_quality * wstats)
+ {
+-#ifdef IW_WIRELESS_SPY
+- struct iw_spy_data * spydata = (dev->priv +
+- dev->wireless_handlers->spy_offset);
++ struct iw_spy_data * spydata = get_spydata(dev);
+ int i;
+ int match = -1;
+
++ /* Make sure driver is not buggy or using the old API */
++ if(!spydata)
++ return;
++
+ #ifdef WE_SPY_DEBUG
+ printk(KERN_DEBUG "wireless_spy_update() : offset %ld, spydata %p, address %02X:%02X:%02X:%02X:%02X:%02X\n", dev->wireless_handlers->spy_offset, spydata, address[0], address[1], address[2], address[3], address[4], address[5]);
+ #endif /* WE_SPY_DEBUG */
+@@ -1257,7 +1338,7 @@ void wireless_spy_update(struct net_devi
+ sizeof(struct iw_quality));
+ match = i;
+ }
+-#ifdef IW_WIRELESS_THRSPY
++
+ /* Generate an event if we cross the spy threshold.
+ * To avoid event storms, we have a simple hysteresis : we generate
+ * event only when we go under the low threshold or above the
+@@ -1277,6 +1358,4 @@ void wireless_spy_update(struct net_devi
+ }
+ }
+ }
+-#endif /* IW_WIRELESS_THRSPY */
+-#endif /* IW_WIRELESS_SPY */
+ }
diff --git a/packages/linux/files/iw_handlers.w13-5.diff b/packages/linux/files/iw_handlers.w13-5.diff
index e69de29bb2..a27a7654a9 100644
--- a/packages/linux/files/iw_handlers.w13-5.diff
+++ b/packages/linux/files/iw_handlers.w13-5.diff
@@ -0,0 +1,1513 @@
+diff -u -p -r --new-file linux/include/linux-w12/netdevice.h linux/include/linux/netdevice.h
+--- linux/include/linux-w12/netdevice.h Thu Nov 22 11:47:09 2001
++++ linux/include/linux/netdevice.h Thu Jan 17 12:00:39 2002
+@@ -278,6 +278,10 @@ struct net_device
+ struct net_device_stats* (*get_stats)(struct net_device *dev);
+ struct iw_statistics* (*get_wireless_stats)(struct net_device *dev);
+
++ /* List of functions to handle Wireless Extensions (instead of ioctl).
++ * See <net/iw_handler.h> for details. Jean II */
++ struct iw_handler_def * wireless_handlers;
++
+ /*
+ * This marks the end of the "visible" part of the structure. All
+ * fields hereafter are internal to the system, and may change at
+diff -u -p -r --new-file linux/include/linux-w12/wireless.h linux/include/linux/wireless.h
+--- linux/include/linux-w12/wireless.h Thu Nov 22 11:47:12 2001
++++ linux/include/linux/wireless.h Thu Jan 17 12:04:08 2002
+@@ -1,9 +1,10 @@
+ /*
+ * This file define a set of standard wireless extensions
+ *
+- * Version : 12 5.10.01
++ * Version : 13 6.12.01
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
++ * Copyright (c) 1997-2001 Jean Tourrilhes, All Rights Reserved.
+ */
+
+ #ifndef _LINUX_WIRELESS_H
+@@ -11,6 +12,8 @@
+
+ /************************** DOCUMENTATION **************************/
+ /*
++ * Initial APIs (1996 -> onward) :
++ * -----------------------------
+ * Basically, the wireless extensions are for now a set of standard ioctl
+ * call + /proc/net/wireless
+ *
+@@ -27,16 +30,27 @@
+ * We have the list of command plus a structure descibing the
+ * data exchanged...
+ * Note that to add these ioctl, I was obliged to modify :
+- * net/core/dev.c (two place + add include)
+- * net/ipv4/af_inet.c (one place + add include)
++ * # net/core/dev.c (two place + add include)
++ * # net/ipv4/af_inet.c (one place + add include)
+ *
+ * /proc/net/wireless is a copy of /proc/net/dev.
+ * We have a structure for data passed from the driver to /proc/net/wireless
+ * Too add this, I've modified :
+- * net/core/dev.c (two other places)
+- * include/linux/netdevice.h (one place)
+- * include/linux/proc_fs.h (one place)
++ * # net/core/dev.c (two other places)
++ * # include/linux/netdevice.h (one place)
++ * # include/linux/proc_fs.h (one place)
++ *
++ * New driver API (2001 -> onward) :
++ * -------------------------------
++ * This file is only concerned with the user space API and common definitions.
++ * The new driver API is defined and documented in :
++ * # include/net/iw_handler.h
+ *
++ * Note as well that /proc/net/wireless implementation has now moved in :
++ * # include/linux/wireless.c
++ *
++ * Other comments :
++ * --------------
+ * Do not add here things that are redundant with other mechanisms
+ * (drivers init, ifconfig, /proc/net/dev, ...) and with are not
+ * wireless specific.
+@@ -54,16 +68,14 @@
+ #include <linux/socket.h> /* for "struct sockaddr" et al */
+ #include <linux/if.h> /* for IFNAMSIZ and co... */
+
+-/**************************** CONSTANTS ****************************/
+-
+-/* --------------------------- VERSION --------------------------- */
++/***************************** VERSION *****************************/
+ /*
+ * This constant is used to know the availability of the wireless
+ * extensions and to know which version of wireless extensions it is
+ * (there is some stuff that will be added in the future...)
+ * I just plan to increment with each new version.
+ */
+-#define WIRELESS_EXT 12
++#define WIRELESS_EXT 13
+
+ /*
+ * Changes :
+@@ -123,12 +135,20 @@
+ * - Add DEV PRIVATE IOCTL to avoid collisions in SIOCDEVPRIVATE space
+ * - Add new statistics (frag, retry, beacon)
+ * - Add average quality (for user space calibration)
++ *
++ * V12 to V13
++ * ----------
++ * - Document creation of new driver API.
++ * - Extract union iwreq_data from struct iwreq (for new driver API).
++ * - Rename SIOCSIWNAME as SIOCSIWCOMMIT
+ */
+
++/**************************** CONSTANTS ****************************/
++
+ /* -------------------------- IOCTL LIST -------------------------- */
+
+ /* Basic operations */
+-#define SIOCSIWNAME 0x8B00 /* Unused */
++#define SIOCSIWCOMMIT 0x8B00 /* Commit pending changes to driver */
+ #define SIOCGIWNAME 0x8B01 /* get name == wireless protocol */
+ #define SIOCSIWNWID 0x8B02 /* set network id (the cell) */
+ #define SIOCGIWNWID 0x8B03 /* get network id */
+@@ -414,13 +434,49 @@ struct iw_statistics
+
+ /* ------------------------ IOCTL REQUEST ------------------------ */
+ /*
++ * This structure defines the payload of an ioctl, and is used
++ * below.
++ *
++ * Note that this structure should fit on the memory footprint
++ * of iwreq (which is the same as ifreq), which mean a max size of
++ * 16 octets = 128 bits. Warning, pointers might be 64 bits wide...
++ * You should check this when increasing the structures defined
++ * above in this file...
++ */
++union iwreq_data
++{
++ /* Config - generic */
++ char name[IFNAMSIZ];
++ /* Name : used to verify the presence of wireless extensions.
++ * Name of the protocol/provider... */
++
++ struct iw_point essid; /* Extended network name */
++ struct iw_param nwid; /* network id (or domain - the cell) */
++ struct iw_freq freq; /* frequency or channel :
++ * 0-1000 = channel
++ * > 1000 = frequency in Hz */
++
++ struct iw_param sens; /* signal level threshold */
++ struct iw_param bitrate; /* default bit rate */
++ struct iw_param txpower; /* default transmit power */
++ struct iw_param rts; /* RTS threshold threshold */
++ struct iw_param frag; /* Fragmentation threshold */
++ __u32 mode; /* Operation mode */
++ struct iw_param retry; /* Retry limits & lifetime */
++
++ struct iw_point encoding; /* Encoding stuff : tokens */
++ struct iw_param power; /* PM duration/timeout */
++
++ struct sockaddr ap_addr; /* Access point address */
++
++ struct iw_point data; /* Other large parameters */
++};
++
++/*
+ * The structure to exchange data for ioctl.
+ * This structure is the same as 'struct ifreq', but (re)defined for
+ * convenience...
+- *
+- * Note that it should fit on the same memory footprint !
+- * You should check this when increasing the above structures (16 octets)
+- * 16 octets = 128 bits. Warning, pointers might be 64 bits wide...
++ * Do I need to remind you about structure size (32 octets) ?
+ */
+ struct iwreq
+ {
+@@ -429,35 +485,8 @@ struct iwreq
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "eth0" */
+ } ifr_ifrn;
+
+- /* Data part */
+- union
+- {
+- /* Config - generic */
+- char name[IFNAMSIZ];
+- /* Name : used to verify the presence of wireless extensions.
+- * Name of the protocol/provider... */
+-
+- struct iw_point essid; /* Extended network name */
+- struct iw_param nwid; /* network id (or domain - the cell) */
+- struct iw_freq freq; /* frequency or channel :
+- * 0-1000 = channel
+- * > 1000 = frequency in Hz */
+-
+- struct iw_param sens; /* signal level threshold */
+- struct iw_param bitrate; /* default bit rate */
+- struct iw_param txpower; /* default transmit power */
+- struct iw_param rts; /* RTS threshold threshold */
+- struct iw_param frag; /* Fragmentation threshold */
+- __u32 mode; /* Operation mode */
+- struct iw_param retry; /* Retry limits & lifetime */
+-
+- struct iw_point encoding; /* Encoding stuff : tokens */
+- struct iw_param power; /* PM duration/timeout */
+-
+- struct sockaddr ap_addr; /* Access point address */
+-
+- struct iw_point data; /* Other large parameters */
+- } u;
++ /* Data part (defined just above) */
++ union iwreq_data u;
+ };
+
+ /* -------------------------- IOCTL DATA -------------------------- */
+diff -u -p -r --new-file linux/include/net-w12/iw_handler.h linux/include/net/iw_handler.h
+--- linux/include/net-w12/iw_handler.h Wed Dec 31 16:00:00 1969
++++ linux/include/net/iw_handler.h Thu Jan 17 12:16:46 2002
+@@ -0,0 +1,374 @@
++/*
++ * This file define the new driver API for Wireless Extensions
++ *
++ * Version : 2 6.12.01
++ *
++ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
++ * Copyright (c) 2001 Jean Tourrilhes, All Rights Reserved.
++ */
++
++#ifndef _IW_HANDLER_H
++#define _IW_HANDLER_H
++
++/************************** DOCUMENTATION **************************/
++/*
++ * Initial driver API (1996 -> onward) :
++ * -----------------------------------
++ * The initial API just sends the IOCTL request received from user space
++ * to the driver (via the driver ioctl handler). The driver has to
++ * handle all the rest...
++ *
++ * The initial API also defines a specific handler in struct net_device
++ * to handle wireless statistics.
++ *
++ * The initial APIs served us well and has proven a reasonably good design.
++ * However, there is a few shortcommings :
++ * o No events, everything is a request to the driver.
++ * o Large ioctl function in driver with gigantic switch statement
++ * (i.e. spaghetti code).
++ * o Driver has to mess up with copy_to/from_user, and in many cases
++ * does it unproperly. Common mistakes are :
++ * * buffer overflows (no checks or off by one checks)
++ * * call copy_to/from_user with irq disabled
++ * o The user space interface is tied to ioctl because of the use
++ * copy_to/from_user.
++ *
++ * New driver API (2001 -> onward) :
++ * -------------------------------
++ * The new driver API is just a bunch of standard functions (handlers),
++ * each handling a specific Wireless Extension. The driver just export
++ * the list of handler it supports, and those will be called apropriately.
++ *
++ * I tried to keep the main advantage of the previous API (simplicity,
++ * efficiency and light weight), and also I provide a good dose of backward
++ * compatibility (most structures are the same, driver can use both API
++ * simultaneously, ...).
++ * Hopefully, I've also addressed the shortcomming of the initial API.
++ *
++ * The advantage of the new API are :
++ * o Handling of Extensions in driver broken in small contained functions
++ * o Tighter checks of ioctl before calling the driver
++ * o Flexible commit strategy (at least, the start of it)
++ * o Backward compatibility (can be mixed with old API)
++ * o Driver doesn't have to worry about memory and user-space issues
++ * The last point is important for the following reasons :
++ * o You are now able to call the new driver API from any API you
++ * want (including from within other parts of the kernel).
++ * o Common mistakes are avoided (buffer overflow, user space copy
++ * with irq disabled and so on).
++ *
++ * The Drawback of the new API are :
++ * o bloat (especially kernel)
++ * o need to migrate existing drivers to new API
++ * My initial testing shows that the new API adds around 3kB to the kernel
++ * and save between 0 and 5kB from a typical driver.
++ * Also, as all structures and data types are unchanged, the migration is
++ * quite straightforward (but tedious).
++ *
++ * ---
++ *
++ * The new driver API is defined below in this file. User space should
++ * not be aware of what's happening down there...
++ *
++ * A new kernel wrapper is in charge of validating the IOCTLs and calling
++ * the appropriate driver handler. This is implemented in :
++ * # net/core/wireless.c
++ *
++ * The driver export the list of handlers in :
++ * # include/linux/netdevice.h (one place)
++ *
++ * The new driver API is available for WIRELESS_EXT >= 13.
++ * Good luck with migration to the new API ;-)
++ */
++
++/* ---------------------- THE IMPLEMENTATION ---------------------- */
++/*
++ * Some of the choice I've made are pretty controversials. Defining an
++ * API is very much weighting compromises. This goes into some of the
++ * details and the thinking behind the implementation.
++ *
++ * Implementation goals :
++ * --------------------
++ * The implementation goals were as follow :
++ * o Obvious : you should not need a PhD to understand what's happening,
++ * the benefit is easier maintainance.
++ * o Flexible : it should accomodate a wide variety of driver
++ * implementations and be as flexible as the old API.
++ * o Lean : it should be efficient memory wise to minimise the impact
++ * on kernel footprint.
++ * o Transparent to user space : the large number of user space
++ * applications that use Wireless Extensions should not need
++ * any modifications.
++ *
++ * Array of functions versus Struct of functions
++ * ---------------------------------------------
++ * 1) Having an array of functions allow the kernel code to access the
++ * handler in a single lookup, which is much more efficient (think hash
++ * table here).
++ * 2) The only drawback is that driver writer may put their handler in
++ * the wrong slot. This is trivial to test (I set the frequency, the
++ * bitrate changes). Once the handler is in the proper slot, it will be
++ * there forever, because the array is only extended at the end.
++ * 3) Backward/forward compatibility : adding new handler just require
++ * extending the array, so you can put newer driver in older kernel
++ * without having to patch the kernel code (and vice versa).
++ *
++ * All handler are of the same generic type
++ * ----------------------------------------
++ * That's a feature !!!
++ * 1) Having a generic handler allow to have generic code, which is more
++ * efficient. If each of the handler was individually typed I would need
++ * to add a big switch in the kernel (== more bloat). This solution is
++ * more scalable, adding new Wireless Extensions doesn't add new code.
++ * 2) You can use the same handler in different slots of the array. For
++ * hardware, it may be more efficient or logical to handle multiple
++ * Wireless Extensions with a single function, and the API allow you to
++ * do that. (An example would be a single record on the card to control
++ * both bitrate and frequency, the handler would read the old record,
++ * modify it according to info->cmd and rewrite it).
++ *
++ * Functions prototype uses union iwreq_data
++ * -----------------------------------------
++ * Some would have prefered functions defined this way :
++ * static int mydriver_ioctl_setrate(struct net_device *dev,
++ * long rate, int auto)
++ * 1) The kernel code doesn't "validate" the content of iwreq_data, and
++ * can't do it (different hardware may have different notion of what a
++ * valid frequency is), so we don't pretend that we do it.
++ * 2) The above form is not extendable. If I want to add a flag (for
++ * example to distinguish setting max rate and basic rate), I would
++ * break the prototype. Using iwreq_data is more flexible.
++ * 3) Also, the above form is not generic (see above).
++ * 4) I don't expect driver developper using the wrong field of the
++ * union (Doh !), so static typechecking doesn't add much value.
++ * 5) Lastly, you can skip the union by doing :
++ * static int mydriver_ioctl_setrate(struct net_device *dev,
++ * struct iw_request_info *info,
++ * struct iw_param *rrq,
++ * char *extra)
++ * And then adding the handler in the array like this :
++ * (iw_handler) mydriver_ioctl_setrate, // SIOCSIWRATE
++ *
++ * Using functions and not a registry
++ * ----------------------------------
++ * Another implementation option would have been for every instance to
++ * define a registry (a struct containing all the Wireless Extensions)
++ * and only have a function to commit the registry to the hardware.
++ * 1) This approach can be emulated by the current code, but not
++ * vice versa.
++ * 2) Some drivers don't keep any configuration in the driver, for them
++ * adding such a registry would be a significant bloat.
++ * 3) The code to translate from Wireless Extension to native format is
++ * needed anyway, so it would not reduce significantely the amount of code.
++ * 4) The current approach only selectively translate Wireless Extensions
++ * to native format and only selectively set, whereas the registry approach
++ * would require to translate all WE and set all parameters for any single
++ * change.
++ * 5) For many Wireless Extensions, the GET operation return the current
++ * dynamic value, not the value that was set.
++ *
++ * This header is <net/iw_handler.h>
++ * ---------------------------------
++ * 1) This header is kernel space only and should not be exported to
++ * user space. Headers in "include/linux/" are exported, headers in
++ * "include/net/" are not.
++ *
++ * Mixed 32/64 bit issues
++ * ----------------------
++ * The Wireless Extensions are designed to be 64 bit clean, by using only
++ * datatypes with explicit storage size.
++ * There are some issues related to kernel and user space using different
++ * memory model, and in particular 64bit kernel with 32bit user space.
++ * The problem is related to struct iw_point, that contains a pointer
++ * that *may* need to be translated.
++ * This is quite messy. The new API doesn't solve this problem (it can't),
++ * but is a step in the right direction :
++ * 1) Meta data about each ioctl is easily available, so we know what type
++ * of translation is needed.
++ * 2) The move of data between kernel and user space is only done in a single
++ * place in the kernel, so adding specific hooks in there is possible.
++ * 3) In the long term, it allows to move away from using ioctl as the
++ * user space API.
++ *
++ * So many comments and so few code
++ * --------------------------------
++ * That's a feature. Comments won't bloat the resulting kernel binary.
++ */
++
++/***************************** INCLUDES *****************************/
++
++#include <linux/wireless.h> /* IOCTL user space API */
++
++/***************************** VERSION *****************************/
++/*
++ * This constant is used to know which version of the driver API is
++ * available. Hopefully, this will be pretty stable and no changes
++ * will be needed...
++ * I just plan to increment with each new version.
++ */
++#define IW_HANDLER_VERSION 2
++
++/**************************** CONSTANTS ****************************/
++
++/* Special error message for the driver to indicate that we
++ * should do a commit after return from the iw_handler */
++#define EIWCOMMIT EINPROGRESS
++
++/* Flags available in struct iw_request_info */
++#define IW_REQUEST_FLAG_NONE 0x0000 /* No flag so far */
++
++/* Type of headers we know about (basically union iwreq_data) */
++#define IW_HEADER_TYPE_NULL 0 /* Not available */
++#define IW_HEADER_TYPE_CHAR 2 /* char [IFNAMSIZ] */
++#define IW_HEADER_TYPE_UINT 4 /* __u32 */
++#define IW_HEADER_TYPE_FREQ 5 /* struct iw_freq */
++#define IW_HEADER_TYPE_POINT 6 /* struct iw_point */
++#define IW_HEADER_TYPE_PARAM 7 /* struct iw_param */
++#define IW_HEADER_TYPE_ADDR 8 /* struct sockaddr */
++
++/* Handling flags */
++/* Most are not implemented. I just use them as a reminder of some
++ * cool features we might need one day ;-) */
++#define IW_DESCR_FLAG_NONE 0x0000 /* Obvious */
++/* Wrapper level flags */
++#define IW_DESCR_FLAG_DUMP 0x0001 /* Not part of the dump command */
++#define IW_DESCR_FLAG_EVENT 0x0002 /* Generate an event on SET */
++#define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET request is ROOT only */
++/* Driver level flags */
++#define IW_DESCR_FLAG_WAIT 0x0100 /* Wait for driver event */
++
++/****************************** TYPES ******************************/
++
++/* ----------------------- WIRELESS HANDLER ----------------------- */
++/*
++ * A wireless handler is just a standard function, that looks like the
++ * ioctl handler.
++ * We also define there how a handler list look like... As the Wireless
++ * Extension space is quite dense, we use a simple array, which is faster
++ * (that's the perfect hash table ;-).
++ */
++
++/*
++ * Meta data about the request passed to the iw_handler.
++ * Most handlers can safely ignore what's in there.
++ * The 'cmd' field might come handy if you want to use the same handler
++ * for multiple command...
++ * This struct is also my long term insurance. I can add new fields here
++ * without breaking the prototype of iw_handler...
++ */
++struct iw_request_info
++{
++ __u16 cmd; /* Wireless Extension command */
++ __u16 flags; /* More to come ;-) */
++};
++
++/*
++ * This is how a function handling a Wireless Extension should look
++ * like (both get and set, standard and private).
++ */
++typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
++ union iwreq_data *wrqu, char *extra);
++
++/*
++ * This define all the handler that the driver export.
++ * As you need only one per driver type, please use a static const
++ * shared by all driver instances... Same for the members...
++ * This will be linked from net_device in <linux/netdevice.h>
++ */
++struct iw_handler_def
++{
++ /* Number of handlers defined (more precisely, index of the
++ * last defined handler + 1) */
++ __u16 num_standard;
++ __u16 num_private;
++ /* Number of private arg description */
++ __u16 num_private_args;
++
++ /* Array of handlers for standard ioctls
++ * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWNAME]
++ */
++ iw_handler * standard;
++
++ /* Array of handlers for private ioctls
++ * Will call dev->wireless_handlers->private[ioctl - SIOCIWFIRSTPRIV]
++ */
++ iw_handler * private;
++
++ /* Arguments of private handler. This one is just a list, so you
++ * can put it in any order you want and should not leave holes...
++ * We will automatically export that to user space... */
++ struct iw_priv_args * private_args;
++
++ /* In the long term, get_wireless_stats will move from
++ * 'struct net_device' to here, to minimise bloat. */
++};
++
++/* ----------------------- WIRELESS EVENTS ----------------------- */
++/*
++ * Currently we don't support events, so let's just plan for the
++ * future...
++ */
++
++/*
++ * A Wireless Event.
++ */
++// How do we define short header ? We don't want a flag on length.
++// Probably a flag on event ? Highest bit to zero...
++struct iw_event
++{
++ __u16 length; /* Lenght of this stuff */
++ __u16 event; /* Wireless IOCTL */
++ union iwreq_data header; /* IOCTL fixed payload */
++ char extra[0]; /* Optional IOCTL data */
++};
++
++/* ---------------------- IOCTL DESCRIPTION ---------------------- */
++/*
++ * One of the main goal of the new interface is to deal entirely with
++ * user space/kernel space memory move.
++ * For that, we need to know :
++ * o if iwreq is a pointer or contain the full data
++ * o what is the size of the data to copy
++ *
++ * For private IOCTLs, we use the same rules as used by iwpriv and
++ * defined in struct iw_priv_args.
++ *
++ * For standard IOCTLs, things are quite different and we need to
++ * use the stuctures below. Actually, this struct is also more
++ * efficient, but that's another story...
++ */
++
++/*
++ * Describe how a standard IOCTL looks like.
++ */
++struct iw_ioctl_description
++{
++ __u8 header_type; /* NULL, iw_point or other */
++ __u8 token_type; /* Future */
++ __u16 token_size; /* Granularity of payload */
++ __u16 min_tokens; /* Min acceptable token number */
++ __u16 max_tokens; /* Max acceptable token number */
++ __u32 flags; /* Special handling of the request */
++};
++
++/* Need to think of short header translation table. Later. */
++
++/**************************** PROTOTYPES ****************************/
++/*
++ * Functions part of the Wireless Extensions (defined in net/core/wireless.c).
++ * Those may be called only within the kernel.
++ */
++
++/* First : function strictly used inside the kernel */
++
++/* Handle /proc/net/wireless, called in net/code/dev.c */
++extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
++ int length);
++
++/* Handle IOCTLs, called in net/code/dev.c */
++extern int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd);
++
++/* Second : functions that may be called by driver modules */
++/* None yet */
++
++#endif /* _LINUX_WIRELESS_H */
+diff -u -p -r --new-file linux/net/core-w12/Makefile linux/net/core/Makefile
+--- linux/net/core-w12/Makefile Tue Oct 30 15:08:12 2001
++++ linux/net/core/Makefile Thu Jan 17 11:06:07 2002
+@@ -26,5 +26,8 @@ obj-$(CONFIG_NET) += dev.o dev_mcast.o d
+ obj-$(CONFIG_NETFILTER) += netfilter.o
+ obj-$(CONFIG_NET_DIVERT) += dv.o
+ obj-$(CONFIG_NET_PROFILE) += profile.o
++obj-$(CONFIG_NET_RADIO) += wireless.o
++# Ugly. I wish all wireless drivers were moved in drivers/net/wireless
++obj-$(CONFIG_NET_PCMCIA_RADIO) += wireless.o
+
+ include $(TOPDIR)/Rules.make
+diff -u -p -r --new-file linux/net/core-w12/dev.c linux/net/core/dev.c
+--- linux/net/core-w12/dev.c Wed Nov 7 14:39:36 2001
++++ linux/net/core/dev.c Thu Jan 17 11:06:07 2002
+@@ -102,6 +102,7 @@
+ #include <linux/module.h>
+ #if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
+ #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
++#include <net/iw_handler.h>
+ #endif /* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */
+ #ifdef CONFIG_PLIP
+ extern int plip_init(void);
+@@ -1796,122 +1797,6 @@ static int dev_proc_stats(char *buffer,
+ #endif /* CONFIG_PROC_FS */
+
+
+-#ifdef WIRELESS_EXT
+-#ifdef CONFIG_PROC_FS
+-
+-/*
+- * Print one entry of /proc/net/wireless
+- * This is a clone of /proc/net/dev (just above)
+- */
+-static int sprintf_wireless_stats(char *buffer, struct net_device *dev)
+-{
+- /* Get stats from the driver */
+- struct iw_statistics *stats = (dev->get_wireless_stats ?
+- dev->get_wireless_stats(dev) :
+- (struct iw_statistics *) NULL);
+- int size;
+-
+- if (stats != (struct iw_statistics *) NULL) {
+- size = sprintf(buffer,
+- "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d %6d %6d %6d\n",
+- dev->name,
+- stats->status,
+- stats->qual.qual,
+- stats->qual.updated & 1 ? '.' : ' ',
+- stats->qual.level,
+- stats->qual.updated & 2 ? '.' : ' ',
+- stats->qual.noise,
+- stats->qual.updated & 4 ? '.' : ' ',
+- stats->discard.nwid,
+- stats->discard.code,
+- stats->discard.fragment,
+- stats->discard.retries,
+- stats->discard.misc,
+- stats->miss.beacon);
+- stats->qual.updated = 0;
+- }
+- else
+- size = 0;
+-
+- return size;
+-}
+-
+-/*
+- * Print info for /proc/net/wireless (print all entries)
+- * This is a clone of /proc/net/dev (just above)
+- */
+-static int dev_get_wireless_info(char * buffer, char **start, off_t offset,
+- int length)
+-{
+- int len = 0;
+- off_t begin = 0;
+- off_t pos = 0;
+- int size;
+-
+- struct net_device * dev;
+-
+- size = sprintf(buffer,
+- "Inter-| sta-| Quality | Discarded packets | Missed\n"
+- " face | tus | link level noise | nwid crypt frag retry misc | beacon\n"
+- );
+-
+- pos += size;
+- len += size;
+-
+- read_lock(&dev_base_lock);
+- for (dev = dev_base; dev != NULL; dev = dev->next) {
+- size = sprintf_wireless_stats(buffer + len, dev);
+- len += size;
+- pos = begin + len;
+-
+- if (pos < offset) {
+- len = 0;
+- begin = pos;
+- }
+- if (pos > offset + length)
+- break;
+- }
+- read_unlock(&dev_base_lock);
+-
+- *start = buffer + (offset - begin); /* Start of wanted data */
+- len -= (offset - begin); /* Start slop */
+- if (len > length)
+- len = length; /* Ending slop */
+- if (len < 0)
+- len = 0;
+-
+- return len;
+-}
+-#endif /* CONFIG_PROC_FS */
+-
+-/*
+- * Allow programatic access to /proc/net/wireless even if /proc
+- * doesn't exist... Also more efficient...
+- */
+-static inline int dev_iwstats(struct net_device *dev, struct ifreq *ifr)
+-{
+- /* Get stats from the driver */
+- struct iw_statistics *stats = (dev->get_wireless_stats ?
+- dev->get_wireless_stats(dev) :
+- (struct iw_statistics *) NULL);
+-
+- if (stats != (struct iw_statistics *) NULL) {
+- struct iwreq * wrq = (struct iwreq *)ifr;
+-
+- /* Copy statistics to the user buffer */
+- if(copy_to_user(wrq->u.data.pointer, stats,
+- sizeof(struct iw_statistics)))
+- return -EFAULT;
+-
+- /* Check if we need to clear the update flag */
+- if(wrq->u.data.flags != 0)
+- stats->qual.updated = 0;
+- return(0);
+- } else
+- return -EOPNOTSUPP;
+-}
+-#endif /* WIRELESS_EXT */
+-
+ /**
+ * netdev_set_master - set up master/slave pair
+ * @slave: slave device
+@@ -2209,11 +2094,6 @@ static int dev_ifsioc(struct ifreq *ifr,
+ notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
+ return 0;
+
+-#ifdef WIRELESS_EXT
+- case SIOCGIWSTATS:
+- return dev_iwstats(dev, ifr);
+-#endif /* WIRELESS_EXT */
+-
+ /*
+ * Unknown or private ioctl
+ */
+@@ -2239,17 +2119,6 @@ static int dev_ifsioc(struct ifreq *ifr,
+ return -EOPNOTSUPP;
+ }
+
+-#ifdef WIRELESS_EXT
+- if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+- if (dev->do_ioctl) {
+- if (!netif_device_present(dev))
+- return -ENODEV;
+- return dev->do_ioctl(dev, ifr, cmd);
+- }
+- return -EOPNOTSUPP;
+- }
+-#endif /* WIRELESS_EXT */
+-
+ }
+ return -EINVAL;
+ }
+@@ -2431,7 +2300,8 @@ int dev_ioctl(unsigned int cmd, void *ar
+ }
+ dev_load(ifr.ifr_name);
+ rtnl_lock();
+- ret = dev_ifsioc(&ifr, cmd);
++ /* Follow me in net/core/wireless.c */
++ ret = wireless_process_ioctl(&ifr, cmd);
+ rtnl_unlock();
+ if (!ret && IW_IS_GET(cmd) &&
+ copy_to_user(arg, &ifr, sizeof(struct ifreq)))
+@@ -2856,6 +2726,7 @@ int __init net_dev_init(void)
+ proc_net_create("dev", 0, dev_get_info);
+ create_proc_read_entry("net/softnet_stat", 0, 0, dev_proc_stats, NULL);
+ #ifdef WIRELESS_EXT
++ /* Available in net/core/wireless.c */
+ proc_net_create("wireless", 0, dev_get_wireless_info);
+ #endif /* WIRELESS_EXT */
+ #endif /* CONFIG_PROC_FS */
+diff -u -p -r --new-file linux/net/core-w12/wireless.c linux/net/core/wireless.c
+--- linux/net/core-w12/wireless.c Wed Dec 31 16:00:00 1969
++++ linux/net/core/wireless.c Mon Jan 21 11:13:23 2002
+@@ -0,0 +1,733 @@
++/*
++ * This file implement the Wireless Extensions APIs.
++ *
++ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
++ * Copyright (c) 1997-2001 Jean Tourrilhes, All Rights Reserved.
++ *
++ * (As all part of the Linux kernel, this file is GPL)
++ */
++
++/************************** DOCUMENTATION **************************/
++/*
++ * API definition :
++ * --------------
++ * See <linux/wireless.h> for details of the APIs and the rest.
++ *
++ * History :
++ * -------
++ *
++ * v1 - 5.12.01 - Jean II
++ * o Created this file.
++ *
++ * v2 - 13.12.01 - Jean II
++ * o Move /proc/net/wireless stuff from net/core/dev.c to here
++ * o Make Wireless Extension IOCTLs go through here
++ * o Added iw_handler handling ;-)
++ * o Added standard ioctl description
++ * o Initial dumb commit strategy based on orinoco.c
++ */
++
++/***************************** INCLUDES *****************************/
++
++#include <asm/uaccess.h> /* copy_to_user() */
++#include <linux/config.h> /* Not needed ??? */
++#include <linux/types.h> /* off_t */
++#include <linux/netdevice.h> /* struct ifreq, dev_get_by_name() */
++
++#include <linux/wireless.h> /* Pretty obvious */
++#include <net/iw_handler.h> /* New driver API */
++
++/**************************** CONSTANTS ****************************/
++
++/* This will be turned on later on... */
++#undef WE_STRICT_WRITE /* Check write buffer size */
++
++/* Debuging stuff */
++#undef WE_IOCTL_DEBUG /* Debug IOCTL API */
++
++/************************* GLOBAL VARIABLES *************************/
++/*
++ * You should not use global variables, because or re-entrancy.
++ * On our case, it's only const, so it's OK...
++ */
++static const struct iw_ioctl_description standard_ioctl[] = {
++ /* SIOCSIWCOMMIT (internal) */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCGIWNAME */
++ { IW_HEADER_TYPE_CHAR, 0, 0, 0, 0, IW_DESCR_FLAG_DUMP},
++ /* SIOCSIWNWID */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, IW_DESCR_FLAG_EVENT},
++ /* SIOCGIWNWID */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, IW_DESCR_FLAG_DUMP},
++ /* SIOCSIWFREQ */
++ { IW_HEADER_TYPE_FREQ, 0, 0, 0, 0, IW_DESCR_FLAG_EVENT},
++ /* SIOCGIWFREQ */
++ { IW_HEADER_TYPE_FREQ, 0, 0, 0, 0, IW_DESCR_FLAG_DUMP},
++ /* SIOCSIWMODE */
++ { IW_HEADER_TYPE_UINT, 0, 0, 0, 0, IW_DESCR_FLAG_EVENT},
++ /* SIOCGIWMODE */
++ { IW_HEADER_TYPE_UINT, 0, 0, 0, 0, IW_DESCR_FLAG_DUMP},
++ /* SIOCSIWSENS */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCGIWSENS */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCSIWRANGE */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCGIWRANGE */
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, sizeof(struct iw_range), IW_DESCR_FLAG_DUMP},
++ /* SIOCSIWPRIV */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCGIWPRIV (handled directly by us) */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCSIWSTATS */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCGIWSTATS (handled directly by us) */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, IW_DESCR_FLAG_DUMP},
++ /* SIOCSIWSPY */
++ { IW_HEADER_TYPE_POINT, 0, sizeof(struct sockaddr), 0, IW_MAX_SPY, 0},
++ /* SIOCGIWSPY */
++ { IW_HEADER_TYPE_POINT, 0, (sizeof(struct sockaddr) + sizeof(struct iw_quality)), 0, IW_MAX_SPY, 0},
++ /* -- hole -- */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* -- hole -- */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCSIWAP */
++ { IW_HEADER_TYPE_ADDR, 0, 0, 0, 0, 0},
++ /* SIOCGIWAP */
++ { IW_HEADER_TYPE_ADDR, 0, 0, 0, 0, IW_DESCR_FLAG_DUMP},
++ /* -- hole -- */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCGIWAPLIST */
++ { IW_HEADER_TYPE_POINT, 0, (sizeof(struct sockaddr) + sizeof(struct iw_quality)), 0, IW_MAX_AP, 0},
++ /* -- hole -- */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* -- hole -- */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCSIWESSID */
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE, IW_DESCR_FLAG_EVENT},
++ /* SIOCGIWESSID */
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE, IW_DESCR_FLAG_DUMP},
++ /* SIOCSIWNICKN */
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE, 0},
++ /* SIOCGIWNICKN */
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE, 0},
++ /* -- hole -- */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* -- hole -- */
++ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCSIWRATE */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCGIWRATE */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCSIWRTS */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCGIWRTS */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCSIWFRAG */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCGIWFRAG */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCSIWTXPOW */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCGIWTXPOW */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCSIWRETRY */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCGIWRETRY */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCSIWENCODE */
++ { IW_HEADER_TYPE_POINT, 4, 1, 0, IW_ENCODING_TOKEN_MAX, IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT},
++ /* SIOCGIWENCODE */
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ENCODING_TOKEN_MAX, IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT},
++ /* SIOCSIWPOWER */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCGIWPOWER */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++};
++
++/* Size (in bytes) of the various private data types */
++char priv_type_size[] = { 0, 1, 1, 0, 4, 4, 0, 0 };
++
++/************************ COMMON SUBROUTINES ************************/
++/*
++ * Stuff that may be used in various place or doesn't fit in one
++ * of the section below.
++ */
++
++/* ---------------------------------------------------------------- */
++/*
++ * Return the driver handler associated with a specific Wireless Extension.
++ * Called from various place, so make sure it remains efficient.
++ */
++static inline iw_handler get_handler(struct net_device *dev,
++ unsigned int cmd)
++{
++ unsigned int index; /* MUST be unsigned */
++
++ /* Check if we have some wireless handlers defined */
++ if(dev->wireless_handlers == NULL)
++ return NULL;
++
++ /* Try as a standard command */
++ index = cmd - SIOCIWFIRST;
++ if(index < dev->wireless_handlers->num_standard)
++ return dev->wireless_handlers->standard[index];
++
++ /* Try as a private command */
++ index = cmd - SIOCIWFIRSTPRIV;
++ if(index < dev->wireless_handlers->num_private)
++ return dev->wireless_handlers->private[index];
++
++ /* Not found */
++ return NULL;
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Get statistics out of the driver
++ */
++static inline struct iw_statistics *get_wireless_stats(struct net_device *dev)
++{
++ return (dev->get_wireless_stats ?
++ dev->get_wireless_stats(dev) :
++ (struct iw_statistics *) NULL);
++ /* In the future, get_wireless_stats may move from 'struct net_device'
++ * to 'struct iw_handler_def', to de-bloat struct net_device.
++ * Definitely worth a thought... */
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Call the commit handler in the driver
++ * (if exist and if conditions are right)
++ *
++ * Note : our current commit strategy is currently pretty dumb,
++ * but we will be able to improve on that...
++ * The goal is to try to aggregate as many changes as possible
++ * before doing the commit. Drivers that will define a commit handler
++ * are usually those that need a reset after changing parameters, so
++ * we want to minimise the number of resets.
++ * A cool idea is to use a timer : at each "set" command, we re-set the
++ * timer, when the timer eventually fires, we call the driver.
++ * Hopefully, more on that later.
++ *
++ * Also, I'm waiting to see how many people will complain about the
++ * netif_running(dev) test. I'm open on that one...
++ * Hopefully, the driver will remember to do a commit in "open()" ;-)
++ */
++static inline int call_commit_handler(struct net_device * dev)
++{
++ if((netif_running(dev)) &&
++ (dev->wireless_handlers->standard[0] != NULL)) {
++ /* Call the commit handler on the driver */
++ return dev->wireless_handlers->standard[0](dev, NULL,
++ NULL, NULL);
++ } else
++ return 0; /* Command completed successfully */
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Number of private arguments
++ */
++static inline int get_priv_size(__u16 args)
++{
++ int num = args & IW_PRIV_SIZE_MASK;
++ int type = (args & IW_PRIV_TYPE_MASK) >> 12;
++
++ return num * priv_type_size[type];
++}
++
++
++/******************** /proc/net/wireless SUPPORT ********************/
++/*
++ * The /proc/net/wireless file is a human readable user-space interface
++ * exporting various wireless specific statistics from the wireless devices.
++ * This is the most popular part of the Wireless Extensions ;-)
++ *
++ * This interface is a pure clone of /proc/net/dev (in net/core/dev.c).
++ * The content of the file is basically the content of "struct iw_statistics".
++ */
++
++#ifdef CONFIG_PROC_FS
++
++/* ---------------------------------------------------------------- */
++/*
++ * Print one entry (line) of /proc/net/wireless
++ */
++static inline int sprintf_wireless_stats(char *buffer, struct net_device *dev)
++{
++ /* Get stats from the driver */
++ struct iw_statistics *stats;
++ int size;
++
++ stats = get_wireless_stats(dev);
++ if (stats != (struct iw_statistics *) NULL) {
++ size = sprintf(buffer,
++ "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d %6d %6d %6d\n",
++ dev->name,
++ stats->status,
++ stats->qual.qual,
++ stats->qual.updated & 1 ? '.' : ' ',
++ stats->qual.level,
++ stats->qual.updated & 2 ? '.' : ' ',
++ stats->qual.noise,
++ stats->qual.updated & 4 ? '.' : ' ',
++ stats->discard.nwid,
++ stats->discard.code,
++ stats->discard.fragment,
++ stats->discard.retries,
++ stats->discard.misc,
++ stats->miss.beacon);
++ stats->qual.updated = 0;
++ }
++ else
++ size = 0;
++
++ return size;
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Print info for /proc/net/wireless (print all entries)
++ */
++int dev_get_wireless_info(char * buffer, char **start, off_t offset,
++ int length)
++{
++ int len = 0;
++ off_t begin = 0;
++ off_t pos = 0;
++ int size;
++
++ struct net_device * dev;
++
++ size = sprintf(buffer,
++ "Inter-| sta-| Quality | Discarded packets | Missed\n"
++ " face | tus | link level noise | nwid crypt frag retry misc | beacon\n"
++ );
++
++ pos += size;
++ len += size;
++
++ read_lock(&dev_base_lock);
++ for (dev = dev_base; dev != NULL; dev = dev->next) {
++ size = sprintf_wireless_stats(buffer + len, dev);
++ len += size;
++ pos = begin + len;
++
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length)
++ break;
++ }
++ read_unlock(&dev_base_lock);
++
++ *start = buffer + (offset - begin); /* Start of wanted data */
++ len -= (offset - begin); /* Start slop */
++ if (len > length)
++ len = length; /* Ending slop */
++ if (len < 0)
++ len = 0;
++
++ return len;
++}
++#endif /* CONFIG_PROC_FS */
++
++/************************** IOCTL SUPPORT **************************/
++/*
++ * The original user space API to configure all those Wireless Extensions
++ * is through IOCTLs.
++ * In there, we check if we need to call the new driver API (iw_handler)
++ * or just call the driver ioctl handler.
++ */
++
++/* ---------------------------------------------------------------- */
++/*
++ * Allow programmatic access to /proc/net/wireless even if /proc
++ * doesn't exist... Also more efficient...
++ */
++static inline int dev_iwstats(struct net_device *dev, struct ifreq *ifr)
++{
++ /* Get stats from the driver */
++ struct iw_statistics *stats;
++
++ stats = get_wireless_stats(dev);
++ if (stats != (struct iw_statistics *) NULL) {
++ struct iwreq * wrq = (struct iwreq *)ifr;
++
++ /* Copy statistics to the user buffer */
++ if(copy_to_user(wrq->u.data.pointer, stats,
++ sizeof(struct iw_statistics)))
++ return -EFAULT;
++
++ /* Check if we need to clear the update flag */
++ if(wrq->u.data.flags != 0)
++ stats->qual.updated = 0;
++ return 0;
++ } else
++ return -EOPNOTSUPP;
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Export the driver private handler definition
++ * They will be picked up by tools like iwpriv...
++ */
++static inline int ioctl_export_private(struct net_device * dev,
++ struct ifreq * ifr)
++{
++ struct iwreq * iwr = (struct iwreq *) ifr;
++
++ /* Check if the driver has something to export */
++ if((dev->wireless_handlers->num_private_args == 0) ||
++ (dev->wireless_handlers->private_args == NULL))
++ return -EOPNOTSUPP;
++
++ /* Check NULL pointer */
++ if(iwr->u.data.pointer == NULL)
++ return -EFAULT;
++#ifdef WE_STRICT_WRITE
++ /* Check if there is enough buffer up there */
++ if(iwr->u.data.length < (SIOCIWLASTPRIV - SIOCIWFIRSTPRIV + 1))
++ return -E2BIG;
++#endif /* WE_STRICT_WRITE */
++
++ /* Set the number of available ioctls. */
++ iwr->u.data.length = dev->wireless_handlers->num_private_args;
++
++ /* Copy structure to the user buffer. */
++ if (copy_to_user(iwr->u.data.pointer,
++ dev->wireless_handlers->private_args,
++ sizeof(struct iw_priv_args) * iwr->u.data.length))
++ return -EFAULT;
++
++ return 0;
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Wrapper to call a standard Wireless Extension handler.
++ * We do various checks and also take care of moving data between
++ * user space and kernel space.
++ */
++static inline int ioctl_standard_call(struct net_device * dev,
++ struct ifreq * ifr,
++ unsigned int cmd,
++ iw_handler handler)
++{
++ struct iwreq * iwr = (struct iwreq *) ifr;
++ const struct iw_ioctl_description * descr;
++ struct iw_request_info info;
++ int ret = -EINVAL;
++
++ /* Get the description of the IOCTL */
++ descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
++
++#ifdef WE_IOCTL_DEBUG
++ printk(KERN_DEBUG "%s : Found standard handler for 0x%04X\n",
++ ifr->ifr_name, cmd);
++ printk(KERN_DEBUG "Header type : %d, token type : %d, token_size : %d, max_token : %d\n", descr->header_type, descr->token_type, descr->token_size, descr->max_tokens);
++#endif /* WE_IOCTL_DEBUG */
++
++ /* Prepare the call */
++ info.cmd = cmd;
++ info.flags = 0;
++
++ /* Check if we have a pointer to user space data or not */
++ if(descr->header_type != IW_HEADER_TYPE_POINT) {
++ /* No extra arguments. Trivial to handle */
++ ret = handler(dev, &info, &(iwr->u), NULL);
++ } else {
++ char * extra;
++ int err;
++
++ /* Check what user space is giving us */
++ if(IW_IS_SET(cmd)) {
++ /* Check NULL pointer */
++ if((iwr->u.data.pointer == NULL) &&
++ (iwr->u.data.length != 0))
++ return -EFAULT;
++		/* Check if the number of tokens fits within bounds */
++ if(iwr->u.data.length > descr->max_tokens)
++ return -E2BIG;
++ if(iwr->u.data.length < descr->min_tokens)
++ return -EINVAL;
++ } else {
++ /* Check NULL pointer */
++ if(iwr->u.data.pointer == NULL)
++ return -EFAULT;
++#ifdef WE_STRICT_WRITE
++ /* Check if there is enough buffer up there */
++ if(iwr->u.data.length < descr->max_tokens)
++ return -E2BIG;
++#endif /* WE_STRICT_WRITE */
++ }
++
++#ifdef WE_IOCTL_DEBUG
++ printk(KERN_DEBUG "Malloc %d bytes\n",
++ descr->max_tokens * descr->token_size);
++#endif /* WE_IOCTL_DEBUG */
++
++ /* Always allocate for max space. Easier, and won't last
++ * long... */
++ extra = kmalloc(descr->max_tokens * descr->token_size,
++ GFP_KERNEL);
++ if (extra == NULL) {
++ return -ENOMEM;
++ }
++
++ /* If it is a SET, get all the extra data in here */
++ if(IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
++ err = copy_from_user(extra, iwr->u.data.pointer,
++ iwr->u.data.length *
++ descr->token_size);
++ if (err) {
++ kfree(extra);
++ return -EFAULT;
++ }
++#ifdef WE_IOCTL_DEBUG
++ printk(KERN_DEBUG "Got %d bytes\n",
++ iwr->u.data.length * descr->token_size);
++#endif /* WE_IOCTL_DEBUG */
++ }
++
++ /* Call the handler */
++ ret = handler(dev, &info, &(iwr->u), extra);
++
++ /* If we have something to return to the user */
++ if (!ret && IW_IS_GET(cmd)) {
++ err = copy_to_user(iwr->u.data.pointer, extra,
++ iwr->u.data.length *
++ descr->token_size);
++ if (err)
++ ret = -EFAULT;
++#ifdef WE_IOCTL_DEBUG
++ printk(KERN_DEBUG "Wrote %d bytes\n",
++ iwr->u.data.length * descr->token_size);
++#endif /* WE_IOCTL_DEBUG */
++ }
++
++ /* Cleanup - I told you it wasn't that long ;-) */
++ kfree(extra);
++ }
++
++ /* Call commit handler if needed and defined */
++ if(ret == -EIWCOMMIT)
++ ret = call_commit_handler(dev);
++
++ /* Here, we will generate the appropriate event if needed */
++
++ return ret;
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Wrapper to call a private Wireless Extension handler.
++ * We do various checks and also take care of moving data between
++ * user space and kernel space.
++ * It's not as nice and slimline as the standard wrapper. The cause
++ * is struct iw_priv_args, which was not really designed for the
++ * job we are doing here.
++ *
++ * IMPORTANT : This function prevents setting and getting data on the
++ * same IOCTL and enforces the SET/GET convention. Not doing it would be
++ * far too hairy...
++ * If you need to set and get data at the same time, please don't use
++ * a iw_handler but process it in your ioctl handler (i.e. use the
++ * old driver API).
++ */
++static inline int ioctl_private_call(struct net_device * dev,
++ struct ifreq * ifr,
++ unsigned int cmd,
++ iw_handler handler)
++{
++ struct iwreq * iwr = (struct iwreq *) ifr;
++ struct iw_priv_args * descr = NULL;
++ struct iw_request_info info;
++ int extra_size = 0;
++ int i;
++ int ret = -EINVAL;
++
++ /* Get the description of the IOCTL */
++ for(i = 0; i < dev->wireless_handlers->num_private_args; i++)
++ if(cmd == dev->wireless_handlers->private_args[i].cmd) {
++ descr = &(dev->wireless_handlers->private_args[i]);
++ break;
++ }
++
++#ifdef WE_IOCTL_DEBUG
++ printk(KERN_DEBUG "%s : Found private handler for 0x%04X\n",
++ ifr->ifr_name, cmd);
++ if(descr) {
++ printk(KERN_DEBUG "Name %s, set %X, get %X\n",
++ descr->name, descr->set_args, descr->get_args);
++ }
++#endif /* WE_IOCTL_DEBUG */
++
++ /* Compute the size of the set/get arguments */
++ if(descr != NULL) {
++ if(IW_IS_SET(cmd)) {
++ /* Size of set arguments */
++ extra_size = get_priv_size(descr->set_args);
++
++			/* Does it fit in iwr ? */
++ if((descr->set_args & IW_PRIV_SIZE_FIXED) &&
++ (extra_size < IFNAMSIZ))
++ extra_size = 0;
++ } else {
++			/* Size of get arguments */
++ extra_size = get_priv_size(descr->get_args);
++
++			/* Does it fit in iwr ? */
++ if((descr->get_args & IW_PRIV_SIZE_FIXED) &&
++ (extra_size < IFNAMSIZ))
++ extra_size = 0;
++ }
++ }
++
++ /* Prepare the call */
++ info.cmd = cmd;
++ info.flags = 0;
++
++ /* Check if we have a pointer to user space data or not. */
++ if(extra_size == 0) {
++ /* No extra arguments. Trivial to handle */
++ ret = handler(dev, &info, &(iwr->u), (char *) &(iwr->u));
++ } else {
++ char * extra;
++ int err;
++
++ /* Check what user space is giving us */
++ if(IW_IS_SET(cmd)) {
++ /* Check NULL pointer */
++ if((iwr->u.data.pointer == NULL) &&
++ (iwr->u.data.length != 0))
++ return -EFAULT;
++
++			/* Does it fit within bounds ? */
++ if(iwr->u.data.length > (descr->set_args &
++ IW_PRIV_SIZE_MASK))
++ return -E2BIG;
++ } else {
++ /* Check NULL pointer */
++ if(iwr->u.data.pointer == NULL)
++ return -EFAULT;
++ }
++
++#ifdef WE_IOCTL_DEBUG
++ printk(KERN_DEBUG "Malloc %d bytes\n", extra_size);
++#endif /* WE_IOCTL_DEBUG */
++
++ /* Always allocate for max space. Easier, and won't last
++ * long... */
++ extra = kmalloc(extra_size, GFP_KERNEL);
++ if (extra == NULL) {
++ return -ENOMEM;
++ }
++
++ /* If it is a SET, get all the extra data in here */
++ if(IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
++ err = copy_from_user(extra, iwr->u.data.pointer,
++ extra_size);
++ if (err) {
++ kfree(extra);
++ return -EFAULT;
++ }
++#ifdef WE_IOCTL_DEBUG
++ printk(KERN_DEBUG "Got %d elem\n", iwr->u.data.length);
++#endif /* WE_IOCTL_DEBUG */
++ }
++
++ /* Call the handler */
++ ret = handler(dev, &info, &(iwr->u), extra);
++
++ /* If we have something to return to the user */
++ if (!ret && IW_IS_GET(cmd)) {
++ err = copy_to_user(iwr->u.data.pointer, extra,
++ extra_size);
++ if (err)
++ ret = -EFAULT;
++#ifdef WE_IOCTL_DEBUG
++ printk(KERN_DEBUG "Wrote %d elem\n",
++ iwr->u.data.length);
++#endif /* WE_IOCTL_DEBUG */
++ }
++
++ /* Cleanup - I told you it wasn't that long ;-) */
++ kfree(extra);
++ }
++
++
++ /* Call commit handler if needed and defined */
++ if(ret == -EIWCOMMIT)
++ ret = call_commit_handler(dev);
++
++ return ret;
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Main IOCTl dispatcher. Called from the main networking code
++ * (dev_ioctl() in net/core/dev.c).
++ * Check the type of IOCTL and call the appropriate wrapper...
++ */
++int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd)
++{
++ struct net_device *dev;
++ iw_handler handler;
++
++ /* Permissions are already checked in dev_ioctl() before calling us.
++ * The copy_to/from_user() of ifr is also dealt with in there */
++
++ /* Make sure the device exist */
++ if ((dev = __dev_get_by_name(ifr->ifr_name)) == NULL)
++ return -ENODEV;
++
++ /* A bunch of special cases, then the generic case...
++ * Note that 'cmd' is already filtered in dev_ioctl() with
++ * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */
++ switch(cmd)
++ {
++ case SIOCGIWSTATS:
++ /* Get Wireless Stats */
++ return dev_iwstats(dev, ifr);
++
++ case SIOCGIWPRIV:
++ /* Check if we have some wireless handlers defined */
++ if(dev->wireless_handlers != NULL) {
++ /* We export to user space the definition of
++ * the private handler ourselves */
++ return ioctl_export_private(dev, ifr);
++ }
++ // ## Fall-through for old API ##
++ default:
++ /* Generic IOCTL */
++ /* Basic check */
++ if (!netif_device_present(dev))
++ return -ENODEV;
++ /* New driver API : try to find the handler */
++ handler = get_handler(dev, cmd);
++ if(handler != NULL) {
++ /* Standard and private are not the same */
++ if(cmd < SIOCIWFIRSTPRIV)
++ return ioctl_standard_call(dev,
++ ifr,
++ cmd,
++ handler);
++ else
++ return ioctl_private_call(dev,
++ ifr,
++ cmd,
++ handler);
++ }
++ /* Old driver API : call driver ioctl handler */
++ if (dev->do_ioctl) {
++ return dev->do_ioctl(dev, ifr, cmd);
++ }
++ return -EOPNOTSUPP;
++ }
++ /* Not reached */
++ return -EINVAL;
++}
diff --git a/packages/linux/files/iw_handlers.w14-5.diff b/packages/linux/files/iw_handlers.w14-5.diff
index e69de29bb2..539b160068 100644
--- a/packages/linux/files/iw_handlers.w14-5.diff
+++ b/packages/linux/files/iw_handlers.w14-5.diff
@@ -0,0 +1,838 @@
+diff -u -p -r --new-file linux/include/linux-w13/rtnetlink.h linux/include/linux/rtnetlink.h
+--- linux/include/linux-w13/rtnetlink.h Thu Jun 6 14:44:08 2002
++++ linux/include/linux/rtnetlink.h Thu Jun 6 15:47:44 2002
+@@ -440,12 +440,14 @@ enum
+ #define IFLA_COST IFLA_COST
+ IFLA_PRIORITY,
+ #define IFLA_PRIORITY IFLA_PRIORITY
+- IFLA_MASTER
++ IFLA_MASTER,
+ #define IFLA_MASTER IFLA_MASTER
++ IFLA_WIRELESS, /* Wireless Extension event - see wireless.h */
++#define IFLA_WIRELESS IFLA_WIRELESS
+ };
+
+
+-#define IFLA_MAX IFLA_MASTER
++#define IFLA_MAX IFLA_WIRELESS
+
+ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
+ #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
+diff -u -p -r --new-file linux/include/linux-w13/wireless.h linux/include/linux/wireless.h
+--- linux/include/linux-w13/wireless.h Thu Jun 6 15:00:28 2002
++++ linux/include/linux/wireless.h Thu Jun 6 15:47:44 2002
+@@ -1,10 +1,10 @@
+ /*
+ * This file define a set of standard wireless extensions
+ *
+- * Version : 13 6.12.01
++ * Version : 14 25.1.02
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+- * Copyright (c) 1997-2001 Jean Tourrilhes, All Rights Reserved.
++ * Copyright (c) 1997-2002 Jean Tourrilhes, All Rights Reserved.
+ */
+
+ #ifndef _LINUX_WIRELESS_H
+@@ -40,7 +40,7 @@
+ * # include/linux/netdevice.h (one place)
+ * # include/linux/proc_fs.h (one place)
+ *
+- * New driver API (2001 -> onward) :
++ * New driver API (2002 -> onward) :
+ * -------------------------------
+ * This file is only concerned with the user space API and common definitions.
+ * The new driver API is defined and documented in :
+@@ -49,6 +49,11 @@
+ * Note as well that /proc/net/wireless implementation has now moved in :
+ * # include/linux/wireless.c
+ *
++ * Wireless Events (2002 -> onward) :
++ * --------------------------------
++ * Events are defined at the end of this file, and implemented in :
++ * # include/linux/wireless.c
++ *
+ * Other comments :
+ * --------------
+ * Do not add here things that are redundant with other mechanisms
+@@ -75,7 +80,7 @@
+ * (there is some stuff that will be added in the future...)
+ * I just plan to increment with each new version.
+ */
+-#define WIRELESS_EXT 13
++#define WIRELESS_EXT 14
+
+ /*
+ * Changes :
+@@ -141,6 +146,13 @@
+ * - Document creation of new driver API.
+ * - Extract union iwreq_data from struct iwreq (for new driver API).
+ * - Rename SIOCSIWNAME as SIOCSIWCOMMIT
++ *
++ * V13 to V14
++ * ----------
++ * - Wireless Events support : define struct iw_event
++ * - Define additional specific event numbers
++ * - Add "addr" and "param" fields in union iwreq_data
++ * - AP scanning stuff (SIOCSIWSCAN and friends)
+ */
+
+ /**************************** CONSTANTS ****************************/
+@@ -175,6 +187,8 @@
+ #define SIOCSIWAP 0x8B14 /* set access point MAC addresses */
+ #define SIOCGIWAP 0x8B15 /* get access point MAC addresses */
+ #define SIOCGIWAPLIST 0x8B17 /* get list of access point in range */
++#define SIOCSIWSCAN 0x8B18 /* trigger scanning */
++#define SIOCGIWSCAN 0x8B19 /* get scanning results */
+
+ /* 802.11 specific support */
+ #define SIOCSIWESSID 0x8B1A /* set ESSID (network name) */
+@@ -238,6 +252,15 @@
+ #define IW_IS_SET(cmd) (!((cmd) & 0x1))
+ #define IW_IS_GET(cmd) ((cmd) & 0x1)
+
++/* ----------------------- WIRELESS EVENTS ----------------------- */
++/* Those are *NOT* ioctls, do not issue request on them !!! */
++/* Most events use the same identifier as ioctl requests */
++
++#define IWEVTXDROP 0x8C00 /* Packet dropped to excessive retry */
++#define IWEVQUAL 0x8C01 /* Quality part of statistics */
++
++#define IWEVFIRST 0x8C00
++
+ /* ------------------------- PRIVATE INFO ------------------------- */
+ /*
+ * The following is used with SIOCGIWPRIV. It allow a driver to define
+@@ -340,6 +363,19 @@
+ #define IW_RETRY_MAX 0x0002 /* Value is a maximum */
+ #define IW_RETRY_RELATIVE 0x0004 /* Value is not in seconds/ms/us */
+
++/* Scanning request flags */
++#define IW_SCAN_DEFAULT 0x0000 /* Default scan of the driver */
++#define IW_SCAN_ALL_ESSID 0x0001 /* Scan all ESSIDs */
++#define IW_SCAN_THIS_ESSID 0x0002 /* Scan only this ESSID */
++#define IW_SCAN_ALL_FREQ 0x0004 /* Scan all Frequencies */
++#define IW_SCAN_THIS_FREQ 0x0008 /* Scan only this Frequency */
++#define IW_SCAN_ALL_MODE 0x0010 /* Scan all Modes */
++#define IW_SCAN_THIS_MODE 0x0020 /* Scan only this Mode */
++#define IW_SCAN_ALL_RATE 0x0040 /* Scan all Bit-Rates */
++#define IW_SCAN_THIS_RATE 0x0080 /* Scan only this Bit-Rate */
++/* Maximum size of returned data */
++#define IW_SCAN_MAX_DATA 4096 /* In bytes */
++
+ /****************************** TYPES ******************************/
+
+ /* --------------------------- SUBTYPES --------------------------- */
+@@ -466,9 +502,12 @@ union iwreq_data
+
+ struct iw_point encoding; /* Encoding stuff : tokens */
+ struct iw_param power; /* PM duration/timeout */
++ struct iw_quality qual; /* Quality part of statistics */
+
+ struct sockaddr ap_addr; /* Access point address */
++ struct sockaddr addr; /* Destination address (hw) */
+
++ struct iw_param param; /* Other small parameters */
+ struct iw_point data; /* Other large parameters */
+ };
+
+@@ -595,5 +634,36 @@ struct iw_priv_args
+ __u16 get_args; /* Type and number of args */
+ char name[IFNAMSIZ]; /* Name of the extension */
+ };
++
++/* ----------------------- WIRELESS EVENTS ----------------------- */
++/*
++ * Wireless events are carried through the rtnetlink socket to user
++ * space. They are encapsulated in the IFLA_WIRELESS field of
++ * a RTM_NEWLINK message.
++ */
++
++/*
++ * A Wireless Event. Contains basically the same data as the ioctl...
++ */
++struct iw_event
++{
++	__u16		len;			/* Real length of this stuff */
++ __u16 cmd; /* Wireless IOCTL */
++ union iwreq_data u; /* IOCTL fixed payload */
++};
++
++/* Size of the Event prefix (including padding and alignment junk) */
++#define IW_EV_LCP_LEN (sizeof(struct iw_event) - sizeof(union iwreq_data))
++/* Size of the various events */
++#define IW_EV_CHAR_LEN (IW_EV_LCP_LEN + IFNAMSIZ)
++#define IW_EV_UINT_LEN (IW_EV_LCP_LEN + sizeof(__u32))
++#define IW_EV_FREQ_LEN (IW_EV_LCP_LEN + sizeof(struct iw_freq))
++#define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point))
++#define IW_EV_PARAM_LEN (IW_EV_LCP_LEN + sizeof(struct iw_param))
++#define IW_EV_ADDR_LEN (IW_EV_LCP_LEN + sizeof(struct sockaddr))
++#define IW_EV_QUAL_LEN (IW_EV_LCP_LEN + sizeof(struct iw_quality))
++
++/* Note : in the case of iw_point, the extra data will come at the
++ * end of the event */
+
+ #endif /* _LINUX_WIRELESS_H */
+diff -u -p -r --new-file linux/include/net-w13/iw_handler.h linux/include/net/iw_handler.h
+--- linux/include/net-w13/iw_handler.h Thu Jun 6 15:06:16 2002
++++ linux/include/net/iw_handler.h Thu Jun 6 15:48:06 2002
+@@ -1,10 +1,10 @@
+ /*
+ * This file define the new driver API for Wireless Extensions
+ *
+- * Version : 2 6.12.01
++ * Version : 3 17.1.02
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+- * Copyright (c) 2001 Jean Tourrilhes, All Rights Reserved.
++ * Copyright (c) 2001-2002 Jean Tourrilhes, All Rights Reserved.
+ */
+
+ #ifndef _IW_HANDLER_H
+@@ -33,7 +33,7 @@
+ * o The user space interface is tied to ioctl because of the use
+ * copy_to/from_user.
+ *
+- * New driver API (2001 -> onward) :
++ * New driver API (2002 -> onward) :
+ * -------------------------------
+ * The new driver API is just a bunch of standard functions (handlers),
+ * each handling a specific Wireless Extension. The driver just export
+@@ -206,7 +206,18 @@
+ * will be needed...
+ * I just plan to increment with each new version.
+ */
+-#define IW_HANDLER_VERSION 2
++#define IW_HANDLER_VERSION 3
++
++/*
++ * Changes :
++ *
++ * V2 to V3
++ * --------
++ * - Move event definition in <linux/wireless.h>
++ * - Add Wireless Event support :
++ * o wireless_send_event() prototype
++ * o iwe_stream_add_event/point() inline functions
++ */
+
+ /**************************** CONSTANTS ****************************/
+
+@@ -225,6 +236,7 @@
+ #define IW_HEADER_TYPE_POINT 6 /* struct iw_point */
+ #define IW_HEADER_TYPE_PARAM 7 /* struct iw_param */
+ #define IW_HEADER_TYPE_ADDR 8 /* struct sockaddr */
++#define IW_HEADER_TYPE_QUAL 9 /* struct iw_quality */
+
+ /* Handling flags */
+ /* Most are not implemented. I just use them as a reminder of some
+@@ -233,7 +245,8 @@
+ /* Wrapper level flags */
+ #define IW_DESCR_FLAG_DUMP 0x0001 /* Not part of the dump command */
+ #define IW_DESCR_FLAG_EVENT 0x0002 /* Generate an event on SET */
+-#define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET request is ROOT only */
++#define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET : request is ROOT only */
++ /* SET : Omit payload from generated iwevent */
+ /* Driver level flags */
+ #define IW_DESCR_FLAG_WAIT 0x0100 /* Wait for driver event */
+
+@@ -303,25 +316,6 @@ struct iw_handler_def
+ * 'struct net_device' to here, to minimise bloat. */
+ };
+
+-/* ----------------------- WIRELESS EVENTS ----------------------- */
+-/*
+- * Currently we don't support events, so let's just plan for the
+- * future...
+- */
+-
+-/*
+- * A Wireless Event.
+- */
+-// How do we define short header ? We don't want a flag on length.
+-// Probably a flag on event ? Highest bit to zero...
+-struct iw_event
+-{
+- __u16 length; /* Lenght of this stuff */
+- __u16 event; /* Wireless IOCTL */
+- union iwreq_data header; /* IOCTL fixed payload */
+- char extra[0]; /* Optional IOCTL data */
+-};
+-
+ /* ---------------------- IOCTL DESCRIPTION ---------------------- */
+ /*
+ * One of the main goal of the new interface is to deal entirely with
+@@ -369,6 +363,88 @@ extern int dev_get_wireless_info(char *
+ extern int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd);
+
+ /* Second : functions that may be called by driver modules */
+-/* None yet */
+
+-#endif /* _LINUX_WIRELESS_H */
++/* Send a single event to user space */
++extern void wireless_send_event(struct net_device * dev,
++ unsigned int cmd,
++ union iwreq_data * wrqu,
++ char * extra);
++
++/* We may need a function to send a stream of events to user space.
++ * More on that later... */
++
++/************************* INLINE FUNCTIONS *************************/
++/*
++ * Function that are so simple that it's more efficient inlining them
++ */
++
++/*------------------------------------------------------------------*/
++/*
++ * Wrapper to add an Wireless Event to a stream of events.
++ */
++static inline char *
++iwe_stream_add_event(char * stream, /* Stream of events */
++ char * ends, /* End of stream */
++ struct iw_event *iwe, /* Payload */
++ int event_len) /* Real size of payload */
++{
++ /* Check if it's possible */
++ if((stream + event_len) < ends) {
++ iwe->len = event_len;
++ memcpy(stream, (char *) iwe, event_len);
++ stream += event_len;
++ }
++ return stream;
++}
++
++/*------------------------------------------------------------------*/
++/*
++ * Wrapper to add a short Wireless Event containing a pointer to a
++ * stream of events.
++ */
++static inline char *
++iwe_stream_add_point(char * stream, /* Stream of events */
++ char * ends, /* End of stream */
++ struct iw_event *iwe, /* Payload */
++ char * extra)
++{
++ int event_len = IW_EV_POINT_LEN + iwe->u.data.length;
++ /* Check if it's possible */
++ if((stream + event_len) < ends) {
++ iwe->len = event_len;
++ memcpy(stream, (char *) iwe, IW_EV_POINT_LEN);
++ memcpy(stream + IW_EV_POINT_LEN, extra, iwe->u.data.length);
++ stream += event_len;
++ }
++ return stream;
++}
++
++/*------------------------------------------------------------------*/
++/*
++ * Wrapper to add a value to a Wireless Event in a stream of events.
++ * Be careful, this one is tricky to use properly :
++ * At the first run, you need to have (value = event + IW_EV_LCP_LEN).
++ */
++static inline char *
++iwe_stream_add_value(char * event, /* Event in the stream */
++ char * value, /* Value in event */
++ char * ends, /* End of stream */
++ struct iw_event *iwe, /* Payload */
++ int event_len) /* Real size of payload */
++{
++ /* Don't duplicate LCP */
++ event_len -= IW_EV_LCP_LEN;
++
++ /* Check if it's possible */
++ if((value + event_len) < ends) {
++ /* Add new value */
++ memcpy(value, (char *) iwe + IW_EV_LCP_LEN, event_len);
++ value += event_len;
++ /* Patch LCP */
++ iwe->len = value - event;
++ memcpy(event, (char *) iwe, IW_EV_LCP_LEN);
++ }
++ return value;
++}
++
++#endif /* _IW_HANDLER_H */
+diff -u -p -r --new-file linux/net/netsyms-w13.c linux/net/netsyms.c
+--- linux/net/netsyms-w13.c Thu Jun 6 15:46:34 2002
++++ linux/net/netsyms.c Thu Jun 6 15:47:44 2002
+@@ -588,4 +588,10 @@ EXPORT_SYMBOL(register_gifconf);
+ EXPORT_SYMBOL(net_call_rx_atomic);
+ EXPORT_SYMBOL(softnet_data);
+
++#if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
++/* Don't include the whole header mess for a single function */
++extern void wireless_send_event(struct net_device *dev, unsigned int cmd, union iwreq_data *wrqu, char *extra);
++EXPORT_SYMBOL(wireless_send_event);
++#endif /* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */
++
+ #endif /* CONFIG_NET */
+diff -u -p -r --new-file linux/net/core/wireless-w13.c linux/net/core/wireless.c
+--- linux/net/core/wireless-w13.c Thu Jun 6 15:46:45 2002
++++ linux/net/core/wireless.c Thu Jun 6 15:48:06 2002
+@@ -2,7 +2,7 @@
+ * This file implement the Wireless Extensions APIs.
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+- * Copyright (c) 1997-2001 Jean Tourrilhes, All Rights Reserved.
++ * Copyright (c) 1997-2002 Jean Tourrilhes, All Rights Reserved.
+ *
+ * (As all part of the Linux kernel, this file is GPL)
+ */
+@@ -25,6 +25,16 @@
+ * o Added iw_handler handling ;-)
+ * o Added standard ioctl description
+ * o Initial dumb commit strategy based on orinoco.c
++ *
++ * v3 - 19.12.01 - Jean II
++ * o Make sure we don't go out of standard_ioctl[] in ioctl_standard_call
++ * o Add event dispatcher function
++ * o Add event description
++ * o Propagate events as rtnetlink IFLA_WIRELESS option
++ * o Generate event on selected SET requests
++ *
++ * v4 - 18.04.01 - Jean II
++ * o Fix stupid off by one in iw_ioctl_description : IW_ESSID_MAX_SIZE + 1
+ */
+
+ /***************************** INCLUDES *****************************/
+@@ -33,6 +43,7 @@
+ #include <linux/config.h> /* Not needed ??? */
+ #include <linux/types.h> /* off_t */
+ #include <linux/netdevice.h> /* struct ifreq, dev_get_by_name() */
++#include <linux/rtnetlink.h> /* rtnetlink stuff */
+
+ #include <linux/wireless.h> /* Pretty obvious */
+ #include <net/iw_handler.h> /* New driver API */
+@@ -44,14 +55,23 @@
+
+ /* Debuging stuff */
+ #undef WE_IOCTL_DEBUG /* Debug IOCTL API */
++#undef WE_EVENT_DEBUG /* Debug Event dispatcher */
++
++/* Options */
++#define WE_EVENT_NETLINK /* Propagate events using rtnetlink */
++#define WE_SET_EVENT /* Generate an event on some set commands */
+
+ /************************* GLOBAL VARIABLES *************************/
+ /*
+ * You should not use global variables, because or re-entrancy.
+ * On our case, it's only const, so it's OK...
+ */
++/*
++ * Meta-data about all the standard Wireless Extension request we
++ * know about.
++ */
+ static const struct iw_ioctl_description standard_ioctl[] = {
+- /* SIOCSIWCOMMIT (internal) */
++ /* SIOCSIWCOMMIT */
+ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
+ /* SIOCGIWNAME */
+ { IW_HEADER_TYPE_CHAR, 0, 0, 0, 0, IW_DESCR_FLAG_DUMP},
+@@ -99,18 +119,18 @@ static const struct iw_ioctl_description
+ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
+ /* SIOCGIWAPLIST */
+ { IW_HEADER_TYPE_POINT, 0, (sizeof(struct sockaddr) + sizeof(struct iw_quality)), 0, IW_MAX_AP, 0},
+- /* -- hole -- */
+- { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
+- /* -- hole -- */
+- { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
++ /* SIOCSIWSCAN */
++ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
++ /* SIOCGIWSCAN */
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_SCAN_MAX_DATA, 0},
+ /* SIOCSIWESSID */
+- { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE, IW_DESCR_FLAG_EVENT},
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE + 1, IW_DESCR_FLAG_EVENT},
+ /* SIOCGIWESSID */
+- { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE, IW_DESCR_FLAG_DUMP},
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE + 1, IW_DESCR_FLAG_DUMP},
+ /* SIOCSIWNICKN */
+- { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE, 0},
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE + 1, 0},
+ /* SIOCGIWNICKN */
+- { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE, 0},
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ESSID_MAX_SIZE + 1, 0},
+ /* -- hole -- */
+ { IW_HEADER_TYPE_NULL, 0, 0, 0, 0, 0},
+ /* -- hole -- */
+@@ -136,7 +156,7 @@ static const struct iw_ioctl_description
+ /* SIOCGIWRETRY */
+ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
+ /* SIOCSIWENCODE */
+- { IW_HEADER_TYPE_POINT, 4, 1, 0, IW_ENCODING_TOKEN_MAX, IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT},
++ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ENCODING_TOKEN_MAX, IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT},
+ /* SIOCGIWENCODE */
+ { IW_HEADER_TYPE_POINT, 0, 1, 0, IW_ENCODING_TOKEN_MAX, IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT},
+ /* SIOCSIWPOWER */
+@@ -144,9 +164,38 @@ static const struct iw_ioctl_description
+ /* SIOCGIWPOWER */
+ { IW_HEADER_TYPE_PARAM, 0, 0, 0, 0, 0},
+ };
++static const int standard_ioctl_num = (sizeof(standard_ioctl) /
++ sizeof(struct iw_ioctl_description));
++
++/*
++ * Meta-data about all the additional standard Wireless Extension events
++ * we know about.
++ */
++static const struct iw_ioctl_description standard_event[] = {
++ /* IWEVTXDROP */
++ { IW_HEADER_TYPE_ADDR, 0, 0, 0, 0, 0},
++ /* IWEVQUAL */
++ { IW_HEADER_TYPE_QUAL, 0, 0, 0, 0, 0},
++};
++static const int standard_event_num = (sizeof(standard_event) /
++ sizeof(struct iw_ioctl_description));
+
+ /* Size (in bytes) of the various private data types */
+-char priv_type_size[] = { 0, 1, 1, 0, 4, 4, 0, 0 };
++static const char priv_type_size[] = { 0, 1, 1, 0, 4, 4, 0, 0 };
++
++/* Size (in bytes) of various events */
++static const int event_type_size[] = {
++ IW_EV_LCP_LEN,
++ 0,
++ IW_EV_CHAR_LEN,
++ 0,
++ IW_EV_UINT_LEN,
++ IW_EV_FREQ_LEN,
++ IW_EV_POINT_LEN, /* Without variable payload */
++ IW_EV_PARAM_LEN,
++ IW_EV_ADDR_LEN,
++ IW_EV_QUAL_LEN,
++};
+
+ /************************ COMMON SUBROUTINES ************************/
+ /*
+@@ -162,7 +211,8 @@ char priv_type_size[] = { 0, 1, 1, 0, 4,
+ static inline iw_handler get_handler(struct net_device *dev,
+ unsigned int cmd)
+ {
+- unsigned int index; /* MUST be unsigned */
++ /* Don't "optimise" the following variable, it will crash */
++ unsigned int index; /* *MUST* be unsigned */
+
+ /* Check if we have some wireless handlers defined */
+ if(dev->wireless_handlers == NULL)
+@@ -269,9 +319,9 @@ static inline int sprintf_wireless_stats
+ stats->status,
+ stats->qual.qual,
+ stats->qual.updated & 1 ? '.' : ' ',
+- stats->qual.level,
++ ((__u8) stats->qual.level),
+ stats->qual.updated & 2 ? '.' : ' ',
+- stats->qual.noise,
++ ((__u8) stats->qual.noise),
+ stats->qual.updated & 4 ? '.' : ' ',
+ stats->discard.nwid,
+ stats->discard.code,
+@@ -423,12 +473,14 @@ static inline int ioctl_standard_call(st
+ int ret = -EINVAL;
+
+ /* Get the description of the IOCTL */
++ if((cmd - SIOCIWFIRST) >= standard_ioctl_num)
++ return -EOPNOTSUPP;
+ descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
+
+ #ifdef WE_IOCTL_DEBUG
+- printk(KERN_DEBUG "%s : Found standard handler for 0x%04X\n",
++ printk(KERN_DEBUG "%s (WE) : Found standard handler for 0x%04X\n",
+ ifr->ifr_name, cmd);
+- printk(KERN_DEBUG "Header type : %d, token type : %d, token_size : %d, max_token : %d\n", descr->header_type, descr->token_type, descr->token_size, descr->max_tokens);
++ printk(KERN_DEBUG "%s (WE) : Header type : %d, Token type : %d, size : %d, token : %d\n", dev->name, descr->header_type, descr->token_type, descr->token_size, descr->max_tokens);
+ #endif /* WE_IOCTL_DEBUG */
+
+ /* Prepare the call */
+@@ -437,8 +489,16 @@ static inline int ioctl_standard_call(st
+
+ /* Check if we have a pointer to user space data or not */
+ if(descr->header_type != IW_HEADER_TYPE_POINT) {
++
+ /* No extra arguments. Trivial to handle */
+ ret = handler(dev, &info, &(iwr->u), NULL);
++
++#ifdef WE_SET_EVENT
++ /* Generate an event to notify listeners of the change */
++ if((descr->flags & IW_DESCR_FLAG_EVENT) &&
++ ((ret == 0) || (ret == -EIWCOMMIT)))
++ wireless_send_event(dev, cmd, &(iwr->u), NULL);
++#endif /* WE_SET_EVENT */
+ } else {
+ char * extra;
+ int err;
+@@ -466,8 +526,8 @@ static inline int ioctl_standard_call(st
+ }
+
+ #ifdef WE_IOCTL_DEBUG
+- printk(KERN_DEBUG "Malloc %d bytes\n",
+- descr->max_tokens * descr->token_size);
++ printk(KERN_DEBUG "%s (WE) : Malloc %d bytes\n",
++ dev->name, descr->max_tokens * descr->token_size);
+ #endif /* WE_IOCTL_DEBUG */
+
+ /* Always allocate for max space. Easier, and won't last
+@@ -488,7 +548,8 @@ static inline int ioctl_standard_call(st
+ return -EFAULT;
+ }
+ #ifdef WE_IOCTL_DEBUG
+- printk(KERN_DEBUG "Got %d bytes\n",
++ printk(KERN_DEBUG "%s (WE) : Got %d bytes\n",
++ dev->name,
+ iwr->u.data.length * descr->token_size);
+ #endif /* WE_IOCTL_DEBUG */
+ }
+@@ -504,11 +565,26 @@ static inline int ioctl_standard_call(st
+ if (err)
+ ret = -EFAULT;
+ #ifdef WE_IOCTL_DEBUG
+- printk(KERN_DEBUG "Wrote %d bytes\n",
++ printk(KERN_DEBUG "%s (WE) : Wrote %d bytes\n",
++ dev->name,
+ iwr->u.data.length * descr->token_size);
+ #endif /* WE_IOCTL_DEBUG */
+ }
+
++#ifdef WE_SET_EVENT
++ /* Generate an event to notify listeners of the change */
++ if((descr->flags & IW_DESCR_FLAG_EVENT) &&
++ ((ret == 0) || (ret == -EIWCOMMIT))) {
++ if(descr->flags & IW_DESCR_FLAG_RESTRICT)
++ /* If the event is restricted, don't
++ * export the payload */
++ wireless_send_event(dev, cmd, &(iwr->u), NULL);
++ else
++ wireless_send_event(dev, cmd, &(iwr->u),
++ extra);
++ }
++#endif /* WE_SET_EVENT */
++
+ /* Cleanup - I told you it wasn't that long ;-) */
+ kfree(extra);
+ }
+@@ -558,11 +634,12 @@ static inline int ioctl_private_call(str
+ }
+
+ #ifdef WE_IOCTL_DEBUG
+- printk(KERN_DEBUG "%s : Found private handler for 0x%04X\n",
++ printk(KERN_DEBUG "%s (WE) : Found private handler for 0x%04X\n",
+ ifr->ifr_name, cmd);
+ if(descr) {
+- printk(KERN_DEBUG "Name %s, set %X, get %X\n",
+- descr->name, descr->set_args, descr->get_args);
++ printk(KERN_DEBUG "%s (WE) : Name %s, set %X, get %X\n",
++ dev->name, descr->name,
++ descr->set_args, descr->get_args);
+ }
+ #endif /* WE_IOCTL_DEBUG */
+
+@@ -617,7 +694,8 @@ static inline int ioctl_private_call(str
+ }
+
+ #ifdef WE_IOCTL_DEBUG
+- printk(KERN_DEBUG "Malloc %d bytes\n", extra_size);
++ printk(KERN_DEBUG "%s (WE) : Malloc %d bytes\n",
++ dev->name, extra_size);
+ #endif /* WE_IOCTL_DEBUG */
+
+ /* Always allocate for max space. Easier, and won't last
+@@ -636,7 +714,8 @@ static inline int ioctl_private_call(str
+ return -EFAULT;
+ }
+ #ifdef WE_IOCTL_DEBUG
+- printk(KERN_DEBUG "Got %d elem\n", iwr->u.data.length);
++ printk(KERN_DEBUG "%s (WE) : Got %d elem\n",
++ dev->name, iwr->u.data.length);
+ #endif /* WE_IOCTL_DEBUG */
+ }
+
+@@ -650,8 +729,8 @@ static inline int ioctl_private_call(str
+ if (err)
+ ret = -EFAULT;
+ #ifdef WE_IOCTL_DEBUG
+- printk(KERN_DEBUG "Wrote %d elem\n",
+- iwr->u.data.length);
++ printk(KERN_DEBUG "%s (WE) : Wrote %d elem\n",
++ dev->name, iwr->u.data.length);
+ #endif /* WE_IOCTL_DEBUG */
+ }
+
+@@ -730,4 +809,178 @@ int wireless_process_ioctl(struct ifreq
+ }
+ /* Not reached */
+ return -EINVAL;
++}
++
++/************************* EVENT PROCESSING *************************/
++/*
++ * Process events generated by the wireless layer or the driver.
++ * Most often, the event will be propagated through rtnetlink
++ */
++
++#ifdef WE_EVENT_NETLINK
++/* "rtnl" is defined in net/core/rtnetlink.c, but we need it here.
++ * It is declared in <linux/rtnetlink.h> */
++
++/* ---------------------------------------------------------------- */
++/*
++ * Fill a rtnetlink message with our event data.
++ * Note that we propagate only the specified event and don't dump the
++ * current wireless config. Dumping the wireless config is far too
++ * expensive (for each parameter, the driver need to query the hardware).
++ */
++static inline int rtnetlink_fill_iwinfo(struct sk_buff * skb,
++ struct net_device * dev,
++ int type,
++ char * event,
++ int event_len)
++{
++ struct ifinfomsg *r;
++ struct nlmsghdr *nlh;
++ unsigned char *b = skb->tail;
++
++ nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(*r));
++ r = NLMSG_DATA(nlh);
++ r->ifi_family = AF_UNSPEC;
++ r->ifi_type = dev->type;
++ r->ifi_index = dev->ifindex;
++ r->ifi_flags = dev->flags;
++ r->ifi_change = 0; /* Wireless changes don't affect those flags */
++
++ /* Add the wireless events in the netlink packet */
++ RTA_PUT(skb, IFLA_WIRELESS,
++ event_len, event);
++
++ nlh->nlmsg_len = skb->tail - b;
++ return skb->len;
++
++nlmsg_failure:
++rtattr_failure:
++ skb_trim(skb, b - skb->data);
++ return -1;
++}
++
++/* ---------------------------------------------------------------- */
++/*
++ * Create and broadcast and send it on the standard rtnetlink socket
++ * This is a pure clone rtmsg_ifinfo() in net/core/rtnetlink.c
++ * Andrzej Krzysztofowicz mandated that I used a IFLA_XXX field
++ * within a RTM_NEWLINK event.
++ */
++static inline void rtmsg_iwinfo(struct net_device * dev,
++ char * event,
++ int event_len)
++{
++ struct sk_buff *skb;
++ int size = NLMSG_GOODSIZE;
++
++ skb = alloc_skb(size, GFP_ATOMIC);
++ if (!skb)
++ return;
++
++ if (rtnetlink_fill_iwinfo(skb, dev, RTM_NEWLINK,
++ event, event_len) < 0) {
++ kfree_skb(skb);
++ return;
++ }
++ NETLINK_CB(skb).dst_groups = RTMGRP_LINK;
++ netlink_broadcast(rtnl, skb, 0, RTMGRP_LINK, GFP_ATOMIC);
++}
++#endif /* WE_EVENT_NETLINK */
++
++/* ---------------------------------------------------------------- */
++/*
++ * Main event dispatcher. Called from other parts and drivers.
++ * Send the event on the appropriate channels.
++ * May be called from interrupt context.
++ */
++void wireless_send_event(struct net_device * dev,
++ unsigned int cmd,
++ union iwreq_data * wrqu,
++ char * extra)
++{
++ const struct iw_ioctl_description * descr = NULL;
++ int extra_len = 0;
++ struct iw_event *event; /* Mallocated whole event */
++ int event_len; /* Its size */
++ int hdr_len; /* Size of the event header */
++ /* Don't "optimise" the following variable, it will crash */
++ unsigned cmd_index; /* *MUST* be unsigned */
++
++ /* Get the description of the IOCTL */
++ if(cmd <= SIOCIWLAST) {
++ cmd_index = cmd - SIOCIWFIRST;
++ if(cmd_index < standard_ioctl_num)
++ descr = &(standard_ioctl[cmd_index]);
++ } else {
++ cmd_index = cmd - IWEVFIRST;
++ if(cmd_index < standard_event_num)
++ descr = &(standard_event[cmd_index]);
++ }
++ /* Don't accept unknown events */
++ if(descr == NULL) {
++ /* Note : we don't return an error to the driver, because
++ * the driver would not know what to do about it. It can't
++ * return an error to the user, because the event is not
++ * initiated by a user request.
++ * The best the driver could do is to log an error message.
++ * We will do it ourselves instead...
++ */
++ printk(KERN_ERR "%s (WE) : Invalid Wireless Event (0x%04X)\n",
++ dev->name, cmd);
++ return;
++ }
++#ifdef WE_EVENT_DEBUG
++ printk(KERN_DEBUG "%s (WE) : Got event 0x%04X\n",
++ dev->name, cmd);
++ printk(KERN_DEBUG "%s (WE) : Header type : %d, Token type : %d, size : %d, token : %d\n", dev->name, descr->header_type, descr->token_type, descr->token_size, descr->max_tokens);
++#endif /* WE_EVENT_DEBUG */
++
++ /* Check extra parameters and set extra_len */
++ if(descr->header_type == IW_HEADER_TYPE_POINT) {
++ /* Check if number of token fits within bounds */
++ if(wrqu->data.length > descr->max_tokens) {
++ printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length);
++ return;
++ }
++ if(wrqu->data.length < descr->min_tokens) {
++ printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length);
++ return;
++ }
++ /* Calculate extra_len - extra is NULL for restricted events */
++ if(extra != NULL)
++ extra_len = wrqu->data.length * descr->token_size;
++#ifdef WE_EVENT_DEBUG
++ printk(KERN_DEBUG "%s (WE) : Event 0x%04X, tokens %d, extra_len %d\n", dev->name, cmd, wrqu->data.length, extra_len);
++#endif /* WE_EVENT_DEBUG */
++ }
++
++ /* Total length of the event */
++ hdr_len = event_type_size[descr->header_type];
++ event_len = hdr_len + extra_len;
++
++#ifdef WE_EVENT_DEBUG
++ printk(KERN_DEBUG "%s (WE) : Event 0x%04X, hdr_len %d, event_len %d\n", dev->name, cmd, hdr_len, event_len);
++#endif /* WE_EVENT_DEBUG */
++
++ /* Create temporary buffer to hold the event */
++ event = kmalloc(event_len, GFP_ATOMIC);
++ if(event == NULL)
++ return;
++
++ /* Fill event */
++ event->len = event_len;
++ event->cmd = cmd;
++ memcpy(&event->u, wrqu, hdr_len - IW_EV_LCP_LEN);
++ if(extra != NULL)
++ memcpy(((char *) event) + hdr_len, extra, extra_len);
++
++#ifdef WE_EVENT_NETLINK
++ /* rtnetlink event channel */
++ rtmsg_iwinfo(dev, (char *) event, event_len);
++#endif /* WE_EVENT_NETLINK */
++
++ /* Cleanup */
++ kfree(event);
++
++ return; /* Always success, I guess ;-) */
+ }
diff --git a/packages/linux/files/linux-2.4-cpufreq.patch b/packages/linux/files/linux-2.4-cpufreq.patch
index e69de29bb2..c3526bb30d 100644
--- a/packages/linux/files/linux-2.4-cpufreq.patch
+++ b/packages/linux/files/linux-2.4-cpufreq.patch
@@ -0,0 +1,20 @@
+Index: include/linux/cpufreq.h
+===================================================================
+RCS file: /cvs/linux/kernel/include/linux/cpufreq.h,v
+retrieving revision 1.4
+diff -u -r1.4 cpufreq.h
+--- linux/include/linux/cpufreq.h 23 Aug 2002 22:18:47 -0000 1.4
++++ linux/include/linux/cpufreq.h 29 Apr 2004 08:44:18 -0000
+@@ -16,9 +16,9 @@
+ #include <linux/notifier.h>
+
+ #ifndef CONFIG_SMP
+-#define cpufreq_current(cpu) ((void)(cpu), __cpufreq_cur)
+-#define cpufreq_max(cpu) ((void)(cpu), __cpufreq_max)
+-#define cpufreq_min(cpu) ((void)(cpu), __cpufreq_min)
++#define cpufreq_current(cpu) (__cpufreq_cur)
++#define cpufreq_max(cpu) (__cpufreq_max)
++#define cpufreq_min(cpu) (__cpufreq_min)
+ #else
+ /*
+ * Should be something like:
diff --git a/packages/linux/files/linux-2.4-mmc-debugging.patch b/packages/linux/files/linux-2.4-mmc-debugging.patch
index e69de29bb2..6cde9e8ed2 100644
--- a/packages/linux/files/linux-2.4-mmc-debugging.patch
+++ b/packages/linux/files/linux-2.4-mmc-debugging.patch
@@ -0,0 +1,15 @@
+--- kernel/include/linux/mmc/mmc_protocol.h 2005-04-09 17:30:57.930462521 +0200
++++ /tmp/mmc_protocol.h 2005-04-09 17:30:33.649097537 +0200
+@@ -273,10 +273,10 @@
+ #define START_MMC_DEBUG(n) do { if (n <= g_mmc_debug)
+ #define END_MMC_DEBUG } while (0)
+ #else
+ #define MMC_DEBUG(n, args...)
+-#define START_MMC_DEBUG(n)
+-#define END_MMC_DEBUG
++#define START_MMC_DEBUG(n) do { if (0)
++#define END_MMC_DEBUG } while (0)
+ #endif /* CONFIG_MMC_DEBUG */
+
+ #endif /* MMC_MMC_PROTOCOL_H */
+
diff --git a/packages/linux/files/linux-2.4-no-short-loads.patch b/packages/linux/files/linux-2.4-no-short-loads.patch
index e69de29bb2..f2d6c74224 100644
--- a/packages/linux/files/linux-2.4-no-short-loads.patch
+++ b/packages/linux/files/linux-2.4-no-short-loads.patch
@@ -0,0 +1,18 @@
+Index: arch/arm/Makefile
+===================================================================
+RCS file: /cvs/linux/kernel/arch/arm/Makefile,v
+retrieving revision 1.47
+diff -u -r1.47 Makefile
+--- linux/arch/arm/Makefile 9 Jul 2003 14:10:56 -0000 1.47
++++ linux/arch/arm/Makefile 28 Apr 2004 21:11:04 -0000
+@@ -60,8 +60,8 @@
+ tune-$(CONFIG_CPU_XSCALE) :=-mtune=xscale
+ #tune-$(CONFIG_CPU_XSCALE) :=-mtune=strongarm
+
+-CFLAGS_BOOT :=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Uarm
+-CFLAGS +=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Uarm
++CFLAGS_BOOT :=$(apcs-y) $(arch-y) $(tune-y) -msoft-float -Uarm
++CFLAGS +=$(apcs-y) $(arch-y) $(tune-y) -msoft-float -Uarm
+ AFLAGS +=$(apcs-y) $(arch-y) -msoft-float
+
+ ifeq ($(CONFIG_CPU_26),y)
diff --git a/packages/linux/files/linux-2.4-usb-gadget.patch b/packages/linux/files/linux-2.4-usb-gadget.patch
index e69de29bb2..0864ee98f5 100644
--- a/packages/linux/files/linux-2.4-usb-gadget.patch
+++ b/packages/linux/files/linux-2.4-usb-gadget.patch
@@ -0,0 +1,29506 @@
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/Documentation/Configure.help kernel/Documentation/Configure.help
+--- /tmp/kernel/Documentation/Configure.help 2005-04-22 17:52:12.265476882 +0200
++++ kernel/Documentation/Configure.help 2005-04-22 17:57:15.940717930 +0200
+@@ -23701,6 +23701,163 @@
+ brave people. System crashes and other bad things are likely to occur if
+ you use this driver. If in doubt, select N.
+
++CONFIG_USB_GADGET
++ USB is a master/slave protocol, organized with one master
++ host (such as a PC) controlling up to 127 peripheral devices.
++ The USB hardware is asymmetric, which makes it easier to set up:
++ you can't connect two "to-the-host" connectors to each other.
++
++ Linux can run in the host, or in the peripheral. In both cases
++ you need a low level bus controller driver, and some software
++ talking to it. Peripheral controllers are often discrete silicon,
++ or are integrated with the CPU in a microcontroller. The more
++ familiar host side controllers have names like "EHCI",
++ or "UHCI", and are usually integrated into southbridges on PC
++ motherboards.
++
++ Enable this configuration option if you want to run Linux inside
++ a USB peripheral device. Configure one hardware driver for your
++ peripheral/device side bus controller, and a "gadget driver" for
++ your peripheral protocol. (If you use modular gadget drivers,
++ you may configure more than one.)
++
++ If in doubt, say "N" and don't enable these drivers; most people
++ don't have this kind of hardware (except maybe inside Linux PDAs).
++
++CONFIG_USB_GADGET_NET2280
++ NetChip 2280 is a PCI based USB peripheral controller which
++ supports both full and high speed USB 2.0 data transfers.
++
++ It has six configurable endpoints, as well as endpoint zero
++ (for control transfers) and several endpoints with dedicated
++ functions.
++
++ Say "y" to link the driver statically, or "m" to build a
++ dynamically linked module called "net2280" and force all
++ gadget drivers to also be dynamically linked.
++
++CONFIG_USB_GADGET_GOKU
++ The Toshiba TC86C001 is a PCI device which includes controllers
++ for full speed USB devices, IDE, I2C, SIO, plus a USB host (OHCI).
++
++ The device controller has three configurable (bulk or interrupt)
++ endpoints, plus endpoint zero (for control transfers).
++
++ Say "y" to link the driver statically, or "m" to build a
++ dynamically linked module called "goku_udc" and force all
++ gadget drivers to also be dynamically linked.
++
++CONFIG_USB_GADGET_PXA2XX
++ Intel's PXA 2xx series XScale ARM-5TE processors include
++ an integrated full speed USB 1.1 device controller.
++
++ It has fifteen fixed-function endpoints, as well as endpoint
++ zero (for control transfers).
++
++ Say "y" to link the driver statically, or "m" to build a
++ dynamically linked module called "pxa2xx_udc" and force all
++ gadget drivers to also be dynamically linked.
++
++CONFIG_USB_GADGET_SUPERH
++ Some Renesas SuperH processors (SH7705, SH7727...) include an
++ integrated high speed USB 1.1 device controller.
++
++ It has three fixed-function endpoints, as well as endpoint zero (for
++ control transfers).
++
++ Say "y" to link the driver statically, or "m" to build a
++ dynamically linked module called "superh_udc" and force all
++ gadget drivers to also be dynamically linked.
++
++CONFIG_USB_ZERO
++ Gadget Zero is a two-configuration device. It either sinks and
++ sources bulk data; or it loops back a configurable number of
++ transfers. It also implements control requests, for "chapter 9"
++ conformance. The driver needs only two bulk-capable endpoints, so
++ it can work on top of most device-side usb controllers. It's
++ useful for testing, and is also a working example showing how
++ USB "gadget drivers" can be written.
++
++ Make this be the first driver you try using on top of any new
++ USB peripheral controller driver. Then you can use host-side
++ test software, like the "usbtest" driver, to put your hardware
++ and its driver through a basic set of functional tests.
++
++ Gadget Zero also works with the host-side "usb-skeleton" driver,
++ and with many kinds of host-side test software. You may need
++ to tweak product and vendor IDs before host software knows about
++ this device, and arrange to select an appropriate configuration.
++
++ Say "y" to link the driver statically, or "m" to build a
++ dynamically linked module called "g_zero".
++
++CONFIG_USB_ETH
++ This driver implements Ethernet style communication, in either
++ of two ways:
++
++ - The "Communication Device Class" (CDC) Ethernet Control Model.
++ That protocol is often avoided with pure Ethernet adapters, in
++ favor of simpler vendor-specific hardware, but is widely
++ supported by firmware for smart network devices.
++
++ - On hardware that can't implement that protocol, a simpler approach
++ is used, placing fewer demands on USB.
++
++ Within the USB device, this gadget driver exposes a network device
++ "usbX", where X depends on what other networking devices you have.
++ Treat it like a two-node Ethernet link: host, and gadget.
++
++ The Linux-USB host-side "usbnet" driver interoperates with this
++ driver, so that deep I/O queues can be supported. On 2.4 kernels,
++ use "CDCEther" instead, if you're using the CDC option. That CDC
++ mode should also interoperate with standard CDC Ethernet class
++ drivers on other host operating systems.
++
++ Say "y" to link the driver statically, or "m" to build a
++ dynamically linked module called "g_ether".
++
++CONFIG_USB_ETH_RNDIS
++ Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
++ and Microsoft provides redistributable binary RNDIS drivers for
++ older versions of Windows.
++
++ If you say "y" here, the Ethernet gadget driver will try to provide
++ a second device configuration, supporting RNDIS to talk to such
++ Microsoft USB hosts.
++
++CONFIG_USB_FILE_STORAGE
++ The File-backed Storage Gadget acts as a USB Mass Storage
++ disk drive. As its storage repository it can use a regular
++ file or a block device (in much the same way as the "loop"
++ device driver), specified as a module parameter.
++
++CONFIG_USB_FILE_STORAGE_TEST
++ Say "y" to generate the larger testing version of the
++ File-backed Storage Gadget, useful for probing the
++ behavior of USB Mass Storage hosts. Not needed for
++ normal operation.
++
++CONFIG_USB_ETH_RNDIS
++ Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
++ and Microsoft provides redistributable binary RNDIS drivers for
++ older versions of Windows.
++
++ If you say "y" here, the Ethernet gadget driver will try to provide
++ a second device configuration, supporting RNDIS to talk to such
++ Microsoft USB hosts.
++
++CONFIG_USB_FILE_STORAGE
++ The File-backed Storage Gadget acts as a USB Mass Storage
++ disk drive. As its storage repository it can use a regular
++ file or a block device (in much the same way as the "loop"
++ device driver), specified as a module parameter.
++
++CONFIG_USB_FILE_STORAGE_TEST
++ Say "y" to generate the larger testing version of the
++ File-backed Storage Gadget, useful for probing the
++ behavior of USB Mass Storage hosts. Not needed for
++ normal operation.
++
+ Winbond W83977AF IrDA Device Driver
+ CONFIG_WINBOND_FIR
+ Say Y here if you want to build IrDA support for the Winbond
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/Makefile kernel/Makefile
+--- /tmp/kernel/Makefile 2005-04-22 17:52:12.362461090 +0200
++++ kernel/Makefile 2005-04-22 17:53:19.374549284 +0200
+@@ -196,6 +196,7 @@
+ DRIVERS-$(CONFIG_HAMRADIO) += drivers/net/hamradio/hamradio.o
+ DRIVERS-$(CONFIG_TC) += drivers/tc/tc.a
+ DRIVERS-$(CONFIG_USB) += drivers/usb/usbdrv.o
++DRIVERS-$(CONFIG_USB_GADGET) += drivers/usb/gadget/built-in.o
+ DRIVERS-$(CONFIG_LAB) += drivers/bootldr/labmod.o
+ DRIVERS-$(CONFIG_INPUT) += drivers/input/inputdrv.o
+ DRIVERS-$(CONFIG_I2O) += drivers/message/i2o/i2o.o
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/Makefile kernel/drivers/Makefile
+--- /tmp/kernel/drivers/Makefile 2005-04-22 17:52:12.728401503 +0200
++++ kernel/drivers/Makefile 2005-04-22 17:53:19.523525026 +0200
+@@ -27,6 +27,7 @@
+ subdir-$(CONFIG_MAC) += macintosh
+ subdir-$(CONFIG_PPC) += macintosh
+ subdir-$(CONFIG_USB) += usb
++subdir-$(CONFIG_USB_GADGET) += usb/gadget
+ subdir-$(CONFIG_INPUT) += input
+ subdir-$(CONFIG_PHONE) += telephony
+ subdir-$(CONFIG_SGI) += sgi
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/Config.in kernel/drivers/usb/Config.in
+--- /tmp/kernel/drivers/usb/Config.in 2005-04-22 17:52:20.663109467 +0200
++++ kernel/drivers/usb/Config.in 2005-04-22 17:53:19.376548959 +0200
+@@ -120,4 +120,7 @@
+ dep_tristate ' USB Auerswald ISDN support (EXPERIMENTAL)' CONFIG_USB_AUERSWALD $CONFIG_USB $CONFIG_EXPERIMENTAL
+ dep_tristate ' Tieman Voyager USB Braille display support (EXPERIMENTAL)' CONFIG_USB_BRLVGER $CONFIG_USB $CONFIG_EXPERIMENTAL
+ fi
++
++source drivers/usb/gadget/Config.in
++
+ endmenu
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/Config.in kernel/drivers/usb/gadget/Config.in
+--- /tmp/kernel/drivers/usb/gadget/Config.in 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/Config.in 2005-04-22 17:53:19.403544563 +0200
+@@ -0,0 +1,128 @@
++#
++# USB device-side configuration
++# for 2.4 kbuild, drivers/usb/gadget/Config.in
++#
++# Long term, this likely doesn't all belong in one directory
++# Plan to split it up eventually.
++#
++mainmenu_option next_comment
++comment 'Support for USB gadgets'
++
++tristate 'Support for USB Gadgets' CONFIG_USB_GADGET
++if [ "$CONFIG_USB_GADGET" = "y" -o "$CONFIG_USB_GADGET" = "m" ]; then
++
++ #
++ # really want _exactly one_ device controller driver at a time,
++ # since they control compile options for gadget drivers.
++ #
++ choice 'USB Peripheral Controller Driver' "\
++ Intel-PXA2xx/IXP4xx CONFIG_USB_GADGET_PXA2XX \
++ National-N9603/N9604 CONFIG_USB_GADGET_N9604 \
++ NetChip-2280 CONFIG_USB_GADGET_NET2280 \
++ Renesas-SH7705/7727 CONFIG_USB_GADGET_SUPERH \
++ Toshiba-TC86C001(Goku-S) CONFIG_USB_GADGET_GOKU \
++ " NetChip-2280
++
++ define_tristate CONFIG_USB_GADGET_CONTROLLER n
++
++ if [ "$CONFIG_ARCH_PXA" = "y" -o "$CONFIG_ARCH_IXP425" = "y" ] ; then
++ if [ "$CONFIG_USB_GADGET_PXA2XX" = "y" ] ; then
++ define_tristate CONFIG_USB_PXA2XX $CONFIG_USB_GADGET
++ define_tristate CONFIG_USB_GADGET_CONTROLLER $CONFIG_USB_PXA2XX
++ fi
++ fi
++ if [ "$CONFIG_PCI" = "y" -a "$CONFIG_USB_GADGET_NET2280" = "y" ] ; then
++ define_tristate CONFIG_USB_NET2280 $CONFIG_USB_GADGET
++ define_tristate CONFIG_USB_GADGET_CONTROLLER $CONFIG_USB_NET2280
++ fi
++ if [ "$CONFIG_SUPERH" = "y" -a "$CONFIG_USB_GADGET_SUPERH" = "y" ] ; then
++ define_tristate CONFIG_USB_SUPERH $CONFIG_USB_GADGET
++ define_tristate CONFIG_USB_GADGET_CONTROLLER $CONFIG_USB_SUPERH
++ fi
++ if [ "$CONFIG_PCI" = "y" -a "$CONFIG_USB_GADGET_GOKU" = "y" ] ; then
++ define_tristate CONFIG_USB_GOKU $CONFIG_USB_GADGET
++ define_tristate CONFIG_USB_GADGET_CONTROLLER $CONFIG_USB_GOKU
++ fi
++ if [ "$CONFIG_USB_GADGET_N9604" = "y" ] ; then
++ define_tristate CONFIG_USB_N9604 $CONFIG_USB_GADGET
++ define_tristate CONFIG_USB_GADGET_CONTROLLER $CONFIG_USB_N9604
++ fi
++
++ # or any other controller that supports high speed transfers ...
++ define_bool CONFIG_USB_GADGET_DUALSPEED $CONFIG_USB_GADGET_NET2280
++
++ if [ "$CONFIG_USB_GADGET_CONTROLLER" = "y" -o "$CONFIG_USB_GADGET_CONTROLLER" = "m" ] ; then
++
++ #
++ # no reason not to enable more than one gadget driver module, but
++ # for static linking that would make no sense since the usb model
++ # has exactly one of these upstream connections and only one
++ # lowest-level driver can control it.
++ #
++ # gadget drivers are compiled to work on specific hardware, since
++ #
++ # (a) gadget driver need hardware-specific configuration, like what
++ # endpoint names and numbers to use, maxpacket sizes, etc
++ #
++ # (b) specific hardware features like iso endpoints may be required
++ #
++ comment 'USB Gadget Drivers'
++
++ # FIXME when drivers all use #ifdef CONFIG_USB_GADGET_* tests,
++ # just remove all this driver-specific define_bool logic
++
++ dep_tristate ' Gadget Zero (DEVELOPMENT)' CONFIG_USB_ZERO $CONFIG_USB_GADGET_CONTROLLER
++ dep_tristate ' Ethernet Gadget (EXPERIMENTAL)' CONFIG_USB_ETH $CONFIG_USB_GADGET_CONTROLLER $CONFIG_NET
++ if [ "$CONFIG_USB_ETH" = "y" -o "$CONFIG_USB_ETH" = "m" ] ; then
++ bool ' RNDIS support (EXPERIMENTAL)' CONFIG_USB_ETH_RNDIS
++ fi
++ dep_tristate ' Gadget Filesystem API (EXPERIMENTAL)' CONFIG_USB_GADGETFS $CONFIG_USB_GADGET_CONTROLLER
++ dep_tristate ' File-backed Storage Gadget (DEVELOPMENT)' CONFIG_USB_FILE_STORAGE $CONFIG_USB_GADGET_CONTROLLER
++ dep_mbool ' File-backed Storage Gadget test mode' CONFIG_USB_FILE_STORAGE_TEST $CONFIG_USB_FILE_STORAGE
++ dep_tristate ' Serial Gadget (EXPERIMENTAL)' CONFIG_USB_G_SERIAL $CONFIG_USB_GADGET_CONTROLLER
++
++
++ # enforce the "only one statically linked gadget driver" rule
++
++ if [ "$CONFIG_USB_ZERO" = "y" ]; then
++ # zero = y
++ define_tristate CONFIG_USB_ETH n
++ define_tristate CONFIG_USB_GADGETFS n
++ define_tristate CONFIG_USB_FILE_STORAGE n
++ define_tristate CONFIG_USB_G_SERIAL n
++ fi
++
++ if [ "$CONFIG_USB_ETH" = "y" ]; then
++ define_tristate CONFIG_USB_ZERO n
++ # eth = y
++ define_tristate CONFIG_USB_GADGETFS n
++ define_tristate CONFIG_USB_FILE_STORAGE n
++ define_tristate CONFIG_USB_G_SERIAL n
++ fi
++
++ if [ "$CONFIG_USB_GADGETFS" = "y" ]; then
++ define_tristate CONFIG_USB_ZERO n
++ define_tristate CONFIG_USB_ETH n
++ # gadgetfs = y
++ define_tristate CONFIG_USB_FILE_STORAGE n
++ define_tristate CONFIG_USB_G_SERIAL n
++ fi
++
++ if [ "$CONFIG_USB_FILE_STORAGE" = "y" ]; then
++ define_tristate CONFIG_USB_ZERO n
++ define_tristate CONFIG_USB_ETH n
++ define_tristate CONFIG_USB_GADGETFS n
++ # file_storage = y
++ define_tristate CONFIG_USB_G_SERIAL n
++ fi
++
++ if [ "$CONFIG_USB_G_SERIAL" = "y" ]; then
++ define_tristate CONFIG_USB_ZERO n
++ define_tristate CONFIG_USB_ETH n
++ define_tristate CONFIG_USB_GADGETFS n
++ define_tristate CONFIG_USB_FILE_STORAGE n
++ # g_serial = y
++ fi
++ fi
++fi
++endmenu
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/Makefile kernel/drivers/usb/gadget/Makefile
+--- /tmp/kernel/drivers/usb/gadget/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/Makefile 2005-04-22 17:53:19.405544237 +0200
+@@ -0,0 +1,58 @@
++#
++# Makefile for USB peripheral controller and gadget drivers
++# for kbuild 2.4
++#
++
++# for static linking
++O_TARGET := built-in.o
++
++list-multi := g_zero.o g_ether.o gadgetfs.o g_file_storage.o g_serial.o
++
++obj-$(CONFIG_USB_NET2280) += net2280.o
++obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o
++obj-$(CONFIG_USB_GOKU) += goku_udc.o
++obj-$(CONFIG_USB_SUPERH) += superh_udc.o
++obj-$(CONFIG_USB_N9604) += n9604.o
++
++# only one of these may be statically linked ...
++controller-$(CONFIG_USB_NET2280) += net2280.o
++controller-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o
++controller-$(CONFIG_USB_GOKU) += goku_udc.o
++controller-$(CONFIG_USB_SUPERH) += superh_udc.o
++controller-$(CONFIG_USB_N9604) += n9604.o
++
++# ... and only one of these, too; kbuild/kconfig don't help though.
++g_zero-objs := zero.o usbstring.o config.o epautoconf.o
++obj-$(CONFIG_USB_ZERO) += g_zero.o
++
++g_ether-objs := ether.o usbstring.o config.o epautoconf.o
++obj-$(CONFIG_USB_ETH) += g_ether.o
++
++ifeq ($(CONFIG_USB_ETH_RNDIS),y)
++ g_ether-objs += rndis.o
++endif
++
++gadgetfs-objs := inode.o usbstring.o
++obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o
++
++g_file_storage-objs := file_storage.o usbstring.o config.o \
++ epautoconf.o
++obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
++
++g_serial-objs := gserial.o usbstring.o epautoconf.o
++obj-$(CONFIG_USB_G_SERIAL) += g_serial.o
++
++export-objs := $(controller-y) $(controller-m)
++
++include $(TOPDIR)/Rules.make
++
++g_zero.o: $(g_zero-objs)
++ $(LD) -r -o $@ $(g_zero-objs)
++g_ether.o: $(g_ether-objs)
++ $(LD) -r -o $@ $(g_ether-objs)
++gadgetfs.o: $(gadgetfs-objs)
++ $(LD) -r -o $@ $(gadgetfs-objs)
++g_file_storage.o: $(g_file_storage-objs)
++ $(LD) -r -o $@ $(g_file_storage-objs)
++g_serial.o: $(g_serial-objs)
++ $(LD) -r -o $@ $(g_serial-objs)
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/config.c kernel/drivers/usb/gadget/config.c
+--- /tmp/kernel/drivers/usb/gadget/config.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/config.c 2005-04-22 17:53:19.408543749 +0200
+@@ -0,0 +1,116 @@
++/*
++ * usb/gadget/config.c -- simplify building config descriptors
++ *
++ * Copyright (C) 2003 David Brownell
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/list.h>
++#include <linux/string.h>
++#include <asm/byteorder.h>
++
++#include <linux/usb_ch9.h>
++
++
++/**
++ * usb_descriptor_fillbuf - fill buffer with descriptors
++ * @buf: Buffer to be filled
++ * @buflen: Size of buf
++ * @src: Array of descriptor pointers, terminated by null pointer.
++ *
++ * Copies descriptors into the buffer, returning the length or a
++ * negative error code if they can't all be copied. Useful when
++ * assembling descriptors for an associated set of interfaces used
++ * as part of configuring a composite device; or in other cases where
++ * sets of descriptors need to be marshaled.
++ */
++int
++usb_descriptor_fillbuf(void *buf, unsigned buflen,
++ const struct usb_descriptor_header **src)
++{
++ u8 *dest = buf;
++
++ if (!src)
++ return -EINVAL;
++
++ /* fill buffer from src[] until null descriptor ptr */
++ for (; 0 != *src; src++) {
++ unsigned len = (*src)->bLength;
++
++ if (len > buflen)
++ return -EINVAL;
++ memcpy(dest, *src, len);
++ buflen -= len;
++ dest += len;
++ }
++ return dest - (u8 *)buf;
++}
++
++
++/**
++ * usb_gadget_config_buf - builts a complete configuration descriptor
++ * @config: Header for the descriptor, including characteristics such
++ * as power requirements and number of interfaces.
++ * @desc: Null-terminated vector of pointers to the descriptors (interface,
++ * endpoint, etc) defining all functions in this device configuration.
++ * @buf: Buffer for the resulting configuration descriptor.
++ * @length: Length of buffer. If this is not big enough to hold the
++ * entire configuration descriptor, an error code will be returned.
++ *
++ * This copies descriptors into the response buffer, building a descriptor
++ * for that configuration. It returns the buffer length or a negative
++ * status code. The config.wTotalLength field is set to match the length
++ * of the result, but other descriptor fields (including power usage and
++ * interface count) must be set by the caller.
++ *
++ * Gadget drivers could use this when constructing a config descriptor
++ * in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the
++ * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed.
++ */
++int usb_gadget_config_buf(
++ const struct usb_config_descriptor *config,
++ void *buf,
++ unsigned length,
++ const struct usb_descriptor_header **desc
++)
++{
++ struct usb_config_descriptor *cp = buf;
++ int len;
++
++ /* config descriptor first */
++ if (length < USB_DT_CONFIG_SIZE || !desc)
++ return -EINVAL;
++ *cp = *config;
++
++ /* then interface/endpoint/class/vendor/... */
++ len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf,
++ length - USB_DT_CONFIG_SIZE, desc);
++ if (len < 0)
++ return len;
++ len += USB_DT_CONFIG_SIZE;
++ if (len > 0xffff)
++ return -EINVAL;
++
++ /* patch up the config descriptor */
++ cp->bLength = USB_DT_CONFIG_SIZE;
++ cp->bDescriptorType = USB_DT_CONFIG;
++ cp->wTotalLength = cpu_to_le16(len);
++ cp->bmAttributes |= USB_CONFIG_ATT_ONE;
++ return len;
++}
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/epautoconf.c kernel/drivers/usb/gadget/epautoconf.c
+--- /tmp/kernel/drivers/usb/gadget/epautoconf.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/epautoconf.c 2005-04-22 17:53:19.410543423 +0200
+@@ -0,0 +1,311 @@
++/*
++ * epautoconf.c -- endpoint autoconfiguration for usb gadget drivers
++ *
++ * Copyright (C) 2004 David Brownell
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/errno.h>
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++#include <asm/byteorder.h>
++
++#include "gadget_chips.h"
++
++
++/* we must assign addresses for configurable endpoints (like net2280) */
++static __initdata unsigned epnum;
++
++// #define MANY_ENDPOINTS
++#ifdef MANY_ENDPOINTS
++/* more than 15 configurable endpoints */
++static __initdata unsigned in_epnum;
++#endif
++
++
++/*
++ * This should work with endpoints from controller drivers sharing the
++ * same endpoint naming convention. By example:
++ *
++ * - ep1, ep2, ... address is fixed, not direction or type
++ * - ep1in, ep2out, ... address and direction are fixed, not type
++ * - ep1-bulk, ep2-bulk, ... address and type are fixed, not direction
++ * - ep1in-bulk, ep2out-iso, ... all three are fixed
++ * - ep-* ... no functionality restrictions
++ *
++ * Type suffixes are "-bulk", "-iso", or "-int". Numbers are decimal.
++ * Less common restrictions are implied by gadget_is_*().
++ *
++ * NOTE: each endpoint is unidirectional, as specified by its USB
++ * descriptor; and isn't specific to a configuration or altsetting.
++ */
++static int __init
++ep_matches (
++ struct usb_gadget *gadget,
++ struct usb_ep *ep,
++ struct usb_endpoint_descriptor *desc
++)
++{
++ u8 type;
++ const char *tmp;
++ u16 max;
++
++ /* endpoint already claimed? */
++ if (0 != ep->driver_data)
++ return 0;
++
++ /* only support ep0 for portable CONTROL traffic */
++ type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
++ if (USB_ENDPOINT_XFER_CONTROL == type)
++ return 0;
++
++ /* some other naming convention */
++ if ('e' != ep->name[0])
++ return 0;
++
++ /* type-restriction: "-iso", "-bulk", or "-int".
++ * direction-restriction: "in", "out".
++ */
++ if ('-' != ep->name[2]) {
++ tmp = strrchr (ep->name, '-');
++ if (tmp) {
++ switch (type) {
++ case USB_ENDPOINT_XFER_INT:
++ /* bulk endpoints handle interrupt transfers,
++ * except the toggle-quirky iso-synch kind
++ */
++ if ('s' == tmp[2]) // == "-iso"
++ return 0;
++ /* for now, avoid PXA "interrupt-in";
++ * it's documented as never using DATA1.
++ */
++ if (gadget_is_pxa (gadget)
++ && 'i' == tmp [1])
++ return 0;
++ break;
++ case USB_ENDPOINT_XFER_BULK:
++ if ('b' != tmp[1]) // != "-bulk"
++ return 0;
++ break;
++ case USB_ENDPOINT_XFER_ISOC:
++ if ('s' != tmp[2]) // != "-iso"
++ return 0;
++ }
++ } else {
++ tmp = ep->name + strlen (ep->name);
++ }
++
++ /* direction-restriction: "..in-..", "out-.." */
++ tmp--;
++ if (!isdigit (*tmp)) {
++ if (desc->bEndpointAddress & USB_DIR_IN) {
++ if ('n' != *tmp)
++ return 0;
++ } else {
++ if ('t' != *tmp)
++ return 0;
++ }
++ }
++ }
++
++ /* endpoint maxpacket size is an input parameter, except for bulk
++ * where it's an output parameter representing the full speed limit.
++ * the usb spec fixes high speed bulk maxpacket at 512 bytes.
++ */
++ max = 0x7ff & le16_to_cpup (&desc->wMaxPacketSize);
++ switch (type) {
++ case USB_ENDPOINT_XFER_INT:
++ /* INT: limit 64 bytes full speed, 1024 high speed */
++ if (!gadget->is_dualspeed && max > 64)
++ return 0;
++ /* FALLTHROUGH */
++
++ case USB_ENDPOINT_XFER_ISOC:
++ /* ISO: limit 1023 bytes full speed, 1024 high speed */
++ if (ep->maxpacket < max)
++ return 0;
++ if (!gadget->is_dualspeed && max > 1023)
++ return 0;
++
++ /* BOTH: "high bandwidth" works only at high speed */
++ if ((desc->wMaxPacketSize & __constant_cpu_to_le16(3<<11))) {
++ if (!gadget->is_dualspeed)
++ return 0;
++ /* configure your hardware with enough buffering!! */
++ }
++ break;
++ }
++
++ /* MATCH!! */
++
++ /* report address */
++ if (isdigit (ep->name [2])) {
++ u8 num = simple_strtol (&ep->name [2], NULL, 10);
++ desc->bEndpointAddress |= num;
++#ifdef MANY_ENDPOINTS
++ } else if (desc->bEndpointAddress & USB_DIR_IN) {
++ if (++in_epnum > 15)
++ return 0;
++ desc->bEndpointAddress = USB_DIR_IN | in_epnum;
++#endif
++ } else {
++ if (++epnum > 15)
++ return 0;
++ desc->bEndpointAddress |= epnum;
++ }
++
++ /* report (variable) full speed bulk maxpacket */
++ if (USB_ENDPOINT_XFER_BULK == type) {
++ int size = ep->maxpacket;
++
++ /* min() doesn't work on bitfields with gcc-3.5 */
++ if (size > 64)
++ size = 64;
++ desc->wMaxPacketSize = cpu_to_le16(size);
++ }
++ return 1;
++}
++
++static struct usb_ep * __init
++find_ep (struct usb_gadget *gadget, const char *name)
++{
++ struct usb_ep *ep;
++
++ list_for_each_entry (ep, &gadget->ep_list, ep_list) {
++ if (0 == strcmp (ep->name, name))
++ return ep;
++ }
++ return NULL;
++}
++
++/**
++ * usb_ep_autoconfig - choose an endpoint matching the descriptor
++ * @gadget: The device to which the endpoint must belong.
++ * @desc: Endpoint descriptor, with endpoint direction and transfer mode
++ * initialized. For periodic transfers, the maximum packet
++ * size must also be initialized. This is modified on success.
++ *
++ * By choosing an endpoint to use with the specified descriptor, this
++ * routine simplifies writing gadget drivers that work with multiple
++ * USB device controllers. The endpoint would be passed later to
++ * usb_ep_enable(), along with some descriptor.
++ *
++ * That second descriptor won't always be the same as the first one.
++ * For example, isochronous endpoints can be autoconfigured for high
++ * bandwidth, and then used in several lower bandwidth altsettings.
++ * Also, high and full speed descriptors will be different.
++ *
++ * Be sure to examine and test the results of autoconfiguration on your
++ * hardware. This code may not make the best choices about how to use the
++ * USB controller, and it can't know all the restrictions that may apply.
++ * Some combinations of driver and hardware won't be able to autoconfigure.
++ *
++ * On success, this returns an un-claimed usb_ep, and modifies the endpoint
++ * descriptor bEndpointAddress. For bulk endpoints, the wMaxPacket value
++ * is initialized as if the endpoint were used at full speed. To prevent
++ * the endpoint from being returned by a later autoconfig call, claim it
++ * by assigning ep->driver_data to some non-null value.
++ *
++ * On failure, this returns a null endpoint descriptor.
++ */
++struct usb_ep * __init usb_ep_autoconfig (
++ struct usb_gadget *gadget,
++ struct usb_endpoint_descriptor *desc
++)
++{
++ struct usb_ep *ep;
++ u8 type;
++
++ type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
++
++ /* First, apply chip-specific "best usage" knowledge.
++ * This might make a good usb_gadget_ops hook ...
++ */
++ if (gadget_is_net2280 (gadget) && type == USB_ENDPOINT_XFER_INT) {
++ /* ep-e, ep-f are PIO with only 64 byte fifos */
++ ep = find_ep (gadget, "ep-e");
++ if (ep && ep_matches (gadget, ep, desc))
++ return ep;
++ ep = find_ep (gadget, "ep-f");
++ if (ep && ep_matches (gadget, ep, desc))
++ return ep;
++
++ } else if (gadget_is_goku (gadget)) {
++ if (USB_ENDPOINT_XFER_INT == type) {
++ /* single buffering is enough */
++ ep = find_ep (gadget, "ep3-bulk");
++ if (ep && ep_matches (gadget, ep, desc))
++ return ep;
++ } else if (USB_ENDPOINT_XFER_BULK == type
++ && (USB_DIR_IN & desc->bEndpointAddress)) {
++ /* DMA may be available */
++ ep = find_ep (gadget, "ep2-bulk");
++ if (ep && ep_matches (gadget, ep, desc))
++ return ep;
++ }
++
++ } else if (gadget_is_sh (gadget) && USB_ENDPOINT_XFER_INT == type) {
++ /* single buffering is enough; maybe 8 byte fifo is too */
++ ep = find_ep (gadget, "ep3in-bulk");
++ if (ep && ep_matches (gadget, ep, desc))
++ return ep;
++
++ } else if (gadget_is_mq11xx (gadget) && USB_ENDPOINT_XFER_INT == type) {
++ ep = find_ep (gadget, "ep1-bulk");
++ if (ep && ep_matches (gadget, ep, desc))
++ return ep;
++ }
++
++ /* Second, look at endpoints until an unclaimed one looks usable */
++ list_for_each_entry (ep, &gadget->ep_list, ep_list) {
++ if (ep_matches (gadget, ep, desc))
++ return ep;
++ }
++
++ /* Fail */
++ return NULL;
++}
++
++/**
++ * usb_ep_autoconfig_reset - reset endpoint autoconfig state
++ * @gadget: device for which autoconfig state will be reset
++ *
++ * Use this for devices where one configuration may need to assign
++ * endpoint resources very differently from the next one. It clears
++ * state such as ep->driver_data and the record of assigned endpoints
++ * used by usb_ep_autoconfig().
++ */
++void __init usb_ep_autoconfig_reset (struct usb_gadget *gadget)
++{
++ struct usb_ep *ep;
++
++ list_for_each_entry (ep, &gadget->ep_list, ep_list) {
++ ep->driver_data = NULL;
++ }
++#ifdef MANY_ENDPOINTS
++ in_epnum = 0;
++#endif
++ epnum = 0;
++}
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/ether.c kernel/drivers/usb/gadget/ether.c
+--- /tmp/kernel/drivers/usb/gadget/ether.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/ether.c 2005-04-22 18:01:31.044861540 +0200
+@@ -0,0 +1,2734 @@
++/*
++ * ether.c -- Ethernet gadget driver, with CDC and non-CDC options
++ *
++ * Copyright (C) 2003-2005 David Brownell
++ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++
++// #define DEBUG 1
++// #define VERBOSE
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/smp_lock.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/uts.h>
++#include <linux/version.h>
++#include <linux/moduleparam.h>
++#include <linux/ctype.h>
++
++#include <asm/byteorder.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/unaligned.h>
++
++#include <linux/usb_ch9.h>
++#include <linux/usb_cdc.h>
++#include <linux/usb_gadget.h>
++
++#include <linux/random.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/ethtool.h>
++
++#include "gadget_chips.h"
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * Ethernet gadget driver -- with CDC and non-CDC options
++ * Builds on hardware support for a full duplex link.
++ *
++ * CDC Ethernet is the standard USB solution for sending Ethernet frames
++ * using USB. Real hardware tends to use the same framing protocol but look
++ * different for control features. This driver strongly prefers to use
++ * this USB-IF standard as its open-systems interoperability solution;
++ * most host side USB stacks (except from Microsoft) support it.
++ *
++ * There's some hardware that can't talk CDC. We make that hardware
++ * implement a "minimalist" vendor-agnostic CDC core: same framing, but
++ * link-level setup only requires activating the configuration.
++ * Linux supports it, but other host operating systems may not.
++ * (This is a subset of CDC Ethernet.)
++ *
++ * A third option is also in use. Rather than CDC Ethernet, or something
++ * simpler, Microsoft pushes their own approach: RNDIS. The published
++ * RNDIS specs are ambiguous and appear to be incomplete, and are also
++ * needlessly complex.
++ */
++
++#define DRIVER_DESC "Ethernet Gadget"
++#define DRIVER_VERSION "Equinox 2004"
++
++static const char shortname [] = "ether";
++static const char driver_desc [] = DRIVER_DESC;
++
++#define RX_EXTRA 20 /* guard against rx overflows */
++
++#ifdef CONFIG_USB_ETH_RNDIS
++#include "rndis.h"
++#else
++#define rndis_init() 0
++#define rndis_exit() do{}while(0)
++#endif
++
++/* 2.6-compat */
++#ifndef container_of
++#define container_of list_entry
++#endif
++
++/** PO: really needed? */
++#include <linux/tqueue.h>
++#define work_struct tq_struct
++#define INIT_WORK INIT_TQUEUE
++#define schedule_work schedule_task
++#define flush_scheduled_work flush_scheduled_tasks
++
++static void random_ether_addr (u8 *addr)
++{
++ get_random_bytes (addr, ETH_ALEN);
++ addr [0] &= 0xfe; // clear multicast bit
++ addr [0] |= 0x02; // set local assignment bit (IEEE802)
++}
++
++/* CDC and RNDIS support the same host-chosen outgoing packet filters. */
++#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
++ |USB_CDC_PACKET_TYPE_DIRECTED)
++
++
++/*-------------------------------------------------------------------------*/
++
++struct eth_dev {
++ spinlock_t lock;
++ struct usb_gadget *gadget;
++ struct usb_request *req; /* for control responses */
++ struct usb_request *stat_req; /* for cdc & rndis status */
++
++ u8 config;
++ struct usb_ep *in_ep, *out_ep, *status_ep;
++ const struct usb_endpoint_descriptor
++ *in, *out, *status;
++ struct list_head tx_reqs, rx_reqs;
++
++ struct net_device *net;
++ struct net_device_stats stats;
++ atomic_t tx_qlen;
++
++ struct work_struct work;
++ unsigned zlp:1;
++ unsigned cdc:1;
++ unsigned rndis:1;
++ unsigned suspended:1;
++ u16 cdc_filter;
++ unsigned long todo;
++#define WORK_RX_MEMORY 0
++ int rndis_config;
++ u8 host_mac [ETH_ALEN];
++};
++
++/* This version autoconfigures as much as possible at run-time.
++ *
++ * It also ASSUMES a self-powered device, without remote wakeup,
++ * although remote wakeup support would make sense.
++ */
++static const char *EP_IN_NAME;
++static const char *EP_OUT_NAME;
++static const char *EP_STATUS_NAME;
++
++/*-------------------------------------------------------------------------*/
++
++/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
++ * Instead: allocate your own, using normal USB-IF procedures.
++ */
++
++/* Thanks to NetChip Technologies for donating this product ID.
++ * It's for devices with only CDC Ethernet configurations.
++ */
++#define CDC_VENDOR_NUM 0x0525 /* NetChip */
++#define CDC_PRODUCT_NUM 0xa4a1 /* Linux-USB Ethernet Gadget */
++
++/* For hardware that can't talk CDC, we use the same vendor ID that
++ * ARM Linux has used for ethernet-over-usb, both with sa1100 and
++ * with pxa250. We're protocol-compatible, if the host-side drivers
++ * use the endpoint descriptors. bcdDevice (version) is nonzero, so
++ * drivers that need to hard-wire endpoint numbers have a hook.
++ *
++ * The protocol is a minimal subset of CDC Ether, which works on any bulk
++ * hardware that's not deeply broken ... even on hardware that can't talk
++ * RNDIS (like SA-1100, with no interrupt endpoint, or anything that
++ * doesn't handle control-OUT).
++ */
++#define SIMPLE_VENDOR_NUM 0x049f
++#define SIMPLE_PRODUCT_NUM 0x505a
++
++/* For hardware that can talk RNDIS and either of the above protocols,
++ * use this ID ... the windows INF files will know it. Unless it's
++ * used with CDC Ethernet, Linux 2.4 hosts will need updates to choose
++ * the non-RNDIS configuration.
++ */
++#define RNDIS_VENDOR_NUM 0x0525 /* NetChip */
++#define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */
++
++
++/* Some systems will want different product identifers published in the
++ * device descriptor, either numbers or strings or both. These string
++ * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
++ */
++
++static ushort __initdata idVendor;
++MODULE_PARM(idVendor, "h");
++MODULE_PARM_DESC(idVendor, "USB Vendor ID");
++
++static ushort __initdata idProduct;
++MODULE_PARM(idProduct, "h");
++MODULE_PARM_DESC(idProduct, "USB Product ID");
++
++static ushort __initdata bcdDevice;
++MODULE_PARM(bcdDevice, "h");
++MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
++
++static char *__initdata iManufacturer;
++MODULE_PARM(iManufacturer, "s");
++MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
++
++static char *__initdata iProduct;
++MODULE_PARM(iProduct, "s");
++MODULE_PARM_DESC(iProduct, "USB Product string");
++
++/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
++static char *__initdata dev_addr;
++MODULE_PARM(dev_addr, "s");
++MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
++
++/* this address is invisible to ifconfig */
++static char *__initdata host_addr;
++MODULE_PARM(host_addr, "s");
++MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
++
++
++/*-------------------------------------------------------------------------*/
++
++/* Include CDC support if we could run on CDC-capable hardware. */
++
++#ifdef CONFIG_USB_GADGET_NET2280
++#define DEV_CONFIG_CDC
++#endif
++
++#ifdef CONFIG_USB_GADGET_DUMMY_HCD
++#define DEV_CONFIG_CDC
++#endif
++
++#ifdef CONFIG_USB_GADGET_GOKU
++#define DEV_CONFIG_CDC
++#endif
++
++#ifdef CONFIG_USB_GADGET_LH7A40X
++#define DEV_CONFIG_CDC
++#endif
++
++#ifdef CONFIG_USB_GADGET_MQ11XX
++#define DEV_CONFIG_CDC
++#endif
++
++#ifdef CONFIG_USB_GADGET_OMAP
++#define DEV_CONFIG_CDC
++#endif
++
++#ifdef CONFIG_USB_GADGET_N9604
++#define DEV_CONFIG_CDC
++#endif
++
++#ifdef CONFIG_USB_GADGET_PXA27X
++#define DEV_CONFIG_CDC
++#endif
++
++#ifdef CONFIG_USB_GADGET_AT91
++#define DEV_CONFIG_CDC
++#endif
++
++
++/* For CDC-incapable hardware, choose the simple cdc subset.
++ * Anything that talks bulk (without notable bugs) can do this.
++ */
++#ifdef CONFIG_USB_GADGET_PXA2XX
++#define DEV_CONFIG_SUBSET
++#endif
++
++#ifdef CONFIG_USB_GADGET_SH
++#define DEV_CONFIG_SUBSET
++#endif
++
++#ifdef CONFIG_USB_GADGET_SA1100
++/* use non-CDC for backwards compatibility */
++#define DEV_CONFIG_SUBSET
++#endif
++
++#ifdef CONFIG_USB_GADGET_S3C2410
++#define DEV_CONFIG_CDC
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++/* "main" config is either CDC, or its simple subset */
++static inline int is_cdc(struct eth_dev *dev)
++{
++#if !defined(DEV_CONFIG_SUBSET)
++ return 1; /* only cdc possible */
++#elif !defined (DEV_CONFIG_CDC)
++ return 0; /* only subset possible */
++#else
++ return dev->cdc; /* depends on what hardware we found */
++#endif
++}
++
++/* "secondary" RNDIS config may sometimes be activated */
++static inline int rndis_active(struct eth_dev *dev)
++{
++#ifdef CONFIG_USB_ETH_RNDIS
++ return dev->rndis;
++#else
++ return 0;
++#endif
++}
++
++#define subset_active(dev) (!is_cdc(dev) && !rndis_active(dev))
++#define cdc_active(dev) ( is_cdc(dev) && !rndis_active(dev))
++
++
++
++#define DEFAULT_QLEN 2 /* double buffering by default */
++
++/* peak bulk transfer bits-per-second */
++#define HS_BPS (13 * 512 * 8 * 1000 * 8)
++#define FS_BPS (19 * 64 * 1 * 1000 * 8)
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++
++static unsigned qmult = 5;
++MODULE_PARM(qmult, "i");
++
++
++/* for dual-speed hardware, use deeper queues at highspeed */
++#define qlen(gadget) \
++ (DEFAULT_QLEN*((gadget->speed == USB_SPEED_HIGH) ? qmult : 1))
++
++/* also defer IRQs on highspeed TX */
++#define TX_DELAY qmult
++
++#define BITRATE(g) (((g)->speed == USB_SPEED_HIGH) ? HS_BPS : FS_BPS)
++
++#else /* full speed (low speed doesn't do bulk) */
++#define qlen(gadget) DEFAULT_QLEN
++
++#define BITRATE(g) FS_BPS
++#endif
++
++
++/*-------------------------------------------------------------------------*/
++
++#define xprintk(d,level,fmt,args...) \
++ printk(level "%s: " fmt , (d)->net->name , ## args)
++
++#ifdef DEBUG
++#undef DEBUG
++#define DEBUG(dev,fmt,args...) \
++ xprintk(dev , KERN_DEBUG , fmt , ## args)
++#else
++#define DEBUG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* DEBUG */
++
++#ifdef VERBOSE
++#define VDEBUG DEBUG
++#else
++#define VDEBUG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* DEBUG */
++
++#define ERROR(dev,fmt,args...) \
++ xprintk(dev , KERN_ERR , fmt , ## args)
++#define WARN(dev,fmt,args...) \
++ xprintk(dev , KERN_WARNING , fmt , ## args)
++#define INFO(dev,fmt,args...) \
++ xprintk(dev , KERN_INFO , fmt , ## args)
++
++/*-------------------------------------------------------------------------*/
++
++/* USB DRIVER HOOKUP (to the hardware driver, below us), mostly
++ * ep0 implementation: descriptors, config management, setup().
++ * also optional class-specific notification interrupt transfer.
++ */
++
++/*
++ * DESCRIPTORS ... most are static, but strings and (full) configuration
++ * descriptors are built on demand. For now we do either full CDC, or
++ * our simple subset, with RNDIS as an optional second configuration.
++ *
++ * RNDIS includes some CDC ACM descriptors ... like CDC Ethernet. But
++ * the class descriptors match a modem (they're ignored; it's really just
++ * Ethernet functionality), they don't need the NOP altsetting, and the
++ * status transfer endpoint isn't optional.
++ */
++
++#define STRING_MANUFACTURER 1
++#define STRING_PRODUCT 2
++#define STRING_ETHADDR 3
++#define STRING_DATA 4
++#define STRING_CONTROL 5
++#define STRING_RNDIS_CONTROL 6
++#define STRING_CDC 7
++#define STRING_SUBSET 8
++#define STRING_RNDIS 9
++
++#define USB_BUFSIZ 256 /* holds our biggest descriptor */
++
++/*
++ * This device advertises one configuration, eth_config, unless RNDIS
++ * is enabled (rndis_config) on hardware supporting at least two configs.
++ *
++ * NOTE: Controllers like superh_udc should probably be able to use
++ * an RNDIS-only configuration.
++ *
++ * FIXME define some higher-powered configurations to make it easier
++ * to recharge batteries ...
++ */
++
++#define DEV_CONFIG_VALUE 1 /* cdc or subset */
++#define DEV_RNDIS_CONFIG_VALUE 2 /* rndis; optional */
++
++static struct usb_device_descriptor
++device_desc = {
++ .bLength = sizeof device_desc,
++ .bDescriptorType = USB_DT_DEVICE,
++
++ .bcdUSB = __constant_cpu_to_le16 (0x0200),
++
++ .bDeviceClass = USB_CLASS_COMM,
++ .bDeviceSubClass = 0,
++ .bDeviceProtocol = 0,
++
++ .idVendor = __constant_cpu_to_le16 (CDC_VENDOR_NUM),
++ .idProduct = __constant_cpu_to_le16 (CDC_PRODUCT_NUM),
++ .iManufacturer = STRING_MANUFACTURER,
++ .iProduct = STRING_PRODUCT,
++ .bNumConfigurations = 1,
++};
++
++static struct usb_otg_descriptor
++otg_descriptor = {
++ .bLength = sizeof otg_descriptor,
++ .bDescriptorType = USB_DT_OTG,
++
++ .bmAttributes = USB_OTG_SRP,
++};
++
++static struct usb_config_descriptor
++eth_config = {
++ .bLength = sizeof eth_config,
++ .bDescriptorType = USB_DT_CONFIG,
++
++ /* compute wTotalLength on the fly */
++ .bNumInterfaces = 2,
++ .bConfigurationValue = DEV_CONFIG_VALUE,
++ .iConfiguration = STRING_CDC,
++ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
++ .bMaxPower = 50,
++};
++
++#ifdef CONFIG_USB_ETH_RNDIS
++static struct usb_config_descriptor
++rndis_config = {
++ .bLength = sizeof rndis_config,
++ .bDescriptorType = USB_DT_CONFIG,
++
++ /* compute wTotalLength on the fly */
++ .bNumInterfaces = 2,
++ .bConfigurationValue = DEV_RNDIS_CONFIG_VALUE,
++ .iConfiguration = STRING_RNDIS,
++ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
++ .bMaxPower = 50,
++};
++#endif
++
++/*
++ * Compared to the simple CDC subset, the full CDC Ethernet model adds
++ * three class descriptors, two interface descriptors, optional status
++ * endpoint. Both have a "data" interface and two bulk endpoints.
++ * There are also differences in how control requests are handled.
++ *
++ * RNDIS shares a lot with CDC-Ethernet, since it's a variant of
++ * the CDC-ACM (modem) spec.
++ */
++
++#ifdef DEV_CONFIG_CDC
++static struct usb_interface_descriptor
++control_intf = {
++ .bLength = sizeof control_intf,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bInterfaceNumber = 0,
++ /* status endpoint is optional; this may be patched later */
++ .bNumEndpoints = 1,
++ .bInterfaceClass = USB_CLASS_COMM,
++ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
++ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
++ .iInterface = STRING_CONTROL,
++};
++#endif
++
++#ifdef CONFIG_USB_ETH_RNDIS
++static const struct usb_interface_descriptor
++rndis_control_intf = {
++ .bLength = sizeof rndis_control_intf,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bInterfaceNumber = 0,
++ .bNumEndpoints = 1,
++ .bInterfaceClass = USB_CLASS_COMM,
++ .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
++ .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR,
++ .iInterface = STRING_RNDIS_CONTROL,
++};
++#endif
++
++#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
++
++static const struct usb_cdc_header_desc header_desc = {
++ .bLength = sizeof header_desc,
++ .bDescriptorType = USB_DT_CS_INTERFACE,
++ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
++
++ .bcdCDC = __constant_cpu_to_le16 (0x0110),
++};
++
++static const struct usb_cdc_union_desc union_desc = {
++ .bLength = sizeof union_desc,
++ .bDescriptorType = USB_DT_CS_INTERFACE,
++ .bDescriptorSubType = USB_CDC_UNION_TYPE,
++
++ .bMasterInterface0 = 0, /* index of control interface */
++ .bSlaveInterface0 = 1, /* index of DATA interface */
++};
++
++#endif /* CDC || RNDIS */
++
++#ifdef CONFIG_USB_ETH_RNDIS
++
++static const struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
++ .bLength = sizeof call_mgmt_descriptor,
++ .bDescriptorType = USB_DT_CS_INTERFACE,
++ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
++
++ .bmCapabilities = 0x00,
++ .bDataInterface = 0x01,
++};
++
++static struct usb_cdc_acm_descriptor acm_descriptor = {
++ .bLength = sizeof acm_descriptor,
++ .bDescriptorType = USB_DT_CS_INTERFACE,
++ .bDescriptorSubType = USB_CDC_ACM_TYPE,
++
++ .bmCapabilities = 0x00,
++};
++
++#endif
++
++#ifdef DEV_CONFIG_CDC
++
++static const struct usb_cdc_ether_desc ether_desc = {
++ .bLength = sizeof ether_desc,
++ .bDescriptorType = USB_DT_CS_INTERFACE,
++ .bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
++
++ /* this descriptor actually adds value, surprise! */
++ .iMACAddress = STRING_ETHADDR,
++ .bmEthernetStatistics = __constant_cpu_to_le32 (0), /* no statistics */
++ .wMaxSegmentSize = __constant_cpu_to_le16 (ETH_FRAME_LEN),
++ .wNumberMCFilters = __constant_cpu_to_le16 (0),
++ .bNumberPowerFilters = 0,
++};
++
++#endif
++
++#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
++
++/* include the status endpoint if we can, even where it's optional.
++ * use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
++ * packet, to simplify cancelation; and a big transfer interval, to
++ * waste less bandwidth.
++ *
++ * some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
++ * if they ignore the connect/disconnect notifications that real aether
++ * can provide. more advanced cdc configurations might want to support
++ * encapsulated commands (vendor-specific, using control-OUT).
++ *
++ * RNDIS requires the status endpoint, since it uses that encapsulation
++ * mechanism for its funky RPC scheme.
++ */
++
++#define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
++#define STATUS_BYTECOUNT 16 /* 8 byte header + data */
++
++static struct usb_endpoint_descriptor
++fs_status_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_IN,
++ .bmAttributes = USB_ENDPOINT_XFER_INT,
++ .wMaxPacketSize = __constant_cpu_to_le16 (STATUS_BYTECOUNT),
++ .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
++};
++#endif
++
++#ifdef DEV_CONFIG_CDC
++
++/* the default data interface has no endpoints ... */
++
++static const struct usb_interface_descriptor
++data_nop_intf = {
++ .bLength = sizeof data_nop_intf,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bInterfaceNumber = 1,
++ .bAlternateSetting = 0,
++ .bNumEndpoints = 0,
++ .bInterfaceClass = USB_CLASS_CDC_DATA,
++ .bInterfaceSubClass = 0,
++ .bInterfaceProtocol = 0,
++};
++
++/* ... but the "real" data interface has two bulk endpoints */
++
++static const struct usb_interface_descriptor
++data_intf = {
++ .bLength = sizeof data_intf,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bInterfaceNumber = 1,
++ .bAlternateSetting = 1,
++ .bNumEndpoints = 2,
++ .bInterfaceClass = USB_CLASS_CDC_DATA,
++ .bInterfaceSubClass = 0,
++ .bInterfaceProtocol = 0,
++ .iInterface = STRING_DATA,
++};
++
++#endif
++
++#ifdef CONFIG_USB_ETH_RNDIS
++
++/* RNDIS doesn't activate by changing to the "real" altsetting */
++
++static const struct usb_interface_descriptor
++rndis_data_intf = {
++ .bLength = sizeof rndis_data_intf,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bInterfaceNumber = 1,
++ .bAlternateSetting = 0,
++ .bNumEndpoints = 2,
++ .bInterfaceClass = USB_CLASS_CDC_DATA,
++ .bInterfaceSubClass = 0,
++ .bInterfaceProtocol = 0,
++ .iInterface = STRING_DATA,
++};
++
++#endif
++
++#ifdef DEV_CONFIG_SUBSET
++
++/*
++ * "Simple" CDC-subset option is a simple vendor-neutral model that most
++ * full speed controllers can handle: one interface, two bulk endpoints.
++ */
++
++static const struct usb_interface_descriptor
++subset_data_intf = {
++ .bLength = sizeof subset_data_intf,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bInterfaceNumber = 0,
++ .bAlternateSetting = 0,
++ .bNumEndpoints = 2,
++ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
++ .bInterfaceSubClass = 0,
++ .bInterfaceProtocol = 0,
++ .iInterface = STRING_DATA,
++};
++
++#endif /* SUBSET */
++
++
++static struct usb_endpoint_descriptor
++fs_source_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_IN,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++};
++
++static struct usb_endpoint_descriptor
++fs_sink_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_OUT,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++};
++
++static const struct usb_descriptor_header *fs_eth_function [11] = {
++ (struct usb_descriptor_header *) &otg_descriptor,
++#ifdef DEV_CONFIG_CDC
++ /* "cdc" mode descriptors */
++ (struct usb_descriptor_header *) &control_intf,
++ (struct usb_descriptor_header *) &header_desc,
++ (struct usb_descriptor_header *) &union_desc,
++ (struct usb_descriptor_header *) &ether_desc,
++ /* NOTE: status endpoint may need to be removed */
++ (struct usb_descriptor_header *) &fs_status_desc,
++ /* data interface, with altsetting */
++ (struct usb_descriptor_header *) &data_nop_intf,
++ (struct usb_descriptor_header *) &data_intf,
++ (struct usb_descriptor_header *) &fs_source_desc,
++ (struct usb_descriptor_header *) &fs_sink_desc,
++ NULL,
++#endif /* DEV_CONFIG_CDC */
++};
++
++static inline void __init fs_subset_descriptors(void)
++{
++#ifdef DEV_CONFIG_SUBSET
++ fs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
++ fs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc;
++ fs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc;
++ fs_eth_function[4] = NULL;
++#else
++ fs_eth_function[1] = NULL;
++#endif
++}
++
++#ifdef CONFIG_USB_ETH_RNDIS
++static const struct usb_descriptor_header *fs_rndis_function [] = {
++ (struct usb_descriptor_header *) &otg_descriptor,
++ /* control interface matches ACM, not Ethernet */
++ (struct usb_descriptor_header *) &rndis_control_intf,
++ (struct usb_descriptor_header *) &header_desc,
++ (struct usb_descriptor_header *) &call_mgmt_descriptor,
++ (struct usb_descriptor_header *) &acm_descriptor,
++ (struct usb_descriptor_header *) &union_desc,
++ (struct usb_descriptor_header *) &fs_status_desc,
++ /* data interface has no altsetting */
++ (struct usb_descriptor_header *) &rndis_data_intf,
++ (struct usb_descriptor_header *) &fs_source_desc,
++ (struct usb_descriptor_header *) &fs_sink_desc,
++ NULL,
++};
++#endif
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++
++/*
++ * usb 2.0 devices need to expose both high speed and full speed
++ * descriptors, unless they only run at full speed.
++ */
++
++#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
++static struct usb_endpoint_descriptor
++hs_status_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bmAttributes = USB_ENDPOINT_XFER_INT,
++ .wMaxPacketSize = __constant_cpu_to_le16 (STATUS_BYTECOUNT),
++ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
++};
++#endif /* DEV_CONFIG_CDC */
++
++static struct usb_endpoint_descriptor
++hs_source_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = __constant_cpu_to_le16 (512),
++};
++
++static struct usb_endpoint_descriptor
++hs_sink_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = __constant_cpu_to_le16 (512),
++};
++
++static struct usb_qualifier_descriptor
++dev_qualifier = {
++ .bLength = sizeof dev_qualifier,
++ .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
++
++ .bcdUSB = __constant_cpu_to_le16 (0x0200),
++ .bDeviceClass = USB_CLASS_COMM,
++
++ .bNumConfigurations = 1,
++};
++
++static const struct usb_descriptor_header *hs_eth_function [11] = {
++ (struct usb_descriptor_header *) &otg_descriptor,
++#ifdef DEV_CONFIG_CDC
++ /* "cdc" mode descriptors */
++ (struct usb_descriptor_header *) &control_intf,
++ (struct usb_descriptor_header *) &header_desc,
++ (struct usb_descriptor_header *) &union_desc,
++ (struct usb_descriptor_header *) &ether_desc,
++ /* NOTE: status endpoint may need to be removed */
++ (struct usb_descriptor_header *) &hs_status_desc,
++ /* data interface, with altsetting */
++ (struct usb_descriptor_header *) &data_nop_intf,
++ (struct usb_descriptor_header *) &data_intf,
++ (struct usb_descriptor_header *) &hs_source_desc,
++ (struct usb_descriptor_header *) &hs_sink_desc,
++ NULL,
++#endif /* DEV_CONFIG_CDC */
++};
++
++static inline void __init hs_subset_descriptors(void)
++{
++#ifdef DEV_CONFIG_SUBSET
++ hs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
++ hs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc;
++ hs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc;
++ hs_eth_function[4] = NULL;
++#else
++ hs_eth_function[1] = NULL;
++#endif
++}
++
++#ifdef CONFIG_USB_ETH_RNDIS
++static const struct usb_descriptor_header *hs_rndis_function [] = {
++ (struct usb_descriptor_header *) &otg_descriptor,
++ /* control interface matches ACM, not Ethernet */
++ (struct usb_descriptor_header *) &rndis_control_intf,
++ (struct usb_descriptor_header *) &header_desc,
++ (struct usb_descriptor_header *) &call_mgmt_descriptor,
++ (struct usb_descriptor_header *) &acm_descriptor,
++ (struct usb_descriptor_header *) &union_desc,
++ (struct usb_descriptor_header *) &hs_status_desc,
++ /* data interface has no altsetting */
++ (struct usb_descriptor_header *) &rndis_data_intf,
++ (struct usb_descriptor_header *) &hs_source_desc,
++ (struct usb_descriptor_header *) &hs_sink_desc,
++ NULL,
++};
++#endif
++
++
++/* maxpacket and other transfer characteristics vary by speed. */
++#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs))
++
++#else
++
++/* if there's no high speed support, maxpacket doesn't change. */
++#define ep_desc(g,hs,fs) fs
++
++static inline void __init hs_subset_descriptors(void)
++{
++}
++
++#endif /* !CONFIG_USB_GADGET_DUALSPEED */
++
++/*-------------------------------------------------------------------------*/
++
++/* descriptors that are built on-demand */
++
++static char manufacturer [50];
++static char product_desc [40] = DRIVER_DESC;
++
++#ifdef DEV_CONFIG_CDC
++/* address that the host will use ... usually assigned at random */
++static char ethaddr [2 * ETH_ALEN + 1];
++#endif
++
++/* static strings, in UTF-8 */
++static struct usb_string strings [] = {
++ { STRING_MANUFACTURER, manufacturer, },
++ { STRING_PRODUCT, product_desc, },
++ { STRING_DATA, "Ethernet Data", },
++#ifdef DEV_CONFIG_CDC
++ { STRING_CDC, "CDC Ethernet", },
++ { STRING_ETHADDR, ethaddr, },
++ { STRING_CONTROL, "CDC Communications Control", },
++#endif
++#ifdef DEV_CONFIG_SUBSET
++ { STRING_SUBSET, "CDC Ethernet Subset", },
++#endif
++#ifdef CONFIG_USB_ETH_RNDIS
++ { STRING_RNDIS, "RNDIS", },
++ { STRING_RNDIS_CONTROL, "RNDIS Communications Control", },
++#endif
++ { } /* end of list */
++};
++
++static struct usb_gadget_strings stringtab = {
++ .language = 0x0409, /* en-us */
++ .strings = strings,
++};
++
++/*
++ * one config, two interfaces: control, data.
++ * complications: class descriptors, and an altsetting.
++ */
++static int
++config_buf (enum usb_device_speed speed,
++ u8 *buf, u8 type,
++ unsigned index, int is_otg)
++{
++ int len;
++ const struct usb_config_descriptor *config;
++ const struct usb_descriptor_header **function;
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ int hs = (speed == USB_SPEED_HIGH);
++
++ if (type == USB_DT_OTHER_SPEED_CONFIG)
++ hs = !hs;
++#define which_fn(t) (hs ? hs_ ## t ## _function : fs_ ## t ## _function)
++#else
++#define which_fn(t) (fs_ ## t ## _function)
++#endif
++
++ if (index >= device_desc.bNumConfigurations)
++ return -EINVAL;
++
++#ifdef CONFIG_USB_ETH_RNDIS
++ /* list the RNDIS config first, to make Microsoft's drivers
++ * happy. DOCSIS 1.0 needs this too.
++ */
++ if (device_desc.bNumConfigurations == 2 && index == 0) {
++ config = &rndis_config;
++ function = which_fn (rndis);
++ } else
++#endif
++ {
++ config = &eth_config;
++ function = which_fn (eth);
++ }
++
++ /* for now, don't advertise srp-only devices */
++ if (!is_otg)
++ function++;
++
++ len = usb_gadget_config_buf (config, buf, USB_BUFSIZ, function);
++ if (len < 0)
++ return len;
++ ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
++ return len;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void eth_start (struct eth_dev *dev, int gfp_flags);
++static int alloc_requests (struct eth_dev *dev, unsigned n, int gfp_flags);
++
++#ifdef DEV_CONFIG_CDC
++static inline int ether_alt_ep_setup (struct eth_dev *dev, struct usb_ep *ep)
++{
++ const struct usb_endpoint_descriptor *d;
++
++ /* With CDC, the host isn't allowed to use these two data
++ * endpoints in the default altsetting for the interface.
++ * so we don't activate them yet. Reset from SET_INTERFACE.
++ *
++ * Strictly speaking RNDIS should work the same: activation is
++ * a side effect of setting a packet filter. Deactivation is
++ * from REMOTE_NDIS_HALT_MSG, reset from REMOTE_NDIS_RESET_MSG.
++ */
++
++ /* one endpoint writes data back IN to the host */
++ if (strcmp (ep->name, EP_IN_NAME) == 0) {
++ d = ep_desc (dev->gadget, &hs_source_desc, &fs_source_desc);
++ ep->driver_data = dev;
++ dev->in = d;
++
++ /* one endpoint just reads OUT packets */
++ } else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
++ d = ep_desc (dev->gadget, &hs_sink_desc, &fs_sink_desc);
++ ep->driver_data = dev;
++ dev->out = d;
++
++ /* optional status/notification endpoint */
++ } else if (EP_STATUS_NAME &&
++ strcmp (ep->name, EP_STATUS_NAME) == 0) {
++ int result;
++
++ d = ep_desc (dev->gadget, &hs_status_desc, &fs_status_desc);
++ result = usb_ep_enable (ep, d);
++ if (result < 0)
++ return result;
++
++ ep->driver_data = dev;
++ dev->status = d;
++ }
++ return 0;
++}
++#endif
++
++#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
++static inline int ether_ep_setup (struct eth_dev *dev, struct usb_ep *ep)
++{
++ int result;
++ const struct usb_endpoint_descriptor *d;
++
++ /* CDC subset is simpler: if the device is there,
++ * it's live with rx and tx endpoints.
++ *
++ * Do this as a shortcut for RNDIS too.
++ */
++
++ /* one endpoint writes data back IN to the host */
++ if (strcmp (ep->name, EP_IN_NAME) == 0) {
++ d = ep_desc (dev->gadget, &hs_source_desc, &fs_source_desc);
++ result = usb_ep_enable (ep, d);
++ if (result < 0)
++ return result;
++
++ ep->driver_data = dev;
++ dev->in = d;
++
++ /* one endpoint just reads OUT packets */
++ } else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
++ d = ep_desc (dev->gadget, &hs_sink_desc, &fs_sink_desc);
++ result = usb_ep_enable (ep, d);
++ if (result < 0)
++ return result;
++
++ ep->driver_data = dev;
++ dev->out = d;
++ }
++
++ return 0;
++}
++#endif
++
++static int
++set_ether_config (struct eth_dev *dev, int gfp_flags)
++{
++ int result = 0;
++ struct usb_ep *ep;
++ struct usb_gadget *gadget = dev->gadget;
++
++ gadget_for_each_ep (ep, gadget) {
++#ifdef DEV_CONFIG_CDC
++ if (!dev->rndis && dev->cdc) {
++ result = ether_alt_ep_setup (dev, ep);
++ if (result == 0)
++ continue;
++ }
++#endif
++
++#ifdef CONFIG_USB_ETH_RNDIS
++ if (dev->rndis && strcmp (ep->name, EP_STATUS_NAME) == 0) {
++ const struct usb_endpoint_descriptor *d;
++ d = ep_desc (gadget, &hs_status_desc, &fs_status_desc);
++ result = usb_ep_enable (ep, d);
++ if (result == 0) {
++ ep->driver_data = dev;
++ dev->status = d;
++ continue;
++ }
++ } else
++#endif
++
++ {
++#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
++ result = ether_ep_setup (dev, ep);
++ if (result == 0)
++ continue;
++#endif
++ }
++
++ /* stop on error */
++ ERROR (dev, "can't enable %s, result %d\n", ep->name, result);
++ break;
++ }
++ if (!result && (!dev->in_ep || !dev->out_ep))
++ result = -ENODEV;
++
++ if (result == 0)
++ result = alloc_requests (dev, qlen (gadget), gfp_flags);
++
++ /* on error, disable any endpoints */
++ if (result < 0) {
++#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
++ if (dev->status)
++ (void) usb_ep_disable (dev->status_ep);
++#endif
++ dev->status = NULL;
++#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
++ if (dev->rndis || !dev->cdc) {
++ if (dev->in)
++ (void) usb_ep_disable (dev->in_ep);
++ if (dev->out)
++ (void) usb_ep_disable (dev->out_ep);
++ }
++#endif
++ dev->in = NULL;
++ dev->out = NULL;
++ } else
++
++ /* activate non-CDC configs right away
++ * this isn't strictly according to the RNDIS spec
++ */
++#if defined(DEV_CONFIG_SUBSET) || defined(CONFIG_USB_ETH_RNDIS)
++ if (dev->rndis || !dev->cdc) {
++ netif_carrier_on (dev->net);
++ if (netif_running (dev->net)) {
++ spin_unlock (&dev->lock);
++ eth_start (dev, GFP_ATOMIC);
++ spin_lock (&dev->lock);
++ }
++ }
++#endif
++
++ if (result == 0)
++ DEBUG (dev, "qlen %d\n", qlen (gadget));
++
++ /* caller is responsible for cleanup on error */
++ return result;
++}
++
++static void eth_reset_config (struct eth_dev *dev)
++{
++ struct usb_request *req;
++
++ if (dev->config == 0)
++ return;
++
++ DEBUG (dev, "%s\n", __FUNCTION__);
++
++ netif_stop_queue (dev->net);
++ netif_carrier_off (dev->net);
++
++ /* disable endpoints, forcing (synchronous) completion of
++ * pending i/o. then free the requests.
++ */
++ if (dev->in) {
++ usb_ep_disable (dev->in_ep);
++ while (likely (!list_empty (&dev->tx_reqs))) {
++ req = container_of (dev->tx_reqs.next,
++ struct usb_request, list);
++ list_del (&req->list);
++ usb_ep_free_request (dev->in_ep, req);
++ }
++ }
++ if (dev->out) {
++ usb_ep_disable (dev->out_ep);
++ while (likely (!list_empty (&dev->rx_reqs))) {
++ req = container_of (dev->rx_reqs.next,
++ struct usb_request, list);
++ list_del (&req->list);
++ usb_ep_free_request (dev->out_ep, req);
++ }
++ }
++
++ if (dev->status) {
++ usb_ep_disable (dev->status_ep);
++ }
++ dev->config = 0;
++}
++
++/* change our operational config. must agree with the code
++ * that returns config descriptors, and altsetting code.
++ */
++static int
++eth_set_config (struct eth_dev *dev, unsigned number, int gfp_flags)
++{
++ int result = 0;
++ struct usb_gadget *gadget = dev->gadget;
++
++ if (number == dev->config)
++ return 0;
++
++ if (gadget_is_sa1100 (gadget)
++ && dev->config
++ && atomic_read (&dev->tx_qlen) != 0) {
++ /* tx fifo is full, but we can't clear it...*/
++ INFO (dev, "can't change configurations\n");
++ return -ESPIPE;
++ }
++ eth_reset_config (dev);
++
++ /* default: pass all packets, no multicast filtering */
++ dev->cdc_filter = 0x000f;
++
++ switch (number) {
++ case DEV_CONFIG_VALUE:
++ dev->rndis = 0;
++ result = set_ether_config (dev, gfp_flags);
++ break;
++#ifdef CONFIG_USB_ETH_RNDIS
++ case DEV_RNDIS_CONFIG_VALUE:
++ dev->rndis = 1;
++ result = set_ether_config (dev, gfp_flags);
++ break;
++#endif
++ default:
++ result = -EINVAL;
++ /* FALL THROUGH */
++ case 0:
++ break;
++ }
++
++ if (result) {
++ if (number)
++ eth_reset_config (dev);
++ usb_gadget_vbus_draw(dev->gadget,
++ dev->gadget->is_otg ? 8 : 100);
++ } else {
++ char *speed;
++ unsigned power;
++
++ power = 2 * eth_config.bMaxPower;
++ usb_gadget_vbus_draw(dev->gadget, power);
++
++ switch (gadget->speed) {
++ case USB_SPEED_FULL: speed = "full"; break;
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ case USB_SPEED_HIGH: speed = "high"; break;
++#endif
++ default: speed = "?"; break;
++ }
++
++ dev->config = number;
++ INFO (dev, "%s speed config #%d: %d mA, %s, using %s\n",
++ speed, number, power, driver_desc,
++ dev->rndis
++ ? "RNDIS"
++ : (dev->cdc
++ ? "CDC Ethernet"
++ : "CDC Ethernet Subset"));
++ }
++ return result;
++}
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef DEV_CONFIG_CDC
++
++static void eth_status_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ struct usb_cdc_notification *event = req->buf;
++ int value = req->status;
++ struct eth_dev *dev = ep->driver_data;
++
++ /* issue the second notification if host reads the first */
++ if (event->bNotificationType == USB_CDC_NOTIFY_NETWORK_CONNECTION
++ && value == 0) {
++ __le32 *data = req->buf + sizeof *event;
++
++ event->bmRequestType = 0xA1;
++ event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
++ event->wValue = __constant_cpu_to_le16 (0);
++ event->wIndex = __constant_cpu_to_le16 (1);
++ event->wLength = __constant_cpu_to_le16 (8);
++
++ /* SPEED_CHANGE data is up/down speeds in bits/sec */
++ data [0] = data [1] = cpu_to_le32 (BITRATE (dev->gadget));
++
++ req->length = STATUS_BYTECOUNT;
++ value = usb_ep_queue (ep, req, GFP_ATOMIC);
++ DEBUG (dev, "send SPEED_CHANGE --> %d\n", value);
++ if (value == 0)
++ return;
++ } else if (value != -ECONNRESET)
++ DEBUG (dev, "event %02x --> %d\n",
++ event->bNotificationType, value);
++ event->bmRequestType = 0xff;
++}
++
++static void issue_start_status (struct eth_dev *dev)
++{
++ struct usb_request *req = dev->stat_req;
++ struct usb_cdc_notification *event;
++ int value;
++
++ DEBUG (dev, "%s, flush old status first\n", __FUNCTION__);
++
++ /* flush old status
++ *
++ * FIXME ugly idiom, maybe we'd be better with just
++ * a "cancel the whole queue" primitive since any
++ * unlink-one primitive has way too many error modes.
++ * here, we "know" toggle is already clear...
++ */
++ usb_ep_disable (dev->status_ep);
++ usb_ep_enable (dev->status_ep, dev->status);
++
++ /* 3.8.1 says to issue first NETWORK_CONNECTION, then
++ * a SPEED_CHANGE. could be useful in some configs.
++ */
++ event = req->buf;
++ event->bmRequestType = 0xA1;
++ event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
++ event->wValue = __constant_cpu_to_le16 (1); /* connected */
++ event->wIndex = __constant_cpu_to_le16 (1);
++ event->wLength = 0;
++
++ req->length = sizeof *event;
++ req->complete = eth_status_complete;
++ value = usb_ep_queue (dev->status_ep, req, GFP_ATOMIC);
++ if (value < 0)
++ DEBUG (dev, "status buf queue --> %d\n", value);
++}
++
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++static void eth_setup_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ if (req->status || req->actual != req->length)
++ DEBUG ((struct eth_dev *) ep->driver_data,
++ "setup complete --> %d, %d/%d\n",
++ req->status, req->actual, req->length);
++}
++
++#ifdef CONFIG_USB_ETH_RNDIS
++
++static void rndis_response_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ if (req->status || req->actual != req->length)
++ DEBUG ((struct eth_dev *) ep->driver_data,
++ "rndis response complete --> %d, %d/%d\n",
++ req->status, req->actual, req->length);
++
++ /* done sending after USB_CDC_GET_ENCAPSULATED_RESPONSE */
++}
++
++static void rndis_command_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ struct eth_dev *dev = ep->driver_data;
++ int status;
++
++ /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
++ spin_lock(&dev->lock);
++ status = rndis_msg_parser (dev->rndis_config, (u8 *) req->buf);
++ if (status < 0)
++ ERROR(dev, "%s: rndis parse error %d\n", __FUNCTION__, status);
++ spin_unlock(&dev->lock);
++}
++
++#endif /* RNDIS */
++
++/*
++ * The setup() callback implements all the ep0 functionality that's not
++ * handled lower down. CDC has a number of less-common features:
++ *
++ * - two interfaces: control, and ethernet data
++ * - Ethernet data interface has two altsettings: default, and active
++ * - class-specific descriptors for the control interface
++ * - class-specific control requests
++ */
++static int
++eth_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
++{
++ struct eth_dev *dev = get_gadget_data (gadget);
++ struct usb_request *req = dev->req;
++ int value = -EOPNOTSUPP;
++ u16 wIndex = ctrl->wIndex;
++ u16 wValue = ctrl->wValue;
++ u16 wLength = ctrl->wLength;
++
++ /* descriptors just go into the pre-allocated ep0 buffer,
++ * while config change events may enable network traffic.
++ */
++ req->complete = eth_setup_complete;
++ switch (ctrl->bRequest) {
++
++ case USB_REQ_GET_DESCRIPTOR:
++ if (ctrl->bRequestType != USB_DIR_IN)
++ break;
++ switch (wValue >> 8) {
++
++ case USB_DT_DEVICE:
++ value = min (wLength, (u16) sizeof device_desc);
++ memcpy (req->buf, &device_desc, value);
++ break;
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ case USB_DT_DEVICE_QUALIFIER:
++ if (!gadget->is_dualspeed)
++ break;
++ value = min (wLength, (u16) sizeof dev_qualifier);
++ memcpy (req->buf, &dev_qualifier, value);
++ break;
++
++ case USB_DT_OTHER_SPEED_CONFIG:
++ if (!gadget->is_dualspeed)
++ break;
++ // FALLTHROUGH
++#endif /* CONFIG_USB_GADGET_DUALSPEED */
++ case USB_DT_CONFIG:
++ value = config_buf (gadget->speed, req->buf,
++ wValue >> 8,
++ wValue & 0xff,
++ gadget->is_otg);
++ if (value >= 0)
++ value = min (wLength, (u16) value);
++ break;
++
++ case USB_DT_STRING:
++ value = usb_gadget_get_string (&stringtab,
++ wValue & 0xff, req->buf);
++ if (value >= 0)
++ value = min (wLength, (u16) value);
++ break;
++ }
++ break;
++
++ case USB_REQ_SET_CONFIGURATION:
++ if (ctrl->bRequestType != 0)
++ break;
++ if (gadget->a_hnp_support)
++ DEBUG (dev, "HNP available\n");
++ else if (gadget->a_alt_hnp_support)
++ DEBUG (dev, "HNP needs a different root port\n");
++ spin_lock (&dev->lock);
++ value = eth_set_config (dev, wValue, GFP_ATOMIC);
++ spin_unlock (&dev->lock);
++ break;
++ case USB_REQ_GET_CONFIGURATION:
++ if (ctrl->bRequestType != USB_DIR_IN)
++ break;
++ *(u8 *)req->buf = dev->config;
++ value = min (wLength, (u16) 1);
++ break;
++
++ case USB_REQ_SET_INTERFACE:
++ if (ctrl->bRequestType != USB_RECIP_INTERFACE
++ || !dev->config
++ || wIndex > 1)
++ break;
++ if (!dev->cdc && wIndex != 0)
++ break;
++ spin_lock (&dev->lock);
++
++ /* PXA hardware partially handles SET_INTERFACE;
++ * we need to kluge around that interference.
++ */
++ if (gadget_is_pxa (gadget)) {
++ value = eth_set_config (dev, DEV_CONFIG_VALUE,
++ GFP_ATOMIC);
++ goto done_set_intf;
++ }
++
++#ifdef DEV_CONFIG_CDC
++ switch (wIndex) {
++ case 0: /* control/master intf */
++ if (wValue != 0)
++ break;
++ if (dev->status) {
++ usb_ep_disable (dev->status_ep);
++ usb_ep_enable (dev->status_ep, dev->status);
++ }
++ value = 0;
++ break;
++ case 1: /* data intf */
++ if (wValue > 1)
++ break;
++ usb_ep_disable (dev->in_ep);
++ usb_ep_disable (dev->out_ep);
++
++ /* CDC requires the data transfers not be done from
++ * the default interface setting ... also, setting
++ * the non-default interface clears filters etc.
++ */
++ if (wValue == 1) {
++ usb_ep_enable (dev->in_ep, dev->in);
++ usb_ep_enable (dev->out_ep, dev->out);
++ dev->cdc_filter = DEFAULT_FILTER;
++ netif_carrier_on (dev->net);
++ if (dev->status)
++ issue_start_status (dev);
++ if (netif_running (dev->net)) {
++ spin_unlock (&dev->lock);
++ eth_start (dev, GFP_ATOMIC);
++ spin_lock (&dev->lock);
++ }
++ } else {
++ netif_stop_queue (dev->net);
++ netif_carrier_off (dev->net);
++ }
++ value = 0;
++ break;
++ }
++#else
++ /* FIXME this is wrong, as is the assumption that
++ * all non-PXA hardware talks real CDC ...
++ */
++ WARN(dev, "set_interface ignored!\n");
++#endif /* DEV_CONFIG_CDC */
++
++done_set_intf:
++ spin_unlock (&dev->lock);
++ break;
++ case USB_REQ_GET_INTERFACE:
++ if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)
++ || !dev->config
++ || wIndex > 1)
++ break;
++ if (!(dev->cdc || dev->rndis) && wIndex != 0)
++ break;
++
++ /* for CDC, iff carrier is on, data interface is active. */
++ if (dev->rndis || wIndex != 1)
++ *(u8 *)req->buf = 0;
++ else
++ *(u8 *)req->buf = netif_carrier_ok (dev->net) ? 1 : 0;
++ value = min (wLength, (u16) 1);
++ break;
++
++#ifdef DEV_CONFIG_CDC
++ case USB_CDC_SET_ETHERNET_PACKET_FILTER:
++ /* see 6.2.30: no data, wIndex = interface,
++ * wValue = packet filter bitmap
++ */
++ if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
++ || !dev->cdc
++ || dev->rndis
++ || wLength != 0
++ || wIndex > 1)
++ break;
++ DEBUG (dev, "packet filter %02x\n", wValue);
++ dev->cdc_filter = wValue;
++ value = 0;
++ break;
++
++ /* and potentially:
++ * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
++ * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
++ * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
++ * case USB_CDC_GET_ETHERNET_STATISTIC:
++ */
++
++#endif /* DEV_CONFIG_CDC */
++
++#ifdef CONFIG_USB_ETH_RNDIS
++ /* RNDIS uses the CDC command encapsulation mechanism to implement
++ * an RPC scheme, with much getting/setting of attributes by OID.
++ */
++ case USB_CDC_SEND_ENCAPSULATED_COMMAND:
++ if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
++ || !dev->rndis
++ || wLength > USB_BUFSIZ
++ || wValue
++ || rndis_control_intf.bInterfaceNumber
++ != wIndex)
++ break;
++ /* read the request, then process it */
++ value = wLength;
++ req->complete = rndis_command_complete;
++ /* later, rndis_control_ack () sends a notification */
++ break;
++
++ case USB_CDC_GET_ENCAPSULATED_RESPONSE:
++ if ((USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE)
++ == ctrl->bRequestType
++ && dev->rndis
++ // && wLength >= 0x0400
++ && !wValue
++ && rndis_control_intf.bInterfaceNumber
++ == wIndex) {
++ u8 *buf;
++
++ /* return the result */
++ buf = rndis_get_next_response (dev->rndis_config,
++ &value);
++ if (buf) {
++ memcpy (req->buf, buf, value);
++ req->complete = rndis_response_complete;
++ rndis_free_response(dev->rndis_config, buf);
++ }
++ /* else stalls ... spec says to avoid that */
++ }
++ break;
++#endif /* RNDIS */
++
++ default:
++ VDEBUG (dev,
++ "unknown control req%02x.%02x v%04x i%04x l%d\n",
++ ctrl->bRequestType, ctrl->bRequest,
++ wValue, wIndex, wLength);
++ }
++
++ /* respond with data transfer before status phase? */
++ if (value >= 0) {
++ req->length = value;
++ req->zero = value < wLength
++ && (value % gadget->ep0->maxpacket) == 0;
++ value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
++ if (value < 0) {
++ DEBUG (dev, "ep_queue --> %d\n", value);
++ req->status = 0;
++ eth_setup_complete (gadget->ep0, req);
++ }
++ }
++
++ /* host either stalls (value < 0) or reports success */
++ return value;
++}
++
++static void
++eth_disconnect (struct usb_gadget *gadget)
++{
++ struct eth_dev *dev = get_gadget_data (gadget);
++ unsigned long flags;
++
++ spin_lock_irqsave (&dev->lock, flags);
++ netif_stop_queue (dev->net);
++ netif_carrier_off (dev->net);
++ eth_reset_config (dev);
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ /* FIXME RNDIS should enter RNDIS_UNINITIALIZED */
++
++ /* next we may get setup() calls to enumerate new connections;
++ * or an unbind() during shutdown (including removing module).
++ */
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
++
++/* glue code: in more recent 2.4 kernels these functions are contained in netdevice.h */
++
++#ifndef HAVE_NETDEV_PRIV
++static inline void *netdev_priv(struct net_device *net)
++{
++ return net->priv;
++}
++#endif
++
++#ifndef HAVE_FREE_NETDEV
++static inline void free_netdev(struct net_device *dev)
++{
++ kfree(dev);
++}
++#endif
++
++static int eth_change_mtu (struct net_device *net, int new_mtu)
++{
++ struct eth_dev *dev = netdev_priv(net);
++
++ // FIXME if rndis, don't change while link's live
++
++ if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
++ return -ERANGE;
++ /* no zero-length packet read wanted after mtu-sized packets */
++ if (((new_mtu + sizeof (struct ethhdr)) % dev->in_ep->maxpacket) == 0)
++ return -EDOM;
++ net->mtu = new_mtu;
++ return 0;
++}
++
++static struct net_device_stats *eth_get_stats (struct net_device *net)
++{
++ return &((struct eth_dev *)netdev_priv(net))->stats;
++}
++
++static int eth_ethtool_ioctl (struct net_device *net, void *useraddr)
++{
++ struct eth_dev *dev = (struct eth_dev *) net->priv;
++ u32 cmd;
++
++ if (get_user (cmd, (u32 *)useraddr))
++ return -EFAULT;
++ switch (cmd) {
++
++ case ETHTOOL_GDRVINFO: { /* get driver info */
++ struct ethtool_drvinfo info;
++
++ memset (&info, 0, sizeof info);
++ info.cmd = ETHTOOL_GDRVINFO;
++ strncpy (info.driver, shortname, sizeof info.driver);
++ strncpy (info.version, DRIVER_VERSION, sizeof info.version);
++ strncpy (info.fw_version, dev->gadget->name, sizeof info.fw_version);
++ strncpy (info.bus_info, dev->gadget->dev.bus_id,
++ sizeof info.bus_info);
++ if (copy_to_user (useraddr, &info, sizeof (info)))
++ return -EFAULT;
++ return 0;
++ }
++
++ case ETHTOOL_GLINK: { /* get link status */
++ struct ethtool_value edata = { ETHTOOL_GLINK };
++
++ edata.data = (dev->gadget->speed != USB_SPEED_UNKNOWN);
++ if (copy_to_user (useraddr, &edata, sizeof (edata)))
++ return -EFAULT;
++ return 0;
++ }
++
++ }
++ /* Note that the ethtool user space code requires EOPNOTSUPP */
++ return -EOPNOTSUPP;
++}
++
++static int eth_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
++{
++ switch (cmd) {
++ case SIOCETHTOOL:
++ return eth_ethtool_ioctl (net, (void *)rq->ifr_data);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static void defer_kevent (struct eth_dev *dev, int flag)
++{
++ if (test_and_set_bit (flag, &dev->todo))
++ return;
++ if (!schedule_work (&dev->work))
++ ERROR (dev, "kevent %d may have been dropped\n", flag);
++ else
++ DEBUG (dev, "kevent %d scheduled\n", flag);
++}
++
++static void rx_complete (struct usb_ep *ep, struct usb_request *req);
++
++#ifndef NET_IP_ALIGN
++/* this can be a cpu-specific value */
++#define NET_IP_ALIGN 2
++#endif
++
++static int
++rx_submit (struct eth_dev *dev, struct usb_request *req, int gfp_flags)
++{
++ struct sk_buff *skb;
++ int retval = -ENOMEM;
++ size_t size;
++
++ /* Padding up to RX_EXTRA handles minor disagreements with host.
++ * Normally we use the USB "terminate on short read" convention;
++ * so allow up to (N*maxpacket), since that memory is normally
++ * already allocated. Some hardware doesn't deal well with short
++ * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
++ * byte off the end (to force hardware errors on overflow).
++ *
++ * RNDIS uses internal framing, and explicitly allows senders to
++ * pad to end-of-packet. That's potentially nice for speed,
++ * but means receivers can't recover synch on their own.
++ */
++ size = (sizeof (struct ethhdr) + dev->net->mtu + RX_EXTRA);
++ size += dev->out_ep->maxpacket - 1;
++#ifdef CONFIG_USB_ETH_RNDIS
++ if (dev->rndis)
++ size += sizeof (struct rndis_packet_msg_type);
++#endif
++ size -= size % dev->out_ep->maxpacket;
++
++ if ((skb = alloc_skb (size + NET_IP_ALIGN, gfp_flags)) == 0) {
++ DEBUG (dev, "no rx skb\n");
++ goto enomem;
++ }
++
++ /* Some platforms perform better when IP packets are aligned,
++ * but on at least one, checksumming fails otherwise. Note:
++ * this doesn't account for variable-sized RNDIS headers.
++ */
++ skb_reserve(skb, NET_IP_ALIGN);
++
++ req->buf = skb->data;
++ req->length = size;
++ req->complete = rx_complete;
++ req->context = skb;
++
++ retval = usb_ep_queue (dev->out_ep, req, gfp_flags);
++ if (retval == -ENOMEM)
++enomem:
++ defer_kevent (dev, WORK_RX_MEMORY);
++ if (retval) {
++ DEBUG (dev, "rx submit --> %d\n", retval);
++ dev_kfree_skb_any (skb);
++ spin_lock (&dev->lock);
++ list_add (&req->list, &dev->rx_reqs);
++ spin_unlock (&dev->lock);
++ }
++ return retval;
++}
++
++static void rx_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ struct sk_buff *skb = req->context;
++ struct eth_dev *dev = ep->driver_data;
++ int status = req->status;
++
++ switch (status) {
++
++ /* normal completion */
++ case 0:
++ skb_put (skb, req->actual);
++#ifdef CONFIG_USB_ETH_RNDIS
++ /* we know MaxPacketsPerTransfer == 1 here */
++ if (dev->rndis)
++ rndis_rm_hdr (req->buf, &(skb->len));
++#endif
++ if (ETH_HLEN > skb->len || skb->len > ETH_FRAME_LEN) {
++ dev->stats.rx_errors++;
++ dev->stats.rx_length_errors++;
++ DEBUG (dev, "rx length %d\n", skb->len);
++ break;
++ }
++
++ skb->dev = dev->net;
++ skb->protocol = eth_type_trans (skb, dev->net);
++ dev->stats.rx_packets++;
++ dev->stats.rx_bytes += skb->len;
++
++ /* no buffer copies needed, unless hardware can't
++ * use skb buffers.
++ */
++ status = netif_rx (skb);
++ skb = NULL;
++ break;
++
++ /* software-driven interface shutdown */
++ case -ECONNRESET: // unlink
++ case -ESHUTDOWN: // disconnect etc
++ VDEBUG (dev, "rx shutdown, code %d\n", status);
++ goto quiesce;
++
++ /* for hardware automagic (such as pxa) */
++ case -ECONNABORTED: // endpoint reset
++ DEBUG (dev, "rx %s reset\n", ep->name);
++ defer_kevent (dev, WORK_RX_MEMORY);
++quiesce:
++ dev_kfree_skb_any (skb);
++ goto clean;
++
++ /* data overrun */
++ case -EOVERFLOW:
++ dev->stats.rx_over_errors++;
++ // FALLTHROUGH
++
++ default:
++ dev->stats.rx_errors++;
++ DEBUG (dev, "rx status %d\n", status);
++ break;
++ }
++
++ if (skb)
++ dev_kfree_skb_any (skb);
++ if (!netif_running (dev->net)) {
++clean:
++ /* nobody reading rx_reqs, so no dev->lock */
++ list_add (&req->list, &dev->rx_reqs);
++ req = NULL;
++ }
++ if (req)
++ rx_submit (dev, req, GFP_ATOMIC);
++}
++
++static int prealloc (struct list_head *list, struct usb_ep *ep,
++ unsigned n, int gfp_flags)
++{
++ unsigned i;
++ struct usb_request *req;
++
++ if (!n)
++ return -ENOMEM;
++
++ /* queue/recycle up to N requests */
++ i = n;
++ list_for_each_entry (req, list, list) {
++ if (i-- == 0)
++ goto extra;
++ }
++ while (i--) {
++ req = usb_ep_alloc_request (ep, gfp_flags);
++ if (!req)
++ return list_empty (list) ? -ENOMEM : 0;
++ list_add (&req->list, list);
++ }
++ return 0;
++
++extra:
++ /* free extras */
++ for (;;) {
++ struct list_head *next;
++
++ next = req->list.next;
++ list_del (&req->list);
++ usb_ep_free_request (ep, req);
++
++ if (next == list)
++ break;
++
++ req = container_of (next, struct usb_request, list);
++ }
++ return 0;
++}
++
++static int alloc_requests (struct eth_dev *dev, unsigned n, int gfp_flags)
++{
++ int status;
++
++ status = prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags);
++ if (status < 0)
++ goto fail;
++ status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags);
++ if (status < 0)
++ goto fail;
++ return 0;
++fail:
++ DEBUG (dev, "can't alloc requests\n");
++ return status;
++}
++
++static void rx_fill (struct eth_dev *dev, int gfp_flags)
++{
++ struct usb_request *req;
++ unsigned long flags;
++
++ clear_bit (WORK_RX_MEMORY, &dev->todo);
++
++ /* fill unused rxq slots with some skb */
++ spin_lock_irqsave (&dev->lock, flags);
++ while (!list_empty (&dev->rx_reqs)) {
++ req = container_of (dev->rx_reqs.next,
++ struct usb_request, list);
++ list_del_init (&req->list);
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ if (rx_submit (dev, req, gfp_flags) < 0) {
++ defer_kevent (dev, WORK_RX_MEMORY);
++ return;
++ }
++
++ spin_lock_irqsave (&dev->lock, flags);
++ }
++ spin_unlock_irqrestore (&dev->lock, flags);
++}
++
++static void eth_work (void *_dev)
++{
++ struct eth_dev *dev = _dev;
++
++ if (test_bit (WORK_RX_MEMORY, &dev->todo)) {
++ if (netif_running (dev->net))
++ rx_fill (dev, GFP_KERNEL);
++ else
++ clear_bit (WORK_RX_MEMORY, &dev->todo);
++ }
++
++ if (dev->todo)
++ DEBUG (dev, "work done, flags = 0x%lx\n", dev->todo);
++}
++
++static void tx_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ struct sk_buff *skb = req->context;
++ struct eth_dev *dev = ep->driver_data;
++
++ switch (req->status) {
++ default:
++ dev->stats.tx_errors++;
++ VDEBUG (dev, "tx err %d\n", req->status);
++ /* FALLTHROUGH */
++ case -ECONNRESET: // unlink
++ case -ESHUTDOWN: // disconnect etc
++ break;
++ case 0:
++ dev->stats.tx_bytes += skb->len;
++ }
++ dev->stats.tx_packets++;
++
++ spin_lock (&dev->lock);
++ list_add (&req->list, &dev->tx_reqs);
++ spin_unlock (&dev->lock);
++ dev_kfree_skb_any (skb);
++
++ atomic_dec (&dev->tx_qlen);
++ if (netif_carrier_ok (dev->net))
++ netif_wake_queue (dev->net);
++}
++
++static inline int eth_is_promisc (struct eth_dev *dev)
++{
++ /* no filters for the CDC subset; always promisc */
++ if (subset_active (dev))
++ return 1;
++ return dev->cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
++}
++
++static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
++{
++ struct eth_dev *dev = netdev_priv(net);
++ int length = skb->len;
++ int retval;
++ struct usb_request *req = NULL;
++ unsigned long flags;
++
++ /* apply outgoing CDC or RNDIS filters */
++ if (!eth_is_promisc (dev)) {
++ u8 *dest = skb->data;
++
++ if (dest [0] & 0x01) {
++ u16 type;
++
++ /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
++ * SET_ETHERNET_MULTICAST_FILTERS requests
++ */
++ if (memcmp (dest, net->broadcast, ETH_ALEN) == 0)
++ type = USB_CDC_PACKET_TYPE_BROADCAST;
++ else
++ type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
++ if (!(dev->cdc_filter & type)) {
++ dev_kfree_skb_any (skb);
++ return 0;
++ }
++ }
++ /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
++ }
++
++ spin_lock_irqsave (&dev->lock, flags);
++ req = container_of (dev->tx_reqs.next, struct usb_request, list);
++ list_del (&req->list);
++ if (list_empty (&dev->tx_reqs))
++ netif_stop_queue (net);
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ /* no buffer copies needed, unless the network stack did it
++ * or the hardware can't use skb buffers.
++ * or there's not enough space for any RNDIS headers we need
++ */
++#ifdef CONFIG_USB_ETH_RNDIS
++ if (dev->rndis) {
++ struct sk_buff *skb_rndis;
++
++ skb_rndis = skb_realloc_headroom (skb,
++ sizeof (struct rndis_packet_msg_type));
++ if (!skb_rndis)
++ goto drop;
++
++ dev_kfree_skb_any (skb);
++ skb = skb_rndis;
++ rndis_add_hdr (skb);
++ length = skb->len;
++ }
++#endif
++ req->buf = skb->data;
++ req->context = skb;
++ req->complete = tx_complete;
++
++ /* use zlp framing on tx for strict CDC-Ether conformance,
++ * though any robust network rx path ignores extra padding.
++ * and some hardware doesn't like to write zlps.
++ */
++ req->zero = 1;
++ if (!dev->zlp && (length % dev->in_ep->maxpacket) == 0)
++ length++;
++
++ req->length = length;
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ /* throttle highspeed IRQ rate back slightly */
++ req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
++ ? ((atomic_read (&dev->tx_qlen) % TX_DELAY) != 0)
++ : 0;
++#endif
++
++ retval = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC);
++ switch (retval) {
++ default:
++ DEBUG (dev, "tx queue err %d\n", retval);
++ break;
++ case 0:
++ net->trans_start = jiffies;
++ atomic_inc (&dev->tx_qlen);
++ }
++
++ if (retval) {
++#ifdef CONFIG_USB_ETH_RNDIS
++drop:
++#endif
++ dev->stats.tx_dropped++;
++ dev_kfree_skb_any (skb);
++ spin_lock_irqsave (&dev->lock, flags);
++ if (list_empty (&dev->tx_reqs))
++ netif_start_queue (net);
++ list_add (&req->list, &dev->tx_reqs);
++ spin_unlock_irqrestore (&dev->lock, flags);
++ }
++ return 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef CONFIG_USB_ETH_RNDIS
++
++static void rndis_send_media_state (struct eth_dev *dev, int connect)
++{
++ if (!dev)
++ return;
++
++ if (connect) {
++ if (rndis_signal_connect (dev->rndis_config))
++ return;
++ } else {
++ if (rndis_signal_disconnect (dev->rndis_config))
++ return;
++ }
++}
++
++static void
++rndis_control_ack_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ if (req->status || req->actual != req->length)
++ DEBUG ((struct eth_dev *) ep->driver_data,
++ "rndis control ack complete --> %d, %d/%d\n",
++ req->status, req->actual, req->length);
++
++ usb_ep_free_buffer(ep, req->buf, req->dma, 8);
++ usb_ep_free_request(ep, req);
++}
++
++static int rndis_control_ack (struct net_device *net)
++{
++ struct eth_dev *dev = netdev_priv(net);
++ int length;
++ struct usb_request *resp;
++
++ /* in case RNDIS calls this after disconnect */
++ if (!dev->status_ep) {
++ DEBUG (dev, "status ENODEV\n");
++ return -ENODEV;
++ }
++
++ /* Allocate memory for notification ie. ACK */
++ resp = usb_ep_alloc_request (dev->status_ep, GFP_ATOMIC);
++ if (!resp) {
++ DEBUG (dev, "status ENOMEM\n");
++ return -ENOMEM;
++ }
++
++ resp->buf = usb_ep_alloc_buffer (dev->status_ep, 8,
++ &resp->dma, GFP_ATOMIC);
++ if (!resp->buf) {
++ DEBUG (dev, "status buf ENOMEM\n");
++ usb_ep_free_request (dev->status_ep, resp);
++ return -ENOMEM;
++ }
++
++ /* Send RNDIS RESPONSE_AVAILABLE notification;
++ * USB_CDC_NOTIFY_RESPONSE_AVAILABLE should work too
++ */
++ resp->length = 8;
++ resp->complete = rndis_control_ack_complete;
++
++ *((__le32 *) resp->buf) = __constant_cpu_to_le32 (1);
++ *((__le32 *) resp->buf + 1) = __constant_cpu_to_le32 (0);
++
++ length = usb_ep_queue (dev->status_ep, resp, GFP_ATOMIC);
++ if (length < 0) {
++ resp->status = 0;
++ rndis_control_ack_complete (dev->status_ep, resp);
++ }
++
++ return 0;
++}
++
++#endif /* RNDIS */
++
++static void eth_start (struct eth_dev *dev, int gfp_flags)
++{
++ DEBUG (dev, "%s\n", __FUNCTION__);
++
++ /* fill the rx queue */
++ rx_fill (dev, gfp_flags);
++
++ /* and open the tx floodgates */
++ atomic_set (&dev->tx_qlen, 0);
++ netif_wake_queue (dev->net);
++#ifdef CONFIG_USB_ETH_RNDIS
++ if (dev->rndis) {
++ rndis_set_param_medium (dev->rndis_config,
++ NDIS_MEDIUM_802_3,
++ BITRATE(dev->gadget));
++ rndis_send_media_state (dev, 1);
++ }
++#endif
++}
++
++static int eth_open (struct net_device *net)
++{
++ struct eth_dev *dev = netdev_priv(net);
++
++ DEBUG (dev, "%s\n", __FUNCTION__);
++ if (netif_carrier_ok (dev->net))
++ eth_start (dev, GFP_KERNEL);
++ return 0;
++}
++
++static int eth_stop (struct net_device *net)
++{
++ struct eth_dev *dev = netdev_priv(net);
++
++ VDEBUG (dev, "%s\n", __FUNCTION__);
++ netif_stop_queue (net);
++
++ DEBUG (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
++ dev->stats.rx_packets, dev->stats.tx_packets,
++ dev->stats.rx_errors, dev->stats.tx_errors
++ );
++
++ /* ensure there are no more active requests */
++ if (dev->config) {
++ usb_ep_disable (dev->in_ep);
++ usb_ep_disable (dev->out_ep);
++ if (netif_carrier_ok (dev->net)) {
++ DEBUG (dev, "host still using in/out endpoints\n");
++ // FIXME idiom may leave toggle wrong here
++ usb_ep_enable (dev->in_ep, dev->in);
++ usb_ep_enable (dev->out_ep, dev->out);
++ }
++ if (dev->status_ep) {
++ usb_ep_disable (dev->status_ep);
++ usb_ep_enable (dev->status_ep, dev->status);
++ }
++ }
++
++#ifdef CONFIG_USB_ETH_RNDIS
++ if (dev->rndis) {
++ rndis_set_param_medium (dev->rndis_config,
++ NDIS_MEDIUM_802_3, 0);
++ rndis_send_media_state (dev, 0);
++ }
++#endif
++
++ return 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_request *eth_req_alloc (struct usb_ep *ep, unsigned size)
++{
++ struct usb_request *req;
++
++ req = usb_ep_alloc_request (ep, GFP_KERNEL);
++ if (!req)
++ return NULL;
++
++ req->buf = kmalloc (size, GFP_KERNEL);
++ if (!req->buf) {
++ usb_ep_free_request (ep, req);
++ req = NULL;
++ }
++ return req;
++}
++
++static void
++eth_req_free (struct usb_ep *ep, struct usb_request *req)
++{
++ kfree (req->buf);
++ usb_ep_free_request (ep, req);
++}
++
++
++static void
++eth_unbind (struct usb_gadget *gadget)
++{
++ struct eth_dev *dev = get_gadget_data (gadget);
++
++ DEBUG (dev, "unbind\n");
++#ifdef CONFIG_USB_ETH_RNDIS
++ rndis_deregister (dev->rndis_config);
++ rndis_exit ();
++#endif
++
++ /* we've already been disconnected ... no i/o is active */
++ if (dev->req) {
++ eth_req_free (gadget->ep0, dev->req);
++ dev->req = NULL;
++ }
++ if (dev->stat_req) {
++ eth_req_free (dev->status_ep, dev->stat_req);
++ dev->stat_req = NULL;
++ }
++
++ unregister_netdev (dev->net);
++ free_netdev(dev->net);
++
++ /* assuming we used keventd, it must quiesce too */
++ flush_scheduled_work ();
++ set_gadget_data (gadget, NULL);
++}
++
++static u8 __init nibble (unsigned char c)
++{
++ if (likely (isdigit (c)))
++ return c - '0';
++ c = toupper (c);
++ if (likely (isxdigit (c)))
++ return 10 + c - 'A';
++ return 0;
++}
++
++static void __init get_ether_addr (const char *str, u8 *dev_addr)
++{
++ if (str) {
++ unsigned i;
++
++ for (i = 0; i < 6; i++) {
++ unsigned char num;
++
++ if((*str == '.') || (*str == ':'))
++ str++;
++ num = nibble(*str++) << 4;
++ num |= (nibble(*str++));
++ dev_addr [i] = num;
++ }
++ if (is_valid_ether_addr (dev_addr))
++ return;
++ }
++ random_ether_addr(dev_addr);
++}
++
++static int __init
++eth_bind (struct usb_gadget *gadget)
++{
++ struct eth_dev *dev;
++ struct net_device *net;
++ u8 cdc = 1, zlp = 1, rndis = 1;
++ struct usb_ep *in_ep, *out_ep, *status_ep = NULL;
++ int status = -ENOMEM;
++
++ /* these flags are only ever cleared; compiler take note */
++#ifndef DEV_CONFIG_CDC
++ cdc = 0;
++#endif
++#ifndef CONFIG_USB_ETH_RNDIS
++ rndis = 0;
++#endif
++
++ /* Because most host side USB stacks handle CDC Ethernet, that
++ * standard protocol is _strongly_ preferred for interop purposes.
++ * (By everyone except Microsoft.)
++ */
++ if (gadget_is_net2280 (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201);
++ } else if (gadget_is_dummy (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0202);
++ } else if (gadget_is_pxa (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0203);
++ /* pxa doesn't support altsettings */
++ cdc = 0;
++ } else if (gadget_is_sh(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0204);
++ /* sh doesn't support multiple interfaces or configs */
++ cdc = 0;
++ rndis = 0;
++ } else if (gadget_is_sa1100 (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0205);
++ /* hardware can't write zlps */
++ zlp = 0;
++ /* sa1100 CAN do CDC, without status endpoint ... we use
++ * non-CDC to be compatible with ARM Linux-2.4 "usb-eth".
++ */
++ cdc = 0;
++ } else if (gadget_is_goku (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0206);
++ } else if (gadget_is_mq11xx (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0207);
++ } else if (gadget_is_omap (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0208);
++ } else if (gadget_is_lh7a40x(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0209);
++ } else if (gadget_is_n9604(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0210);
++ } else if (gadget_is_pxa27x(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0211);
++ } else if (gadget_is_s3c2410(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0212);
++ } else if (gadget_is_at91(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0213);
++ } else {
++ /* can't assume CDC works. don't want to default to
++ * anything less functional on CDC-capable hardware,
++ * so we fail in this case.
++ */
++ printk (KERN_ERR "%s: "
++ "controller '%s' not recognized\n",
++ shortname, gadget->name);
++ return -ENODEV;
++ }
++ snprintf (manufacturer, sizeof manufacturer,
++ UTS_SYSNAME " " UTS_RELEASE "/%s",
++ gadget->name);
++
++ /* If there's an RNDIS configuration, that's what Windows wants to
++ * be using ... so use these product IDs here and in the "linux.inf"
++ * needed to install MSFT drivers. Current Linux kernels will use
++ * the second configuration if it's CDC Ethernet, and need some help
++ * to choose the right configuration otherwise.
++ */
++ if (rndis) {
++ device_desc.idVendor =
++ __constant_cpu_to_le16(RNDIS_VENDOR_NUM);
++ device_desc.idProduct =
++ __constant_cpu_to_le16(RNDIS_PRODUCT_NUM);
++ snprintf (product_desc, sizeof product_desc,
++ "RNDIS/%s", driver_desc);
++
++ /* CDC subset ... recognized by Linux since 2.4.10, but Windows
++ * drivers aren't widely available.
++ */
++ } else if (!cdc) {
++ device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
++ device_desc.idVendor =
++ __constant_cpu_to_le16(SIMPLE_VENDOR_NUM);
++ device_desc.idProduct =
++ __constant_cpu_to_le16(SIMPLE_PRODUCT_NUM);
++ }
++
++ /* support optional vendor/distro customization */
++ if (idVendor) {
++ if (!idProduct) {
++ printk (KERN_ERR "%s: idVendor needs idProduct!\n",
++ shortname);
++ return -ENODEV;
++ }
++ device_desc.idVendor = cpu_to_le16(idVendor);
++ device_desc.idProduct = cpu_to_le16(idProduct);
++ if (bcdDevice)
++ device_desc.bcdDevice = cpu_to_le16(bcdDevice);
++ }
++ if (iManufacturer)
++ strncpy (manufacturer, iManufacturer, sizeof manufacturer);
++ if (iProduct)
++ strncpy (product_desc, iProduct, sizeof product_desc);
++
++ /* all we really need is bulk IN/OUT */
++ usb_ep_autoconfig_reset (gadget);
++ in_ep = usb_ep_autoconfig (gadget, &fs_source_desc);
++ if (!in_ep) {
++autoconf_fail:
++ printk (KERN_ERR "%s: can't autoconfigure on %s\n",
++ shortname, gadget->name);
++ return -ENODEV;
++ }
++ EP_IN_NAME = in_ep->name;
++ in_ep->driver_data = in_ep; /* claim */
++
++ out_ep = usb_ep_autoconfig (gadget, &fs_sink_desc);
++ if (!out_ep)
++ goto autoconf_fail;
++ EP_OUT_NAME = out_ep->name;
++ out_ep->driver_data = out_ep; /* claim */
++
++#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
++ /* CDC Ethernet control interface doesn't require a status endpoint.
++ * Since some hosts expect one, try to allocate one anyway.
++ */
++ if (cdc || rndis) {
++ status_ep = usb_ep_autoconfig (gadget, &fs_status_desc);
++ if (status_ep) {
++ EP_STATUS_NAME = status_ep->name;
++ status_ep->driver_data = status_ep; /* claim */
++ } else if (rndis) {
++ printk (KERN_ERR "%s: "
++ "can't run RNDIS on %s\n",
++ shortname, gadget->name);
++ return -ENODEV;
++#ifdef DEV_CONFIG_CDC
++ /* pxa25x only does CDC subset; often used with RNDIS */
++ } else if (cdc) {
++ control_intf.bNumEndpoints = 0;
++ /* FIXME remove endpoint from descriptor list */
++#endif
++ }
++ }
++#endif
++
++ /* one config: cdc, else minimal subset */
++ if (!cdc) {
++ eth_config.bNumInterfaces = 1;
++ eth_config.iConfiguration = STRING_SUBSET;
++ fs_subset_descriptors();
++ hs_subset_descriptors();
++ }
++
++ /* For now RNDIS is always a second config */
++ if (rndis)
++ device_desc.bNumConfigurations = 2;
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ if (rndis)
++ dev_qualifier.bNumConfigurations = 2;
++ else if (!cdc)
++ dev_qualifier.bDeviceClass = USB_CLASS_VENDOR_SPEC;
++
++ /* assumes ep0 uses the same value for both speeds ... */
++ dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
++
++ /* and that all endpoints are dual-speed */
++ hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
++ hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
++#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
++ if (EP_STATUS_NAME)
++ hs_status_desc.bEndpointAddress =
++ fs_status_desc.bEndpointAddress;
++#endif
++#endif /* DUALSPEED */
++
++ device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
++ usb_gadget_set_selfpowered (gadget);
++
++ if (gadget->is_otg) {
++ otg_descriptor.bmAttributes |= USB_OTG_HNP;
++ eth_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
++ eth_config.bMaxPower = 4;
++#ifdef CONFIG_USB_ETH_RNDIS
++ rndis_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
++ rndis_config.bMaxPower = 4;
++#endif
++ }
++
++ net = alloc_etherdev (sizeof *dev);
++ if (!net)
++ return status;
++ dev = netdev_priv(net);
++ spin_lock_init (&dev->lock);
++ INIT_WORK (&dev->work, eth_work, dev);
++ INIT_LIST_HEAD (&dev->tx_reqs);
++ INIT_LIST_HEAD (&dev->rx_reqs);
++
++ /* network device setup */
++ dev->net = net;
++ SET_MODULE_OWNER (net);
++ strcpy (net->name, "usb%d");
++ dev->cdc = cdc;
++ dev->zlp = zlp;
++
++ dev->in_ep = in_ep;
++ dev->out_ep = out_ep;
++ dev->status_ep = status_ep;
++
++ /* Module params for these addresses should come from ID proms.
++ * The host side address is used with CDC and RNDIS, and commonly
++ * ends up in a persistent config database.
++ */
++ get_ether_addr(dev_addr, net->dev_addr);
++ if (cdc || rndis) {
++ get_ether_addr(host_addr, dev->host_mac);
++#ifdef DEV_CONFIG_CDC
++ snprintf (ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X",
++ dev->host_mac [0], dev->host_mac [1],
++ dev->host_mac [2], dev->host_mac [3],
++ dev->host_mac [4], dev->host_mac [5]);
++#endif
++ }
++
++ if (rndis) {
++ status = rndis_init();
++ if (status < 0) {
++ printk (KERN_ERR "%s: can't init RNDIS, %d\n",
++ shortname, status);
++ goto fail;
++ }
++ }
++
++ net->change_mtu = eth_change_mtu;
++ net->get_stats = eth_get_stats;
++ net->hard_start_xmit = eth_start_xmit;
++ net->open = eth_open;
++ net->stop = eth_stop;
++ // watchdog_timeo, tx_timeout ...
++ // set_multicast_list
++ net->do_ioctl = eth_ioctl;
++
++ /* preallocate control message data and buffer */
++ dev->req = eth_req_alloc (gadget->ep0, USB_BUFSIZ);
++ if (!dev->req)
++ goto fail;
++ dev->req->complete = eth_setup_complete;
++
++ /* PO: this code may be reached with STATUS_BYTECOUNT undefined
++ Don't allocate stat_req then?!
++ */
++#ifdef STATUS_BYTECOUNT
++ /* ... and maybe likewise for status transfer */
++ if (dev->status_ep) {
++ dev->stat_req = eth_req_alloc (dev->status_ep,
++ STATUS_BYTECOUNT);
++ if (!dev->stat_req) {
++ eth_req_free (gadget->ep0, dev->req);
++ goto fail;
++ }
++ }
++#endif
++
++ /* finish hookup to lower layer ... */
++ dev->gadget = gadget;
++ set_gadget_data (gadget, dev);
++ gadget->ep0->driver_data = dev;
++
++ /* two kinds of host-initiated state changes:
++ * - iff DATA transfer is active, carrier is "on"
++ * - tx queueing enabled if open *and* carrier is "on"
++ */
++ netif_stop_queue (dev->net);
++ netif_carrier_off (dev->net);
++
++ // SET_NETDEV_DEV (dev->net, &gadget->dev);
++ status = register_netdev (dev->net);
++ if (status < 0)
++ goto fail1;
++
++ INFO (dev, "%s, version: " DRIVER_VERSION "\n", driver_desc);
++ INFO (dev, "using %s, OUT %s IN %s%s%s\n", gadget->name,
++ EP_OUT_NAME, EP_IN_NAME,
++ EP_STATUS_NAME ? " STATUS " : "",
++ EP_STATUS_NAME ? EP_STATUS_NAME : ""
++ );
++ INFO (dev, "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
++ net->dev_addr [0], net->dev_addr [1],
++ net->dev_addr [2], net->dev_addr [3],
++ net->dev_addr [4], net->dev_addr [5]);
++
++ if (cdc || rndis)
++ INFO (dev, "HOST MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
++ dev->host_mac [0], dev->host_mac [1],
++ dev->host_mac [2], dev->host_mac [3],
++ dev->host_mac [4], dev->host_mac [5]);
++
++#ifdef CONFIG_USB_ETH_RNDIS
++ if (rndis) {
++ u32 vendorID = 0;
++
++ /* FIXME RNDIS vendor id == "vendor NIC code" == ? */
++
++ dev->rndis_config = rndis_register (rndis_control_ack);
++ if (dev->rndis_config < 0) {
++fail0:
++ unregister_netdev (dev->net);
++ status = -ENODEV;
++ goto fail;
++ }
++
++ /* these set up a lot of the OIDs that RNDIS needs */
++ rndis_set_host_mac (dev->rndis_config, dev->host_mac);
++ if (rndis_set_param_dev (dev->rndis_config, dev->net,
++ &dev->stats))
++ goto fail0;
++ if (rndis_set_param_vendor (dev->rndis_config, vendorID,
++ manufacturer))
++ goto fail0;
++ if (rndis_set_param_medium (dev->rndis_config,
++ NDIS_MEDIUM_802_3,
++ 0))
++ goto fail0;
++ INFO (dev, "RNDIS ready\n");
++ }
++#endif
++
++ return status;
++
++fail1:
++ DEBUG (dev, "register_netdev failed, %d\n", status);
++fail:
++ eth_unbind (gadget);
++ return status;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void
++eth_suspend (struct usb_gadget *gadget)
++{
++ struct eth_dev *dev = get_gadget_data (gadget);
++
++ DEBUG (dev, "suspend\n");
++ dev->suspended = 1;
++}
++
++static void
++eth_resume (struct usb_gadget *gadget)
++{
++ struct eth_dev *dev = get_gadget_data (gadget);
++
++ DEBUG (dev, "resume\n");
++ dev->suspended = 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_gadget_driver eth_driver = {
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ .speed = USB_SPEED_HIGH,
++#else
++ .speed = USB_SPEED_FULL,
++#endif
++ .function = (char *) driver_desc,
++ .bind = eth_bind,
++ .unbind = eth_unbind,
++
++ .setup = eth_setup,
++ .disconnect = eth_disconnect,
++
++ /* PO: is this available? */
++ .suspend = eth_suspend,
++ .resume = eth_resume,
++
++ .driver = {
++ .name = (char *) shortname,
++ // .shutdown = ...
++ // .suspend = ...
++ // .resume = ...
++ },
++};
++
++MODULE_DESCRIPTION (DRIVER_DESC);
++MODULE_AUTHOR ("David Brownell, Benedikt Spanger");
++MODULE_LICENSE ("GPL");
++
++
++static int __init init (void)
++{
++ return usb_gadget_register_driver (&eth_driver);
++}
++module_init (init);
++
++static void __exit cleanup (void)
++{
++ usb_gadget_unregister_driver (&eth_driver);
++}
++module_exit (cleanup);
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/file_storage.c kernel/drivers/usb/gadget/file_storage.c
+--- /tmp/kernel/drivers/usb/gadget/file_storage.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/file_storage.c 2005-04-22 17:53:19.431540004 +0200
+@@ -0,0 +1,3939 @@
++/*
++ * file_storage.c -- File-backed USB Storage Gadget, for USB development
++ *
++ * Copyright (C) 2003, 2004 Alan Stern
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions, and the following disclaimer,
++ * without modification.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * 3. The names of the above-listed copyright holders may not be used
++ * to endorse or promote products derived from this software without
++ * specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
++ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
++ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
++ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
++ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
++ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/*
++ * The File-backed Storage Gadget acts as a USB Mass Storage device,
++ * appearing to the host as a disk drive. In addition to providing an
++ * example of a genuinely useful gadget driver for a USB device, it also
++ * illustrates a technique of double-buffering for increased throughput.
++ * Last but not least, it gives an easy way to probe the behavior of the
++ * Mass Storage drivers in a USB host.
++ *
++ * Backing storage is provided by a regular file or a block device, specified
++ * by the "file" module parameter. Access can be limited to read-only by
++ * setting the optional "ro" module parameter.
++ *
++ * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
++ * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
++ * by the optional "transport" module parameter. It also supports the
++ * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0c03),
++ * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
++ * the optional "protocol" module parameter. For testing purposes the
++ * gadget will indicate that it has removable media if the optional
++ * "removable" module parameter is set. In addition, the default Vendor ID,
++ * Product ID, and release number can be overridden.
++ *
++ * There is support for multiple logical units (LUNs), each of which has
++ * its own backing file. The number of LUNs can be set using the optional
++ * "luns" module parameter (anywhere from 1 to 8), and the corresponding
++ * files are specified using comma-separated lists for "file" and "ro".
++ * The default number of LUNs is taken from the number of "file" elements;
++ * it is 1 if "file" is not given. If "removable" is not set then a backing
++ * file must be specified for each LUN. If it is set, then an unspecified
++ * or empty backing filename means the LUN's medium is not loaded.
++ *
++ * Requirements are modest; only a bulk-in and a bulk-out endpoint are
++ * needed (an interrupt-out endpoint is also needed for CBI). The memory
++ * requirement amounts to two 16K buffers, size configurable by a parameter.
++ * Support is included for both full-speed and high-speed operation.
++ *
++ * Module options:
++ *
++ * file=filename[,filename...]
++ * Required if "removable" is not set, names of
++ * the files or block devices used for
++ * backing storage
++ * ro=b[,b...] Default false, booleans for read-only access
++ * luns=N Default N = number of filenames, number of
++ * LUNs to support
++ * transport=XXX Default BBB, transport name (CB, CBI, or BBB)
++ * protocol=YYY Default SCSI, protocol name (RBC, 8020 or
++ * ATAPI, QIC, UFI, 8070, or SCSI;
++ * also 1 - 6)
++ * removable Default false, boolean for removable media
++ * vendor=0xVVVV Default 0x0525 (NetChip), USB Vendor ID
++ * product=0xPPPP Default 0xa4a5 (FSG), USB Product ID
++ * release=0xRRRR Override the USB release number (bcdDevice)
++ * buflen=N Default N=16384, buffer size used (will be
++ * rounded down to a multiple of
++ * PAGE_CACHE_SIZE)
++ * stall Default determined according to the type of
++ * USB device controller (usually true),
++ * boolean to permit the driver to halt
++ * bulk endpoints
++ *
++ * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file" and "ro"
++ * options are available; default values are used for everything else.
++ *
++ * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
++ */
++
++
++/*
++ * Driver Design
++ *
++ * The FSG driver is fairly straightforward. There is a main kernel
++ * thread that handles most of the work. Interrupt routines field
++ * callbacks from the controller driver: bulk- and interrupt-request
++ * completion notifications, endpoint-0 events, and disconnect events.
++ * Completion events are passed to the main thread by wakeup calls. Many
++ * ep0 requests are handled at interrupt time, but SetInterface,
++ * SetConfiguration, and device reset requests are forwarded to the
++ * thread in the form of "exceptions" using SIGUSR1 signals (since they
++ * should interrupt any ongoing file I/O operations).
++ *
++ * The thread's main routine implements the standard command/data/status
++ * parts of a SCSI interaction. It and its subroutines are full of tests
++ * for pending signals/exceptions -- all this polling is necessary since
++ * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
++ * indication that the driver really wants to be running in userspace.)
++ * An important point is that so long as the thread is alive it keeps an
++ * open reference to the backing file. This will prevent unmounting
++ * the backing file's underlying filesystem and could cause problems
++ * during system shutdown, for example. To prevent such problems, the
++ * thread catches INT, TERM, and KILL signals and converts them into
++ * an EXIT exception.
++ *
++ * In normal operation the main thread is started during the gadget's
++ * fsg_bind() callback and stopped during fsg_unbind(). But it can also
++ * exit when it receives a signal, and there's no point leaving the
++ * gadget running when the thread is dead. So just before the thread
++ * exits, it deregisters the gadget driver. This makes things a little
++ * tricky: The driver is deregistered at two places, and the exiting
++ * thread can indirectly call fsg_unbind() which in turn can tell the
++ * thread to exit. The first problem is resolved through the use of the
++ * REGISTERED atomic bitflag; the driver will only be deregistered once.
++ * The second problem is resolved by having fsg_unbind() check
++ * fsg->state; it won't try to stop the thread if the state is already
++ * FSG_STATE_TERMINATED.
++ *
++ * To provide maximum throughput, the driver uses a circular pipeline of
++ * buffer heads (struct fsg_buffhd). In principle the pipeline can be
++ * arbitrarily long; in practice the benefits don't justify having more
++ * than 2 stages (i.e., double buffering). But it helps to think of the
++ * pipeline as being a long one. Each buffer head contains a bulk-in and
++ * a bulk-out request pointer (since the buffer can be used for both
++ * output and input -- directions always are given from the host's
++ * point of view) as well as a pointer to the buffer and various state
++ * variables.
++ *
++ * Use of the pipeline follows a simple protocol. There is a variable
++ * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
++ * At any time that buffer head may still be in use from an earlier
++ * request, so each buffer head has a state variable indicating whether
++ * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
++ * buffer head to be EMPTY, filling the buffer either by file I/O or by
++ * USB I/O (during which the buffer head is BUSY), and marking the buffer
++ * head FULL when the I/O is complete. Then the buffer will be emptied
++ * (again possibly by USB I/O, during which it is marked BUSY) and
++ * finally marked EMPTY again (possibly by a completion routine).
++ *
++ * A module parameter tells the driver to avoid stalling the bulk
++ * endpoints wherever the transport specification allows. This is
++ * necessary for some UDCs like the SuperH, which cannot reliably clear a
++ * halt on a bulk endpoint. However, under certain circumstances the
++ * Bulk-only specification requires a stall. In such cases the driver
++ * will halt the endpoint and set a flag indicating that it should clear
++ * the halt in software during the next device reset. Hopefully this
++ * will permit everything to work correctly.
++ *
++ * One subtle point concerns sending status-stage responses for ep0
++ * requests. Some of these requests, such as device reset, can involve
++ * interrupting an ongoing file I/O operation, which might take an
++ * arbitrarily long time. During that delay the host might give up on
++ * the original ep0 request and issue a new one. When that happens the
++ * driver should not notify the host about completion of the original
++ * request, as the host will no longer be waiting for it. So the driver
++ * assigns to each ep0 request a unique tag, and it keeps track of the
++ * tag value of the request associated with a long-running exception
++ * (device-reset, interface-change, or configuration-change). When the
++ * exception handler is finished, the status-stage response is submitted
++ * only if the current ep0 request tag is equal to the exception request
++ * tag. Thus only the most recently received ep0 request will get a
++ * status-stage response.
++ *
++ * Warning: This driver source file is too long. It ought to be split up
++ * into a header file plus about 3 separate .c files, to handle the details
++ * of the Gadget, USB Mass Storage, and SCSI protocols.
++ */
++
++
++#undef DEBUG
++#undef VERBOSE
++#undef DUMP_MSGS
++
++#include <linux/config.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++
++#include <linux/bitops.h>
++#include <linux/blkdev.h>
++#include <linux/compiler.h>
++#include <linux/completion.h>
++#include <linux/dcache.h>
++#include <linux/fcntl.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/limits.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/pagemap.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/uts.h>
++#include <linux/version.h>
++#include <linux/wait.h>
++
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++#include "gadget_chips.h"
++
++
++/*-------------------------------------------------------------------------*/
++
++#define DRIVER_DESC "File-backed Storage Gadget"
++#define DRIVER_NAME "g_file_storage"
++#define DRIVER_VERSION "05 June 2004"
++
++static const char longname[] = DRIVER_DESC;
++static const char shortname[] = DRIVER_NAME;
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR("Alan Stern");
++MODULE_LICENSE("Dual BSD/GPL");
++
++/* Thanks to NetChip Technologies for donating this product ID.
++ *
++ * DO NOT REUSE THESE IDs with any other driver!! Ever!!
++ * Instead: allocate your own, using normal USB-IF procedures. */
++#define DRIVER_VENDOR_ID 0x0525 // NetChip
++#define DRIVER_PRODUCT_ID 0xa4a5 // Linux-USB File-backed Storage Gadget
++
++
++/*
++ * This driver assumes self-powered hardware and has no way for users to
++ * trigger remote wakeup. It uses autoconfiguration to select endpoints
++ * and endpoint addresses.
++ */
++
++
++/*-------------------------------------------------------------------------*/
++
++#define fakedev_printk(level, dev, format, args...) \
++ printk(level "%s %s: " format , DRIVER_NAME , (dev)->name , ## args)
++
++#define xprintk(f,level,fmt,args...) \
++ fakedev_printk(level , (f)->gadget , fmt , ## args)
++#define yprintk(l,level,fmt,args...) \
++ fakedev_printk(level , &(l)->dev , fmt , ## args)
++
++#ifdef DEBUG
++#define DBG(fsg,fmt,args...) \
++ xprintk(fsg , KERN_DEBUG , fmt , ## args)
++#define LDBG(lun,fmt,args...) \
++ yprintk(lun , KERN_DEBUG , fmt , ## args)
++#define MDBG(fmt,args...) \
++ printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
++#else
++#define DBG(fsg,fmt,args...) \
++ do { } while (0)
++#define LDBG(lun,fmt,args...) \
++ do { } while (0)
++#define MDBG(fmt,args...) \
++ do { } while (0)
++#undef VERBOSE
++#undef DUMP_MSGS
++#endif /* DEBUG */
++
++#ifdef VERBOSE
++#define VDBG DBG
++#define VLDBG LDBG
++#else
++#define VDBG(fsg,fmt,args...) \
++ do { } while (0)
++#define VLDBG(lun,fmt,args...) \
++ do { } while (0)
++#endif /* VERBOSE */
++
++#define ERROR(fsg,fmt,args...) \
++ xprintk(fsg , KERN_ERR , fmt , ## args)
++#define LERROR(lun,fmt,args...) \
++ yprintk(lun , KERN_ERR , fmt , ## args)
++
++#define WARN(fsg,fmt,args...) \
++ xprintk(fsg , KERN_WARNING , fmt , ## args)
++#define LWARN(lun,fmt,args...) \
++ yprintk(lun , KERN_WARNING , fmt , ## args)
++
++#define INFO(fsg,fmt,args...) \
++ xprintk(fsg , KERN_INFO , fmt , ## args)
++#define LINFO(lun,fmt,args...) \
++ yprintk(lun , KERN_INFO , fmt , ## args)
++
++#define MINFO(fmt,args...) \
++ printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
++
++
++/*-------------------------------------------------------------------------*/
++
++/* Encapsulate the module parameter settings */
++
++#define MAX_LUNS 8
++
++static char *file[MAX_LUNS] = {NULL, };
++static int ro[MAX_LUNS] = {0, };
++static unsigned int luns = 0;
++ // Default values
++static char *transport = "BBB";
++static char *protocol = "SCSI";
++static int removable = 0;
++static unsigned short vendor = DRIVER_VENDOR_ID;
++static unsigned short product = DRIVER_PRODUCT_ID;
++static unsigned short release = 0xffff; // Use controller chip type
++static unsigned int buflen = 16384;
++static int stall = 1;
++
++static struct {
++ unsigned int nluns;
++
++ char *transport_parm;
++ char *protocol_parm;
++ int removable;
++ unsigned short vendor;
++ unsigned short product;
++ unsigned short release;
++ unsigned int buflen;
++ int can_stall;
++
++ int transport_type;
++ char *transport_name;
++ int protocol_type;
++ char *protocol_name;
++
++} mod_data;
++
++
++MODULE_PARM(file, "1-8s");
++MODULE_PARM_DESC(file, "names of backing files or devices");
++
++MODULE_PARM(ro, "1-8b");
++MODULE_PARM_DESC(ro, "true to force read-only");
++
++
++/* In the non-TEST version, only the file and ro module parameters
++ * are available. */
++#ifdef CONFIG_USB_FILE_STORAGE_TEST
++
++MODULE_PARM(luns, "i");
++MODULE_PARM_DESC(luns, "number of LUNs");
++
++MODULE_PARM(transport, "s");
++MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)");
++
++MODULE_PARM(protocol, "s");
++MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, "
++ "8070, or SCSI)");
++
++MODULE_PARM(removable, "b");
++MODULE_PARM_DESC(removable, "true to simulate removable media");
++
++MODULE_PARM(vendor, "h");
++MODULE_PARM_DESC(vendor, "USB Vendor ID");
++
++MODULE_PARM(product, "h");
++MODULE_PARM_DESC(product, "USB Product ID");
++
++MODULE_PARM(release, "h");
++MODULE_PARM_DESC(release, "USB release number");
++
++MODULE_PARM(buflen, "i");
++MODULE_PARM_DESC(buflen, "I/O buffer size");
++
++MODULE_PARM(stall, "i");
++MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
++
++#endif /* CONFIG_USB_FILE_STORAGE_TEST */
++
++
++/*-------------------------------------------------------------------------*/
++
++/* USB protocol value = the transport method */
++#define USB_PR_CBI 0x00 // Control/Bulk/Interrupt
++#define USB_PR_CB 0x01 // Control/Bulk w/o interrupt
++#define USB_PR_BULK 0x50 // Bulk-only
++
++/* USB subclass value = the protocol encapsulation */
++#define USB_SC_RBC 0x01 // Reduced Block Commands (flash)
++#define USB_SC_8020 0x02 // SFF-8020i, MMC-2, ATAPI (CD-ROM)
++#define USB_SC_QIC 0x03 // QIC-157 (tape)
++#define USB_SC_UFI 0x04 // UFI (floppy)
++#define USB_SC_8070 0x05 // SFF-8070i (removable)
++#define USB_SC_SCSI 0x06 // Transparent SCSI
++
++/* Bulk-only data structures */
++
++/* Command Block Wrapper */
++struct bulk_cb_wrap {
++ u32 Signature; // Contains 'USBC'
++ u32 Tag; // Unique per command id
++ u32 DataTransferLength; // Size of the data
++ u8 Flags; // Direction in bit 7
++ u8 Lun; // LUN (normally 0)
++ u8 Length; // Of the CDB, <= MAX_COMMAND_SIZE
++ u8 CDB[16]; // Command Data Block
++};
++
++#define USB_BULK_CB_WRAP_LEN 31
++#define USB_BULK_CB_SIG 0x43425355 // Spells out USBC
++#define USB_BULK_IN_FLAG 0x80
++
++/* Command Status Wrapper */
++struct bulk_cs_wrap {
++ u32 Signature; // Should = 'USBS'
++ u32 Tag; // Same as original command
++ u32 Residue; // Amount not transferred
++ u8 Status; // See below
++};
++
++#define USB_BULK_CS_WRAP_LEN 13
++#define USB_BULK_CS_SIG 0x53425355 // Spells out 'USBS'
++#define USB_STATUS_PASS 0
++#define USB_STATUS_FAIL 1
++#define USB_STATUS_PHASE_ERROR 2
++
++/* Bulk-only class specific requests */
++#define USB_BULK_RESET_REQUEST 0xff
++#define USB_BULK_GET_MAX_LUN_REQUEST 0xfe
++
++
++/* CBI Interrupt data structure */
++struct interrupt_data {
++ u8 bType;
++ u8 bValue;
++};
++
++#define CBI_INTERRUPT_DATA_LEN 2
++
++/* CBI Accept Device-Specific Command request */
++#define USB_CBI_ADSC_REQUEST 0x00
++
++
++#define MAX_COMMAND_SIZE 16 // Length of a SCSI Command Data Block
++
++/* SCSI commands that we recognize */
++#define SC_FORMAT_UNIT 0x04
++#define SC_INQUIRY 0x12
++#define SC_MODE_SELECT_6 0x15
++#define SC_MODE_SELECT_10 0x55
++#define SC_MODE_SENSE_6 0x1a
++#define SC_MODE_SENSE_10 0x5a
++#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
++#define SC_READ_6 0x08
++#define SC_READ_10 0x28
++#define SC_READ_12 0xa8
++#define SC_READ_CAPACITY 0x25
++#define SC_READ_FORMAT_CAPACITIES 0x23
++#define SC_RELEASE 0x17
++#define SC_REQUEST_SENSE 0x03
++#define SC_RESERVE 0x16
++#define SC_SEND_DIAGNOSTIC 0x1d
++#define SC_START_STOP_UNIT 0x1b
++#define SC_SYNCHRONIZE_CACHE 0x35
++#define SC_TEST_UNIT_READY 0x00
++#define SC_VERIFY 0x2f
++#define SC_WRITE_6 0x0a
++#define SC_WRITE_10 0x2a
++#define SC_WRITE_12 0xaa
++
++/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
++#define SS_NO_SENSE 0
++#define SS_COMMUNICATION_FAILURE 0x040800
++#define SS_INVALID_COMMAND 0x052000
++#define SS_INVALID_FIELD_IN_CDB 0x052400
++#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
++#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
++#define SS_MEDIUM_NOT_PRESENT 0x023a00
++#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
++#define SS_NOT_READY_TO_READY_TRANSITION 0x062800
++#define SS_RESET_OCCURRED 0x062900
++#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
++#define SS_UNRECOVERED_READ_ERROR 0x031100
++#define SS_WRITE_ERROR 0x030c02
++#define SS_WRITE_PROTECTED 0x072700
++
++#define SK(x) ((u8) ((x) >> 16)) // Sense Key byte, etc.
++#define ASC(x) ((u8) ((x) >> 8))
++#define ASCQ(x) ((u8) (x))
++
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * These definitions will permit the compiler to avoid generating code for
++ * parts of the driver that aren't used in the non-TEST version. Even gcc
++ * can recognize when a test of a constant expression yields a dead code
++ * path.
++ *
++ * Also, in the non-TEST version, open_backing_file() is only used during
++ * initialization and the sysfs attribute store_xxx routines aren't used
++ * at all. We will define NORMALLY_INIT to mark them as __init so they
++ * don't occupy kernel code space unnecessarily.
++ */
++
++#ifdef CONFIG_USB_FILE_STORAGE_TEST
++
++#define transport_is_bbb() (mod_data.transport_type == USB_PR_BULK)
++#define transport_is_cbi() (mod_data.transport_type == USB_PR_CBI)
++#define protocol_is_scsi() (mod_data.protocol_type == USB_SC_SCSI)
++#define backing_file_is_open(curlun) ((curlun)->filp != NULL)
++#define NORMALLY_INIT
++
++#else
++
++#define transport_is_bbb() 1
++#define transport_is_cbi() 0
++#define protocol_is_scsi() 1
++#define backing_file_is_open(curlun) 1
++#define NORMALLY_INIT __init
++
++#endif /* CONFIG_USB_FILE_STORAGE_TEST */
++
++
++struct lun {
++ struct file *filp;
++ loff_t file_length;
++ loff_t num_sectors;
++
++ unsigned int ro : 1;
++ unsigned int prevent_medium_removal : 1;
++ unsigned int registered : 1;
++
++ u32 sense_data;
++ u32 sense_data_info;
++ u32 unit_attention_data;
++
++#define BUS_ID_SIZE 20
++ struct __lun_device {
++ char name[BUS_ID_SIZE];
++ void *driver_data;
++ } dev;
++};
++
++
++/* Big enough to hold our biggest descriptor */
++#define EP0_BUFSIZE 256
++#define DELAYED_STATUS (EP0_BUFSIZE + 999) // An impossibly large value
++
++/* Number of buffers we will use. 2 is enough for double-buffering */
++#define NUM_BUFFERS 2
++
++enum fsg_buffer_state {
++ BUF_STATE_EMPTY = 0,
++ BUF_STATE_FULL,
++ BUF_STATE_BUSY
++};
++
++struct fsg_buffhd {
++ void *buf;
++ dma_addr_t dma;
++ volatile enum fsg_buffer_state state;
++ struct fsg_buffhd *next;
++
++ /* The NetChip 2280 is faster, and handles some protocol faults
++ * better, if we don't submit any short bulk-out read requests.
++ * So we will record the intended request length here. */
++ unsigned int bulk_out_intended_length;
++
++ struct usb_request *inreq;
++ volatile int inreq_busy;
++ struct usb_request *outreq;
++ volatile int outreq_busy;
++};
++
++enum fsg_state {
++ FSG_STATE_COMMAND_PHASE = -10, // This one isn't used anywhere
++ FSG_STATE_DATA_PHASE,
++ FSG_STATE_STATUS_PHASE,
++
++ FSG_STATE_IDLE = 0,
++ FSG_STATE_ABORT_BULK_OUT,
++ FSG_STATE_RESET,
++ FSG_STATE_INTERFACE_CHANGE,
++ FSG_STATE_CONFIG_CHANGE,
++ FSG_STATE_DISCONNECT,
++ FSG_STATE_EXIT,
++ FSG_STATE_TERMINATED
++};
++
++enum data_direction {
++ DATA_DIR_UNKNOWN = 0,
++ DATA_DIR_FROM_HOST,
++ DATA_DIR_TO_HOST,
++ DATA_DIR_NONE
++};
++
++struct fsg_dev {
++ /* lock protects: state, all the req_busy's, and cbbuf_cmnd */
++ spinlock_t lock;
++ struct usb_gadget *gadget;
++
++ /* filesem protects: backing files in use */
++ struct rw_semaphore filesem;
++
++ struct usb_ep *ep0; // Handy copy of gadget->ep0
++ struct usb_request *ep0req; // For control responses
++ volatile unsigned int ep0_req_tag;
++ const char *ep0req_name;
++
++ struct usb_request *intreq; // For interrupt responses
++ volatile int intreq_busy;
++ struct fsg_buffhd *intr_buffhd;
++
++ unsigned int bulk_out_maxpacket;
++ enum fsg_state state; // For exception handling
++ unsigned int exception_req_tag;
++
++ u8 config, new_config;
++
++ unsigned int running : 1;
++ unsigned int bulk_in_enabled : 1;
++ unsigned int bulk_out_enabled : 1;
++ unsigned int intr_in_enabled : 1;
++ unsigned int phase_error : 1;
++ unsigned int short_packet_received : 1;
++ unsigned int bad_lun_okay : 1;
++
++ unsigned long atomic_bitflags;
++#define REGISTERED 0
++#define CLEAR_BULK_HALTS 1
++
++ struct usb_ep *bulk_in;
++ struct usb_ep *bulk_out;
++ struct usb_ep *intr_in;
++
++ struct fsg_buffhd *next_buffhd_to_fill;
++ struct fsg_buffhd *next_buffhd_to_drain;
++ struct fsg_buffhd buffhds[NUM_BUFFERS];
++
++ wait_queue_head_t thread_wqh;
++ int thread_wakeup_needed;
++ struct completion thread_notifier;
++ int thread_pid;
++ struct task_struct *thread_task;
++ sigset_t thread_signal_mask;
++
++ int cmnd_size;
++ u8 cmnd[MAX_COMMAND_SIZE];
++ enum data_direction data_dir;
++ u32 data_size;
++ u32 data_size_from_cmnd;
++ u32 tag;
++ unsigned int lun;
++ u32 residue;
++ u32 usb_amount_left;
++
++ /* The CB protocol offers no way for a host to know when a command
++ * has completed. As a result the next command may arrive early,
++ * and we will still have to handle it. For that reason we need
++ * a buffer to store new commands when using CB (or CBI, which
++ * does not oblige a host to wait for command completion either). */
++ int cbbuf_cmnd_size;
++ u8 cbbuf_cmnd[MAX_COMMAND_SIZE];
++
++ unsigned int nluns;
++ struct lun *luns;
++ struct lun *curlun;
++};
++
++typedef void (*fsg_routine_t)(struct fsg_dev *);
++
++static int inline exception_in_progress(struct fsg_dev *fsg)
++{
++ return (fsg->state > FSG_STATE_IDLE);
++}
++
++/* Make bulk-out requests be divisible by the maxpacket size */
++static void inline set_bulk_out_req_length(struct fsg_dev *fsg,
++ struct fsg_buffhd *bh, unsigned int length)
++{
++ unsigned int rem;
++
++ bh->bulk_out_intended_length = length;
++ rem = length % fsg->bulk_out_maxpacket;
++ if (rem > 0)
++ length += fsg->bulk_out_maxpacket - rem;
++ bh->outreq->length = length;
++}
++
++static struct fsg_dev *the_fsg;
++static struct usb_gadget_driver fsg_driver;
++
++static void close_backing_file(struct lun *curlun);
++static void close_all_backing_files(struct fsg_dev *fsg);
++
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef DUMP_MSGS
++
++static void dump_msg(struct fsg_dev *fsg, const char *label,
++ const u8 *buf, unsigned int length)
++{
++ unsigned int start, num, i;
++ char line[52], *p;
++
++ if (length >= 512)
++ return;
++ DBG(fsg, "%s, length %u:\n", label, length);
++
++ start = 0;
++ while (length > 0) {
++ num = min(length, 16u);
++ p = line;
++ for (i = 0; i < num; ++i) {
++ if (i == 8)
++ *p++ = ' ';
++ sprintf(p, " %02x", buf[i]);
++ p += 3;
++ }
++ *p = 0;
++ printk(KERN_DEBUG "%6x: %s\n", start, line);
++ buf += num;
++ start += num;
++ length -= num;
++ }
++}
++
++static void inline dump_cdb(struct fsg_dev *fsg)
++{}
++
++#else
++
++static void inline dump_msg(struct fsg_dev *fsg, const char *label,
++ const u8 *buf, unsigned int length)
++{}
++
++static void inline dump_cdb(struct fsg_dev *fsg)
++{
++ int i;
++ char cmdbuf[3*MAX_COMMAND_SIZE + 1];
++
++ for (i = 0; i < fsg->cmnd_size; ++i)
++ sprintf(cmdbuf + i*3, " %02x", fsg->cmnd[i]);
++ VDBG(fsg, "SCSI CDB: %s\n", cmdbuf);
++}
++
++#endif /* DUMP_MSGS */
++
++
++static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
++{
++ const char *name;
++
++ if (ep == fsg->bulk_in)
++ name = "bulk-in";
++ else if (ep == fsg->bulk_out)
++ name = "bulk-out";
++ else
++ name = ep->name;
++ DBG(fsg, "%s set halt\n", name);
++ return usb_ep_set_halt(ep);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* Routines for unaligned data access */
++
++static u16 inline get_be16(u8 *buf)
++{
++ return ((u16) buf[0] << 8) | ((u16) buf[1]);
++}
++
++static u32 inline get_be32(u8 *buf)
++{
++ return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
++ ((u32) buf[2] << 8) | ((u32) buf[3]);
++}
++
++static void inline put_be16(u8 *buf, u16 val)
++{
++ buf[0] = val >> 8;
++ buf[1] = val;
++}
++
++static void inline put_be32(u8 *buf, u32 val)
++{
++ buf[0] = val >> 24;
++ buf[1] = val >> 16;
++ buf[2] = val >> 8;
++ buf[3] = val;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * DESCRIPTORS ... most are static, but strings and (full) configuration
++ * descriptors are built on demand. Also the (static) config and interface
++ * descriptors are adjusted during fsg_bind().
++ */
++#define STRING_MANUFACTURER 1
++#define STRING_PRODUCT 2
++#define STRING_SERIAL 3
++
++/* There is only one configuration. */
++#define CONFIG_VALUE 1
++
++static struct usb_device_descriptor
++device_desc = {
++ .bLength = sizeof device_desc,
++ .bDescriptorType = USB_DT_DEVICE,
++
++ .bcdUSB = __constant_cpu_to_le16(0x0200),
++ .bDeviceClass = USB_CLASS_PER_INTERFACE,
++
++ /* The next three values can be overridden by module parameters */
++ .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_ID),
++ .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_ID),
++ .bcdDevice = __constant_cpu_to_le16(0xffff),
++
++ .iManufacturer = STRING_MANUFACTURER,
++ .iProduct = STRING_PRODUCT,
++ .iSerialNumber = STRING_SERIAL,
++ .bNumConfigurations = 1,
++};
++
++static struct usb_config_descriptor
++config_desc = {
++ .bLength = sizeof config_desc,
++ .bDescriptorType = USB_DT_CONFIG,
++
++ /* wTotalLength computed by usb_gadget_config_buf() */
++ .bNumInterfaces = 1,
++ .bConfigurationValue = CONFIG_VALUE,
++ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
++ .bMaxPower = 1, // self-powered
++};
++
++/* There is only one interface. */
++
++static struct usb_interface_descriptor
++intf_desc = {
++ .bLength = sizeof intf_desc,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bNumEndpoints = 2, // Adjusted during fsg_bind()
++ .bInterfaceClass = USB_CLASS_MASS_STORAGE,
++ .bInterfaceSubClass = USB_SC_SCSI, // Adjusted during fsg_bind()
++ .bInterfaceProtocol = USB_PR_BULK, // Adjusted during fsg_bind()
++};
++
++/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
++ * and interrupt-in. */
++
++static struct usb_endpoint_descriptor
++fs_bulk_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_IN,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ /* wMaxPacketSize set by autoconfiguration */
++};
++
++static struct usb_endpoint_descriptor
++fs_bulk_out_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_OUT,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ /* wMaxPacketSize set by autoconfiguration */
++};
++
++static struct usb_endpoint_descriptor
++fs_intr_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_IN,
++ .bmAttributes = USB_ENDPOINT_XFER_INT,
++ .wMaxPacketSize = __constant_cpu_to_le16(2),
++ .bInterval = 32, // frames -> 32 ms
++};
++
++static const struct usb_descriptor_header *fs_function[] = {
++ (struct usb_descriptor_header *) &intf_desc,
++ (struct usb_descriptor_header *) &fs_bulk_in_desc,
++ (struct usb_descriptor_header *) &fs_bulk_out_desc,
++ (struct usb_descriptor_header *) &fs_intr_in_desc,
++ NULL,
++};
++
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++
++/*
++ * USB 2.0 devices need to expose both high speed and full speed
++ * descriptors, unless they only run at full speed.
++ *
++ * That means alternate endpoint descriptors (bigger packets)
++ * and a "device qualifier" ... plus more construction options
++ * for the config descriptor.
++ */
++static struct usb_qualifier_descriptor
++dev_qualifier = {
++ .bLength = sizeof dev_qualifier,
++ .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
++
++ .bcdUSB = __constant_cpu_to_le16(0x0200),
++ .bDeviceClass = USB_CLASS_PER_INTERFACE,
++
++ .bNumConfigurations = 1,
++};
++
++static struct usb_endpoint_descriptor
++hs_bulk_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = __constant_cpu_to_le16(512),
++};
++
++static struct usb_endpoint_descriptor
++hs_bulk_out_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = __constant_cpu_to_le16(512),
++ .bInterval = 1, // NAK every 1 uframe
++};
++
++static struct usb_endpoint_descriptor
++hs_intr_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
++ .bmAttributes = USB_ENDPOINT_XFER_INT,
++ .wMaxPacketSize = __constant_cpu_to_le16(2),
++ .bInterval = 9, // 2**(9-1) = 256 uframes -> 32 ms
++};
++
++static const struct usb_descriptor_header *hs_function[] = {
++ (struct usb_descriptor_header *) &intf_desc,
++ (struct usb_descriptor_header *) &hs_bulk_in_desc,
++ (struct usb_descriptor_header *) &hs_bulk_out_desc,
++ (struct usb_descriptor_header *) &hs_intr_in_desc,
++ NULL,
++};
++
++/* Maxpacket and other transfer characteristics vary by speed. */
++#define ep_desc(g,fs,hs) (((g)->speed==USB_SPEED_HIGH) ? (hs) : (fs))
++
++#else
++
++/* If there's no high speed support, always use the full-speed descriptor. */
++#define ep_desc(g,fs,hs) fs
++
++#endif /* !CONFIG_USB_GADGET_DUALSPEED */
++
++
++/* The CBI specification limits the serial string to 12 uppercase hexadecimal
++ * characters. */
++static char manufacturer[40];
++static char serial[13];
++
++/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
++static struct usb_string strings[] = {
++ {STRING_MANUFACTURER, manufacturer},
++ {STRING_PRODUCT, longname},
++ {STRING_SERIAL, serial},
++ {}
++};
++
++static struct usb_gadget_strings stringtab = {
++ .language = 0x0409, // en-us
++ .strings = strings,
++};
++
++
++/*
++ * Config descriptors must agree with the code that sets configurations
++ * and with code managing interfaces and their altsettings. They must
++ * also handle different speeds and other-speed requests.
++ */
++static int populate_config_buf(enum usb_device_speed speed,
++ u8 *buf, u8 type, unsigned index)
++{
++ int len;
++ const struct usb_descriptor_header **function;
++
++ if (index > 0)
++ return -EINVAL;
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ if (type == USB_DT_OTHER_SPEED_CONFIG)
++ speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
++ if (speed == USB_SPEED_HIGH)
++ function = hs_function;
++ else
++#endif
++ function = fs_function;
++
++ len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
++ if (len < 0)
++ return len;
++ ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
++ return len;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* These routines may be called in process context or in_irq */
++
++static void wakeup_thread(struct fsg_dev *fsg)
++{
++ /* Tell the main thread that something has happened */
++ fsg->thread_wakeup_needed = 1;
++ wake_up_all(&fsg->thread_wqh);
++}
++
++
++static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
++{
++ unsigned long flags;
++ struct task_struct *thread_task;
++
++ /* Do nothing if a higher-priority exception is already in progress.
++ * If a lower-or-equal priority exception is in progress, preempt it
++ * and notify the main thread by sending it a signal. */
++ spin_lock_irqsave(&fsg->lock, flags);
++ if (fsg->state <= new_state) {
++ fsg->exception_req_tag = fsg->ep0_req_tag;
++ fsg->state = new_state;
++ thread_task = fsg->thread_task;
++ if (thread_task)
++ send_sig_info(SIGUSR1, (void *) 1L, thread_task);
++ }
++ spin_unlock_irqrestore(&fsg->lock, flags);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* The disconnect callback and ep0 routines. These always run in_irq,
++ * except that ep0_queue() is called in the main thread to acknowledge
++ * completion of various requests: set config, set interface, and
++ * Bulk-only device reset. */
++
++static void fsg_disconnect(struct usb_gadget *gadget)
++{
++ struct fsg_dev *fsg = get_gadget_data(gadget);
++
++ DBG(fsg, "disconnect or port reset\n");
++ raise_exception(fsg, FSG_STATE_DISCONNECT);
++}
++
++
++static int ep0_queue(struct fsg_dev *fsg)
++{
++ int rc;
++
++ rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
++ if (rc != 0 && rc != -ESHUTDOWN) {
++
++ /* We can't do much more than wait for a reset */
++ WARN(fsg, "error in submission: %s --> %d\n",
++ fsg->ep0->name, rc);
++ }
++ return rc;
++}
++
++static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
++{
++ struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data;
++
++ if (req->actual > 0)
++ dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
++ if (req->status || req->actual != req->length)
++ DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
++ req->status, req->actual, req->length);
++ if (req->status == -ECONNRESET) // Request was cancelled
++ usb_ep_fifo_flush(ep);
++
++ if (req->status == 0 && req->context)
++ ((fsg_routine_t) (req->context))(fsg);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* Bulk and interrupt endpoint completion handlers.
++ * These always run in_irq. */
++
++static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
++{
++ struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data;
++ struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context;
++
++ if (req->status || req->actual != req->length)
++ DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
++ req->status, req->actual, req->length);
++ if (req->status == -ECONNRESET) // Request was cancelled
++ usb_ep_fifo_flush(ep);
++
++ /* Hold the lock while we update the request and buffer states */
++ spin_lock(&fsg->lock);
++ bh->inreq_busy = 0;
++ bh->state = BUF_STATE_EMPTY;
++ spin_unlock(&fsg->lock);
++ wakeup_thread(fsg);
++}
++
++static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
++{
++ struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data;
++ struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context;
++
++ dump_msg(fsg, "bulk-out", req->buf, req->actual);
++ if (req->status || req->actual != bh->bulk_out_intended_length)
++ DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
++ req->status, req->actual,
++ bh->bulk_out_intended_length);
++ if (req->status == -ECONNRESET) // Request was cancelled
++ usb_ep_fifo_flush(ep);
++
++ /* Hold the lock while we update the request and buffer states */
++ spin_lock(&fsg->lock);
++ bh->outreq_busy = 0;
++ bh->state = BUF_STATE_FULL;
++ spin_unlock(&fsg->lock);
++ wakeup_thread(fsg);
++}
++
++static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
++{
++#ifdef CONFIG_USB_FILE_STORAGE_TEST
++ struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data;
++ struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context;
++
++ if (req->status || req->actual != req->length)
++ DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
++ req->status, req->actual, req->length);
++ if (req->status == -ECONNRESET) // Request was cancelled
++ usb_ep_fifo_flush(ep);
++
++ /* Hold the lock while we update the request and buffer states */
++ spin_lock(&fsg->lock);
++ fsg->intreq_busy = 0;
++ bh->state = BUF_STATE_EMPTY;
++ spin_unlock(&fsg->lock);
++ wakeup_thread(fsg);
++#endif /* CONFIG_USB_FILE_STORAGE_TEST */
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* Ep0 class-specific handlers. These always run in_irq. */
++
++static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
++{
++#ifdef CONFIG_USB_FILE_STORAGE_TEST
++ struct usb_request *req = fsg->ep0req;
++ static u8 cbi_reset_cmnd[6] = {
++ SC_SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};
++
++ /* Error in command transfer? */
++ if (req->status || req->length != req->actual ||
++ req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {
++
++ /* Not all controllers allow a protocol stall after
++ * receiving control-out data, but we'll try anyway. */
++ fsg_set_halt(fsg, fsg->ep0);
++ return; // Wait for reset
++ }
++
++ /* Is it the special reset command? */
++ if (req->actual >= sizeof cbi_reset_cmnd &&
++ memcmp(req->buf, cbi_reset_cmnd,
++ sizeof cbi_reset_cmnd) == 0) {
++
++ /* Raise an exception to stop the current operation
++ * and reinitialize our state. */
++ DBG(fsg, "cbi reset request\n");
++ raise_exception(fsg, FSG_STATE_RESET);
++ return;
++ }
++
++ VDBG(fsg, "CB[I] accept device-specific command\n");
++ spin_lock(&fsg->lock);
++
++ /* Save the command for later */
++ if (fsg->cbbuf_cmnd_size)
++ WARN(fsg, "CB[I] overwriting previous command\n");
++ fsg->cbbuf_cmnd_size = req->actual;
++ memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);
++
++ spin_unlock(&fsg->lock);
++ wakeup_thread(fsg);
++#endif /* CONFIG_USB_FILE_STORAGE_TEST */
++}
++
++
++static int class_setup_req(struct fsg_dev *fsg,
++ const struct usb_ctrlrequest *ctrl)
++{
++ struct usb_request *req = fsg->ep0req;
++ int value = -EOPNOTSUPP;
++
++ if (!fsg->config)
++ return value;
++
++ /* Handle Bulk-only class-specific requests */
++ if (transport_is_bbb()) {
++ switch (ctrl->bRequest) {
++
++ case USB_BULK_RESET_REQUEST:
++ if (ctrl->bRequestType != (USB_DIR_OUT |
++ USB_TYPE_CLASS | USB_RECIP_INTERFACE))
++ break;
++ if (ctrl->wIndex != 0) {
++ value = -EDOM;
++ break;
++ }
++
++ /* Raise an exception to stop the current operation
++ * and reinitialize our state. */
++ DBG(fsg, "bulk reset request\n");
++ raise_exception(fsg, FSG_STATE_RESET);
++ value = DELAYED_STATUS;
++ break;
++
++ case USB_BULK_GET_MAX_LUN_REQUEST:
++ if (ctrl->bRequestType != (USB_DIR_IN |
++ USB_TYPE_CLASS | USB_RECIP_INTERFACE))
++ break;
++ if (ctrl->wIndex != 0) {
++ value = -EDOM;
++ break;
++ }
++ VDBG(fsg, "get max LUN\n");
++ *(u8 *) req->buf = fsg->nluns - 1;
++ value = min(ctrl->wLength, (u16) 1);
++ break;
++ }
++ }
++
++ /* Handle CBI class-specific requests */
++ else {
++ switch (ctrl->bRequest) {
++
++ case USB_CBI_ADSC_REQUEST:
++ if (ctrl->bRequestType != (USB_DIR_OUT |
++ USB_TYPE_CLASS | USB_RECIP_INTERFACE))
++ break;
++ if (ctrl->wIndex != 0) {
++ value = -EDOM;
++ break;
++ }
++ if (ctrl->wLength > MAX_COMMAND_SIZE) {
++ value = -EOVERFLOW;
++ break;
++ }
++ value = ctrl->wLength;
++ fsg->ep0req->context = received_cbi_adsc;
++ break;
++ }
++ }
++
++ if (value == -EOPNOTSUPP)
++ VDBG(fsg,
++ "unknown class-specific control req "
++ "%02x.%02x v%04x i%04x l%u\n",
++ ctrl->bRequestType, ctrl->bRequest,
++ ctrl->wValue, ctrl->wIndex, ctrl->wLength);
++ return value;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* Ep0 standard request handlers. These always run in_irq. */
++
++static int standard_setup_req(struct fsg_dev *fsg,
++ const struct usb_ctrlrequest *ctrl)
++{
++ struct usb_request *req = fsg->ep0req;
++ int value = -EOPNOTSUPP;
++
++ /* Usually this just stores reply data in the pre-allocated ep0 buffer,
++ * but config change events will also reconfigure hardware. */
++ switch (ctrl->bRequest) {
++
++ case USB_REQ_GET_DESCRIPTOR:
++ if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
++ USB_RECIP_DEVICE))
++ break;
++ switch (ctrl->wValue >> 8) {
++
++ case USB_DT_DEVICE:
++ VDBG(fsg, "get device descriptor\n");
++ value = min(ctrl->wLength, (u16) sizeof device_desc);
++ memcpy(req->buf, &device_desc, value);
++ break;
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ case USB_DT_DEVICE_QUALIFIER:
++ VDBG(fsg, "get device qualifier\n");
++ if (!fsg->gadget->is_dualspeed)
++ break;
++ value = min(ctrl->wLength, (u16) sizeof dev_qualifier);
++ memcpy(req->buf, &dev_qualifier, value);
++ break;
++
++ case USB_DT_OTHER_SPEED_CONFIG:
++ VDBG(fsg, "get other-speed config descriptor\n");
++ if (!fsg->gadget->is_dualspeed)
++ break;
++ goto get_config;
++#endif
++ case USB_DT_CONFIG:
++ VDBG(fsg, "get configuration descriptor\n");
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ get_config:
++#endif
++ value = populate_config_buf(fsg->gadget->speed,
++ req->buf,
++ ctrl->wValue >> 8,
++ ctrl->wValue & 0xff);
++ if (value >= 0)
++ value = min(ctrl->wLength, (u16) value);
++ break;
++
++ case USB_DT_STRING:
++ VDBG(fsg, "get string descriptor\n");
++
++ /* wIndex == language code */
++ value = usb_gadget_get_string(&stringtab,
++ ctrl->wValue & 0xff, req->buf);
++ if (value >= 0)
++ value = min(ctrl->wLength, (u16) value);
++ break;
++ }
++ break;
++
++ /* One config, two speeds */
++ case USB_REQ_SET_CONFIGURATION:
++ if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
++ USB_RECIP_DEVICE))
++ break;
++ VDBG(fsg, "set configuration\n");
++ if (ctrl->wValue == CONFIG_VALUE || ctrl->wValue == 0) {
++ fsg->new_config = ctrl->wValue;
++
++ /* Raise an exception to wipe out previous transaction
++ * state (queued bufs, etc) and set the new config. */
++ raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
++ value = DELAYED_STATUS;
++ }
++ break;
++ case USB_REQ_GET_CONFIGURATION:
++ if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
++ USB_RECIP_DEVICE))
++ break;
++ VDBG(fsg, "get configuration\n");
++ *(u8 *) req->buf = fsg->config;
++ value = min(ctrl->wLength, (u16) 1);
++ break;
++
++ case USB_REQ_SET_INTERFACE:
++ if (ctrl->bRequestType != (USB_DIR_OUT| USB_TYPE_STANDARD |
++ USB_RECIP_INTERFACE))
++ break;
++ if (fsg->config && ctrl->wIndex == 0) {
++
++ /* Raise an exception to wipe out previous transaction
++ * state (queued bufs, etc) and install the new
++ * interface altsetting. */
++ raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
++ value = DELAYED_STATUS;
++ }
++ break;
++ case USB_REQ_GET_INTERFACE:
++ if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
++ USB_RECIP_INTERFACE))
++ break;
++ if (!fsg->config)
++ break;
++ if (ctrl->wIndex != 0) {
++ value = -EDOM;
++ break;
++ }
++ VDBG(fsg, "get interface\n");
++ *(u8 *) req->buf = 0;
++ value = min(ctrl->wLength, (u16) 1);
++ break;
++
++ default:
++ VDBG(fsg,
++ "unknown control req %02x.%02x v%04x i%04x l%u\n",
++ ctrl->bRequestType, ctrl->bRequest,
++ ctrl->wValue, ctrl->wIndex, ctrl->wLength);
++ }
++
++ return value;
++}
++
++
++static int fsg_setup(struct usb_gadget *gadget,
++ const struct usb_ctrlrequest *ctrl)
++{
++ struct fsg_dev *fsg = get_gadget_data(gadget);
++ int rc;
++
++ ++fsg->ep0_req_tag; // Record arrival of a new request
++ fsg->ep0req->context = NULL;
++ fsg->ep0req->length = 0;
++ dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
++
++ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
++ rc = class_setup_req(fsg, ctrl);
++ else
++ rc = standard_setup_req(fsg, ctrl);
++
++ /* Respond with data/status or defer until later? */
++ if (rc >= 0 && rc != DELAYED_STATUS) {
++ fsg->ep0req->length = rc;
++ fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
++ "ep0-in" : "ep0-out");
++ rc = ep0_queue(fsg);
++ }
++
++ /* Device either stalls (rc < 0) or reports success */
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* All the following routines run in process context */
++
++
++/* Use this for bulk or interrupt transfers, not ep0 */
++static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
++ struct usb_request *req, volatile int *pbusy,
++ volatile enum fsg_buffer_state *state)
++{
++ int rc;
++
++ if (ep == fsg->bulk_in)
++ dump_msg(fsg, "bulk-in", req->buf, req->length);
++ else if (ep == fsg->intr_in)
++ dump_msg(fsg, "intr-in", req->buf, req->length);
++ *pbusy = 1;
++ *state = BUF_STATE_BUSY;
++ rc = usb_ep_queue(ep, req, GFP_KERNEL);
++ if (rc != 0) {
++ *pbusy = 0;
++ *state = BUF_STATE_EMPTY;
++
++ /* We can't do much more than wait for a reset */
++
++ /* Note: currently the net2280 driver fails zero-length
++ * submissions if DMA is enabled. */
++ if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
++ req->length == 0))
++ WARN(fsg, "error in submission: %s --> %d\n",
++ ep->name, rc);
++ }
++}
++
++
++static int sleep_thread(struct fsg_dev *fsg)
++{
++ int rc;
++
++ /* Wait until a signal arrives or we are woken up */
++ rc = wait_event_interruptible(fsg->thread_wqh,
++ fsg->thread_wakeup_needed);
++ fsg->thread_wakeup_needed = 0;
++ return (rc ? -EINTR : 0);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int do_read(struct fsg_dev *fsg)
++{
++ struct lun *curlun = fsg->curlun;
++ u32 lba;
++ struct fsg_buffhd *bh;
++ int rc;
++ u32 amount_left;
++ loff_t file_offset, file_offset_tmp;
++ unsigned int amount;
++ unsigned int partial_page;
++ ssize_t nread;
++
++ /* Get the starting Logical Block Address and check that it's
++ * not too big */
++ if (fsg->cmnd[0] == SC_READ_6)
++ lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
++ else {
++ lba = get_be32(&fsg->cmnd[2]);
++
++ /* We allow DPO (Disable Page Out = don't save data in the
++ * cache) and FUA (Force Unit Access = don't read from the
++ * cache), but we don't implement them. */
++ if ((fsg->cmnd[1] & ~0x18) != 0) {
++ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
++ return -EINVAL;
++ }
++ }
++ if (lba >= curlun->num_sectors) {
++ curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
++ return -EINVAL;
++ }
++ file_offset = ((loff_t) lba) << 9;
++
++ /* Carry out the file reads */
++ amount_left = fsg->data_size_from_cmnd;
++ if (unlikely(amount_left == 0))
++ return -EIO; // No default reply
++
++ for (;;) {
++
++ /* Figure out how much we need to read:
++ * Try to read the remaining amount.
++ * But don't read more than the buffer size.
++ * And don't try to read past the end of the file.
++ * Finally, if we're not at a page boundary, don't read past
++ * the next page.
++ * If this means reading 0 then we were asked to read past
++ * the end of file. */
++ amount = min((unsigned int) amount_left, mod_data.buflen);
++ amount = min((loff_t) amount,
++ curlun->file_length - file_offset);
++ partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
++ if (partial_page > 0)
++ amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
++ partial_page);
++
++ /* Wait for the next buffer to become available */
++ bh = fsg->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ if ((rc = sleep_thread(fsg)) != 0)
++ return rc;
++ }
++
++ /* If we were asked to read past the end of file,
++ * end with an empty buffer. */
++ if (amount == 0) {
++ curlun->sense_data =
++ SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
++ curlun->sense_data_info = file_offset >> 9;
++ bh->inreq->length = 0;
++ bh->state = BUF_STATE_FULL;
++ break;
++ }
++
++ /* Perform the read */
++ file_offset_tmp = file_offset;
++ nread = curlun->filp->f_op->read(curlun->filp,
++ (char *) bh->buf,
++ amount, &file_offset_tmp);
++ VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
++ (unsigned long long) file_offset,
++ (int) nread);
++ if (signal_pending(current))
++ return -EINTR;
++
++ if (nread < 0) {
++ LDBG(curlun, "error in file read: %d\n",
++ (int) nread);
++ nread = 0;
++ } else if (nread < amount) {
++ LDBG(curlun, "partial file read: %d/%u\n",
++ (int) nread, amount);
++ nread -= (nread & 511); // Round down to a block
++ }
++ file_offset += nread;
++ amount_left -= nread;
++ fsg->residue -= nread;
++ bh->inreq->length = nread;
++ bh->state = BUF_STATE_FULL;
++
++ /* If an error occurred, report it and its position */
++ if (nread < amount) {
++ curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
++ curlun->sense_data_info = file_offset >> 9;
++ break;
++ }
++
++ if (amount_left == 0)
++ break; // No more left to read
++
++ /* Send this buffer and go read some more */
++ bh->inreq->zero = 0;
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ fsg->next_buffhd_to_fill = bh->next;
++ }
++
++ return -EIO; // No default reply
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int do_write(struct fsg_dev *fsg)
++{
++ struct lun *curlun = fsg->curlun;
++ u32 lba;
++ struct fsg_buffhd *bh;
++ int get_some_more;
++ u32 amount_left_to_req, amount_left_to_write;
++ loff_t usb_offset, file_offset, file_offset_tmp;
++ unsigned int amount;
++ unsigned int partial_page;
++ ssize_t nwritten;
++ int rc;
++
++ if (curlun->ro) {
++ curlun->sense_data = SS_WRITE_PROTECTED;
++ return -EINVAL;
++ }
++ curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait
++
++ /* Get the starting Logical Block Address and check that it's
++ * not too big */
++ if (fsg->cmnd[0] == SC_WRITE_6)
++ lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
++ else {
++ lba = get_be32(&fsg->cmnd[2]);
++
++ /* We allow DPO (Disable Page Out = don't save data in the
++ * cache) and FUA (Force Unit Access = write directly to the
++ * medium). We don't implement DPO; we implement FUA by
++ * performing synchronous output. */
++ if ((fsg->cmnd[1] & ~0x18) != 0) {
++ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
++ return -EINVAL;
++ }
++ if (fsg->cmnd[1] & 0x08) // FUA
++ curlun->filp->f_flags |= O_SYNC;
++ }
++ if (lba >= curlun->num_sectors) {
++ curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
++ return -EINVAL;
++ }
++
++ /* Carry out the file writes */
++ get_some_more = 1;
++ file_offset = usb_offset = ((loff_t) lba) << 9;
++ amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
++
++ while (amount_left_to_write > 0) {
++
++ /* Queue a request for more data from the host */
++ bh = fsg->next_buffhd_to_fill;
++ if (bh->state == BUF_STATE_EMPTY && get_some_more) {
++
++ /* Figure out how much we want to get:
++ * Try to get the remaining amount.
++ * But don't get more than the buffer size.
++ * And don't try to go past the end of the file.
++ * If we're not at a page boundary,
++ * don't go past the next page.
++ * If this means getting 0, then we were asked
++ * to write past the end of file.
++ * Finally, round down to a block boundary. */
++ amount = min(amount_left_to_req, mod_data.buflen);
++ amount = min((loff_t) amount, curlun->file_length -
++ usb_offset);
++ partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
++ if (partial_page > 0)
++ amount = min(amount,
++ (unsigned int) PAGE_CACHE_SIZE - partial_page);
++
++ if (amount == 0) {
++ get_some_more = 0;
++ curlun->sense_data =
++ SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
++ curlun->sense_data_info = usb_offset >> 9;
++ continue;
++ }
++ amount -= (amount & 511);
++ if (amount == 0) {
++
++				/* Why were we asked to transfer a
++				 * partial block? */
++ get_some_more = 0;
++ continue;
++ }
++
++ /* Get the next buffer */
++ usb_offset += amount;
++ fsg->usb_amount_left -= amount;
++ amount_left_to_req -= amount;
++ if (amount_left_to_req == 0)
++ get_some_more = 0;
++
++ /* amount is always divisible by 512, hence by
++ * the bulk-out maxpacket size */
++ bh->outreq->length = bh->bulk_out_intended_length =
++ amount;
++ start_transfer(fsg, fsg->bulk_out, bh->outreq,
++ &bh->outreq_busy, &bh->state);
++ fsg->next_buffhd_to_fill = bh->next;
++ continue;
++ }
++
++ /* Write the received data to the backing file */
++ bh = fsg->next_buffhd_to_drain;
++ if (bh->state == BUF_STATE_EMPTY && !get_some_more)
++ break; // We stopped early
++ if (bh->state == BUF_STATE_FULL) {
++ fsg->next_buffhd_to_drain = bh->next;
++ bh->state = BUF_STATE_EMPTY;
++
++ /* Did something go wrong with the transfer? */
++ if (bh->outreq->status != 0) {
++ curlun->sense_data = SS_COMMUNICATION_FAILURE;
++ curlun->sense_data_info = file_offset >> 9;
++ break;
++ }
++
++ amount = bh->outreq->actual;
++ if (curlun->file_length - file_offset < amount) {
++ LERROR(curlun,
++ "write %u @ %llu beyond end %llu\n",
++ amount, (unsigned long long) file_offset,
++ (unsigned long long) curlun->file_length);
++ amount = curlun->file_length - file_offset;
++ }
++
++ /* Perform the write */
++ file_offset_tmp = file_offset;
++ nwritten = curlun->filp->f_op->write(curlun->filp,
++ (char *) bh->buf,
++ amount, &file_offset_tmp);
++ VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
++ (unsigned long long) file_offset,
++ (int) nwritten);
++ if (signal_pending(current))
++ return -EINTR; // Interrupted!
++
++ if (nwritten < 0) {
++ LDBG(curlun, "error in file write: %d\n",
++ (int) nwritten);
++ nwritten = 0;
++ } else if (nwritten < amount) {
++ LDBG(curlun, "partial file write: %d/%u\n",
++ (int) nwritten, amount);
++ nwritten -= (nwritten & 511);
++ // Round down to a block
++ }
++ file_offset += nwritten;
++ amount_left_to_write -= nwritten;
++ fsg->residue -= nwritten;
++
++ /* If an error occurred, report it and its position */
++ if (nwritten < amount) {
++ curlun->sense_data = SS_WRITE_ERROR;
++ curlun->sense_data_info = file_offset >> 9;
++ break;
++ }
++
++ /* Did the host decide to stop early? */
++ if (bh->outreq->actual != bh->outreq->length) {
++ fsg->short_packet_received = 1;
++ break;
++ }
++ continue;
++ }
++
++ /* Wait for something to happen */
++ if ((rc = sleep_thread(fsg)) != 0)
++ return rc;
++ }
++
++ return -EIO; // No default reply
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* Sync the file data, don't bother with the metadata.
++ * This code was copied from fs/buffer.c:sys_fdatasync(). */
++static int fsync_sub(struct lun *curlun)
++{
++ struct file *filp = curlun->filp;
++ struct inode *inode;
++ int rc, err;
++
++ if (curlun->ro || !filp)
++ return 0;
++ if (!filp->f_op->fsync)
++ return -EINVAL;
++
++ inode = filp->f_dentry->d_inode;
++ down(&inode->i_sem);
++ rc = filemap_fdatasync(inode->i_mapping);
++ err = filp->f_op->fsync(filp, filp->f_dentry, 1);
++ if (!rc)
++ rc = err;
++ err = filemap_fdatawait(inode->i_mapping);
++ if (!rc)
++ rc = err;
++ up(&inode->i_sem);
++ VLDBG(curlun, "fdatasync -> %d\n", rc);
++ return rc;
++}
++
++static void fsync_all(struct fsg_dev *fsg)
++{
++ int i;
++
++ for (i = 0; i < fsg->nluns; ++i)
++ fsync_sub(&fsg->luns[i]);
++}
++
++static int do_synchronize_cache(struct fsg_dev *fsg)
++{
++ struct lun *curlun = fsg->curlun;
++ int rc;
++
++	/* We ignore the requested LBA and write out all the file's
++	 * dirty data buffers. */
++ rc = fsync_sub(curlun);
++ if (rc)
++ curlun->sense_data = SS_WRITE_ERROR;
++ return 0;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static void invalidate_sub(struct lun *curlun)
++{
++ struct file *filp = curlun->filp;
++ struct inode *inode = filp->f_dentry->d_inode;
++
++ invalidate_inode_pages(inode);
++}
++
++static int do_verify(struct fsg_dev *fsg)
++{
++ struct lun *curlun = fsg->curlun;
++ u32 lba;
++ u32 verification_length;
++ struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
++ loff_t file_offset, file_offset_tmp;
++ u32 amount_left;
++ unsigned int amount;
++ ssize_t nread;
++
++ /* Get the starting Logical Block Address and check that it's
++ * not too big */
++ lba = get_be32(&fsg->cmnd[2]);
++ if (lba >= curlun->num_sectors) {
++ curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
++ return -EINVAL;
++ }
++
++ /* We allow DPO (Disable Page Out = don't save data in the
++ * cache) but we don't implement it. */
++ if ((fsg->cmnd[1] & ~0x10) != 0) {
++ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
++ return -EINVAL;
++ }
++
++ verification_length = get_be16(&fsg->cmnd[7]);
++ if (unlikely(verification_length == 0))
++ return -EIO; // No default reply
++
++ /* Prepare to carry out the file verify */
++ amount_left = verification_length << 9;
++ file_offset = ((loff_t) lba) << 9;
++
++ /* Write out all the dirty buffers before invalidating them */
++ fsync_sub(curlun);
++ if (signal_pending(current))
++ return -EINTR;
++
++ invalidate_sub(curlun);
++ if (signal_pending(current))
++ return -EINTR;
++
++ /* Just try to read the requested blocks */
++ while (amount_left > 0) {
++
++ /* Figure out how much we need to read:
++ * Try to read the remaining amount, but not more than
++ * the buffer size.
++ * And don't try to read past the end of the file.
++ * If this means reading 0 then we were asked to read
++ * past the end of file. */
++ amount = min((unsigned int) amount_left, mod_data.buflen);
++ amount = min((loff_t) amount,
++ curlun->file_length - file_offset);
++ if (amount == 0) {
++ curlun->sense_data =
++ SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
++ curlun->sense_data_info = file_offset >> 9;
++ break;
++ }
++
++ /* Perform the read */
++ file_offset_tmp = file_offset;
++ nread = curlun->filp->f_op->read(curlun->filp,
++ (char *) bh->buf,
++ amount, &file_offset_tmp);
++ VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
++ (unsigned long long) file_offset,
++ (int) nread);
++ if (signal_pending(current))
++ return -EINTR;
++
++ if (nread < 0) {
++ LDBG(curlun, "error in file verify: %d\n",
++ (int) nread);
++ nread = 0;
++ } else if (nread < amount) {
++ LDBG(curlun, "partial file verify: %d/%u\n",
++ (int) nread, amount);
++ nread -= (nread & 511); // Round down to a sector
++ }
++ if (nread == 0) {
++ curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
++ curlun->sense_data_info = file_offset >> 9;
++ break;
++ }
++ file_offset += nread;
++ amount_left -= nread;
++ }
++ return 0;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
++{
++ u8 *buf = (u8 *) bh->buf;
++
++ static char vendor_id[] = "Linux ";
++ static char product_id[] = "File-Stor Gadget";
++
++ if (!fsg->curlun) { // Unsupported LUNs are okay
++ fsg->bad_lun_okay = 1;
++ memset(buf, 0, 36);
++ buf[0] = 0x7f; // Unsupported, no device-type
++ return 36;
++ }
++
++ memset(buf, 0, 8); // Non-removable, direct-access device
++ if (mod_data.removable)
++ buf[1] = 0x80;
++ buf[2] = 2; // ANSI SCSI level 2
++ buf[3] = 2; // SCSI-2 INQUIRY data format
++ buf[4] = 31; // Additional length
++ // No special options
++ sprintf(buf + 8, "%-8s%-16s%04x", vendor_id, product_id,
++ mod_data.release);
++ return 36;
++}
++
++
++static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
++{
++ struct lun *curlun = fsg->curlun;
++ u8 *buf = (u8 *) bh->buf;
++ u32 sd, sdinfo;
++
++ /*
++ * From the SCSI-2 spec., section 7.9 (Unit attention condition):
++ *
++ * If a REQUEST SENSE command is received from an initiator
++ * with a pending unit attention condition (before the target
++ * generates the contingent allegiance condition), then the
++ * target shall either:
++ * a) report any pending sense data and preserve the unit
++ * attention condition on the logical unit, or,
++ * b) report the unit attention condition, may discard any
++ * pending sense data, and clear the unit attention
++ * condition on the logical unit for that initiator.
++ *
++ * FSG normally uses option a); enable this code to use option b).
++ */
++#if 0
++ if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
++ curlun->sense_data = curlun->unit_attention_data;
++ curlun->unit_attention_data = SS_NO_SENSE;
++ }
++#endif
++
++ if (!curlun) { // Unsupported LUNs are okay
++ fsg->bad_lun_okay = 1;
++ sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
++ sdinfo = 0;
++ } else {
++ sd = curlun->sense_data;
++ sdinfo = curlun->sense_data_info;
++ curlun->sense_data = SS_NO_SENSE;
++ curlun->sense_data_info = 0;
++ }
++
++ memset(buf, 0, 18);
++ buf[0] = 0x80 | 0x70; // Valid, current error
++ buf[2] = SK(sd);
++ put_be32(&buf[3], sdinfo); // Sense information
++ buf[7] = 18 - 8; // Additional sense length
++ buf[12] = ASC(sd);
++ buf[13] = ASCQ(sd);
++ return 18;
++}
++
++
++static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
++{
++ struct lun *curlun = fsg->curlun;
++ u32 lba = get_be32(&fsg->cmnd[2]);
++ int pmi = fsg->cmnd[8];
++ u8 *buf = (u8 *) bh->buf;
++
++ /* Check the PMI and LBA fields */
++ if (pmi > 1 || (pmi == 0 && lba != 0)) {
++ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
++ return -EINVAL;
++ }
++
++ put_be32(&buf[0], curlun->num_sectors - 1); // Max logical block
++ put_be32(&buf[4], 512); // Block length
++ return 8;
++}
++
++
++static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
++{
++ struct lun *curlun = fsg->curlun;
++ int mscmnd = fsg->cmnd[0];
++ u8 *buf = (u8 *) bh->buf;
++ u8 *buf0 = buf;
++ int pc, page_code;
++ int changeable_values, all_pages;
++ int valid_page = 0;
++ int len, limit;
++
++ if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD
++ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
++ return -EINVAL;
++ }
++ pc = fsg->cmnd[2] >> 6;
++ page_code = fsg->cmnd[2] & 0x3f;
++ if (pc == 3) {
++ curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
++ return -EINVAL;
++ }
++ changeable_values = (pc == 1);
++ all_pages = (page_code == 0x3f);
++
++ /* Write the mode parameter header. Fixed values are: default
++ * medium type, no cache control (DPOFUA), and no block descriptors.
++ * The only variable value is the WriteProtect bit. We will fill in
++ * the mode data length later. */
++ memset(buf, 0, 8);
++ if (mscmnd == SC_MODE_SENSE_6) {
++ buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
++ buf += 4;
++ limit = 255;
++ } else { // SC_MODE_SENSE_10
++ buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
++ buf += 8;
++ limit = 65535; // Should really be mod_data.buflen
++ }
++
++ /* No block descriptors */
++
++ /* The mode pages, in numerical order. The only page we support
++ * is the Caching page. */
++ if (page_code == 0x08 || all_pages) {
++ valid_page = 1;
++ buf[0] = 0x08; // Page code
++ buf[1] = 10; // Page length
++ memset(buf+2, 0, 10); // None of the fields are changeable
++
++ if (!changeable_values) {
++ buf[2] = 0x04; // Write cache enable,
++ // Read cache not disabled
++ // No cache retention priorities
++ put_be16(&buf[4], 0xffff); // Don't disable prefetch
++ // Minimum prefetch = 0
++ put_be16(&buf[8], 0xffff); // Maximum prefetch
++ put_be16(&buf[10], 0xffff); // Maximum prefetch ceiling
++ }
++ buf += 12;
++ }
++
++ /* Check that a valid page was requested and the mode data length
++ * isn't too long. */
++ len = buf - buf0;
++ if (!valid_page || len > limit) {
++ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
++ return -EINVAL;
++ }
++
++ /* Store the mode data length */
++ if (mscmnd == SC_MODE_SENSE_6)
++ buf0[0] = len - 1;
++ else
++ put_be16(buf0, len - 2);
++ return len;
++}
++
++
++static int do_start_stop(struct fsg_dev *fsg)
++{
++ struct lun *curlun = fsg->curlun;
++ int loej, start;
++
++ if (!mod_data.removable) {
++ curlun->sense_data = SS_INVALID_COMMAND;
++ return -EINVAL;
++ }
++
++ // int immed = fsg->cmnd[1] & 0x01;
++ loej = fsg->cmnd[4] & 0x02;
++ start = fsg->cmnd[4] & 0x01;
++
++#ifdef CONFIG_USB_FILE_STORAGE_TEST
++ if ((fsg->cmnd[1] & ~0x01) != 0 || // Mask away Immed
++ (fsg->cmnd[4] & ~0x03) != 0) { // Mask LoEj, Start
++ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
++ return -EINVAL;
++ }
++
++ if (!start) {
++
++ /* Are we allowed to unload the media? */
++ if (curlun->prevent_medium_removal) {
++ LDBG(curlun, "unload attempt prevented\n");
++ curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
++ return -EINVAL;
++ }
++ if (loej) { // Simulate an unload/eject
++ up_read(&fsg->filesem);
++ down_write(&fsg->filesem);
++ close_backing_file(curlun);
++ up_write(&fsg->filesem);
++ down_read(&fsg->filesem);
++ }
++ } else {
++
++ /* Our emulation doesn't support mounting; the medium is
++ * available for use as soon as it is loaded. */
++ if (!backing_file_is_open(curlun)) {
++ curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
++ return -EINVAL;
++ }
++ }
++#endif
++ return 0;
++}
++
++
++static int do_prevent_allow(struct fsg_dev *fsg)
++{
++ struct lun *curlun = fsg->curlun;
++ int prevent;
++
++ if (!mod_data.removable) {
++ curlun->sense_data = SS_INVALID_COMMAND;
++ return -EINVAL;
++ }
++
++ prevent = fsg->cmnd[4] & 0x01;
++ if ((fsg->cmnd[4] & ~0x01) != 0) { // Mask away Prevent
++ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
++ return -EINVAL;
++ }
++
++ if (curlun->prevent_medium_removal && !prevent)
++ fsync_sub(curlun);
++ curlun->prevent_medium_removal = prevent;
++ return 0;
++}
++
++
++static int do_read_format_capacities(struct fsg_dev *fsg,
++ struct fsg_buffhd *bh)
++{
++ struct lun *curlun = fsg->curlun;
++ u8 *buf = (u8 *) bh->buf;
++
++ buf[0] = buf[1] = buf[2] = 0;
++ buf[3] = 8; // Only the Current/Maximum Capacity Descriptor
++ buf += 4;
++
++ put_be32(&buf[0], curlun->num_sectors); // Number of blocks
++ put_be32(&buf[4], 512); // Block length
++ buf[4] = 0x02; // Current capacity
++ return 12;
++}
++
++
++static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
++{
++ struct lun *curlun = fsg->curlun;
++
++ /* We don't support MODE SELECT */
++ curlun->sense_data = SS_INVALID_COMMAND;
++ return -EINVAL;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
++{
++ int rc;
++
++ rc = fsg_set_halt(fsg, fsg->bulk_in);
++ if (rc == -EAGAIN)
++ VDBG(fsg, "delayed bulk-in endpoint halt\n");
++ while (rc != 0) {
++ if (rc != -EAGAIN) {
++ WARN(fsg, "usb_ep_set_halt -> %d\n", rc);
++ rc = 0;
++ break;
++ }
++
++ /* Wait for a short time and then try again */
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (schedule_timeout(HZ / 10) != 0)
++ return -EINTR;
++ rc = usb_ep_set_halt(fsg->bulk_in);
++ }
++ return rc;
++}
++
++static int pad_with_zeros(struct fsg_dev *fsg)
++{
++ struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
++ u32 nkeep = bh->inreq->length;
++ u32 nsend;
++ int rc;
++
++ bh->state = BUF_STATE_EMPTY; // For the first iteration
++ fsg->usb_amount_left = nkeep + fsg->residue;
++ while (fsg->usb_amount_left > 0) {
++
++ /* Wait for the next buffer to be free */
++ while (bh->state != BUF_STATE_EMPTY) {
++ if ((rc = sleep_thread(fsg)) != 0)
++ return rc;
++ }
++
++ nsend = min(fsg->usb_amount_left, (u32) mod_data.buflen);
++ memset(bh->buf + nkeep, 0, nsend - nkeep);
++ bh->inreq->length = nsend;
++ bh->inreq->zero = 0;
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ bh = fsg->next_buffhd_to_fill = bh->next;
++ fsg->usb_amount_left -= nsend;
++ nkeep = 0;
++ }
++ return 0;
++}
++
++static int throw_away_data(struct fsg_dev *fsg)
++{
++ struct fsg_buffhd *bh;
++ u32 amount;
++ int rc;
++
++ while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
++ fsg->usb_amount_left > 0) {
++
++ /* Throw away the data in a filled buffer */
++ if (bh->state == BUF_STATE_FULL) {
++ bh->state = BUF_STATE_EMPTY;
++ fsg->next_buffhd_to_drain = bh->next;
++
++ /* A short packet or an error ends everything */
++ if (bh->outreq->actual != bh->outreq->length ||
++ bh->outreq->status != 0) {
++ raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
++ return -EINTR;
++ }
++ continue;
++ }
++
++ /* Try to submit another request if we need one */
++ bh = fsg->next_buffhd_to_fill;
++ if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
++ amount = min(fsg->usb_amount_left,
++ (u32) mod_data.buflen);
++
++ /* amount is always divisible by 512, hence by
++ * the bulk-out maxpacket size */
++ bh->outreq->length = bh->bulk_out_intended_length =
++ amount;
++ start_transfer(fsg, fsg->bulk_out, bh->outreq,
++ &bh->outreq_busy, &bh->state);
++ fsg->next_buffhd_to_fill = bh->next;
++ fsg->usb_amount_left -= amount;
++ continue;
++ }
++
++ /* Otherwise wait for something to happen */
++ if ((rc = sleep_thread(fsg)) != 0)
++ return rc;
++ }
++ return 0;
++}
++
++
++static int finish_reply(struct fsg_dev *fsg)
++{
++ struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
++ int rc = 0;
++
++ switch (fsg->data_dir) {
++ case DATA_DIR_NONE:
++ break; // Nothing to send
++
++ /* If we don't know whether the host wants to read or write,
++ * this must be CB or CBI with an unknown command. We mustn't
++ * try to send or receive any data. So stall both bulk pipes
++ * if we can and wait for a reset. */
++ case DATA_DIR_UNKNOWN:
++ if (mod_data.can_stall) {
++ fsg_set_halt(fsg, fsg->bulk_out);
++ rc = halt_bulk_in_endpoint(fsg);
++ }
++ break;
++
++ /* All but the last buffer of data must have already been sent */
++ case DATA_DIR_TO_HOST:
++ if (fsg->data_size == 0)
++ ; // Nothing to send
++
++ /* If there's no residue, simply send the last buffer */
++ else if (fsg->residue == 0) {
++ bh->inreq->zero = 0;
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ fsg->next_buffhd_to_fill = bh->next;
++ }
++
++ /* There is a residue. For CB and CBI, simply mark the end
++ * of the data with a short packet. However, if we are
++ * allowed to stall, there was no data at all (residue ==
++ * data_size), and the command failed (invalid LUN or
++ * sense data is set), then halt the bulk-in endpoint
++ * instead. */
++ else if (!transport_is_bbb()) {
++ if (mod_data.can_stall &&
++ fsg->residue == fsg->data_size &&
++ (!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) {
++ bh->state = BUF_STATE_EMPTY;
++ rc = halt_bulk_in_endpoint(fsg);
++ } else {
++ bh->inreq->zero = 1;
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ fsg->next_buffhd_to_fill = bh->next;
++ }
++ }
++
++ /* For Bulk-only, if we're allowed to stall then send the
++ * short packet and halt the bulk-in endpoint. If we can't
++ * stall, pad out the remaining data with 0's. */
++ else {
++ if (mod_data.can_stall) {
++ bh->inreq->zero = 1;
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ fsg->next_buffhd_to_fill = bh->next;
++ rc = halt_bulk_in_endpoint(fsg);
++ } else
++ rc = pad_with_zeros(fsg);
++ }
++ break;
++
++ /* We have processed all we want from the data the host has sent.
++ * There may still be outstanding bulk-out requests. */
++ case DATA_DIR_FROM_HOST:
++ if (fsg->residue == 0)
++ ; // Nothing to receive
++
++ /* Did the host stop sending unexpectedly early? */
++ else if (fsg->short_packet_received) {
++ raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
++ rc = -EINTR;
++ }
++
++ /* We haven't processed all the incoming data. If we are
++ * allowed to stall, halt the bulk-out endpoint and cancel
++ * any outstanding requests. */
++ else if (mod_data.can_stall) {
++ fsg_set_halt(fsg, fsg->bulk_out);
++ raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
++ rc = -EINTR;
++ }
++
++ /* We can't stall. Read in the excess data and throw it
++ * all away. */
++ else
++ rc = throw_away_data(fsg);
++ break;
++ }
++ return rc;
++}
++
++
++static int send_status(struct fsg_dev *fsg)
++{
++ struct lun *curlun = fsg->curlun;
++ struct fsg_buffhd *bh;
++ int rc;
++ u8 status = USB_STATUS_PASS;
++ u32 sd, sdinfo = 0;
++
++ /* Wait for the next buffer to become available */
++ bh = fsg->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ if ((rc = sleep_thread(fsg)) != 0)
++ return rc;
++ }
++
++ if (curlun) {
++ sd = curlun->sense_data;
++ sdinfo = curlun->sense_data_info;
++ } else if (fsg->bad_lun_okay)
++ sd = SS_NO_SENSE;
++ else
++ sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
++
++ if (fsg->phase_error) {
++ DBG(fsg, "sending phase-error status\n");
++ status = USB_STATUS_PHASE_ERROR;
++ sd = SS_INVALID_COMMAND;
++ } else if (sd != SS_NO_SENSE) {
++ DBG(fsg, "sending command-failure status\n");
++ status = USB_STATUS_FAIL;
++ VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
++ " info x%x\n",
++ SK(sd), ASC(sd), ASCQ(sd), sdinfo);
++ }
++
++ if (transport_is_bbb()) {
++ struct bulk_cs_wrap *csw = (struct bulk_cs_wrap *) bh->buf;
++
++ /* Store and send the Bulk-only CSW */
++ csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
++ csw->Tag = fsg->tag;
++ csw->Residue = cpu_to_le32(fsg->residue);
++ csw->Status = status;
++
++ bh->inreq->length = USB_BULK_CS_WRAP_LEN;
++ bh->inreq->zero = 0;
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++
++ } else if (mod_data.transport_type == USB_PR_CB) {
++
++ /* Control-Bulk transport has no status stage! */
++ return 0;
++
++ } else { // USB_PR_CBI
++ struct interrupt_data *buf = (struct interrupt_data *)
++ bh->buf;
++
++ /* Store and send the Interrupt data. UFI sends the ASC
++ * and ASCQ bytes. Everything else sends a Type (which
++ * is always 0) and the status Value. */
++ if (mod_data.protocol_type == USB_SC_UFI) {
++ buf->bType = ASC(sd);
++ buf->bValue = ASCQ(sd);
++ } else {
++ buf->bType = 0;
++ buf->bValue = status;
++ }
++ fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;
++
++ fsg->intr_buffhd = bh; // Point to the right buffhd
++ fsg->intreq->buf = bh->inreq->buf;
++ fsg->intreq->dma = bh->inreq->dma;
++ fsg->intreq->context = bh;
++ start_transfer(fsg, fsg->intr_in, fsg->intreq,
++ &fsg->intreq_busy, &bh->state);
++ }
++
++ fsg->next_buffhd_to_fill = bh->next;
++ return 0;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* Check whether the command is properly formed and whether its data size
++ * and direction agree with the values we already have. */
++static int check_command(struct fsg_dev *fsg, int cmnd_size,
++ enum data_direction data_dir, unsigned int mask,
++ int needs_medium, const char *name)
++{
++ int i;
++ int lun = fsg->cmnd[1] >> 5;
++ static const char dirletter[4] = {'u', 'o', 'i', 'n'};
++ char hdlen[20];
++ struct lun *curlun;
++
++ /* Adjust the expected cmnd_size for protocol encapsulation padding.
++ * Transparent SCSI doesn't pad. */
++ if (protocol_is_scsi())
++ ;
++
++ /* There's some disagreement as to whether RBC pads commands or not.
++ * We'll play it safe and accept either form. */
++ else if (mod_data.protocol_type == USB_SC_RBC) {
++ if (fsg->cmnd_size == 12)
++ cmnd_size = 12;
++
++ /* All the other protocols pad to 12 bytes */
++ } else
++ cmnd_size = 12;
++
++ hdlen[0] = 0;
++ if (fsg->data_dir != DATA_DIR_UNKNOWN)
++ sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
++ fsg->data_size);
++ VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
++ name, cmnd_size, dirletter[(int) data_dir],
++ fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
++
++ /* We can't reply at all until we know the correct data direction
++ * and size. */
++ if (fsg->data_size_from_cmnd == 0)
++ data_dir = DATA_DIR_NONE;
++ if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI
++ fsg->data_dir = data_dir;
++ fsg->data_size = fsg->data_size_from_cmnd;
++
++ } else { // Bulk-only
++ if (fsg->data_size < fsg->data_size_from_cmnd) {
++
++ /* Host data size < Device data size is a phase error.
++ * Carry out the command, but only transfer as much
++ * as we are allowed. */
++ fsg->data_size_from_cmnd = fsg->data_size;
++ fsg->phase_error = 1;
++ }
++ }
++ fsg->residue = fsg->usb_amount_left = fsg->data_size;
++
++ /* Conflicting data directions is a phase error */
++ if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0)
++ goto phase_error;
++
++ /* Verify the length of the command itself */
++ if (cmnd_size != fsg->cmnd_size) {
++
++ /* Special case workaround: MS-Windows issues REQUEST SENSE
++ * with cbw->Length == 12 (it should be 6). */
++ if (fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
++ cmnd_size = fsg->cmnd_size;
++ else
++ goto phase_error;
++ }
++
++ /* Check that the LUN values are oonsistent */
++ if (transport_is_bbb()) {
++ if (fsg->lun != lun)
++ DBG(fsg, "using LUN %d from CBW, "
++ "not LUN %d from CDB\n",
++ fsg->lun, lun);
++ } else
++ fsg->lun = lun; // Use LUN from the command
++
++ /* Check the LUN */
++ if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
++ fsg->curlun = curlun = &fsg->luns[fsg->lun];
++ if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
++ curlun->sense_data = SS_NO_SENSE;
++ curlun->sense_data_info = 0;
++ }
++ } else {
++ fsg->curlun = curlun = NULL;
++ fsg->bad_lun_okay = 0;
++
++ /* INQUIRY and REQUEST SENSE commands are explicitly allowed
++ * to use unsupported LUNs; all others may not. */
++ if (fsg->cmnd[0] != SC_INQUIRY &&
++ fsg->cmnd[0] != SC_REQUEST_SENSE) {
++ DBG(fsg, "unsupported LUN %d\n", fsg->lun);
++ return -EINVAL;
++ }
++ }
++
++ /* If a unit attention condition exists, only INQUIRY and
++ * REQUEST SENSE commands are allowed; anything else must fail. */
++ if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
++ fsg->cmnd[0] != SC_INQUIRY &&
++ fsg->cmnd[0] != SC_REQUEST_SENSE) {
++ curlun->sense_data = curlun->unit_attention_data;
++ curlun->unit_attention_data = SS_NO_SENSE;
++ return -EINVAL;
++ }
++
++ /* Check that only command bytes listed in the mask are non-zero */
++ fsg->cmnd[1] &= 0x1f; // Mask away the LUN
++ for (i = 1; i < cmnd_size; ++i) {
++ if (fsg->cmnd[i] && !(mask & (1 << i))) {
++ if (curlun)
++ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
++ return -EINVAL;
++ }
++ }
++
++ /* If the medium isn't mounted and the command needs to access
++ * it, return an error. */
++ if (curlun && !backing_file_is_open(curlun) && needs_medium) {
++ curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
++ return -EINVAL;
++ }
++
++ return 0;
++
++phase_error:
++ fsg->phase_error = 1;
++ return -EINVAL;
++}
++
++
++static int do_scsi_command(struct fsg_dev *fsg)
++{
++ struct fsg_buffhd *bh;
++ int rc;
++ int reply = -EINVAL;
++ int i;
++ static char unknown[16];
++
++ dump_cdb(fsg);
++
++ /* Wait for the next buffer to become available for data or status */
++ bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ if ((rc = sleep_thread(fsg)) != 0)
++ return rc;
++ }
++ fsg->phase_error = 0;
++ fsg->short_packet_received = 0;
++
++ down_read(&fsg->filesem); // We're using the backing file
++ switch (fsg->cmnd[0]) {
++
++ case SC_INQUIRY:
++ fsg->data_size_from_cmnd = fsg->cmnd[4];
++ if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
++ (1<<4), 0,
++ "INQUIRY")) == 0)
++ reply = do_inquiry(fsg, bh);
++ break;
++
++ case SC_MODE_SELECT_6:
++ fsg->data_size_from_cmnd = fsg->cmnd[4];
++ if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
++ (1<<1) | (1<<4), 0,
++ "MODE SELECT(6)")) == 0)
++ reply = do_mode_select(fsg, bh);
++ break;
++
++ case SC_MODE_SELECT_10:
++ fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
++ if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
++ (1<<1) | (3<<7), 0,
++ "MODE SELECT(10)")) == 0)
++ reply = do_mode_select(fsg, bh);
++ break;
++
++ case SC_MODE_SENSE_6:
++ fsg->data_size_from_cmnd = fsg->cmnd[4];
++ if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
++ (1<<1) | (1<<2) | (1<<4), 0,
++ "MODE SENSE(6)")) == 0)
++ reply = do_mode_sense(fsg, bh);
++ break;
++
++ case SC_MODE_SENSE_10:
++ fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
++ if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
++ (1<<1) | (1<<2) | (3<<7), 0,
++ "MODE SENSE(10)")) == 0)
++ reply = do_mode_sense(fsg, bh);
++ break;
++
++ case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
++ fsg->data_size_from_cmnd = 0;
++ if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
++ (1<<4), 0,
++ "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
++ reply = do_prevent_allow(fsg);
++ break;
++
++ case SC_READ_6:
++ i = fsg->cmnd[4];
++ fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
++ if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
++ (7<<1) | (1<<4), 1,
++ "READ(6)")) == 0)
++ reply = do_read(fsg);
++ break;
++
++ case SC_READ_10:
++ fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
++ if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
++ (1<<1) | (0xf<<2) | (3<<7), 1,
++ "READ(10)")) == 0)
++ reply = do_read(fsg);
++ break;
++
++ case SC_READ_12:
++ fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
++ if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
++ (1<<1) | (0xf<<2) | (0xf<<6), 1,
++ "READ(12)")) == 0)
++ reply = do_read(fsg);
++ break;
++
++ case SC_READ_CAPACITY:
++ fsg->data_size_from_cmnd = 8;
++ if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
++ (0xf<<2) | (1<<8), 1,
++ "READ CAPACITY")) == 0)
++ reply = do_read_capacity(fsg, bh);
++ break;
++
++ case SC_READ_FORMAT_CAPACITIES:
++ fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
++ if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
++ (3<<7), 1,
++ "READ FORMAT CAPACITIES")) == 0)
++ reply = do_read_format_capacities(fsg, bh);
++ break;
++
++ case SC_REQUEST_SENSE:
++ fsg->data_size_from_cmnd = fsg->cmnd[4];
++ if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
++ (1<<4), 0,
++ "REQUEST SENSE")) == 0)
++ reply = do_request_sense(fsg, bh);
++ break;
++
++ case SC_START_STOP_UNIT:
++ fsg->data_size_from_cmnd = 0;
++ if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
++ (1<<1) | (1<<4), 0,
++ "START-STOP UNIT")) == 0)
++ reply = do_start_stop(fsg);
++ break;
++
++ case SC_SYNCHRONIZE_CACHE:
++ fsg->data_size_from_cmnd = 0;
++ if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
++ (0xf<<2) | (3<<7), 1,
++ "SYNCHRONIZE CACHE")) == 0)
++ reply = do_synchronize_cache(fsg);
++ break;
++
++ case SC_TEST_UNIT_READY:
++ fsg->data_size_from_cmnd = 0;
++ reply = check_command(fsg, 6, DATA_DIR_NONE,
++ 0, 1,
++ "TEST UNIT READY");
++ break;
++
++ /* Although optional, this command is used by MS-Windows. We
++ * support a minimal version: BytChk must be 0. */
++ case SC_VERIFY:
++ fsg->data_size_from_cmnd = 0;
++ if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
++ (1<<1) | (0xf<<2) | (3<<7), 1,
++ "VERIFY")) == 0)
++ reply = do_verify(fsg);
++ break;
++
++ case SC_WRITE_6:
++ i = fsg->cmnd[4];
++ fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
++ if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
++ (7<<1) | (1<<4), 1,
++ "WRITE(6)")) == 0)
++ reply = do_write(fsg);
++ break;
++
++ case SC_WRITE_10:
++ fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
++ if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
++ (1<<1) | (0xf<<2) | (3<<7), 1,
++ "WRITE(10)")) == 0)
++ reply = do_write(fsg);
++ break;
++
++ case SC_WRITE_12:
++ fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
++ if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
++ (1<<1) | (0xf<<2) | (0xf<<6), 1,
++ "WRITE(12)")) == 0)
++ reply = do_write(fsg);
++ break;
++
++ /* Some mandatory commands that we recognize but don't implement.
++ * They don't mean much in this setting. It's left as an exercise
++ * for anyone interested to implement RESERVE and RELEASE in terms
++ * of Posix locks. */
++ case SC_FORMAT_UNIT:
++ case SC_RELEASE:
++ case SC_RESERVE:
++ case SC_SEND_DIAGNOSTIC:
++ // Fall through
++
++ default:
++ fsg->data_size_from_cmnd = 0;
++ sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
++ if ((reply = check_command(fsg, fsg->cmnd_size,
++ DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
++ fsg->curlun->sense_data = SS_INVALID_COMMAND;
++ reply = -EINVAL;
++ }
++ break;
++ }
++ up_read(&fsg->filesem);
++
++ if (reply == -EINTR || signal_pending(current))
++ return -EINTR;
++
++ /* Set up the single reply buffer for finish_reply() */
++ if (reply == -EINVAL)
++ reply = 0; // Error reply length
++ if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
++ reply = min((u32) reply, fsg->data_size_from_cmnd);
++ bh->inreq->length = reply;
++ bh->state = BUF_STATE_FULL;
++ fsg->residue -= reply;
++ } // Otherwise it's already set
++
++ return 0;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
++{
++ struct usb_request *req = bh->outreq;
++ struct bulk_cb_wrap *cbw = (struct bulk_cb_wrap *) req->buf;
++
++ /* Was this a real packet? */
++ if (req->status)
++ return -EINVAL;
++
++ /* Is the CBW valid? */
++ if (req->actual != USB_BULK_CB_WRAP_LEN ||
++ cbw->Signature != __constant_cpu_to_le32(
++ USB_BULK_CB_SIG)) {
++ DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
++ req->actual,
++ le32_to_cpu(cbw->Signature));
++
++ /* The Bulk-only spec says we MUST stall the bulk pipes!
++ * If we want to avoid stalls, set a flag so that we will
++ * clear the endpoint halts at the next reset. */
++ if (!mod_data.can_stall)
++ set_bit(CLEAR_BULK_HALTS, &fsg->atomic_bitflags);
++ fsg_set_halt(fsg, fsg->bulk_out);
++ halt_bulk_in_endpoint(fsg);
++ return -EINVAL;
++ }
++
++ /* Is the CBW meaningful? */
++ if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
++ cbw->Length < 6 || cbw->Length > MAX_COMMAND_SIZE) {
++ DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
++ "cmdlen %u\n",
++ cbw->Lun, cbw->Flags, cbw->Length);
++
++ /* We can do anything we want here, so let's stall the
++ * bulk pipes if we are allowed to. */
++ if (mod_data.can_stall) {
++ fsg_set_halt(fsg, fsg->bulk_out);
++ halt_bulk_in_endpoint(fsg);
++ }
++ return -EINVAL;
++ }
++
++ /* Save the command for later */
++ fsg->cmnd_size = cbw->Length;
++ memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
++ if (cbw->Flags & USB_BULK_IN_FLAG)
++ fsg->data_dir = DATA_DIR_TO_HOST;
++ else
++ fsg->data_dir = DATA_DIR_FROM_HOST;
++ fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
++ if (fsg->data_size == 0)
++ fsg->data_dir = DATA_DIR_NONE;
++ fsg->lun = cbw->Lun;
++ fsg->tag = cbw->Tag;
++ return 0;
++}
++
++
++static int get_next_command(struct fsg_dev *fsg)
++{
++ struct fsg_buffhd *bh;
++ int rc = 0;
++
++ if (transport_is_bbb()) {
++
++ /* Wait for the next buffer to become available */
++ bh = fsg->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ if ((rc = sleep_thread(fsg)) != 0)
++ return rc;
++ }
++
++ /* Queue a request to read a Bulk-only CBW */
++ set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
++ start_transfer(fsg, fsg->bulk_out, bh->outreq,
++ &bh->outreq_busy, &bh->state);
++
++ /* We will drain the buffer in software, which means we
++ * can reuse it for the next filling. No need to advance
++ * next_buffhd_to_fill. */
++
++ /* Wait for the CBW to arrive */
++ while (bh->state != BUF_STATE_FULL) {
++ if ((rc = sleep_thread(fsg)) != 0)
++ return rc;
++ }
++ rc = received_cbw(fsg, bh);
++ bh->state = BUF_STATE_EMPTY;
++
++ } else { // USB_PR_CB or USB_PR_CBI
++
++ /* Wait for the next command to arrive */
++ while (fsg->cbbuf_cmnd_size == 0) {
++ if ((rc = sleep_thread(fsg)) != 0)
++ return rc;
++ }
++
++ /* Is the previous status interrupt request still busy?
++ * The host is allowed to skip reading the status,
++ * so we must cancel it. */
++ if (fsg->intreq_busy)
++ usb_ep_dequeue(fsg->intr_in, fsg->intreq);
++
++ /* Copy the command and mark the buffer empty */
++ fsg->data_dir = DATA_DIR_UNKNOWN;
++ spin_lock_irq(&fsg->lock);
++ fsg->cmnd_size = fsg->cbbuf_cmnd_size;
++ memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
++ fsg->cbbuf_cmnd_size = 0;
++ spin_unlock_irq(&fsg->lock);
++ }
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
++ const struct usb_endpoint_descriptor *d)
++{
++ int rc;
++
++ ep->driver_data = fsg;
++ rc = usb_ep_enable(ep, d);
++ if (rc)
++ ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
++ return rc;
++}
++
++static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
++ struct usb_request **preq)
++{
++ *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
++ if (*preq)
++ return 0;
++ ERROR(fsg, "can't allocate request for %s\n", ep->name);
++ return -ENOMEM;
++}
++
++/*
++ * Reset interface setting and re-init endpoint state (toggle etc).
++ * Call with altsetting < 0 to disable the interface. The only other
++ * available altsetting is 0, which enables the interface.
++ */
++static int do_set_interface(struct fsg_dev *fsg, int altsetting)
++{
++ int rc = 0;
++ int i;
++ const struct usb_endpoint_descriptor *d;
++
++ if (fsg->running)
++ DBG(fsg, "reset interface\n");
++
++reset:
++ /* Deallocate the requests */
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ struct fsg_buffhd *bh = &fsg->buffhds[i];
++
++ if (bh->inreq) {
++ usb_ep_free_request(fsg->bulk_in, bh->inreq);
++ bh->inreq = NULL;
++ }
++ if (bh->outreq) {
++ usb_ep_free_request(fsg->bulk_out, bh->outreq);
++ bh->outreq = NULL;
++ }
++ }
++ if (fsg->intreq) {
++ usb_ep_free_request(fsg->intr_in, fsg->intreq);
++ fsg->intreq = NULL;
++ }
++
++ /* Disable the endpoints */
++ if (fsg->bulk_in_enabled) {
++ usb_ep_disable(fsg->bulk_in);
++ fsg->bulk_in_enabled = 0;
++ }
++ if (fsg->bulk_out_enabled) {
++ usb_ep_disable(fsg->bulk_out);
++ fsg->bulk_out_enabled = 0;
++ }
++ if (fsg->intr_in_enabled) {
++ usb_ep_disable(fsg->intr_in);
++ fsg->intr_in_enabled = 0;
++ }
++
++ fsg->running = 0;
++ if (altsetting < 0 || rc != 0)
++ return rc;
++
++ DBG(fsg, "set interface %d\n", altsetting);
++
++ /* Enable the endpoints */
++ d = ep_desc(fsg->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
++ if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
++ goto reset;
++ fsg->bulk_in_enabled = 1;
++
++ d = ep_desc(fsg->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
++ if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
++ goto reset;
++ fsg->bulk_out_enabled = 1;
++ fsg->bulk_out_maxpacket = d->wMaxPacketSize;
++
++ if (transport_is_cbi()) {
++ d = ep_desc(fsg->gadget, &fs_intr_in_desc, &hs_intr_in_desc);
++ if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
++ goto reset;
++ fsg->intr_in_enabled = 1;
++ }
++
++ /* Allocate the requests */
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ struct fsg_buffhd *bh = &fsg->buffhds[i];
++
++ if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
++ goto reset;
++ if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
++ goto reset;
++ bh->inreq->buf = bh->outreq->buf = bh->buf;
++ bh->inreq->dma = bh->outreq->dma = bh->dma;
++ bh->inreq->context = bh->outreq->context = bh;
++ bh->inreq->complete = bulk_in_complete;
++ bh->outreq->complete = bulk_out_complete;
++ }
++ if (transport_is_cbi()) {
++ if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
++ goto reset;
++ fsg->intreq->complete = intr_in_complete;
++ }
++
++ fsg->running = 1;
++ for (i = 0; i < fsg->nluns; ++i)
++ fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
++ return rc;
++}
++
++
++/*
++ * Change our operational configuration. This code must agree with the code
++ * that returns config descriptors, and with interface altsetting code.
++ *
++ * It's also responsible for power management interactions. Some
++ * configurations might not work with our current power sources.
++ * For now we just assume the gadget is always self-powered.
++ */
++static int do_set_config(struct fsg_dev *fsg, u8 new_config)
++{
++ int rc = 0;
++
++ /* Disable the single interface */
++ if (fsg->config != 0) {
++ DBG(fsg, "reset config\n");
++ fsg->config = 0;
++ rc = do_set_interface(fsg, -1);
++ }
++
++ /* Enable the interface */
++ if (new_config != 0) {
++ fsg->config = new_config;
++ if ((rc = do_set_interface(fsg, 0)) != 0)
++ fsg->config = 0; // Reset on errors
++ else {
++ char *speed;
++
++ switch (fsg->gadget->speed) {
++ case USB_SPEED_LOW: speed = "low"; break;
++ case USB_SPEED_FULL: speed = "full"; break;
++ case USB_SPEED_HIGH: speed = "high"; break;
++ default: speed = "?"; break;
++ }
++ INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
++ }
++ }
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static void handle_exception(struct fsg_dev *fsg)
++{
++ siginfo_t info;
++ int sig;
++ int i;
++ int num_active;
++ struct fsg_buffhd *bh;
++ enum fsg_state old_state;
++ u8 new_config;
++ struct lun *curlun;
++ unsigned int exception_req_tag;
++ int rc;
++
++ /* Clear the existing signals. Anything but SIGUSR1 is converted
++ * into a high-priority EXIT exception. */
++ for (;;) {
++ spin_lock_irq(&current->sigmask_lock);
++ sig = dequeue_signal(&fsg->thread_signal_mask, &info);
++ spin_unlock_irq(&current->sigmask_lock);
++ if (!sig)
++ break;
++ if (sig != SIGUSR1) {
++ if (fsg->state < FSG_STATE_EXIT)
++ DBG(fsg, "Main thread exiting on signal\n");
++ raise_exception(fsg, FSG_STATE_EXIT);
++ }
++ }
++
++ /* Cancel all the pending transfers */
++ if (fsg->intreq_busy)
++ usb_ep_dequeue(fsg->intr_in, fsg->intreq);
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ bh = &fsg->buffhds[i];
++ if (bh->inreq_busy)
++ usb_ep_dequeue(fsg->bulk_in, bh->inreq);
++ if (bh->outreq_busy)
++ usb_ep_dequeue(fsg->bulk_out, bh->outreq);
++ }
++
++ /* Wait until everything is idle */
++ for (;;) {
++ num_active = fsg->intreq_busy;
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ bh = &fsg->buffhds[i];
++ num_active += bh->inreq_busy + bh->outreq_busy;
++ }
++ if (num_active == 0)
++ break;
++ if (sleep_thread(fsg))
++ return;
++ }
++
++ /* Clear out the controller's fifos */
++ if (fsg->bulk_in_enabled)
++ usb_ep_fifo_flush(fsg->bulk_in);
++ if (fsg->bulk_out_enabled)
++ usb_ep_fifo_flush(fsg->bulk_out);
++ if (fsg->intr_in_enabled)
++ usb_ep_fifo_flush(fsg->intr_in);
++
++ /* Reset the I/O buffer states and pointers, the SCSI
++ * state, and the exception. Then invoke the handler. */
++ spin_lock_irq(&fsg->lock);
++
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ bh = &fsg->buffhds[i];
++ bh->state = BUF_STATE_EMPTY;
++ }
++ fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
++ &fsg->buffhds[0];
++
++ exception_req_tag = fsg->exception_req_tag;
++ new_config = fsg->new_config;
++ old_state = fsg->state;
++
++ if (old_state == FSG_STATE_ABORT_BULK_OUT)
++ fsg->state = FSG_STATE_STATUS_PHASE;
++ else {
++ for (i = 0; i < fsg->nluns; ++i) {
++ curlun = &fsg->luns[i];
++ curlun->prevent_medium_removal = 0;
++ curlun->sense_data = curlun->unit_attention_data =
++ SS_NO_SENSE;
++ curlun->sense_data_info = 0;
++ }
++ fsg->state = FSG_STATE_IDLE;
++ }
++ spin_unlock_irq(&fsg->lock);
++
++ /* Carry out any extra actions required for the exception */
++ switch (old_state) {
++ default:
++ break;
++
++ case FSG_STATE_ABORT_BULK_OUT:
++ send_status(fsg);
++ spin_lock_irq(&fsg->lock);
++ if (fsg->state == FSG_STATE_STATUS_PHASE)
++ fsg->state = FSG_STATE_IDLE;
++ spin_unlock_irq(&fsg->lock);
++ break;
++
++ case FSG_STATE_RESET:
++ /* In case we were forced against our will to halt a
++ * bulk endpoint, clear the halt now. (The SuperH UDC
++ * requires this.) */
++ if (test_and_clear_bit(CLEAR_BULK_HALTS,
++ &fsg->atomic_bitflags)) {
++ usb_ep_clear_halt(fsg->bulk_in);
++ usb_ep_clear_halt(fsg->bulk_out);
++ }
++
++ if (transport_is_bbb()) {
++ if (fsg->ep0_req_tag == exception_req_tag)
++ ep0_queue(fsg); // Complete the status stage
++
++ } else if (transport_is_cbi())
++ send_status(fsg); // Status by interrupt pipe
++
++ /* Technically this should go here, but it would only be
++ * a waste of time. Ditto for the INTERFACE_CHANGE and
++ * CONFIG_CHANGE cases. */
++ // for (i = 0; i < fsg->nluns; ++i)
++ // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
++ break;
++
++ case FSG_STATE_INTERFACE_CHANGE:
++ rc = do_set_interface(fsg, 0);
++ if (fsg->ep0_req_tag != exception_req_tag)
++ break;
++ if (rc != 0) // STALL on errors
++ fsg_set_halt(fsg, fsg->ep0);
++ else // Complete the status stage
++ ep0_queue(fsg);
++ break;
++
++ case FSG_STATE_CONFIG_CHANGE:
++ rc = do_set_config(fsg, new_config);
++ if (fsg->ep0_req_tag != exception_req_tag)
++ break;
++ if (rc != 0) // STALL on errors
++ fsg_set_halt(fsg, fsg->ep0);
++ else // Complete the status stage
++ ep0_queue(fsg);
++ break;
++
++ case FSG_STATE_DISCONNECT:
++ fsync_all(fsg);
++ do_set_config(fsg, 0); // Unconfigured state
++ break;
++
++ case FSG_STATE_EXIT:
++ case FSG_STATE_TERMINATED:
++ do_set_config(fsg, 0); // Free resources
++ spin_lock_irq(&fsg->lock);
++ fsg->state = FSG_STATE_TERMINATED; // Stop the thread
++ spin_unlock_irq(&fsg->lock);
++ break;
++ }
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int fsg_main_thread(void *fsg_)
++{
++ struct fsg_dev *fsg = (struct fsg_dev *) fsg_;
++
++ fsg->thread_task = current;
++
++ /* Release all our userspace resources */
++ daemonize();
++ reparent_to_init();
++ strncpy(current->comm, "file-storage-gadget",
++ sizeof(current->comm) - 1);
++
++ /* Allow the thread to be killed by a signal, but set the signal mask
++ * to block everything but INT, TERM, KILL, and USR1. */
++ siginitsetinv(&fsg->thread_signal_mask, sigmask(SIGINT) |
++ sigmask(SIGTERM) | sigmask(SIGKILL) |
++ sigmask(SIGUSR1));
++ spin_lock_irq(&current->sigmask_lock);
++ flush_signals(current);
++ current->blocked = fsg->thread_signal_mask;
++ recalc_sigpending(current);
++ spin_unlock_irq(&current->sigmask_lock);
++
++ /* Arrange for userspace references to be interpreted as kernel
++ * pointers. That way we can pass a kernel pointer to a routine
++ * that expects a __user pointer and it will work okay. */
++ set_fs(get_ds());
++
++ /* Wait for the gadget registration to finish up */
++ wait_for_completion(&fsg->thread_notifier);
++
++ /* The main loop */
++ while (fsg->state != FSG_STATE_TERMINATED) {
++ if (exception_in_progress(fsg) || signal_pending(current)) {
++ handle_exception(fsg);
++ continue;
++ }
++
++ if (!fsg->running) {
++ sleep_thread(fsg);
++ continue;
++ }
++
++ if (get_next_command(fsg))
++ continue;
++
++ spin_lock_irq(&fsg->lock);
++ if (!exception_in_progress(fsg))
++ fsg->state = FSG_STATE_DATA_PHASE;
++ spin_unlock_irq(&fsg->lock);
++
++ if (do_scsi_command(fsg) || finish_reply(fsg))
++ continue;
++
++ spin_lock_irq(&fsg->lock);
++ if (!exception_in_progress(fsg))
++ fsg->state = FSG_STATE_STATUS_PHASE;
++ spin_unlock_irq(&fsg->lock);
++
++ if (send_status(fsg))
++ continue;
++
++ spin_lock_irq(&fsg->lock);
++ if (!exception_in_progress(fsg))
++ fsg->state = FSG_STATE_IDLE;
++ spin_unlock_irq(&fsg->lock);
++ }
++
++ fsg->thread_task = NULL;
++ flush_signals(current);
++
++ /* In case we are exiting because of a signal, unregister the
++ * gadget driver and close the backing file. */
++ if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) {
++ usb_gadget_unregister_driver(&fsg_driver);
++ close_all_backing_files(fsg);
++ }
++
++ /* Let the unbind and cleanup routines know the thread has exited */
++ complete_and_exit(&fsg->thread_notifier, 0);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* If the next two routines are called while the gadget is registered,
++ * the caller must own fsg->filesem for writing. */
++
++static int NORMALLY_INIT open_backing_file(struct lun *curlun,
++ const char *filename)
++{
++ int ro;
++ struct file *filp = NULL;
++ int rc = -EINVAL;
++ struct inode *inode = NULL;
++ loff_t size;
++ loff_t num_sectors;
++
++ /* R/W if we can, R/O if we must */
++ ro = curlun->ro;
++ if (!ro) {
++ filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
++ if (-EROFS == PTR_ERR(filp))
++ ro = 1;
++ }
++ if (ro)
++ filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
++ if (IS_ERR(filp)) {
++ LINFO(curlun, "unable to open backing file: %s\n", filename);
++ return PTR_ERR(filp);
++ }
++
++ if (!(filp->f_mode & FMODE_WRITE))
++ ro = 1;
++
++ if (filp->f_dentry)
++ inode = filp->f_dentry->d_inode;
++ if (inode && S_ISBLK(inode->i_mode)) {
++ kdev_t dev = inode->i_rdev;
++
++ if (blk_size[MAJOR(dev)])
++ size = (loff_t) blk_size[MAJOR(dev)][MINOR(dev)] <<
++ BLOCK_SIZE_BITS;
++ else {
++ LINFO(curlun, "unable to find file size: %s\n",
++ filename);
++ goto out;
++ }
++ } else if (inode && S_ISREG(inode->i_mode))
++ size = inode->i_size;
++ else {
++ LINFO(curlun, "invalid file type: %s\n", filename);
++ goto out;
++ }
++
++ /* If we can't read the file, it's no good.
++ * If we can't write the file, use it read-only. */
++ if (!filp->f_op || !filp->f_op->read) {
++ LINFO(curlun, "file not readable: %s\n", filename);
++ goto out;
++ }
++ if (IS_RDONLY(inode) || !filp->f_op->write)
++ ro = 1;
++
++ num_sectors = size >> 9; // File size in 512-byte sectors
++ if (num_sectors == 0) {
++ LINFO(curlun, "file too small: %s\n", filename);
++ rc = -ETOOSMALL;
++ goto out;
++ }
++
++ get_file(filp);
++ curlun->ro = ro;
++ curlun->filp = filp;
++ curlun->file_length = size;
++ curlun->num_sectors = num_sectors;
++ LDBG(curlun, "open backing file: %s\n", filename);
++ rc = 0;
++
++out:
++ filp_close(filp, current->files);
++ return rc;
++}
++
++
++static void close_backing_file(struct lun *curlun)
++{
++ if (curlun->filp) {
++ LDBG(curlun, "close backing file\n");
++ fput(curlun->filp);
++ curlun->filp = NULL;
++ }
++}
++
++static void close_all_backing_files(struct fsg_dev *fsg)
++{
++ int i;
++
++ for (i = 0; i < fsg->nluns; ++i)
++ close_backing_file(&fsg->luns[i]);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static void fsg_unbind(struct usb_gadget *gadget)
++{
++ struct fsg_dev *fsg = get_gadget_data(gadget);
++ int i;
++ struct usb_request *req = fsg->ep0req;
++
++ DBG(fsg, "unbind\n");
++ clear_bit(REGISTERED, &fsg->atomic_bitflags);
++
++ /* If the thread isn't already dead, tell it to exit now */
++ if (fsg->state != FSG_STATE_TERMINATED) {
++ raise_exception(fsg, FSG_STATE_EXIT);
++ wait_for_completion(&fsg->thread_notifier);
++
++ /* The cleanup routine waits for this completion also */
++ complete(&fsg->thread_notifier);
++ }
++
++ /* Free the data buffers */
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ struct fsg_buffhd *bh = &fsg->buffhds[i];
++
++ if (bh->buf)
++ usb_ep_free_buffer(fsg->bulk_in, bh->buf, bh->dma,
++ mod_data.buflen);
++ }
++
++ /* Free the request and buffer for endpoint 0 */
++ if (req) {
++ if (req->buf)
++ usb_ep_free_buffer(fsg->ep0, req->buf,
++ req->dma, EP0_BUFSIZE);
++ usb_ep_free_request(fsg->ep0, req);
++ }
++
++ set_gadget_data(gadget, 0);
++}
++
++
++static int __init check_parameters(struct fsg_dev *fsg)
++{
++ int prot;
++
++ /* Store the default values */
++ mod_data.transport_type = USB_PR_BULK;
++ mod_data.transport_name = "Bulk-only";
++ mod_data.protocol_type = USB_SC_SCSI;
++ mod_data.protocol_name = "Transparent SCSI";
++
++ if (gadget_is_sh(fsg->gadget))
++ mod_data.can_stall = 0;
++
++ if (mod_data.release == 0xffff) { // Parameter wasn't set
++ if (gadget_is_net2280(fsg->gadget))
++ mod_data.release = __constant_cpu_to_le16(0x0221);
++ else if (gadget_is_dummy(fsg->gadget))
++ mod_data.release = __constant_cpu_to_le16(0x0222);
++ else if (gadget_is_pxa(fsg->gadget))
++ mod_data.release = __constant_cpu_to_le16(0x0223);
++ else if (gadget_is_sh(fsg->gadget))
++ mod_data.release = __constant_cpu_to_le16(0x0224);
++
++ /* The sa1100 controller is not supported */
++
++ else if (gadget_is_goku(fsg->gadget))
++ mod_data.release = __constant_cpu_to_le16(0x0226);
++ else if (gadget_is_mq11xx(fsg->gadget))
++ mod_data.release = __constant_cpu_to_le16(0x0227);
++ else if (gadget_is_omap(fsg->gadget))
++ mod_data.release = __constant_cpu_to_le16(0x0228);
++ else {
++ WARN(fsg, "controller '%s' not recognized\n",
++ fsg->gadget->name);
++ mod_data.release = __constant_cpu_to_le16(0x0299);
++ }
++ }
++
++ prot = simple_strtol(mod_data.protocol_parm, NULL, 0);
++
++#ifdef CONFIG_USB_FILE_STORAGE_TEST
++ if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) {
++ ; // Use default setting
++ } else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) {
++ mod_data.transport_type = USB_PR_CB;
++ mod_data.transport_name = "Control-Bulk";
++ } else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) {
++ mod_data.transport_type = USB_PR_CBI;
++ mod_data.transport_name = "Control-Bulk-Interrupt";
++ } else {
++ ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
++ return -EINVAL;
++ }
++
++ if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 ||
++ prot == USB_SC_SCSI) {
++ ; // Use default setting
++ } else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 ||
++ prot == USB_SC_RBC) {
++ mod_data.protocol_type = USB_SC_RBC;
++ mod_data.protocol_name = "RBC";
++ } else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 ||
++ strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 ||
++ prot == USB_SC_8020) {
++ mod_data.protocol_type = USB_SC_8020;
++ mod_data.protocol_name = "8020i (ATAPI)";
++ } else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 ||
++ prot == USB_SC_QIC) {
++ mod_data.protocol_type = USB_SC_QIC;
++ mod_data.protocol_name = "QIC-157";
++ } else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 ||
++ prot == USB_SC_UFI) {
++ mod_data.protocol_type = USB_SC_UFI;
++ mod_data.protocol_name = "UFI";
++ } else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 ||
++ prot == USB_SC_8070) {
++ mod_data.protocol_type = USB_SC_8070;
++ mod_data.protocol_name = "8070i";
++ } else {
++ ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
++ return -EINVAL;
++ }
++
++ mod_data.buflen &= PAGE_CACHE_MASK;
++ if (mod_data.buflen <= 0) {
++ ERROR(fsg, "invalid buflen\n");
++ return -ETOOSMALL;
++ }
++#endif /* CONFIG_USB_FILE_STORAGE_TEST */
++
++ return 0;
++}
++
++
++static int __init fsg_bind(struct usb_gadget *gadget)
++{
++ struct fsg_dev *fsg = the_fsg;
++ int rc;
++ int i;
++ struct lun *curlun;
++ struct usb_ep *ep;
++ struct usb_request *req;
++ char *pathbuf, *p;
++
++ fsg->gadget = gadget;
++ set_gadget_data(gadget, fsg);
++ fsg->ep0 = gadget->ep0;
++ fsg->ep0->driver_data = fsg;
++
++ if ((rc = check_parameters(fsg)) != 0)
++ goto out;
++
++ /* Find out how many LUNs there should be */
++ i = mod_data.nluns;
++ if (i == 0) {
++ for (i = MAX_LUNS; i > 1; --i) {
++ if (file[i - 1])
++ break;
++ }
++ }
++ if (i > MAX_LUNS) {
++ ERROR(fsg, "invalid number of LUNs: %d\n", i);
++ rc = -EINVAL;
++ goto out;
++ }
++
++ /* Create the LUNs and open their backing files. We can't register
++ * the LUN devices until the gadget itself is registered, which
++ * doesn't happen until after fsg_bind() returns. */
++ fsg->luns = kmalloc(i * sizeof(struct lun), GFP_KERNEL);
++ if (!fsg->luns) {
++ rc = -ENOMEM;
++ goto out;
++ }
++ memset(fsg->luns, 0, i * sizeof(struct lun));
++ fsg->nluns = i;
++
++ for (i = 0; i < fsg->nluns; ++i) {
++ curlun = &fsg->luns[i];
++ curlun->ro = ro[i];
++ curlun->dev.driver_data = fsg;
++ snprintf(curlun->dev.name, BUS_ID_SIZE,
++ "%s-lun%d", gadget->name, i);
++
++ if (file[i] && *file[i]) {
++ if ((rc = open_backing_file(curlun, file[i])) != 0)
++ goto out;
++ } else if (!mod_data.removable) {
++ ERROR(fsg, "no file given for LUN%d\n", i);
++ rc = -EINVAL;
++ goto out;
++ }
++ }
++
++ /* Find all the endpoints we will use */
++ usb_ep_autoconfig_reset(gadget);
++ ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc);
++ if (!ep)
++ goto autoconf_fail;
++ ep->driver_data = fsg; // claim the endpoint
++ fsg->bulk_in = ep;
++
++ ep = usb_ep_autoconfig(gadget, &fs_bulk_out_desc);
++ if (!ep)
++ goto autoconf_fail;
++ ep->driver_data = fsg; // claim the endpoint
++ fsg->bulk_out = ep;
++
++ if (transport_is_cbi()) {
++ ep = usb_ep_autoconfig(gadget, &fs_intr_in_desc);
++ if (!ep)
++ goto autoconf_fail;
++ ep->driver_data = fsg; // claim the endpoint
++ fsg->intr_in = ep;
++ }
++
++ /* Fix up the descriptors */
++ device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
++ device_desc.idVendor = cpu_to_le16(mod_data.vendor);
++ device_desc.idProduct = cpu_to_le16(mod_data.product);
++ device_desc.bcdDevice = cpu_to_le16(mod_data.release);
++
++ i = (transport_is_cbi() ? 3 : 2); // Number of endpoints
++ intf_desc.bNumEndpoints = i;
++ intf_desc.bInterfaceSubClass = mod_data.protocol_type;
++ intf_desc.bInterfaceProtocol = mod_data.transport_type;
++ fs_function[i+1] = NULL;
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ hs_function[i+1] = NULL;
++
++ /* Assume ep0 uses the same maxpacket value for both speeds */
++ dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
++
++ /* Assume that all endpoint addresses are the same for both speeds */
++ hs_bulk_in_desc.bEndpointAddress = fs_bulk_in_desc.bEndpointAddress;
++ hs_bulk_out_desc.bEndpointAddress = fs_bulk_out_desc.bEndpointAddress;
++ hs_intr_in_desc.bEndpointAddress = fs_intr_in_desc.bEndpointAddress;
++#endif
++
++ rc = -ENOMEM;
++
++ /* Allocate the request and buffer for endpoint 0 */
++ fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
++ if (!req)
++ goto out;
++ req->buf = usb_ep_alloc_buffer(fsg->ep0, EP0_BUFSIZE,
++ &req->dma, GFP_KERNEL);
++ if (!req->buf)
++ goto out;
++ req->complete = ep0_complete;
++
++ /* Allocate the data buffers */
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ struct fsg_buffhd *bh = &fsg->buffhds[i];
++
++ bh->buf = usb_ep_alloc_buffer(fsg->bulk_in, mod_data.buflen,
++ &bh->dma, GFP_KERNEL);
++ if (!bh->buf)
++ goto out;
++ bh->next = bh + 1;
++ }
++ fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
++
++ /* This should reflect the actual gadget power source */
++ usb_gadget_set_selfpowered(gadget);
++
++ snprintf(manufacturer, sizeof manufacturer,
++ UTS_SYSNAME " " UTS_RELEASE " with %s",
++ gadget->name);
++
++ /* On a real device, serial[] would be loaded from permanent
++ * storage. We just encode it from the driver version string. */
++ for (i = 0; i < sizeof(serial) - 2; i += 2) {
++ unsigned char c = DRIVER_VERSION[i / 2];
++
++ if (!c)
++ break;
++ sprintf(&serial[i], "%02X", c);
++ }
++
++ if ((rc = kernel_thread(fsg_main_thread, fsg, (CLONE_VM | CLONE_FS |
++ CLONE_FILES))) < 0)
++ goto out;
++ fsg->thread_pid = rc;
++
++ INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
++ INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
++
++ pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
++ for (i = 0; i < fsg->nluns; ++i) {
++ curlun = &fsg->luns[i];
++ if (backing_file_is_open(curlun)) {
++ p = NULL;
++ if (pathbuf) {
++ p = d_path(curlun->filp->f_dentry,
++ curlun->filp->f_vfsmnt,
++ pathbuf, PATH_MAX);
++ if (IS_ERR(p))
++ p = NULL;
++ }
++ LINFO(curlun, "ro=%d, file: %s\n",
++ curlun->ro, (p ? p : "(error)"));
++ }
++ }
++ kfree(pathbuf);
++
++ DBG(fsg, "transport=%s (x%02x)\n",
++ mod_data.transport_name, mod_data.transport_type);
++ DBG(fsg, "protocol=%s (x%02x)\n",
++ mod_data.protocol_name, mod_data.protocol_type);
++ DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
++ mod_data.vendor, mod_data.product, mod_data.release);
++ DBG(fsg, "removable=%d, stall=%d, buflen=%u\n",
++ mod_data.removable, mod_data.can_stall,
++ mod_data.buflen);
++ DBG(fsg, "I/O thread pid: %d\n", fsg->thread_pid);
++ return 0;
++
++autoconf_fail:
++ ERROR(fsg, "unable to autoconfigure all endpoints\n");
++ rc = -ENOTSUPP;
++
++out:
++ fsg->state = FSG_STATE_TERMINATED; // The thread is dead
++ fsg_unbind(gadget);
++ close_all_backing_files(fsg);
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_gadget_driver fsg_driver = {
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ .speed = USB_SPEED_HIGH,
++#else
++ .speed = USB_SPEED_FULL,
++#endif
++ .function = (char *) longname,
++ .bind = fsg_bind,
++ .unbind = fsg_unbind,
++ .disconnect = fsg_disconnect,
++ .setup = fsg_setup,
++
++ .driver = {
++ .name = (char *) shortname,
++ // .release = ...
++ // .suspend = ...
++ // .resume = ...
++ },
++};
++
++
++static int __init fsg_alloc(void)
++{
++ struct fsg_dev *fsg;
++
++ fsg = kmalloc(sizeof *fsg, GFP_KERNEL);
++ if (!fsg)
++ return -ENOMEM;
++ memset(fsg, 0, sizeof *fsg);
++ spin_lock_init(&fsg->lock);
++ init_rwsem(&fsg->filesem);
++ init_waitqueue_head(&fsg->thread_wqh);
++ init_completion(&fsg->thread_notifier);
++
++ the_fsg = fsg;
++ return 0;
++}
++
++
++static void fsg_free(struct fsg_dev *fsg)
++{
++ kfree(fsg->luns);
++ kfree(fsg);
++}
++
++
++static int __init fsg_init(void)
++{
++ int rc;
++ struct fsg_dev *fsg;
++
++ /* Put the module parameters where they belong -- arghh! */
++ mod_data.nluns = luns;
++ mod_data.transport_parm = transport;
++ mod_data.protocol_parm = protocol;
++ mod_data.removable = removable;
++ mod_data.vendor = vendor;
++ mod_data.product = product;
++ mod_data.release = release;
++ mod_data.buflen = buflen;
++ mod_data.can_stall = stall;
++
++ if ((rc = fsg_alloc()) != 0)
++ return rc;
++ fsg = the_fsg;
++ if ((rc = usb_gadget_register_driver(&fsg_driver)) != 0) {
++ fsg_free(fsg);
++ return rc;
++ }
++ set_bit(REGISTERED, &fsg->atomic_bitflags);
++
++ /* Tell the thread to start working */
++ complete(&fsg->thread_notifier);
++ return 0;
++}
++module_init(fsg_init);
++
++
++static void __exit fsg_cleanup(void)
++{
++ struct fsg_dev *fsg = the_fsg;
++
++ /* Unregister the driver iff the thread hasn't already done so */
++ if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
++ usb_gadget_unregister_driver(&fsg_driver);
++
++ /* Wait for the thread to finish up */
++ wait_for_completion(&fsg->thread_notifier);
++
++ close_all_backing_files(fsg);
++ fsg_free(fsg);
++}
++module_exit(fsg_cleanup);
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/gadget_chips.h kernel/drivers/usb/gadget/gadget_chips.h
+--- /tmp/kernel/drivers/usb/gadget/gadget_chips.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/gadget_chips.h 2005-04-22 17:53:19.434539516 +0200
+@@ -0,0 +1,92 @@
++/*
++ * USB device controllers have lots of quirks. Use these macros in
++ * gadget drivers or other code that needs to deal with them, and which
++ * autoconfigures instead of using early binding to the hardware.
++ *
++ * This could eventually work like the ARM mach_is_*() stuff, driven by
++ * some config file that gets updated as new hardware is supported.
++ *
++ * NOTE: some of these controller drivers may not be available yet.
++ */
++#ifdef CONFIG_USB_GADGET_NET2280
++#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
++#else
++#define gadget_is_net2280(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_DUMMY_HCD
++#define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name)
++#else
++#define gadget_is_dummy(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_PXA2XX
++#define gadget_is_pxa(g) !strcmp("pxa2xx_udc", (g)->name)
++#else
++#define gadget_is_pxa(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_GOKU
++#define gadget_is_goku(g) !strcmp("goku_udc", (g)->name)
++#else
++#define gadget_is_goku(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_SUPERH
++#define gadget_is_sh(g) !strcmp("sh_udc", (g)->name)
++#else
++#define gadget_is_sh(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_SA1100
++#define gadget_is_sa1100(g) !strcmp("sa1100_udc", (g)->name)
++#else
++#define gadget_is_sa1100(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_LH7A40X
++#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name)
++#else
++#define gadget_is_lh7a40x(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_MQ11XX
++#define gadget_is_mq11xx(g) !strcmp("mq11xx_udc", (g)->name)
++#else
++#define gadget_is_mq11xx(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_OMAP
++#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
++#else
++#define gadget_is_omap(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_N9604
++#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name)
++#else
++#define gadget_is_n9604(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_PXA27X
++#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
++#else
++#define gadget_is_pxa27x(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_S3C2410
++#define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name)
++#else
++#define gadget_is_s3c2410(g) 0
++#endif
++
++#ifdef CONFIG_USB_GADGET_AT91
++#define gadget_is_at91(g) !strcmp("at91_udc", (g)->name)
++#else
++#define gadget_is_at91(g) 0
++#endif
++
++// CONFIG_USB_GADGET_SX2
++// CONFIG_USB_GADGET_AU1X00
++// ...
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/goku_udc.c kernel/drivers/usb/gadget/goku_udc.c
+--- /tmp/kernel/drivers/usb/gadget/goku_udc.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/goku_udc.c 2005-04-22 17:53:19.440538539 +0200
+@@ -0,0 +1,1975 @@
++/*
++ * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
++ *
++ * Copyright (C) 2000-2002 Lineo
++ * by Stuart Lynne, Tom Rushworth, and Bruce Balden
++ * Copyright (C) 2002 Toshiba Corporation
++ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++/*
++ * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
++ *
++ * - Endpoint numbering is fixed: ep{1,2,3}-bulk
++ * - Gadget drivers can choose ep maxpacket (8/16/32/64)
++ * - Gadget drivers can choose direction (IN, OUT)
++ * - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
++ */
++
++#undef DEBUG
++// #define VERBOSE /* extra debug messages (success too) */
++// #define USB_TRACE /* packet-level success messages */
++
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/smp_lock.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/proc_fs.h>
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++#include <asm/byteorder.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++#include <asm/unaligned.h>
++
++
++#include "goku_udc.h"
++
++#define DRIVER_DESC "TC86C001 USB Device Controller"
++#define DRIVER_VERSION "30-Oct 2003"
++
++#define DMA_ADDR_INVALID (~(dma_addr_t)0)
++
++static const char driver_name [] = "goku_udc";
++static const char driver_desc [] = DRIVER_DESC;
++
++MODULE_AUTHOR("source@mvista.com");
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
++
++
++/*
++ * IN dma behaves ok under testing, though the IN-dma abort paths don't
++ * seem to behave quite as expected. Used by default.
++ *
++ * OUT dma documents design problems handling the common "short packet"
++ * transfer termination policy; it couldn't enabled by default, even
++ * if the OUT-dma abort problems had a resolution.
++ */
++static unsigned use_dma = 1;
++
++#if 0
++//#include <linux/moduleparam.h>
++/* "modprobe goku_udc use_dma=1" etc
++ * 0 to disable dma
++ * 1 to use IN dma only (normal operation)
++ * 2 to use IN and OUT dma
++ */
++module_param(use_dma, uint, S_IRUGO);
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++static void nuke(struct goku_ep *, int status);
++
++static inline void
++command(struct goku_udc_regs *regs, int command, unsigned epnum)
++{
++ writel(COMMAND_EP(epnum) | command, &regs->Command);
++ udelay(300);
++}
++
++static int
++goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
++{
++ struct goku_udc *dev;
++ struct goku_ep *ep;
++ u32 mode;
++ u16 max;
++ unsigned long flags;
++
++ ep = container_of(_ep, struct goku_ep, ep);
++ if (!_ep || !desc || ep->desc
++ || desc->bDescriptorType != USB_DT_ENDPOINT)
++ return -EINVAL;
++ dev = ep->dev;
++ if (ep == &dev->ep[0])
++ return -EINVAL;
++ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
++ return -ESHUTDOWN;
++ if (ep->num != (desc->bEndpointAddress & 0x0f))
++ return -EINVAL;
++
++ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
++ case USB_ENDPOINT_XFER_BULK:
++ case USB_ENDPOINT_XFER_INT:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
++ != EPxSTATUS_EP_INVALID)
++ return -EBUSY;
++
++ /* enabling the no-toggle interrupt mode would need an api hook */
++ mode = 0;
++ max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
++ switch (max) {
++ case 64: mode++;
++ case 32: mode++;
++ case 16: mode++;
++ case 8: mode <<= 3;
++ break;
++ default:
++ return -EINVAL;
++ }
++ mode |= 2 << 1; /* bulk, or intr-with-toggle */
++
++ /* ep1/ep2 dma direction is chosen early; it works in the other
++ * direction, with pio. be cautious with out-dma.
++ */
++ ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
++ if (ep->is_in) {
++ mode |= 1;
++ ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
++ } else {
++ ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
++ if (ep->dma)
++ DBG(dev, "%s out-dma hides short packets\n",
++ ep->ep.name);
++ }
++
++ spin_lock_irqsave(&ep->dev->lock, flags);
++
++ /* ep1 and ep2 can do double buffering and/or dma */
++ if (ep->num < 3) {
++ struct goku_udc_regs *regs = ep->dev->regs;
++ u32 tmp;
++
++ /* double buffer except (for now) with pio in */
++ tmp = ((ep->dma || !ep->is_in)
++ ? 0x10 /* double buffered */
++ : 0x11 /* single buffer */
++ ) << ep->num;
++ tmp |= readl(&regs->EPxSingle);
++ writel(tmp, &regs->EPxSingle);
++
++ tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
++ tmp |= readl(&regs->EPxBCS);
++ writel(tmp, &regs->EPxBCS);
++ }
++ writel(mode, ep->reg_mode);
++ command(ep->dev->regs, COMMAND_RESET, ep->num);
++ ep->ep.maxpacket = max;
++ ep->stopped = 0;
++ ep->desc = desc;
++ spin_unlock_irqrestore(&ep->dev->lock, flags);
++
++ DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
++ ep->is_in ? "IN" : "OUT",
++ ep->dma ? "dma" : "pio",
++ max);
++
++ return 0;
++}
++
++static void ep_reset(struct goku_udc_regs *regs, struct goku_ep *ep)
++{
++ struct goku_udc *dev = ep->dev;
++
++ if (regs) {
++ command(regs, COMMAND_INVALID, ep->num);
++ if (ep->num) {
++ if (ep->num == UDC_MSTWR_ENDPOINT)
++ dev->int_enable &= ~(INT_MSTWREND
++ |INT_MSTWRTMOUT);
++ else if (ep->num == UDC_MSTRD_ENDPOINT)
++ dev->int_enable &= ~INT_MSTRDEND;
++ dev->int_enable &= ~INT_EPxDATASET (ep->num);
++ } else
++ dev->int_enable &= ~INT_EP0;
++ writel(dev->int_enable, &regs->int_enable);
++ readl(&regs->int_enable);
++ if (ep->num < 3) {
++ struct goku_udc_regs *regs = ep->dev->regs;
++ u32 tmp;
++
++ tmp = readl(&regs->EPxSingle);
++ tmp &= ~(0x11 << ep->num);
++ writel(tmp, &regs->EPxSingle);
++
++ tmp = readl(&regs->EPxBCS);
++ tmp &= ~(0x11 << ep->num);
++ writel(tmp, &regs->EPxBCS);
++ }
++ /* reset dma in case we're still using it */
++ if (ep->dma) {
++ u32 master;
++
++ master = readl(&regs->dma_master) & MST_RW_BITS;
++ if (ep->num == UDC_MSTWR_ENDPOINT) {
++ master &= ~MST_W_BITS;
++ master |= MST_WR_RESET;
++ } else {
++ master &= ~MST_R_BITS;
++ master |= MST_RD_RESET;
++ }
++ writel(master, &regs->dma_master);
++ }
++ }
++
++ ep->ep.maxpacket = MAX_FIFO_SIZE;
++ ep->desc = 0;
++ ep->stopped = 1;
++ ep->irqs = 0;
++ ep->dma = 0;
++}
++
++static int goku_ep_disable(struct usb_ep *_ep)
++{
++ struct goku_ep *ep;
++ struct goku_udc *dev;
++ unsigned long flags;
++
++ ep = container_of(_ep, struct goku_ep, ep);
++ if (!_ep || !ep->desc)
++ return -ENODEV;
++ dev = ep->dev;
++ if (dev->ep0state == EP0_SUSPEND)
++ return -EBUSY;
++
++ VDBG(dev, "disable %s\n", _ep->name);
++
++ spin_lock_irqsave(&dev->lock, flags);
++ nuke(ep, -ESHUTDOWN);
++ ep_reset(dev->regs, ep);
++ spin_unlock_irqrestore(&dev->lock, flags);
++
++ return 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_request *
++goku_alloc_request(struct usb_ep *_ep, int gfp_flags)
++{
++ struct goku_request *req;
++
++ if (!_ep)
++ return 0;
++ req = kmalloc(sizeof *req, gfp_flags);
++ if (!req)
++ return 0;
++
++ memset(req, 0, sizeof *req);
++ req->req.dma = DMA_ADDR_INVALID;
++ INIT_LIST_HEAD(&req->queue);
++ return &req->req;
++}
++
++static void
++goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct goku_request *req;
++
++ if (!_ep || !_req)
++ return;
++
++ req = container_of(_req, struct goku_request, req);
++ WARN_ON(!list_empty(&req->queue));
++ kfree(req);
++}
++
++/*-------------------------------------------------------------------------*/
++
++#undef USE_KMALLOC
++
++/* many common platforms have dma-coherent caches, which means that it's
++ * safe to use kmalloc() memory for all i/o buffers without using any
++ * cache flushing calls. (unless you're trying to share cache lines
++ * between dma and non-dma activities, which is a slow idea in any case.)
++ *
++ * other platforms need more care, with 2.6 having a moderately general
++ * solution except for the common "buffer is smaller than a page" case.
++ */
++#if defined(CONFIG_X86)
++#define USE_KMALLOC
++
++#elif defined(CONFIG_MIPS) && !defined(CONFIG_NONCOHERENT_IO)
++#define USE_KMALLOC
++
++#elif defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
++#define USE_KMALLOC
++
++#endif
++
++/* allocating buffers this way eliminates dma mapping overhead, which
++ * on some platforms will mean eliminating a per-io buffer copy. with
++ * some kinds of system caches, further tweaks may still be needed.
++ */
++static void *
++goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
++ dma_addr_t *dma, int gfp_flags)
++{
++ void *retval;
++ struct goku_ep *ep;
++
++ ep = container_of(_ep, struct goku_ep, ep);
++ if (!_ep)
++ return 0;
++ *dma = DMA_ADDR_INVALID;
++
++#if defined(USE_KMALLOC)
++ retval = kmalloc(bytes, gfp_flags);
++ if (retval)
++ *dma = virt_to_phys(retval);
++#else
++ if (ep->dma) {
++ /* one problem with this call is that it wastes memory on
++ * typical 1/N page allocations: it allocates 1-N pages.
++ * another is that it always uses GFP_ATOMIC.
++ */
++#warning Using pci_alloc_consistent even with buffers smaller than a page.
++ retval = pci_alloc_consistent(ep->dev->pdev, bytes, dma);
++ } else
++ retval = kmalloc(bytes, gfp_flags);
++#endif
++ return retval;
++}
++
++static void
++goku_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma, unsigned bytes)
++{
++ /* free memory into the right allocator */
++#ifndef USE_KMALLOC
++ if (dma != DMA_ADDR_INVALID) {
++ struct goku_ep *ep;
++
++ ep = container_of(_ep, struct goku_ep, ep);
++ if (!_ep)
++ return;
++ /* one problem with this call is that some platforms
++ * don't allow it to be used in_irq().
++ */
++ pci_free_consistent(ep->dev->pdev, bytes, buf, dma);
++ } else
++#endif
++ kfree (buf);
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void
++done(struct goku_ep *ep, struct goku_request *req, int status)
++{
++ struct goku_udc *dev;
++ unsigned stopped = ep->stopped;
++
++ list_del_init(&req->queue);
++
++ if (likely(req->req.status == -EINPROGRESS))
++ req->req.status = status;
++ else
++ status = req->req.status;
++
++ dev = ep->dev;
++ if (req->mapped) {
++ pci_unmap_single(dev->pdev, req->req.dma, req->req.length,
++ ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
++ req->req.dma = DMA_ADDR_INVALID;
++ req->mapped = 0;
++ }
++
++#ifndef USB_TRACE
++ if (status && status != -ESHUTDOWN)
++#endif
++ VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
++ ep->ep.name, &req->req, status,
++ req->req.actual, req->req.length);
++
++ /* don't modify queue heads during completion callback */
++ ep->stopped = 1;
++ spin_unlock(&dev->lock);
++ req->req.complete(&ep->ep, &req->req);
++ spin_lock(&dev->lock);
++ ep->stopped = stopped;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static inline int
++write_packet(u32 *fifo, u8 *buf, struct goku_request *req, unsigned max)
++{
++ unsigned length, count;
++
++ length = min(req->req.length - req->req.actual, max);
++ req->req.actual += length;
++
++ count = length;
++ while (likely(count--))
++ writel(*buf++, fifo);
++ return length;
++}
++
++// return: 0 = still running, 1 = completed, negative = errno
++static int write_fifo(struct goku_ep *ep, struct goku_request *req)
++{
++ struct goku_udc *dev = ep->dev;
++ u32 tmp;
++ u8 *buf;
++ unsigned count;
++ int is_last;
++
++ tmp = readl(&dev->regs->DataSet);
++ buf = req->req.buf + req->req.actual;
++ prefetch(buf);
++
++ dev = ep->dev;
++ if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
++ return -EL2HLT;
++
++ /* NOTE: just single-buffered PIO-IN for now. */
++ if (unlikely((tmp & DATASET_A(ep->num)) != 0))
++ return 0;
++
++ /* clear our "packet available" irq */
++ if (ep->num != 0)
++ writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
++
++ count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
++
++ /* last packet often short (sometimes a zlp, especially on ep0) */
++ if (unlikely(count != ep->ep.maxpacket)) {
++ writel(~(1<<ep->num), &dev->regs->EOP);
++ if (ep->num == 0) {
++ dev->ep[0].stopped = 1;
++ dev->ep0state = EP0_STATUS;
++ }
++ is_last = 1;
++ } else {
++ if (likely(req->req.length != req->req.actual)
++ || req->req.zero)
++ is_last = 0;
++ else
++ is_last = 1;
++ }
++#if 0 /* printk seemed to trash is_last...*/
++//#ifdef USB_TRACE
++ VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
++ ep->ep.name, count, is_last ? "/last" : "",
++ req->req.length - req->req.actual, req);
++#endif
++
++ /* requests complete when all IN data is in the FIFO,
++ * or sometimes later, if a zlp was needed.
++ */
++ if (is_last) {
++ done(ep, req, 0);
++ return 1;
++ }
++
++ return 0;
++}
++
++static int read_fifo(struct goku_ep *ep, struct goku_request *req)
++{
++ struct goku_udc_regs *regs;
++ u32 size, set;
++ u8 *buf;
++ unsigned bufferspace, is_short, dbuff;
++
++ regs = ep->dev->regs;
++top:
++ buf = req->req.buf + req->req.actual;
++ prefetchw(buf);
++
++ if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
++ return -EL2HLT;
++
++ dbuff = (ep->num == 1 || ep->num == 2);
++ do {
++ /* ack dataset irq matching the status we'll handle */
++ if (ep->num != 0)
++ writel(~INT_EPxDATASET(ep->num), &regs->int_status);
++
++ set = readl(&regs->DataSet) & DATASET_AB(ep->num);
++ size = readl(&regs->EPxSizeLA[ep->num]);
++ bufferspace = req->req.length - req->req.actual;
++
++ /* usually do nothing without an OUT packet */
++ if (likely(ep->num != 0 || bufferspace != 0)) {
++ if (unlikely(set == 0))
++ break;
++ /* use ep1/ep2 double-buffering for OUT */
++ if (!(size & PACKET_ACTIVE))
++ size = readl(&regs->EPxSizeLB[ep->num]);
++ if (!(size & PACKET_ACTIVE)) // "can't happen"
++ break;
++ size &= DATASIZE; /* EPxSizeH == 0 */
++
++ /* ep0out no-out-data case for set_config, etc */
++ } else
++ size = 0;
++
++ /* read all bytes from this packet */
++ req->req.actual += size;
++ is_short = (size < ep->ep.maxpacket);
++#ifdef USB_TRACE
++ VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
++ ep->ep.name, size, is_short ? "/S" : "",
++ req, req->req.actual, req->req.length);
++#endif
++ while (likely(size-- != 0)) {
++ u8 byte = (u8) readl(ep->reg_fifo);
++
++ if (unlikely(bufferspace == 0)) {
++ /* this happens when the driver's buffer
++ * is smaller than what the host sent.
++ * discard the extra data in this packet.
++ */
++ if (req->req.status != -EOVERFLOW)
++ DBG(ep->dev, "%s overflow %u\n",
++ ep->ep.name, size);
++ req->req.status = -EOVERFLOW;
++ } else {
++ *buf++ = byte;
++ bufferspace--;
++ }
++ }
++
++ /* completion */
++ if (unlikely(is_short || req->req.actual == req->req.length)) {
++ if (unlikely(ep->num == 0)) {
++ /* non-control endpoints now usable? */
++ if (ep->dev->req_config)
++ writel(ep->dev->configured
++ ? USBSTATE_CONFIGURED
++ : 0,
++ &regs->UsbState);
++ /* ep0out status stage */
++ writel(~(1<<0), &regs->EOP);
++ ep->stopped = 1;
++ ep->dev->ep0state = EP0_STATUS;
++ }
++ done(ep, req, 0);
++
++ /* empty the second buffer asap */
++ if (dbuff && !list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next,
++ struct goku_request, queue);
++ goto top;
++ }
++ return 1;
++ }
++ } while (dbuff);
++ return 0;
++}
++
++static inline void
++pio_irq_enable(struct goku_udc *dev, struct goku_udc_regs *regs, int epnum)
++{
++ dev->int_enable |= INT_EPxDATASET (epnum);
++ writel(dev->int_enable, &regs->int_enable);
++ /* write may still be posted */
++}
++
++static inline void
++pio_irq_disable(struct goku_udc *dev, struct goku_udc_regs *regs, int epnum)
++{
++ dev->int_enable &= ~INT_EPxDATASET (epnum);
++ writel(dev->int_enable, &regs->int_enable);
++ /* write may still be posted */
++}
++
++static inline void
++pio_advance(struct goku_ep *ep)
++{
++ struct goku_request *req;
++
++ if (unlikely(list_empty (&ep->queue)))
++ return;
++ req = list_entry(ep->queue.next, struct goku_request, queue);
++ (ep->is_in ? write_fifo : read_fifo)(ep, req);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++// return: 0 = q running, 1 = q stopped, negative = errno
++static int start_dma(struct goku_ep *ep, struct goku_request *req)
++{
++ struct goku_udc_regs *regs = ep->dev->regs;
++ u32 master;
++ u32 start = req->req.dma;
++ u32 end = start + req->req.length - 1;
++
++ master = readl(&regs->dma_master) & MST_RW_BITS;
++
++ /* re-init the bits affecting IN dma; careful with zlps */
++ if (likely(ep->is_in)) {
++ if (unlikely(master & MST_RD_ENA)) {
++ DBG (ep->dev, "start, IN active dma %03x!!\n",
++ master);
++// return -EL2HLT;
++ }
++ writel(end, &regs->in_dma_end);
++ writel(start, &regs->in_dma_start);
++
++ master &= ~MST_R_BITS;
++ if (unlikely(req->req.length == 0))
++ master = MST_RD_ENA | MST_RD_EOPB;
++ else if ((req->req.length % ep->ep.maxpacket) != 0
++ || req->req.zero)
++ master = MST_RD_ENA | MST_EOPB_ENA;
++ else
++ master = MST_RD_ENA | MST_EOPB_DIS;
++
++ ep->dev->int_enable |= INT_MSTRDEND;
++
++ /* Goku DMA-OUT merges short packets, which plays poorly with
++ * protocols where short packets mark the transfer boundaries.
++ * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
++ * ending transfers after 3 SOFs; we don't turn it on.
++ */
++ } else {
++ if (unlikely(master & MST_WR_ENA)) {
++ DBG (ep->dev, "start, OUT active dma %03x!!\n",
++ master);
++// return -EL2HLT;
++ }
++ writel(end, &regs->out_dma_end);
++ writel(start, &regs->out_dma_start);
++
++ master &= ~MST_W_BITS;
++ master |= MST_WR_ENA | MST_TIMEOUT_DIS;
++
++ ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
++ }
++
++ writel(master, &regs->dma_master);
++ writel(ep->dev->int_enable, &regs->int_enable);
++ return 0;
++}
++
++static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
++{
++ struct goku_request *req;
++ struct goku_udc_regs *regs = ep->dev->regs;
++ u32 master;
++
++ master = readl(&regs->dma_master);
++
++ if (unlikely(list_empty(&ep->queue))) {
++stop:
++ if (ep->is_in)
++ dev->int_enable &= ~INT_MSTRDEND;
++ else
++ dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
++ writel(dev->int_enable, &regs->int_enable);
++ return;
++ }
++ req = list_entry(ep->queue.next, struct goku_request, queue);
++
++ /* normal hw dma completion (not abort) */
++ if (likely(ep->is_in)) {
++ if (unlikely(master & MST_RD_ENA))
++ return;
++ req->req.actual = readl(&regs->in_dma_current);
++ } else {
++ if (unlikely(master & MST_WR_ENA))
++ return;
++
++ /* hardware merges short packets, and also hides packet
++ * overruns. a partial packet MAY be in the fifo here.
++ */
++ req->req.actual = readl(&regs->out_dma_current);
++ }
++ req->req.actual -= req->req.dma;
++ req->req.actual++;
++
++#ifdef USB_TRACE
++ VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
++ ep->ep.name, ep->is_in ? "IN" : "OUT",
++ req->req.actual, req->req.length, req);
++#endif
++ done(ep, req, 0);
++ if (list_empty(&ep->queue))
++ goto stop;
++ req = list_entry(ep->queue.next, struct goku_request, queue);
++ (void) start_dma(ep, req);
++}
++
++static void abort_dma(struct goku_ep *ep, int status)
++{
++ struct goku_udc_regs *regs = ep->dev->regs;
++ struct goku_request *req;
++ u32 curr, master;
++
++ /* NAK future host requests, hoping the implicit delay lets the
++ * dma engine finish reading (or writing) its latest packet and
++ * empty the dma buffer (up to 16 bytes).
++ *
++ * This avoids needing to clean up a partial packet in the fifo;
++ * we can't do that for IN without side effects to HALT and TOGGLE.
++ */
++ command(regs, COMMAND_FIFO_DISABLE, ep->num);
++ req = list_entry(ep->queue.next, struct goku_request, queue);
++ master = readl(&regs->dma_master) & MST_RW_BITS;
++
++ /* FIXME using these resets isn't usably documented. this may
++ * not work unless it's followed by disabling the endpoint.
++ *
++ * FIXME the OUT reset path doesn't even behave consistently.
++ */
++ if (ep->is_in) {
++ if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
++ goto finished;
++ curr = readl(&regs->in_dma_current);
++
++ writel(curr, &regs->in_dma_end);
++ writel(curr, &regs->in_dma_start);
++
++ master &= ~MST_R_BITS;
++ master |= MST_RD_RESET;
++ writel(master, &regs->dma_master);
++
++ if (readl(&regs->dma_master) & MST_RD_ENA)
++ DBG(ep->dev, "IN dma active after reset!\n");
++
++ } else {
++ if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
++ goto finished;
++ curr = readl(&regs->out_dma_current);
++
++ writel(curr, &regs->out_dma_end);
++ writel(curr, &regs->out_dma_start);
++
++ master &= ~MST_W_BITS;
++ master |= MST_WR_RESET;
++ writel(master, &regs->dma_master);
++
++ if (readl(&regs->dma_master) & MST_WR_ENA)
++ DBG(ep->dev, "OUT dma active after reset!\n");
++ }
++ req->req.actual = (curr - req->req.dma) + 1;
++ req->req.status = status;
++
++ VDBG(ep->dev, "%s %s %s %d/%d\n", __FUNCTION__, ep->ep.name,
++ ep->is_in ? "IN" : "OUT",
++ req->req.actual, req->req.length);
++
++ command(regs, COMMAND_FIFO_ENABLE, ep->num);
++
++ return;
++
++finished:
++ /* dma already completed; no abort needed */
++ command(regs, COMMAND_FIFO_ENABLE, ep->num);
++ req->req.actual = req->req.length;
++ req->req.status = 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static int
++goku_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
++{
++ struct goku_request *req;
++ struct goku_ep *ep;
++ struct goku_udc *dev;
++ unsigned long flags;
++ int status;
++
++ /* always require a cpu-view buffer so pio works */
++ req = container_of(_req, struct goku_request, req);
++ if (unlikely(!_req || !_req->complete
++ || !_req->buf || !list_empty(&req->queue)))
++ return -EINVAL;
++ ep = container_of(_ep, struct goku_ep, ep);
++ if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
++ return -EINVAL;
++ dev = ep->dev;
++ if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
++ return -ESHUTDOWN;
++
++ /* can't touch registers when suspended */
++ if (dev->ep0state == EP0_SUSPEND)
++ return -EBUSY;
++
++ /* set up dma mapping in case the caller didn't */
++ if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
++ _req->dma = pci_map_single(dev->pdev, _req->buf, _req->length,
++ ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
++ req->mapped = 1;
++ }
++
++#ifdef USB_TRACE
++ VDBG(dev, "%s queue req %p, len %u buf %p\n",
++ _ep->name, _req, _req->length, _req->buf);
++#endif
++
++ spin_lock_irqsave(&dev->lock, flags);
++
++ _req->status = -EINPROGRESS;
++ _req->actual = 0;
++
++ /* for ep0 IN without premature status, zlp is required and
++ * writing EOP starts the status stage (OUT).
++ */
++ if (unlikely(ep->num == 0 && ep->is_in))
++ _req->zero = 1;
++
++ /* kickstart this i/o queue? */
++ status = 0;
++ if (list_empty(&ep->queue) && likely(!ep->stopped)) {
++ /* dma: done after dma completion IRQ (or error)
++ * pio: done after last fifo operation
++ */
++ if (ep->dma)
++ status = start_dma(ep, req);
++ else
++ status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
++
++ if (unlikely(status != 0)) {
++ if (status > 0)
++ status = 0;
++ req = 0;
++ }
++
++ } /* else pio or dma irq handler advances the queue. */
++
++ if (likely(req != 0))
++ list_add_tail(&req->queue, &ep->queue);
++
++ if (likely(!list_empty(&ep->queue))
++ && likely(ep->num != 0)
++ && !ep->dma
++ && !(dev->int_enable & INT_EPxDATASET (ep->num)))
++ pio_irq_enable(dev, dev->regs, ep->num);
++
++ spin_unlock_irqrestore(&dev->lock, flags);
++
++ /* pci writes may still be posted */
++ return status;
++}
++
++/* dequeue ALL requests */
++static void nuke(struct goku_ep *ep, int status)
++{
++ struct goku_request *req;
++
++ ep->stopped = 1;
++ if (list_empty(&ep->queue))
++ return;
++ if (ep->dma)
++ abort_dma(ep, status);
++ while (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next, struct goku_request, queue);
++ done(ep, req, status);
++ }
++}
++
++/* dequeue JUST ONE request */
++static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct goku_request *req;
++ struct goku_ep *ep;
++ struct goku_udc *dev;
++ unsigned long flags;
++
++ ep = container_of(_ep, struct goku_ep, ep);
++ if (!_ep || !_req || (!ep->desc && ep->num != 0))
++ return -EINVAL;
++ dev = ep->dev;
++ if (!dev->driver)
++ return -ESHUTDOWN;
++
++ /* we can't touch (dma) registers when suspended */
++ if (dev->ep0state == EP0_SUSPEND)
++ return -EBUSY;
++
++ VDBG(dev, "%s %s %s %s %p\n", __FUNCTION__, _ep->name,
++ ep->is_in ? "IN" : "OUT",
++ ep->dma ? "dma" : "pio",
++ _req);
++
++ spin_lock_irqsave(&dev->lock, flags);
++
++ /* make sure it's actually queued on this endpoint */
++ list_for_each_entry (req, &ep->queue, queue) {
++ if (&req->req == _req)
++ break;
++ }
++ if (&req->req != _req) {
++ spin_unlock_irqrestore (&dev->lock, flags);
++ return -EINVAL;
++ }
++
++ if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
++ abort_dma(ep, -ECONNRESET);
++ done(ep, req, -ECONNRESET);
++ dma_advance(dev, ep);
++ } else if (!list_empty(&req->queue))
++ done(ep, req, -ECONNRESET);
++ else
++ req = 0;
++ spin_unlock_irqrestore(&dev->lock, flags);
++
++ return req ? 0 : -EOPNOTSUPP;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void goku_clear_halt(struct goku_ep *ep)
++{
++ // assert (ep->num !=0)
++ VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
++ command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
++ command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
++ if (ep->stopped) {
++ ep->stopped = 0;
++ if (ep->dma) {
++ struct goku_request *req;
++
++ if (list_empty(&ep->queue))
++ return;
++ req = list_entry(ep->queue.next, struct goku_request,
++ queue);
++ (void) start_dma(ep, req);
++ } else
++ pio_advance(ep);
++ }
++}
++
++static int goku_set_halt(struct usb_ep *_ep, int value)
++{
++ struct goku_ep *ep;
++ unsigned long flags;
++ int retval = 0;
++
++ if (!_ep)
++ return -ENODEV;
++ ep = container_of (_ep, struct goku_ep, ep);
++
++ if (ep->num == 0) {
++ if (value) {
++ ep->dev->ep0state = EP0_STALL;
++ ep->dev->ep[0].stopped = 1;
++ } else
++ return -EINVAL;
++
++ /* don't change EPxSTATUS_EP_INVALID to READY */
++ } else if (!ep->desc) {
++ DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
++ return -EINVAL;
++ }
++
++ spin_lock_irqsave(&ep->dev->lock, flags);
++ if (!list_empty(&ep->queue))
++ retval = -EAGAIN;
++ else if (ep->is_in && value
++ /* data in (either) packet buffer? */
++ && (ep->dev->regs->DataSet & DATASET_AB(ep->num)))
++ retval = -EAGAIN;
++ else if (!value)
++ goku_clear_halt(ep);
++ else {
++ ep->stopped = 1;
++ VDBG(ep->dev, "%s set halt\n", ep->ep.name);
++ command(ep->dev->regs, COMMAND_STALL, ep->num);
++ readl(ep->reg_status);
++ }
++ spin_unlock_irqrestore(&ep->dev->lock, flags);
++ return retval;
++}
++
++static int goku_fifo_status(struct usb_ep *_ep)
++{
++ struct goku_ep *ep;
++ struct goku_udc_regs *regs;
++ u32 size;
++
++ if (!_ep)
++ return -ENODEV;
++ ep = container_of(_ep, struct goku_ep, ep);
++
++ /* size is only reported sanely for OUT */
++ if (ep->is_in)
++ return -EOPNOTSUPP;
++
++ /* ignores 16-byte dma buffer; SizeH == 0 */
++ regs = ep->dev->regs;
++ size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
++ size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
++ VDBG(ep->dev, "%s %s %u\n", __FUNCTION__, ep->ep.name, size);
++ return size;
++}
++
++static void goku_fifo_flush(struct usb_ep *_ep)
++{
++ struct goku_ep *ep;
++ struct goku_udc_regs *regs;
++ u32 size;
++
++ if (!_ep)
++ return;
++ ep = container_of(_ep, struct goku_ep, ep);
++ VDBG(ep->dev, "%s %s\n", __FUNCTION__, ep->ep.name);
++
++ /* don't change EPxSTATUS_EP_INVALID to READY */
++ if (!ep->desc && ep->num != 0) {
++ DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
++ return;
++ }
++
++ regs = ep->dev->regs;
++ size = readl(&regs->EPxSizeLA[ep->num]);
++ size &= DATASIZE;
++
++ /* Non-desirable behavior: FIFO_CLEAR also clears the
++ * endpoint halt feature. For OUT, we _could_ just read
++ * the bytes out (PIO, if !ep->dma); for in, no choice.
++ */
++ if (size)
++ command(regs, COMMAND_FIFO_CLEAR, ep->num);
++}
++
++static struct usb_ep_ops goku_ep_ops = {
++ .enable = goku_ep_enable,
++ .disable = goku_ep_disable,
++
++ .alloc_request = goku_alloc_request,
++ .free_request = goku_free_request,
++
++ .alloc_buffer = goku_alloc_buffer,
++ .free_buffer = goku_free_buffer,
++
++ .queue = goku_queue,
++ .dequeue = goku_dequeue,
++
++ .set_halt = goku_set_halt,
++ .fifo_status = goku_fifo_status,
++ .fifo_flush = goku_fifo_flush,
++};
++
++/*-------------------------------------------------------------------------*/
++
++static int goku_get_frame(struct usb_gadget *_gadget)
++{
++ return -EOPNOTSUPP;
++}
++
++static const struct usb_gadget_ops goku_ops = {
++ .get_frame = goku_get_frame,
++ // no remote wakeup
++ // not selfpowered
++};
++
++/*-------------------------------------------------------------------------*/
++
++static inline char *dmastr(void)
++{
++ if (use_dma == 0)
++ return "(dma disabled)";
++ else if (use_dma == 2)
++ return "(dma IN and OUT)";
++ else
++ return "(dma IN)";
++}
++
++/* if we're trying to save space, don't bother with this proc file */
++
++#if defined(CONFIG_PROC_FS) && !defined(CONFIG_EMBEDDED)
++# define UDC_PROC_FILE
++#endif
++
++#ifdef UDC_PROC_FILE
++
++static const char proc_node_name [] = "driver/udc";
++
++#define FOURBITS "%s%s%s%s"
++#define EIGHTBITS FOURBITS FOURBITS
++
++static void
++dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
++{
++ int t;
++
++ /* int_status is the same format ... */
++ t = snprintf(*next, *size,
++ "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
++ label, mask,
++ (mask & INT_PWRDETECT) ? " power" : "",
++ (mask & INT_SYSERROR) ? " sys" : "",
++ (mask & INT_MSTRDEND) ? " in-dma" : "",
++ (mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
++
++ (mask & INT_MSTWREND) ? " out-dma" : "",
++ (mask & INT_MSTWRSET) ? " wrset" : "",
++ (mask & INT_ERR) ? " err" : "",
++ (mask & INT_SOF) ? " sof" : "",
++
++ (mask & INT_EP3NAK) ? " ep3nak" : "",
++ (mask & INT_EP2NAK) ? " ep2nak" : "",
++ (mask & INT_EP1NAK) ? " ep1nak" : "",
++ (mask & INT_EP3DATASET) ? " ep3" : "",
++
++ (mask & INT_EP2DATASET) ? " ep2" : "",
++ (mask & INT_EP1DATASET) ? " ep1" : "",
++ (mask & INT_STATUSNAK) ? " ep0snak" : "",
++ (mask & INT_STATUS) ? " ep0status" : "",
++
++ (mask & INT_SETUP) ? " setup" : "",
++ (mask & INT_ENDPOINT0) ? " ep0" : "",
++ (mask & INT_USBRESET) ? " reset" : "",
++ (mask & INT_SUSPEND) ? " suspend" : "");
++ *size -= t;
++ *next += t;
++}
++
++
++static int
++udc_proc_read(char *buffer, char **start, off_t off, int count,
++ int *eof, void *_dev)
++{
++ char *buf = buffer;
++ struct goku_udc *dev = _dev;
++ struct goku_udc_regs *regs = dev->regs;
++ char *next = buf;
++ unsigned size = count;
++ unsigned long flags;
++ int i, t, is_usb_connected;
++ u32 tmp;
++
++ if (off != 0)
++ return 0;
++
++ local_irq_save(flags);
++
++ /* basic device status */
++ tmp = readl(&regs->power_detect);
++ is_usb_connected = tmp & PW_DETECT;
++ t = snprintf(next, size,
++ "%s - %s\n"
++ "%s version: %s %s\n"
++ "Gadget driver: %s\n"
++ "Host %s, %s\n"
++ "\n",
++ pci_name(dev->pdev), driver_desc,
++ driver_name, DRIVER_VERSION, dmastr(),
++ dev->driver ? dev->driver->driver.name : "(none)",
++ is_usb_connected
++ ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
++ : "disconnected",
++ ({char *tmp;
++ switch(dev->ep0state){
++ case EP0_DISCONNECT: tmp = "ep0_disconnect"; break;
++ case EP0_IDLE: tmp = "ep0_idle"; break;
++ case EP0_IN: tmp = "ep0_in"; break;
++ case EP0_OUT: tmp = "ep0_out"; break;
++ case EP0_STATUS: tmp = "ep0_status"; break;
++ case EP0_STALL: tmp = "ep0_stall"; break;
++ case EP0_SUSPEND: tmp = "ep0_suspend"; break;
++ default: tmp = "ep0_?"; break;
++ } tmp; })
++ );
++ size -= t;
++ next += t;
++
++ dump_intmask("int_status", readl(&regs->int_status), &next, &size);
++ dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);
++
++ if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
++ goto done;
++
++ /* registers for (active) device and ep0 */
++ t = snprintf(next, size, "\nirqs %lu\ndataset %02x "
++ "single.bcs %02x.%02x state %x addr %u\n",
++ dev->irqs, readl(&regs->DataSet),
++ readl(&regs->EPxSingle), readl(&regs->EPxBCS),
++ readl(&regs->UsbState),
++ readl(&regs->address));
++ size -= t;
++ next += t;
++
++ tmp = readl(&regs->dma_master);
++ t = snprintf(next, size,
++ "dma %03X =" EIGHTBITS "%s %s\n", tmp,
++ (tmp & MST_EOPB_DIS) ? " eopb-" : "",
++ (tmp & MST_EOPB_ENA) ? " eopb+" : "",
++ (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
++ (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
++
++ (tmp & MST_RD_EOPB) ? " eopb" : "",
++ (tmp & MST_RD_RESET) ? " in_reset" : "",
++ (tmp & MST_WR_RESET) ? " out_reset" : "",
++ (tmp & MST_RD_ENA) ? " IN" : "",
++
++ (tmp & MST_WR_ENA) ? " OUT" : "",
++ (tmp & MST_CONNECTION)
++ ? "ep1in/ep2out"
++ : "ep1out/ep2in");
++ size -= t;
++ next += t;
++
++ /* dump endpoint queues */
++ for (i = 0; i < 4; i++) {
++ struct goku_ep *ep = &dev->ep [i];
++ struct goku_request *req;
++ int t;
++
++ if (i && !ep->desc)
++ continue;
++
++ tmp = readl(ep->reg_status);
++ t = snprintf(next, size,
++ "%s %s max %u %s, irqs %lu, "
++ "status %02x (%s) " FOURBITS "\n",
++ ep->ep.name,
++ ep->is_in ? "in" : "out",
++ ep->ep.maxpacket,
++ ep->dma ? "dma" : "pio",
++ ep->irqs,
++ tmp, ({ char *s;
++ switch (tmp & EPxSTATUS_EP_MASK) {
++ case EPxSTATUS_EP_READY:
++ s = "ready"; break;
++ case EPxSTATUS_EP_DATAIN:
++ s = "packet"; break;
++ case EPxSTATUS_EP_FULL:
++ s = "full"; break;
++ case EPxSTATUS_EP_TX_ERR: // host will retry
++ s = "tx_err"; break;
++ case EPxSTATUS_EP_RX_ERR:
++ s = "rx_err"; break;
++ case EPxSTATUS_EP_BUSY: /* ep0 only */
++ s = "busy"; break;
++ case EPxSTATUS_EP_STALL:
++ s = "stall"; break;
++ case EPxSTATUS_EP_INVALID: // these "can't happen"
++ s = "invalid"; break;
++ default:
++ s = "?"; break;
++ }; s; }),
++ (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
++ (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
++ (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
++ (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
++ );
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++
++ if (list_empty(&ep->queue)) {
++ t = snprintf(next, size, "\t(nothing queued)\n");
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++ continue;
++ }
++ list_for_each_entry(req, &ep->queue, queue) {
++ if (ep->dma && req->queue.prev == &ep->queue) {
++ if (i == UDC_MSTRD_ENDPOINT)
++ tmp = readl(&regs->in_dma_current);
++ else
++ tmp = readl(&regs->out_dma_current);
++ tmp -= req->req.dma;
++ tmp++;
++ } else
++ tmp = req->req.actual;
++
++ t = snprintf(next, size,
++ "\treq %p len %u/%u buf %p\n",
++ &req->req, tmp, req->req.length,
++ req->req.buf);
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++ }
++ }
++
++done:
++ local_irq_restore(flags);
++ *eof = 1;
++ return count - size;
++}
++
++#endif /* UDC_PROC_FILE */
++
++/*-------------------------------------------------------------------------*/
++
++static void udc_reinit (struct goku_udc *dev)
++{
++ static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
++
++ unsigned i;
++
++ INIT_LIST_HEAD (&dev->gadget.ep_list);
++ dev->gadget.ep0 = &dev->ep [0].ep;
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++ dev->ep0state = EP0_DISCONNECT;
++ dev->irqs = 0;
++
++ for (i = 0; i < 4; i++) {
++ struct goku_ep *ep = &dev->ep[i];
++
++ ep->num = i;
++ ep->ep.name = names[i];
++ ep->reg_fifo = &dev->regs->ep_fifo [i];
++ ep->reg_status = &dev->regs->ep_status [i];
++ ep->reg_mode = &dev->regs->ep_mode[i];
++
++ ep->ep.ops = &goku_ep_ops;
++ list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
++ ep->dev = dev;
++ INIT_LIST_HEAD (&ep->queue);
++
++ ep_reset(0, ep);
++ }
++
++ dev->ep[0].reg_mode = 0;
++ dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
++ list_del_init (&dev->ep[0].ep.ep_list);
++}
++
++static void udc_reset(struct goku_udc *dev)
++{
++ struct goku_udc_regs *regs = dev->regs;
++
++ writel(0, &regs->power_detect);
++ writel(0, &regs->int_enable);
++ readl(&regs->int_enable);
++ dev->int_enable = 0;
++
++ /* deassert reset, leave USB D+ at hi-Z (no pullup)
++ * don't let INT_PWRDETECT sequence begin
++ */
++ udelay(250);
++ writel(PW_RESETB, &regs->power_detect);
++ readl(&regs->int_enable);
++}
++
++static void ep0_start(struct goku_udc *dev)
++{
++ struct goku_udc_regs *regs = dev->regs;
++ unsigned i;
++
++ VDBG(dev, "%s\n", __FUNCTION__);
++
++ udc_reset(dev);
++ udc_reinit (dev);
++ //writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);
++
++ /* hw handles set_address, set_feature, get_status; maybe more */
++ writel( G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
++ | G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
++ | G_REQMODE_GET_DESC
++ | G_REQMODE_CLEAR_FEAT
++ , &regs->reqmode);
++
++ for (i = 0; i < 4; i++)
++ dev->ep[i].irqs = 0;
++
++ /* can't modify descriptors after writing UsbReady */
++ for (i = 0; i < DESC_LEN; i++)
++ writel(0, &regs->descriptors[i]);
++ writel(0, &regs->UsbReady);
++
++ /* expect ep0 requests when the host drops reset */
++ writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
++ dev->int_enable = INT_DEVWIDE | INT_EP0;
++ writel(dev->int_enable, &dev->regs->int_enable);
++ readl(&regs->int_enable);
++ dev->gadget.speed = USB_SPEED_FULL;
++ dev->ep0state = EP0_IDLE;
++}
++
++static void udc_enable(struct goku_udc *dev)
++{
++ /* start enumeration now, or after power detect irq */
++ if (readl(&dev->regs->power_detect) & PW_DETECT)
++ ep0_start(dev);
++ else {
++ DBG(dev, "%s\n", __FUNCTION__);
++ dev->int_enable = INT_PWRDETECT;
++ writel(dev->int_enable, &dev->regs->int_enable);
++ }
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* keeping it simple:
++ * - one bus driver, initted first;
++ * - one function driver, initted second
++ */
++
++static struct goku_udc *the_controller;
++
++/* when a driver is successfully registered, it will receive
++ * control requests including set_configuration(), which enables
++ * non-control requests. then usb traffic follows until a
++ * disconnect is reported. then a host may connect again, or
++ * the driver might get unbound.
++ */
++int usb_gadget_register_driver(struct usb_gadget_driver *driver)
++{
++ struct goku_udc *dev = the_controller;
++ int retval;
++
++ if (!driver
++ || driver->speed != USB_SPEED_FULL
++ || !driver->bind
++ || !driver->unbind
++ || !driver->disconnect
++ || !driver->setup)
++ return -EINVAL;
++ if (!dev)
++ return -ENODEV;
++ if (dev->driver)
++ return -EBUSY;
++
++ /* hook up the driver */
++ dev->driver = driver;
++ retval = driver->bind(&dev->gadget);
++ if (retval) {
++ DBG(dev, "bind to driver %s --> error %d\n",
++ driver->driver.name, retval);
++ dev->driver = 0;
++ return retval;
++ }
++
++ /* then enable host detection and ep0; and we're ready
++ * for set_configuration as well as eventual disconnect.
++ */
++ udc_enable(dev);
++
++ DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
++ return 0;
++}
++EXPORT_SYMBOL(usb_gadget_register_driver);
++
++static void
++stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
++{
++ unsigned i;
++
++ DBG (dev, "%s\n", __FUNCTION__);
++
++ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
++ driver = 0;
++
++ /* disconnect gadget driver after quiesceing hw and the driver */
++ udc_reset (dev);
++ for (i = 0; i < 4; i++)
++ nuke(&dev->ep [i], -ESHUTDOWN);
++ if (driver) {
++ spin_unlock(&dev->lock);
++ driver->disconnect(&dev->gadget);
++ spin_lock(&dev->lock);
++ }
++
++ if (dev->driver)
++ udc_enable(dev);
++}
++
++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
++{
++ struct goku_udc *dev = the_controller;
++ unsigned long flags;
++
++ if (!dev)
++ return -ENODEV;
++ if (!driver || driver != dev->driver)
++ return -EINVAL;
++
++ spin_lock_irqsave(&dev->lock, flags);
++ dev->driver = 0;
++ stop_activity(dev, driver);
++ spin_unlock_irqrestore(&dev->lock, flags);
++
++ driver->unbind(&dev->gadget);
++
++ DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
++ return 0;
++}
++EXPORT_SYMBOL(usb_gadget_unregister_driver);
++
++
++/*-------------------------------------------------------------------------*/
++
++static void ep0_setup(struct goku_udc *dev)
++{
++ struct goku_udc_regs *regs = dev->regs;
++ struct usb_ctrlrequest ctrl;
++ int tmp;
++
++ /* read SETUP packet and enter DATA stage */
++ ctrl.bRequestType = readl(&regs->bRequestType);
++ ctrl.bRequest = readl(&regs->bRequest);
++ ctrl.wValue = (readl(&regs->wValueH) << 8) | readl(&regs->wValueL);
++ ctrl.wIndex = (readl(&regs->wIndexH) << 8) | readl(&regs->wIndexL);
++ ctrl.wLength = (readl(&regs->wLengthH) << 8) | readl(&regs->wLengthL);
++ writel(0, &regs->SetupRecv);
++
++ nuke(&dev->ep[0], 0);
++ dev->ep[0].stopped = 0;
++ if (likely(ctrl.bRequestType & USB_DIR_IN)) {
++ dev->ep[0].is_in = 1;
++ dev->ep0state = EP0_IN;
++ /* detect early status stages */
++ writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
++ } else {
++ dev->ep[0].is_in = 0;
++ dev->ep0state = EP0_OUT;
++
++ /* NOTE: CLEAR_FEATURE is done in software so that we can
++ * synchronize transfer restarts after bulk IN stalls. data
++ * won't even enter the fifo until the halt is cleared.
++ */
++ switch (ctrl.bRequest) {
++ case USB_REQ_CLEAR_FEATURE:
++ switch (ctrl.bRequestType) {
++ case USB_RECIP_ENDPOINT:
++ tmp = ctrl.wIndex & 0x0f;
++ /* active endpoint */
++ if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
++ goto stall;
++ if (ctrl.wIndex & USB_DIR_IN) {
++ if (!dev->ep[tmp].is_in)
++ goto stall;
++ } else {
++ if (dev->ep[tmp].is_in)
++ goto stall;
++ }
++ /* endpoint halt */
++ if (ctrl.wValue != 0)
++ goto stall;
++ if (tmp)
++ goku_clear_halt(&dev->ep[tmp]);
++succeed:
++ /* start ep0out status stage */
++ writel(~(1<<0), &regs->EOP);
++ dev->ep[0].stopped = 1;
++ dev->ep0state = EP0_STATUS;
++ return;
++ case USB_RECIP_DEVICE:
++ /* device remote wakeup: always clear */
++ if (ctrl.wValue != 1)
++ goto stall;
++ VDBG(dev, "clear dev remote wakeup\n");
++ goto succeed;
++ case USB_RECIP_INTERFACE:
++ goto stall;
++ default: /* pass to gadget driver */
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++ }
++
++#ifdef USB_TRACE
++ VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
++ ctrl.bRequestType, ctrl.bRequest,
++ ctrl.wValue, ctrl.wIndex, ctrl.wLength);
++#endif
++
++ /* hw wants to know when we're configured (or not) */
++ dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
++ && ctrl.bRequestType == USB_RECIP_DEVICE);
++ if (unlikely(dev->req_config))
++ dev->configured = (ctrl.wValue != 0);
++
++ /* delegate everything to the gadget driver.
++ * it may respond after this irq handler returns.
++ */
++ spin_unlock (&dev->lock);
++ tmp = dev->driver->setup(&dev->gadget, &ctrl);
++ spin_lock (&dev->lock);
++ if (unlikely(tmp < 0)) {
++stall:
++#ifdef USB_TRACE
++ VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
++ ctrl.bRequestType, ctrl.bRequest, tmp);
++#endif
++ command(regs, COMMAND_STALL, 0);
++ dev->ep[0].stopped = 1;
++ dev->ep0state = EP0_STALL;
++ }
++
++ /* expect at least one data or status stage irq */
++}
++
++#define ACK(irqbit) { \
++ stat &= ~irqbit; \
++ writel(~irqbit, &regs->int_status); \
++ handled = 1; \
++ }
++
++static irqreturn_t goku_irq(int irq, void *_dev, struct pt_regs *r)
++{
++ struct goku_udc *dev = _dev;
++ struct goku_udc_regs *regs = dev->regs;
++ struct goku_ep *ep;
++ u32 stat, handled = 0;
++ unsigned i, rescans = 5;
++
++ spin_lock(&dev->lock);
++
++rescan:
++ stat = readl(&regs->int_status) & dev->int_enable;
++ if (!stat)
++ goto done;
++ dev->irqs++;
++
++ /* device-wide irqs */
++ if (unlikely(stat & INT_DEVWIDE)) {
++ if (stat & INT_SYSERROR) {
++ ERROR(dev, "system error\n");
++ stop_activity(dev, dev->driver);
++ stat = 0;
++ handled = 1;
++ // FIXME have a neater way to prevent re-enumeration
++ dev->driver = 0;
++ goto done;
++ }
++ if (stat & INT_PWRDETECT) {
++ writel(~stat, &regs->int_status);
++ if (readl(&dev->regs->power_detect) & PW_DETECT) {
++ VDBG(dev, "connect\n");
++ ep0_start(dev);
++ } else {
++ DBG(dev, "disconnect\n");
++ if (dev->gadget.speed == USB_SPEED_FULL)
++ stop_activity(dev, dev->driver);
++ dev->ep0state = EP0_DISCONNECT;
++ dev->int_enable = INT_DEVWIDE;
++ writel(dev->int_enable, &dev->regs->int_enable);
++ }
++ stat = 0;
++ handled = 1;
++ goto done;
++ }
++ if (stat & INT_SUSPEND) {
++ ACK(INT_SUSPEND);
++ if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
++ switch (dev->ep0state) {
++ case EP0_DISCONNECT:
++ case EP0_SUSPEND:
++ goto pm_next;
++ default:
++ break;
++ }
++ DBG(dev, "USB suspend\n");
++ dev->ep0state = EP0_SUSPEND;
++ if (dev->gadget.speed != USB_SPEED_UNKNOWN
++ && dev->driver
++ && dev->driver->suspend) {
++ spin_unlock(&dev->lock);
++ dev->driver->suspend(&dev->gadget);
++ spin_lock(&dev->lock);
++ }
++ } else {
++ if (dev->ep0state != EP0_SUSPEND) {
++ DBG(dev, "bogus USB resume %d\n",
++ dev->ep0state);
++ goto pm_next;
++ }
++ DBG(dev, "USB resume\n");
++ dev->ep0state = EP0_IDLE;
++ if (dev->gadget.speed != USB_SPEED_UNKNOWN
++ && dev->driver
++ && dev->driver->resume) {
++ spin_unlock(&dev->lock);
++ dev->driver->resume(&dev->gadget);
++ spin_lock(&dev->lock);
++ }
++ }
++ }
++pm_next:
++ if (stat & INT_USBRESET) { /* hub reset done */
++ ACK(INT_USBRESET);
++ INFO(dev, "USB reset done, gadget %s\n",
++ dev->driver->driver.name);
++ }
++ // and INT_ERR on some endpoint's crc/bitstuff/... problem
++ }
++
++ /* progress ep0 setup, data, or status stages.
++ * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
++ */
++ if (stat & INT_SETUP) {
++ ACK(INT_SETUP);
++ dev->ep[0].irqs++;
++ ep0_setup(dev);
++ }
++ if (stat & INT_STATUSNAK) {
++ ACK(INT_STATUSNAK|INT_ENDPOINT0);
++ if (dev->ep0state == EP0_IN) {
++ ep = &dev->ep[0];
++ ep->irqs++;
++ nuke(ep, 0);
++ writel(~(1<<0), &regs->EOP);
++ dev->ep0state = EP0_STATUS;
++ }
++ }
++ if (stat & INT_ENDPOINT0) {
++ ACK(INT_ENDPOINT0);
++ ep = &dev->ep[0];
++ ep->irqs++;
++ pio_advance(ep);
++ }
++
++ /* dma completion */
++ if (stat & INT_MSTRDEND) { /* IN */
++ ACK(INT_MSTRDEND);
++ ep = &dev->ep[UDC_MSTRD_ENDPOINT];
++ ep->irqs++;
++ dma_advance(dev, ep);
++ }
++ if (stat & INT_MSTWREND) { /* OUT */
++ ACK(INT_MSTWREND);
++ ep = &dev->ep[UDC_MSTWR_ENDPOINT];
++ ep->irqs++;
++ dma_advance(dev, ep);
++ }
++ if (stat & INT_MSTWRTMOUT) { /* OUT */
++ ACK(INT_MSTWRTMOUT);
++ ep = &dev->ep[UDC_MSTWR_ENDPOINT];
++ ep->irqs++;
++ ERROR(dev, "%s write timeout ?\n", ep->ep.name);
++ // reset dma? then dma_advance()
++ }
++
++ /* pio */
++ for (i = 1; i < 4; i++) {
++ u32 tmp = INT_EPxDATASET(i);
++
++ if (!(stat & tmp))
++ continue;
++ ep = &dev->ep[i];
++ pio_advance(ep);
++ if (list_empty (&ep->queue))
++ pio_irq_disable(dev, regs, i);
++ stat &= ~tmp;
++ handled = 1;
++ ep->irqs++;
++ }
++
++ if (rescans--)
++ goto rescan;
++
++done:
++ (void)readl(&regs->int_enable);
++ spin_unlock(&dev->lock);
++ if (stat)
++ DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
++ readl(&regs->int_status), dev->int_enable);
++ return IRQ_RETVAL(handled);
++}
++
++#undef ACK
++
++/*-------------------------------------------------------------------------*/
++
++/* tear down the binding between this driver and the pci device */
++
++static void goku_remove(struct pci_dev *pdev)
++{
++ struct goku_udc *dev = pci_get_drvdata(pdev);
++
++ DBG(dev, "%s\n", __FUNCTION__);
++ /* start with the driver above us */
++ if (dev->driver) {
++ /* should have been done already by driver model core */
++ WARN(dev, "pci remove, driver '%s' is still registered\n",
++ dev->driver->driver.name);
++ usb_gadget_unregister_driver(dev->driver);
++ }
++
++#ifdef UDC_PROC_FILE
++ remove_proc_entry(proc_node_name, NULL);
++#endif
++ if (dev->regs)
++ udc_reset(dev);
++ if (dev->got_irq)
++ free_irq(pdev->irq, dev);
++ if (dev->regs)
++ iounmap(dev->regs);
++ if (dev->got_region)
++ release_mem_region(pci_resource_start (pdev, 0),
++ pci_resource_len (pdev, 0));
++ if (dev->enabled)
++ pci_disable_device(pdev);
++
++ pci_set_drvdata(pdev, 0);
++ dev->regs = 0;
++ the_controller = 0;
++
++ INFO(dev, "unbind\n");
++}
++
++/* wrap this driver around the specified pci device, but
++ * don't respond over USB until a gadget driver binds to us.
++ */
++
++static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ struct goku_udc *dev = 0;
++ unsigned long resource, len;
++ void *base = 0;
++ int retval;
++ char buf [8], *bufp;
++
++ /* if you want to support more than one controller in a system,
++ * usb_gadget_driver_{register,unregister}() must change.
++ */
++ if (the_controller) {
++ WARN(dev, "ignoring %s\n", pci_name(pdev));
++ return -EBUSY;
++ }
++ if (!pdev->irq) {
++ printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
++ retval = -ENODEV;
++ goto done;
++ }
++
++ /* alloc, and start init */
++ dev = kmalloc (sizeof *dev, SLAB_KERNEL);
++ if (dev == NULL){
++ pr_debug("enomem %s\n", pci_name(pdev));
++ retval = -ENOMEM;
++ goto done;
++ }
++
++ memset(dev, 0, sizeof *dev);
++ spin_lock_init(&dev->lock);
++ dev->pdev = pdev;
++ dev->gadget.ops = &goku_ops;
++
++ /* the "gadget" abstracts/virtualizes the controller */
++ dev->gadget.dev.bus_id = "gadget";
++ dev->gadget.name = driver_name;
++
++ /* now all the pci goodies ... */
++ retval = pci_enable_device(pdev);
++ if (retval < 0) {
++ DBG(dev, "can't enable, %d\n", retval);
++ goto done;
++ }
++ dev->enabled = 1;
++
++ resource = pci_resource_start(pdev, 0);
++ len = pci_resource_len(pdev, 0);
++ if (!request_mem_region(resource, len, driver_name)) {
++ DBG(dev, "controller already in use\n");
++ retval = -EBUSY;
++ goto done;
++ }
++ dev->got_region = 1;
++
++ base = ioremap_nocache(resource, len);
++ if (base == NULL) {
++ DBG(dev, "can't map memory\n");
++ retval = -EFAULT;
++ goto done;
++ }
++ dev->regs = (struct goku_udc_regs *) base;
++
++ pci_set_drvdata(pdev, dev);
++ INFO(dev, "%s\n", driver_desc);
++ INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
++#ifndef __sparc__
++ snprintf(buf, sizeof buf, "%d", pdev->irq);
++ bufp = buf;
++#else
++ bufp = __irq_itoa(pdev->irq);
++#endif
++ INFO(dev, "irq %s, pci mem %p\n", bufp, base);
++
++ /* init to known state, then setup irqs */
++ udc_reset(dev);
++ udc_reinit (dev);
++ if (request_irq(pdev->irq, goku_irq, SA_SHIRQ/*|SA_SAMPLE_RANDOM*/,
++ driver_name, dev) != 0) {
++ DBG(dev, "request interrupt %s failed\n", bufp);
++ retval = -EBUSY;
++ goto done;
++ }
++ dev->got_irq = 1;
++ if (use_dma)
++ pci_set_master(pdev);
++
++
++#ifdef UDC_PROC_FILE
++ create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
++#endif
++
++ /* done */
++ the_controller = dev;
++
++ return 0;
++
++done:
++ if (dev)
++ goku_remove (pdev);
++ return retval;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static struct pci_device_id pci_ids [] = { {
++ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
++ .class_mask = ~0,
++ .vendor = 0x102f, /* Toshiba */
++ .device = 0x0107, /* this UDC */
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++
++}, { /* end: all zeroes */ }
++};
++MODULE_DEVICE_TABLE (pci, pci_ids);
++
++static struct pci_driver goku_pci_driver = {
++ .name = (char *) driver_name,
++ .id_table = pci_ids,
++
++ .probe = goku_probe,
++ .remove = goku_remove,
++
++ /* FIXME add power management support */
++};
++
++static int __init init (void)
++{
++ return pci_module_init (&goku_pci_driver);
++}
++module_init (init);
++
++static void __exit cleanup (void)
++{
++ pci_unregister_driver (&goku_pci_driver);
++}
++module_exit (cleanup);
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/goku_udc.h kernel/drivers/usb/gadget/goku_udc.h
+--- /tmp/kernel/drivers/usb/gadget/goku_udc.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/goku_udc.h 2005-04-22 17:53:19.443538050 +0200
+@@ -0,0 +1,321 @@
++/*
++ * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
++ *
++ * Copyright (C) 2000-2002 Lineo
++ * by Stuart Lynne, Tom Rushworth, and Bruce Balden
++ * Copyright (C) 2002 Toshiba Corporation
++ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++/*
++ * PCI BAR 0 points to these registers.
++ */
++struct goku_udc_regs {
++ /* irq management */
++ u32 int_status; /* 0x000 */
++ u32 int_enable;
++#define INT_SUSPEND 0x00001 /* or resume */
++#define INT_USBRESET 0x00002
++#define INT_ENDPOINT0 0x00004
++#define INT_SETUP 0x00008
++#define INT_STATUS 0x00010
++#define INT_STATUSNAK 0x00020
++#define INT_EPxDATASET(n) (0x00020 << (n)) /* 0 < n < 4 */
++# define INT_EP1DATASET 0x00040
++# define INT_EP2DATASET 0x00080
++# define INT_EP3DATASET 0x00100
++#define INT_EPnNAK(n) (0x00100 < (n)) /* 0 < n < 4 */
++# define INT_EP1NAK 0x00200
++# define INT_EP2NAK 0x00400
++# define INT_EP3NAK 0x00800
++#define INT_SOF 0x01000
++#define INT_ERR 0x02000
++#define INT_MSTWRSET 0x04000
++#define INT_MSTWREND 0x08000
++#define INT_MSTWRTMOUT 0x10000
++#define INT_MSTRDEND 0x20000
++#define INT_SYSERROR 0x40000
++#define INT_PWRDETECT 0x80000
++
++#define INT_DEVWIDE (INT_PWRDETECT|INT_SYSERROR/*|INT_ERR*/|INT_USBRESET|INT_SUSPEND)
++#define INT_EP0 (INT_SETUP|INT_ENDPOINT0/*|INT_STATUS*/|INT_STATUSNAK)
++
++ u32 dma_master;
++#define MST_EOPB_DIS 0x0800
++#define MST_EOPB_ENA 0x0400
++#define MST_TIMEOUT_DIS 0x0200
++#define MST_TIMEOUT_ENA 0x0100
++#define MST_RD_EOPB 0x0080 /* write-only */
++#define MST_RD_RESET 0x0040
++#define MST_WR_RESET 0x0020
++#define MST_RD_ENA 0x0004 /* 1:start, 0:ignore */
++#define MST_WR_ENA 0x0002 /* 1:start, 0:ignore */
++#define MST_CONNECTION 0x0001 /* 0 for ep1out/ep2in */
++
++#define MST_R_BITS (MST_EOPB_DIS|MST_EOPB_ENA \
++ |MST_RD_ENA|MST_RD_RESET)
++#define MST_W_BITS (MST_TIMEOUT_DIS|MST_TIMEOUT_ENA \
++ |MST_WR_ENA|MST_WR_RESET)
++#define MST_RW_BITS (MST_R_BITS|MST_W_BITS \
++ |MST_CONNECTION)
++
++/* these values assume (dma_master & MST_CONNECTION) == 0 */
++#define UDC_MSTWR_ENDPOINT 1
++#define UDC_MSTRD_ENDPOINT 2
++
++ /* dma master write */
++ u32 out_dma_start;
++ u32 out_dma_end;
++ u32 out_dma_current;
++
++ /* dma master read */
++ u32 in_dma_start;
++ u32 in_dma_end;
++ u32 in_dma_current;
++
++ u32 power_detect;
++#define PW_DETECT 0x04
++#define PW_RESETB 0x02
++#define PW_PULLUP 0x01
++
++ u8 _reserved0 [0x1d8];
++
++ /* endpoint registers */
++ u32 ep_fifo [4]; /* 0x200 */
++ u8 _reserved1 [0x10];
++ u32 ep_mode [4]; /* only 1-3 valid */
++ u8 _reserved2 [0x10];
++
++ u32 ep_status [4];
++#define EPxSTATUS_TOGGLE 0x40
++#define EPxSTATUS_SUSPEND 0x20
++#define EPxSTATUS_EP_MASK (0x07<<2)
++# define EPxSTATUS_EP_READY (0<<2)
++# define EPxSTATUS_EP_DATAIN (1<<2)
++# define EPxSTATUS_EP_FULL (2<<2)
++# define EPxSTATUS_EP_TX_ERR (3<<2)
++# define EPxSTATUS_EP_RX_ERR (4<<2)
++# define EPxSTATUS_EP_BUSY (5<<2)
++# define EPxSTATUS_EP_STALL (6<<2)
++# define EPxSTATUS_EP_INVALID (7<<2)
++#define EPxSTATUS_FIFO_DISABLE 0x02
++#define EPxSTATUS_STAGE_ERROR 0x01
++
++ u8 _reserved3 [0x10];
++ u32 EPxSizeLA[4];
++#define PACKET_ACTIVE (1<<7)
++#define DATASIZE 0x7f
++ u8 _reserved3a [0x10];
++ u32 EPxSizeLB[4]; /* only 1,2 valid */
++ u8 _reserved3b [0x10];
++ u32 EPxSizeHA[4]; /* only 1-3 valid */
++ u8 _reserved3c [0x10];
++ u32 EPxSizeHB[4]; /* only 1,2 valid */
++ u8 _reserved4[0x30];
++
++ /* SETUP packet contents */
++ u32 bRequestType; /* 0x300 */
++ u32 bRequest;
++ u32 wValueL;
++ u32 wValueH;
++ u32 wIndexL;
++ u32 wIndexH;
++ u32 wLengthL;
++ u32 wLengthH;
++
++ /* command interaction/handshaking */
++ u32 SetupRecv; /* 0x320 */
++ u32 CurrConfig;
++ u32 StdRequest;
++ u32 Request;
++ u32 DataSet;
++#define DATASET_A(epnum) (1<<(2*(epnum)))
++#define DATASET_B(epnum) (2<<(2*(epnum)))
++#define DATASET_AB(epnum) (3<<(2*(epnum)))
++ u8 _reserved5[4];
++
++ u32 UsbState;
++#define USBSTATE_CONFIGURED 0x04
++#define USBSTATE_ADDRESSED 0x02
++#define USBSTATE_DEFAULT 0x01
++
++ u32 EOP;
++
++ u32 Command; /* 0x340 */
++#define COMMAND_SETDATA0 2
++#define COMMAND_RESET 3
++#define COMMAND_STALL 4
++#define COMMAND_INVALID 5
++#define COMMAND_FIFO_DISABLE 7
++#define COMMAND_FIFO_ENABLE 8
++#define COMMAND_INIT_DESCRIPTOR 9
++#define COMMAND_FIFO_CLEAR 10 /* also stall */
++#define COMMAND_STALL_CLEAR 11
++#define COMMAND_EP(n) ((n) << 4)
++
++ u32 EPxSingle;
++ u8 _reserved6[4];
++ u32 EPxBCS;
++ u8 _reserved7[8];
++ u32 IntControl;
++#define ICONTROL_STATUSNAK 1
++ u8 _reserved8[4];
++
++ u32 reqmode; // 0x360 standard request mode, low 8 bits
++#define G_REQMODE_SET_INTF (1<<7)
++#define G_REQMODE_GET_INTF (1<<6)
++#define G_REQMODE_SET_CONF (1<<5)
++#define G_REQMODE_GET_CONF (1<<4)
++#define G_REQMODE_GET_DESC (1<<3)
++#define G_REQMODE_SET_FEAT (1<<2)
++#define G_REQMODE_CLEAR_FEAT (1<<1)
++#define G_REQMODE_GET_STATUS (1<<0)
++
++ u32 ReqMode;
++ u8 _reserved9[0x18];
++ u32 PortStatus; /* 0x380 */
++ u8 _reserved10[8];
++ u32 address;
++ u32 buff_test;
++ u8 _reserved11[4];
++ u32 UsbReady;
++ u8 _reserved12[4];
++ u32 SetDescStall; /* 0x3a0 */
++ u8 _reserved13[0x45c];
++
++ /* hardware could handle limited GET_DESCRIPTOR duties */
++#define DESC_LEN 0x80
++ u32 descriptors[DESC_LEN]; /* 0x800 */
++ u8 _reserved14[0x600];
++
++} __attribute__ ((packed));
++
++#define MAX_FIFO_SIZE 64
++#define MAX_EP0_SIZE 8 /* ep0 fifo is bigger, though */
++
++
++/*-------------------------------------------------------------------------*/
++
++/* DRIVER DATA STRUCTURES and UTILITIES */
++
++struct goku_ep {
++ struct usb_ep ep;
++ struct goku_udc *dev;
++ unsigned long irqs;
++
++ unsigned num:8,
++ dma:1,
++ is_in:1,
++ stopped:1;
++
++ /* analogous to a host-side qh */
++ struct list_head queue;
++ const struct usb_endpoint_descriptor *desc;
++
++ u32 *reg_fifo;
++ u32 *reg_mode;
++ u32 *reg_status;
++};
++
++struct goku_request {
++ struct usb_request req;
++ struct list_head queue;
++
++ unsigned mapped:1;
++};
++
++enum ep0state {
++ EP0_DISCONNECT, /* no host */
++ EP0_IDLE, /* between STATUS ack and SETUP report */
++ EP0_IN, EP0_OUT, /* data stage */
++ EP0_STATUS, /* status stage */
++ EP0_STALL, /* data or status stages */
++ EP0_SUSPEND, /* usb suspend */
++};
++
++struct goku_udc {
++ /* each pci device provides one gadget, several endpoints */
++ struct usb_gadget gadget;
++ spinlock_t lock;
++ struct goku_ep ep[4];
++ struct usb_gadget_driver *driver;
++
++ enum ep0state ep0state;
++ unsigned got_irq:1,
++ got_region:1,
++ req_config:1,
++ configured:1,
++ enabled:1;
++
++ /* pci state used to access those endpoints */
++ struct pci_dev *pdev;
++ struct goku_udc_regs *regs;
++ u32 int_enable;
++
++ /* statistics... */
++ unsigned long irqs;
++};
++
++/*-------------------------------------------------------------------------*/
++
++#define xprintk(dev,level,fmt,args...) \
++ printk(level "%s %s: " fmt , driver_name , \
++ pci_name(dev->pdev) , ## args)
++
++#ifdef DEBUG
++#define DBG(dev,fmt,args...) \
++ xprintk(dev , KERN_DEBUG , fmt , ## args)
++#else
++#define DBG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* DEBUG */
++
++#ifdef VERBOSE
++#define VDBG DBG
++#else
++#define VDBG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* VERBOSE */
++
++#define ERROR(dev,fmt,args...) \
++ xprintk(dev , KERN_ERR , fmt , ## args)
++#define WARN(dev,fmt,args...) \
++ xprintk(dev , KERN_WARNING , fmt , ## args)
++#define INFO(dev,fmt,args...) \
++ xprintk(dev , KERN_INFO , fmt , ## args)
++
++/*-------------------------------------------------------------------------*/
++
++/* 2.5 stuff that's sometimes missing in 2.4 */
++
++#ifndef container_of
++#define container_of list_entry
++#endif
++
++#ifndef likely
++#define likely(x) (x)
++#define unlikely(x) (x)
++#endif
++
++#ifndef BUG_ON
++#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
++#endif
++
++#ifndef WARN_ON
++#define WARN_ON(x) do { } while (0)
++#endif
++
++#ifndef IRQ_NONE
++typedef void irqreturn_t;
++#define IRQ_NONE
++#define IRQ_HANDLED
++#define IRQ_RETVAL(x)
++#endif
++
++#ifndef pci_name
++#define pci_name(pdev) ((pdev)->slot_name)
++#endif
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/gserial.c kernel/drivers/usb/gadget/gserial.c
+--- /tmp/kernel/drivers/usb/gadget/gserial.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/gserial.c 2005-04-22 17:53:19.450536911 +0200
+@@ -0,0 +1,2301 @@
++/*
++ * g_serial.c -- USB gadget serial driver
++ *
++ * $Id: gserial.c,v 1.17 2003/10/01 06:31:57 borchers Exp $
++ *
++ * Copyright 2003 (c) Al Borchers (alborchers@steinerpoint.com)
++ *
++ * This code is based in part on the Gadget Zero driver, which
++ * is Copyright (C) 2003 by David Brownell, all rights reserved.
++ *
++ * This code also borrows from usbserial.c, which is
++ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
++ * Copyright (c) 2000 Peter Berger (pberger@brimson.com)
++ * Copyright (c) 2000 Al Borchers (alborchers@steinerpoint.com)
++ *
++ * This software is distributed under the terms of the GNU General
++ * Public License ("GPL") as published by the Free Software Foundation,
++ * either version 2 of that License or (at your option) any later version.
++ *
++ */
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++
++#ifndef MODULE
++#define MODULE
++#endif
++
++
++/* Includes */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/smp_lock.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/uts.h>
++#include <linux/version.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++#include <linux/proc_fs.h>
++#include <linux/tty_flip.h>
++
++#include <asm/byteorder.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++#include <asm/unaligned.h>
++#include <asm/uaccess.h>
++
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++#include "gadget_chips.h"
++
++
++/* Wait Cond */
++
++#define __wait_cond_interruptible(wq, condition, lock, flags, ret) \
++do { \
++ wait_queue_t __wait; \
++ init_waitqueue_entry(&__wait, current); \
++ \
++ add_wait_queue(&wq, &__wait); \
++ for (;;) { \
++ set_current_state(TASK_INTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ if (!signal_pending(current)) { \
++ spin_unlock_irqrestore(lock, flags); \
++ schedule(); \
++ spin_lock_irqsave(lock, flags); \
++ continue; \
++ } \
++ ret = -ERESTARTSYS; \
++ break; \
++ } \
++ current->state = TASK_RUNNING; \
++ remove_wait_queue(&wq, &__wait); \
++} while (0)
++
++#define wait_cond_interruptible(wq, condition, lock, flags) \
++({ \
++ int __ret = 0; \
++ if (!(condition)) \
++ __wait_cond_interruptible(wq, condition, lock, flags, \
++ __ret); \
++ __ret; \
++})
++
++#define __wait_cond_interruptible_timeout(wq, condition, lock, flags, \
++ timeout, ret) \
++do { \
++ signed long __timeout = timeout; \
++ wait_queue_t __wait; \
++ init_waitqueue_entry(&__wait, current); \
++ \
++ add_wait_queue(&wq, &__wait); \
++ for (;;) { \
++ set_current_state(TASK_INTERRUPTIBLE); \
++ if (__timeout == 0) \
++ break; \
++ if (condition) \
++ break; \
++ if (!signal_pending(current)) { \
++ spin_unlock_irqrestore(lock, flags); \
++ __timeout = schedule_timeout(__timeout); \
++ spin_lock_irqsave(lock, flags); \
++ continue; \
++ } \
++ ret = -ERESTARTSYS; \
++ break; \
++ } \
++ current->state = TASK_RUNNING; \
++ remove_wait_queue(&wq, &__wait); \
++} while (0)
++
++#define wait_cond_interruptible_timeout(wq, condition, lock, flags, \
++ timeout) \
++({ \
++ int __ret = 0; \
++ if (!(condition)) \
++ __wait_cond_interruptible_timeout(wq, condition, lock, \
++ flags, timeout, __ret); \
++ __ret; \
++})
++
++
++/* Defines */
++
++#define GS_VERSION_STR "v1.0"
++#define GS_VERSION_NUM 0x0100
++
++#define GS_LONG_NAME "Gadget Serial"
++#define GS_SHORT_NAME "g_serial"
++
++#define GS_MAJOR 127
++#define GS_MINOR_START 0
++
++#define GS_NUM_PORTS 16
++
++#define GS_NUM_CONFIGS 1
++#define GS_NO_CONFIG_ID 0
++#define GS_BULK_CONFIG_ID 2
++
++#define GS_NUM_INTERFACES 1
++#define GS_INTERFACE_ID 0
++#define GS_ALT_INTERFACE_ID 0
++
++#define GS_NUM_ENDPOINTS 2
++
++#define GS_MAX_DESC_LEN 256
++
++#define GS_DEFAULT_READ_Q_SIZE 32
++#define GS_DEFAULT_WRITE_Q_SIZE 32
++
++#define GS_DEFAULT_WRITE_BUF_SIZE 8192
++#define GS_TMP_BUF_SIZE 8192
++
++#define GS_CLOSE_TIMEOUT 15
++
++/* debug macro */
++#if G_SERIAL_DEBUG
++
++static int debug = G_SERIAL_DEBUG;
++
++#define gs_debug(format, arg...) \
++ do { if(debug) printk( KERN_DEBUG format, ## arg ); } while(0)
++#define gs_debug_level(level, format, arg...) \
++ do { if(debug>=level) printk( KERN_DEBUG format, ## arg ); } while(0)
++
++#else
++
++#define gs_debug(format, arg...) \
++ do { } while(0)
++#define gs_debug_level(level, format, arg...) \
++ do { } while(0)
++
++#endif /* G_SERIAL_DEBUG */
++
++
++/* Thanks to NetChip Technologies for donating this product ID.
++ *
++ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
++ * Instead: allocate your own, using normal USB-IF procedures.
++ */
++#define GS_VENDOR_ID 0x0525 /* NetChip */
++#define GS_PRODUCT_ID 0xa4a6 /* Linux-USB Serial Gadget */
++
++
++/* Structures */
++
++struct gs_dev;
++
++/* circular buffer */
++struct gs_buf {
++ unsigned int buf_size;
++ char *buf_buf;
++ char *buf_get;
++ char *buf_put;
++};
++
++/* list of requests */
++struct gs_req_entry {
++ struct list_head re_entry;
++ struct usb_request *re_req;
++};
++
++/* the port structure holds info for each port, one for each minor number */
++struct gs_port {
++ struct gs_dev *port_dev; /* pointer to device struct */
++ struct tty_struct *port_tty; /* pointer to tty struct */
++ spinlock_t port_lock;
++ int port_num;
++ int port_open_count;
++ int port_in_use; /* open/close in progress */
++ wait_queue_head_t port_write_wait;/* waiting to write */
++ struct gs_buf *port_write_buf;
++};
++
++/* the device structure holds info for the USB device */
++struct gs_dev {
++ struct usb_gadget *dev_gadget; /* gadget device pointer */
++ spinlock_t dev_lock; /* lock for set/reset config */
++ int dev_config; /* configuration number */
++ struct usb_ep *dev_in_ep; /* address of in endpoint */
++ struct usb_ep *dev_out_ep; /* address of out endpoint */
++ struct usb_request *dev_ctrl_req; /* control request */
++ struct list_head dev_req_list; /* list of write requests */
++ int dev_sched_port; /* round robin port scheduled */
++ struct gs_port *dev_port[GS_NUM_PORTS]; /* the ports */
++};
++
++
++/* Functions */
++
++/* module */
++static int __init gs_module_init( void );
++static void __exit gs_module_exit( void );
++
++/* tty driver */
++static int gs_open( struct tty_struct *tty, struct file *file );
++static void gs_close( struct tty_struct *tty, struct file *file );
++static int gs_write( struct tty_struct *tty, int from_user,
++ const unsigned char *buf, int count );
++static void gs_put_char( struct tty_struct *tty, unsigned char ch );
++static void gs_flush_chars( struct tty_struct *tty );
++static int gs_write_room( struct tty_struct *tty );
++static int gs_chars_in_buffer( struct tty_struct *tty );
++static void gs_throttle( struct tty_struct * tty );
++static void gs_unthrottle( struct tty_struct * tty );
++static void gs_break( struct tty_struct *tty, int break_state );
++static int gs_ioctl( struct tty_struct *tty, struct file *file,
++ unsigned int cmd, unsigned long arg );
++static void gs_set_termios( struct tty_struct *tty, struct termios *old );
++static int gs_read_proc( char *page, char **start, off_t off, int count,
++ int *eof, void *data );
++
++static int gs_send( struct gs_dev *dev );
++static int gs_send_packet( struct gs_dev *dev, char *packet,
++ unsigned int size );
++static int gs_recv_packet( struct gs_dev *dev, char *packet,
++ unsigned int size );
++static void gs_read_complete( struct usb_ep *ep, struct usb_request *req );
++static void gs_write_complete( struct usb_ep *ep, struct usb_request *req );
++
++/* gadget driver */
++static int gs_bind( struct usb_gadget *gadget );
++static void gs_unbind( struct usb_gadget *gadget );
++static int gs_setup( struct usb_gadget *gadget,
++ const struct usb_ctrlrequest *ctrl );
++static void gs_setup_complete( struct usb_ep *ep, struct usb_request *req );
++static void gs_disconnect( struct usb_gadget *gadget );
++static int gs_set_config( struct gs_dev *dev, unsigned config );
++static void gs_reset_config( struct gs_dev *dev );
++static int gs_build_config_desc( u8 *buf, enum usb_device_speed speed,
++ u8 type, unsigned int index );
++
++static struct usb_request *gs_alloc_req( struct usb_ep *ep, unsigned int len,
++ int kmalloc_flags );
++static void gs_free_req( struct usb_ep *ep, struct usb_request *req );
++
++static struct gs_req_entry *gs_alloc_req_entry( struct usb_ep *ep, unsigned len,
++ int kmalloc_flags );
++static void gs_free_req_entry( struct usb_ep *ep, struct gs_req_entry *req );
++
++static int gs_alloc_ports( struct gs_dev *dev, int kmalloc_flags );
++static void gs_free_ports( struct gs_dev *dev );
++
++/* circular buffer */
++static struct gs_buf *gs_buf_alloc( unsigned int size, int kmalloc_flags );
++static void gs_buf_free( struct gs_buf *gb );
++static void gs_buf_clear( struct gs_buf *gb );
++static unsigned int gs_buf_data_avail( struct gs_buf *gb );
++static unsigned int gs_buf_space_avail( struct gs_buf *gb );
++static unsigned int gs_buf_put( struct gs_buf *gb, const char *buf,
++ unsigned int count );
++static unsigned int gs_buf_get( struct gs_buf *gb, char *buf,
++ unsigned int count );
++
++/* external functions */
++extern int net2280_set_fifo_mode(struct usb_gadget *gadget, int mode);
++
++
++/* Globals */
++
++static struct gs_dev *gs_device;
++
++static const char *EP_IN_NAME;
++static const char *EP_OUT_NAME;
++
++static struct semaphore gs_open_close_sem[GS_NUM_PORTS];
++
++static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
++static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
++
++static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
++
++static unsigned char gs_tmp_buf[GS_TMP_BUF_SIZE];
++static struct semaphore gs_tmp_buf_sem;
++
++/* tty variables */
++static int gs_refcount;
++static struct tty_struct *gs_tty[GS_NUM_PORTS];
++static struct termios *gs_termios[GS_NUM_PORTS];
++static struct termios *gs_termios_locked[GS_NUM_PORTS];
++
++/* tty driver struct */
++static struct tty_driver gs_tty_driver = {
++ .magic = TTY_DRIVER_MAGIC,
++ .driver_name = GS_SHORT_NAME,
++ .name = "ttygs",
++ .major = GS_MAJOR,
++ .minor_start = GS_MINOR_START,
++ .num = GS_NUM_PORTS,
++ .type = TTY_DRIVER_TYPE_SERIAL,
++ .subtype = SERIAL_TYPE_NORMAL,
++ .flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS,
++ .refcount = &gs_refcount,
++ .table = gs_tty,
++ .termios = gs_termios,
++ .termios_locked = gs_termios_locked,
++
++ .open = gs_open,
++ .close = gs_close,
++ .write = gs_write,
++ .put_char = gs_put_char,
++ .flush_chars = gs_flush_chars,
++ .write_room = gs_write_room,
++ .ioctl = gs_ioctl,
++ .set_termios = gs_set_termios,
++ .throttle = gs_throttle,
++ .unthrottle = gs_unthrottle,
++ .break_ctl = gs_break,
++ .chars_in_buffer = gs_chars_in_buffer,
++ .read_proc = gs_read_proc,
++};
++
++/* gadget driver struct */
++static struct usb_gadget_driver gs_gadget_driver = {
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ .speed = USB_SPEED_HIGH,
++#else
++ .speed = USB_SPEED_FULL,
++#endif
++ .function = GS_LONG_NAME,
++ .bind = gs_bind,
++ .unbind = gs_unbind,
++ .setup = gs_setup,
++ .disconnect = gs_disconnect,
++ .driver = {
++ .name = GS_SHORT_NAME,
++ /* .shutdown = ... */
++ /* .suspend = ... */
++ /* .resume = ... */
++ },
++};
++
++
++/* USB descriptors */
++
++#define GS_MANUFACTURER_STR_ID 1
++#define GS_PRODUCT_STR_ID 2
++#define GS_SERIAL_STR_ID 3
++#define GS_CONFIG_STR_ID 4
++
++/* static strings, in iso 8859/1 */
++static char manufacturer[40];
++static struct usb_string gs_strings[] = {
++ { GS_MANUFACTURER_STR_ID, manufacturer },
++ { GS_PRODUCT_STR_ID, GS_LONG_NAME },
++ { GS_SERIAL_STR_ID, "0" },
++ { GS_CONFIG_STR_ID, "Bulk" },
++ { } /* end of list */
++};
++
++static struct usb_gadget_strings gs_string_table = {
++ .language = 0x0409, /* en-us */
++ .strings = gs_strings,
++};
++
++static struct usb_device_descriptor gs_device_desc = {
++ .bLength = USB_DT_DEVICE_SIZE,
++ .bDescriptorType = USB_DT_DEVICE,
++ .bcdUSB = __constant_cpu_to_le16(0x0200),
++ .bDeviceClass = USB_CLASS_VENDOR_SPEC,
++ .idVendor = __constant_cpu_to_le16(GS_VENDOR_ID),
++ .idProduct = __constant_cpu_to_le16(GS_PRODUCT_ID),
++ .iManufacturer = GS_MANUFACTURER_STR_ID,
++ .iProduct = GS_PRODUCT_STR_ID,
++ .iSerialNumber = GS_SERIAL_STR_ID,
++ .bNumConfigurations = GS_NUM_CONFIGS,
++};
++
++static const struct usb_config_descriptor gs_config_desc = {
++ .bLength = USB_DT_CONFIG_SIZE,
++ .bDescriptorType = USB_DT_CONFIG,
++ /* .wTotalLength set by gs_build_config_desc */
++ .bNumInterfaces = GS_NUM_INTERFACES,
++ .bConfigurationValue = GS_BULK_CONFIG_ID,
++ .iConfiguration = GS_CONFIG_STR_ID,
++ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
++ .bMaxPower = 1,
++};
++
++static const struct usb_interface_descriptor gs_interface_desc = {
++ .bLength = USB_DT_INTERFACE_SIZE,
++ .bDescriptorType = USB_DT_INTERFACE,
++ .bNumEndpoints = GS_NUM_ENDPOINTS,
++ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
++ .iInterface = GS_CONFIG_STR_ID,
++};
++
++static struct usb_endpoint_descriptor gs_fullspeed_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++ .bEndpointAddress = USB_DIR_IN,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++};
++
++static struct usb_endpoint_descriptor gs_fullspeed_out_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++ .bEndpointAddress = USB_DIR_OUT,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++};
++
++static struct usb_endpoint_descriptor gs_highspeed_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = __constant_cpu_to_le16(512),
++};
++
++static struct usb_endpoint_descriptor gs_highspeed_out_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = __constant_cpu_to_le16(512),
++};
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++static struct usb_qualifier_descriptor gs_qualifier_desc = {
++ .bLength = sizeof(struct usb_qualifier_descriptor),
++ .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
++ .bcdUSB = __constant_cpu_to_le16 (0x0200),
++ .bDeviceClass = USB_CLASS_VENDOR_SPEC,
++ /* assumes ep0 uses the same value for both speeds ... */
++ .bNumConfigurations = GS_NUM_CONFIGS,
++};
++#endif
++
++
++/* Module */
++
++MODULE_DESCRIPTION( GS_LONG_NAME );
++MODULE_AUTHOR( "Al Borchers" );
++MODULE_LICENSE( "GPL" );
++
++#if G_SERIAL_DEBUG
++MODULE_PARM( debug, "i" );
++MODULE_PARM_DESC( debug, "Enable debugging, 0=off, 1=on" );
++#endif
++
++MODULE_PARM( read_q_size, "i" );
++MODULE_PARM_DESC( read_q_size, "Read request queue size, default=32" );
++
++MODULE_PARM( write_q_size, "i" );
++MODULE_PARM_DESC( write_q_size, "Write request queue size, default=32" );
++
++MODULE_PARM( write_buf_size, "i" );
++MODULE_PARM_DESC( write_buf_size, "Write buffer size, default=8192" );
++
++module_init( gs_module_init );
++module_exit( gs_module_exit );
++
++/*
++* gs_module_init
++*
++* Register as a USB gadget driver and a tty driver.
++*/
++
++static int __init gs_module_init( void )
++{
++
++ int i,ret;
++
++
++ if( (ret=usb_gadget_register_driver( &gs_gadget_driver )) ) {
++ printk( KERN_ERR
++ "gs_module_init: cannot register gadget driver, ret=%d\n",
++ ret );
++ return( ret );
++ }
++
++ /* initial stty settings */
++ gs_tty_driver.init_termios = tty_std_termios;
++ gs_tty_driver.init_termios.c_cflag
++ = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
++
++ for( i=0; i<GS_NUM_PORTS; i++ )
++ sema_init( &gs_open_close_sem[i], 1 );
++
++ sema_init( &gs_tmp_buf_sem, 1 );
++
++ if( (ret=tty_register_driver( &gs_tty_driver )) ) {
++ usb_gadget_unregister_driver( &gs_gadget_driver );
++ printk( KERN_ERR
++ "gs_module_init: cannot register tty driver, ret=%d\n",
++ ret );
++ return( ret );
++ }
++
++ printk( KERN_INFO "gs_module_init: %s %s loaded\n", GS_LONG_NAME,
++ GS_VERSION_STR );
++
++ return( 0 );
++
++}
++
++
++/*
++* gs_module_exit
++*
++* Unregister as a tty driver and a USB gadget driver.
++*/
++
++static void __exit gs_module_exit( void )
++{
++
++ tty_unregister_driver( &gs_tty_driver );
++ usb_gadget_unregister_driver( &gs_gadget_driver );
++
++ printk( KERN_INFO "gs_module_exit: %s %s unloaded\n",
++ GS_LONG_NAME, GS_VERSION_STR );
++
++}
++
++
++/* TTY Driver */
++
++/*
++ * gs_open
++ */
++
++static int gs_open( struct tty_struct *tty, struct file *file )
++{
++
++ int port_num;
++ unsigned long flags;
++ struct gs_port *port;
++ struct gs_dev *dev;
++ struct gs_buf *buf;
++ struct semaphore *sem;
++
++
++ port_num = MINOR( tty->device ) - GS_MINOR_START;
++
++ gs_debug( "gs_open: (%d,%p,%p)\n", port_num, tty, file );
++
++ tty->driver_data = NULL;
++
++ if( port_num < 0 || port_num >= GS_NUM_PORTS ) {
++ printk( KERN_ERR "gs_open: (%d,%p,%p) invalid port number\n",
++ port_num, tty, file );
++ return( -ENODEV );
++ }
++
++ dev = gs_device;
++
++ if( dev == NULL ) {
++ printk( KERN_ERR "gs_open: (%d,%p,%p) NULL device pointer\n",
++ port_num, tty, file );
++ return( -ENODEV );
++ }
++
++ sem = &gs_open_close_sem[port_num];
++ if( down_interruptible( sem ) ) {
++ printk( KERN_ERR
++ "gs_open: (%d,%p,%p) interrupted waiting for semaphore\n",
++ port_num, tty, file );
++ return( -ERESTARTSYS );
++ }
++
++ spin_lock_irqsave(&dev->dev_lock, flags );
++
++ if( dev->dev_config == GS_NO_CONFIG_ID ) {
++ printk( KERN_ERR
++ "gs_open: (%d,%p,%p) device is not connected\n",
++ port_num, tty, file );
++ spin_unlock_irqrestore(&dev->dev_lock, flags );
++ up( sem );
++ return( -ENODEV );
++ }
++
++ port = dev->dev_port[port_num];
++
++ if( port == NULL ) {
++ printk( KERN_ERR "gs_open: (%d,%p,%p) NULL port pointer\n",
++ port_num, tty, file );
++ spin_unlock_irqrestore(&dev->dev_lock, flags );
++ up( sem );
++ return( -ENODEV );
++ }
++
++ spin_lock(&port->port_lock );
++ spin_unlock(&dev->dev_lock );
++
++ if( port->port_dev == NULL ) {
++ printk( KERN_ERR "gs_open: (%d,%p,%p) port disconnected (1)\n",
++ port_num, tty, file );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++ return( -EIO );
++ }
++
++ if( port->port_open_count > 0 ) {
++ ++port->port_open_count;
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ gs_debug( "gs_open: (%d,%p,%p) already open\n",
++ port_num, tty, file );
++ up( sem );
++ return( 0 );
++ }
++
++ /* mark port as in use, we can drop port lock and sleep if necessary */
++ port->port_in_use = 1;
++
++ /* allocate write buffer on first open */
++ if( port->port_write_buf == NULL ) {
++
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ buf = gs_buf_alloc( write_buf_size, GFP_KERNEL );
++ spin_lock_irqsave(&port->port_lock, flags );
++
++ /* might have been disconnected while asleep, check */
++ if( port->port_dev == NULL ) {
++ printk( KERN_ERR
++ "gs_open: (%d,%p,%p) port disconnected (2)\n",
++ port_num, tty, file );
++ port->port_in_use = 0;
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++ return( -EIO );
++ }
++
++ if( (port->port_write_buf=buf) == NULL ) {
++ printk( KERN_ERR "gs_open: (%d,%p,%p) cannot allocate port write buffer\n",
++ port_num, tty, file );
++ port->port_in_use = 0;
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++ return( -ENOMEM );
++ }
++
++ }
++
++ /* wait for carrier detect (not implemented) */
++
++ /* might have been disconnected while asleep, check */
++ if( port->port_dev == NULL ) {
++ printk( KERN_ERR "gs_open: (%d,%p,%p) port disconnected (3)\n",
++ port_num, tty, file );
++ port->port_in_use = 0;
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++ return( -EIO );
++ }
++
++ tty->driver_data = port;
++ port->port_tty = tty;
++ port->port_open_count = 1;
++ port->port_in_use = 0;
++
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++
++ gs_debug( "gs_open: (%d,%p,%p) completed\n", port_num, tty, file );
++
++ return( 0 );
++
++}
++
++
++/*
++ * gs_close
++ */
++
++static void gs_close( struct tty_struct *tty, struct file *file )
++{
++
++ unsigned long flags;
++ struct gs_port *port = tty->driver_data;
++ struct semaphore *sem;
++
++
++ if( port == NULL ) {
++ printk( KERN_ERR "gs_close: NULL port pointer\n" );
++ return;
++ }
++
++ gs_debug( "gs_close: (%d,%p,%p)\n", port->port_num, tty, file );
++
++ sem = &gs_open_close_sem[port->port_num];
++ down( sem );
++
++ spin_lock_irqsave(&port->port_lock, flags );
++
++ if( port->port_open_count == 0 ) {
++ printk( KERN_ERR
++ "gs_close: (%d,%p,%p) port is already closed\n",
++ port->port_num, tty, file );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++ return;
++ }
++
++ if( port->port_open_count > 0 ) {
++ --port->port_open_count;
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++ return;
++ }
++
++ /* free disconnected port on final close */
++ if( port->port_dev == NULL ) {
++ kfree( port );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++ return;
++ }
++
++ /* mark port as closed but in use, we can drop port lock */
++ /* and sleep if necessary */
++ port->port_in_use = 1;
++ port->port_open_count = 0;
++
++ /* wait for write buffer to drain, or */
++ /* at most GS_CLOSE_TIMEOUT seconds */
++ if( gs_buf_data_avail( port->port_write_buf ) > 0 ) {
++ wait_cond_interruptible_timeout( port->port_write_wait,
++ port->port_dev == NULL
++ || gs_buf_data_avail(port->port_write_buf) == 0,
++ &port->port_lock, flags, GS_CLOSE_TIMEOUT * HZ );
++ }
++
++ /* free disconnected port on final close */
++ /* (might have happened during the above sleep) */
++ if( port->port_dev == NULL ) {
++ kfree( port );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++ return;
++ }
++
++ gs_buf_clear( port->port_write_buf );
++
++ tty->driver_data = NULL;
++ port->port_tty = NULL;
++ port->port_in_use = 0;
++
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ up( sem );
++
++ gs_debug( "gs_close: (%d,%p,%p) completed\n",
++ port->port_num, tty, file );
++
++}
++
++
++/*
++ * gs_write
++ */
++
++static int gs_write( struct tty_struct *tty, int from_user,
++ const unsigned char *buf, int count )
++{
++
++ unsigned long flags;
++ struct gs_port *port = tty->driver_data;
++
++
++ if( port == NULL ) {
++ printk( KERN_ERR "gs_write: NULL port pointer\n" );
++ return( -EIO );
++ }
++
++ gs_debug( "gs_write: (%d,%p) writing %d bytes\n", port->port_num, tty,
++ count );
++
++ if( count == 0 )
++ return( 0 );
++
++ /* copy from user into tmp buffer, get tmp_buf semaphore */
++ if( from_user ) {
++ if( count > GS_TMP_BUF_SIZE )
++ count = GS_TMP_BUF_SIZE;
++ down( &gs_tmp_buf_sem );
++ if( copy_from_user( gs_tmp_buf, buf, count ) != 0 ) {
++ up( &gs_tmp_buf_sem );
++ printk( KERN_ERR
++ "gs_write: (%d,%p) cannot copy from user space\n",
++ port->port_num, tty );
++ return( -EFAULT );
++ }
++ buf = gs_tmp_buf;
++ }
++
++ spin_lock_irqsave(&port->port_lock, flags );
++
++ if( port->port_dev == NULL ) {
++ printk( KERN_ERR "gs_write: (%d,%p) port is not connected\n",
++ port->port_num, tty );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ if( from_user )
++ up( &gs_tmp_buf_sem );
++ return( -EIO );
++ }
++
++ if( port->port_open_count == 0 ) {
++ printk( KERN_ERR "gs_write: (%d,%p) port is closed\n",
++ port->port_num, tty );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ if( from_user )
++ up( &gs_tmp_buf_sem );
++ return( -EBADF );
++ }
++
++ count = gs_buf_put( port->port_write_buf, buf, count );
++
++ spin_unlock_irqrestore(&port->port_lock, flags );
++
++ if( from_user )
++ up( &gs_tmp_buf_sem );
++
++ gs_send( gs_device );
++
++ gs_debug( "gs_write: (%d,%p) wrote %d bytes\n", port->port_num, tty,
++ count );
++
++ return( count );
++
++}
++
++
++/*
++ * gs_put_char
++ */
++
++static void gs_put_char( struct tty_struct *tty, unsigned char ch )
++{
++
++ unsigned long flags;
++ struct gs_port *port = tty->driver_data;
++
++
++ if( port == NULL ) {
++ printk( KERN_ERR "gs_put_char: NULL port pointer\n" );
++ return;
++ }
++
++ gs_debug( "gs_put_char: (%d,%p) char=0x%x, called from %p, %p, %p\n", port->port_num, tty, ch, __builtin_return_address(0), __builtin_return_address(1), __builtin_return_address(2) );
++
++ spin_lock_irqsave(&port->port_lock, flags );
++
++ if( port->port_dev == NULL ) {
++ printk( KERN_ERR "gs_put_char: (%d,%p) port is not connected\n",
++ port->port_num, tty );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ return;
++ }
++
++ if( port->port_open_count == 0 ) {
++ printk( KERN_ERR "gs_put_char: (%d,%p) port is closed\n",
++ port->port_num, tty );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ return;
++ }
++
++ gs_buf_put( port->port_write_buf, &ch, 1 );
++
++ spin_unlock_irqrestore(&port->port_lock, flags );
++
++}
++
++
++/*
++ * gs_flush_chars
++ */
++
++static void gs_flush_chars( struct tty_struct *tty )
++{
++
++ unsigned long flags;
++ struct gs_port *port = tty->driver_data;
++
++
++ if( port == NULL ) {
++ printk( KERN_ERR "gs_flush_chars: NULL port pointer\n" );
++ return;
++ }
++
++ gs_debug( "gs_flush_chars: (%d,%p)\n", port->port_num, tty );
++
++ spin_lock_irqsave(&port->port_lock, flags );
++
++ if( port->port_dev == NULL ) {
++ printk( KERN_ERR
++ "gs_flush_chars: (%d,%p) port is not connected\n",
++ port->port_num, tty );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ return;
++ }
++
++ if( port->port_open_count == 0 ) {
++ printk( KERN_ERR "gs_flush_chars: (%d,%p) port is closed\n",
++ port->port_num, tty );
++ spin_unlock_irqrestore(&port->port_lock, flags );
++ return;
++ }
++
++ spin_unlock_irqrestore(&port->port_lock, flags );
++
++ gs_send( gs_device );
++
++}
++
++
++/*
++ * gs_write_room
++ */
++
++static int gs_write_room( struct tty_struct *tty )
++{
++
++ int room = 0;
++ unsigned long flags;
++ struct gs_port *port = tty->driver_data;
++
++
++ if( port == NULL )
++ return( 0 );
++
++ spin_lock_irqsave(&port->port_lock, flags );
++
++ if( port->port_dev != NULL && port->port_open_count > 0
++ && port->port_write_buf != NULL )
++ room = gs_buf_space_avail( port->port_write_buf );
++
++ spin_unlock_irqrestore(&port->port_lock, flags );
++
++ gs_debug( "gs_write_room: (%d,%p) room=%d\n",
++ port->port_num, tty, room );
++
++ return( room );
++
++}
++
++
++/*
++ * gs_chars_in_buffer
++ */
++
++static int gs_chars_in_buffer( struct tty_struct *tty )
++{
++
++ int chars = 0;
++ unsigned long flags;
++ struct gs_port *port = tty->driver_data;
++
++
++ if( port == NULL )
++ return( 0 );
++
++ spin_lock_irqsave(&port->port_lock, flags );
++
++ if( port->port_dev != NULL && port->port_open_count > 0
++ && port->port_write_buf != NULL )
++ chars = gs_buf_data_avail( port->port_write_buf );
++
++ spin_unlock_irqrestore(&port->port_lock, flags );
++
++ gs_debug( "gs_chars_in_buffer: (%d,%p) chars=%d\n",
++ port->port_num, tty, chars );
++
++ return( chars );
++
++}
++
++
++/*
++ * gs_throttle
++ */
++
++static void gs_throttle( struct tty_struct *tty )
++{
++
++}
++
++
++/*
++ * gs_unthrottle
++ */
++
++static void gs_unthrottle( struct tty_struct *tty )
++{
++
++}
++
++
++/*
++ * gs_break
++ */
++
++static void gs_break( struct tty_struct *tty, int break_state )
++{
++
++}
++
++
++/*
++ * gs_ioctl
++ */
++
++static int gs_ioctl( struct tty_struct *tty, struct file *file,
++ unsigned int cmd, unsigned long arg )
++{
++
++ struct gs_port *port = tty->driver_data;
++
++
++ if( port == NULL ) {
++ printk( KERN_ERR "gs_ioctl: NULL port pointer\n" );
++ return( -EIO );
++ }
++
++ gs_debug( "gs_ioctl: (%d,%p,%p) cmd=0x%4.4x, arg=%lu\n",
++ port->port_num, tty, file, cmd, arg );
++
++ /* handle ioctls */
++
++ /* could not handle ioctl */
++ return( -ENOIOCTLCMD );
++
++}
++
++
++/*
++ * gs_set_termios
++ */
++
++static void gs_set_termios( struct tty_struct *tty, struct termios *old )
++{
++
++}
++
++
++/*
++ * gs_read_proc
++ */
++
++static int gs_read_proc( char *page, char **start, off_t off, int count,
++ int *eof, void *data )
++{
++
++ return( 0 );
++
++}
++
++
++/*
++* gs_send
++*
++* This function finds available write requests, calls
++* gs_send_packet to fill these packets with data, and
++* continues until either there are no more write requests
++* available or no more data to send. This function is
++* run whenever data arrives or write requests are available.
++*/
++
++static int gs_send( struct gs_dev *dev )
++{
++
++ int ret,len;
++ unsigned long flags;
++ struct usb_ep *ep;
++ struct usb_request *req;
++ struct gs_req_entry *req_entry;
++
++
++ if( dev == NULL ) {
++ printk( KERN_ERR "gs_send: NULL device pointer\n" );
++ return( -ENODEV );
++ }
++
++ spin_lock_irqsave(&dev->dev_lock, flags );
++
++ ep = dev->dev_in_ep;
++
++ while( !list_empty( &dev->dev_req_list ) ) {
++
++ req_entry = list_entry( dev->dev_req_list.next,
++ struct gs_req_entry, re_entry );
++
++ req = req_entry->re_req;
++
++ len = gs_send_packet( dev, req->buf, ep->maxpacket );
++
++ if( len > 0 ) {
++gs_debug_level( 3, "gs_send: len=%d, 0x%2.2x 0x%2.2x 0x%2.2x ...\n", len, *((unsigned char *)req->buf), *((unsigned char *)req->buf+1), *((unsigned char *)req->buf+2) );
++ list_del( &req_entry->re_entry );
++ req->length = len;
++ if( (ret=usb_ep_queue( ep, req, GFP_ATOMIC )) ) {
++ printk( KERN_ERR
++ "gs_send: cannot queue read request, ret=%d\n",
++ ret );
++ break;
++ }
++ } else {
++ break;
++ }
++
++ }
++
++ spin_unlock_irqrestore(&dev->dev_lock, flags );
++
++ return( 0 );
++
++}
++
++
++/*
++ * gs_send_packet
++ *
++ * If there is data to send, a packet is built in the given
++ * buffer and the size is returned. If there is no data to
++ * send, 0 is returned. If there is any error a negative
++ * error number is returned.
++ *
++ * Called during USB completion routine, on interrupt time.
++ *
++ * We assume that disconnect will not happen until all completion
++ * routines have completed, so we can assume that the dev_port
++ * array does not change during the lifetime of this function.
++ */
++
++static int gs_send_packet( struct gs_dev *dev, char *packet, unsigned int size )
++{
++
++ unsigned int len;
++ struct gs_port *port;
++
++
++ /* TEMPORARY -- only port 0 is supported right now */
++ port = dev->dev_port[0];
++
++ if( port == NULL ) {
++ printk( KERN_ERR
++ "gs_send_packet: port=%d, NULL port pointer\n",
++ 0 );
++ return( -EIO );
++ }
++
++ spin_lock(&port->port_lock );
++
++ len = gs_buf_data_avail( port->port_write_buf );
++ if( len < size )
++ size = len;
++
++ if( size == 0 ) {
++ spin_unlock(&port->port_lock );
++ return( 0 );
++ }
++
++ size = gs_buf_get( port->port_write_buf, packet, size );
++
++ wake_up_interruptible( &port->port_tty->write_wait );
++
++ spin_unlock(&port->port_lock );
++
++ return( size );
++
++}
++
++
++/*
++ * gs_recv_packet
++ *
++ * Called for each USB packet received. Reads the packet
++ * header and stuffs the data in the appropriate tty buffer.
++ * Returns 0 if successful, or a negative error number.
++ *
++ * Called during USB completion routine, on interrupt time.
++ *
++ * We assume that disconnect will not happen until all completion
++ * routines have completed, so we can assume that the dev_port
++ * array does not change during the lifetime of this function.
++ */
++
++static int gs_recv_packet( struct gs_dev *dev, char *packet, unsigned int size )
++{
++
++ unsigned int len;
++ struct gs_port *port;
++
++
++ /* TEMPORARY -- only port 0 is supported right now */
++ port = dev->dev_port[0];
++
++ if( port == NULL ) {
++ printk( KERN_ERR "gs_recv_packet: port=%d, NULL port pointer\n",
++ port->port_num );
++ return( -EIO );
++ }
++
++ spin_lock(&port->port_lock );
++
++ if( port->port_tty == NULL ) {
++ printk( KERN_ERR "gs_recv_packet: port=%d, NULL tty pointer\n",
++ port->port_num );
++ spin_unlock(&port->port_lock );
++ return( -EIO );
++ }
++
++ if( port->port_tty->magic != TTY_MAGIC ) {
++ printk( KERN_ERR "gs_recv_packet: port=%d, bad tty magic\n",
++ port->port_num );
++ spin_unlock(&port->port_lock );
++ return( -EIO );
++ }
++
++ len = (unsigned int)(TTY_FLIPBUF_SIZE - port->port_tty->flip.count);
++ if( len < size )
++ size = len;
++
++ if( size > 0 ) {
++ memcpy( port->port_tty->flip.char_buf_ptr, packet, size );
++ port->port_tty->flip.char_buf_ptr += size;
++ port->port_tty->flip.count += size;
++ tty_flip_buffer_push( port->port_tty );
++ wake_up_interruptible( &port->port_tty->read_wait );
++ }
++
++ spin_unlock(&port->port_lock );
++
++ return( 0 );
++
++}
++
++
++/*
++* gs_read_complete
++*/
++
++static void gs_read_complete( struct usb_ep *ep, struct usb_request *req )
++{
++
++ int ret;
++ struct gs_dev *dev = ep->driver_data;
++
++
++ if( dev == NULL ) {
++ printk( KERN_ERR "gs_read_complete: NULL device pointer\n" );
++ return;
++ }
++
++ switch( req->status ) {
++
++ case 0:
++ /* normal completion */
++ gs_recv_packet( dev, req->buf, req->actual );
++requeue:
++ req->length = ep->maxpacket;
++ if( (ret=usb_ep_queue( ep, req, GFP_ATOMIC )) ) {
++ printk( KERN_ERR
++ "gs_read_complete: cannot queue read request, ret=%d\n",
++ ret );
++ }
++ break;
++
++ case -ESHUTDOWN:
++ /* disconnect */
++ gs_debug( "gs_read_complete: shutdown\n" );
++ gs_free_req( ep, req );
++ break;
++
++ default:
++ /* unexpected */
++ printk( KERN_ERR
++ "gs_read_complete: unexpected status error, status=%d\n",
++ req->status );
++ goto requeue;
++ break;
++
++ }
++
++}
++
++
++/*
++* gs_write_complete
++*/
++
++static void gs_write_complete( struct usb_ep *ep, struct usb_request *req )
++{
++
++ struct gs_dev *dev = ep->driver_data;
++ struct gs_req_entry *gs_req = req->context;
++
++
++ if( dev == NULL ) {
++ printk( KERN_ERR "gs_write_complete: NULL device pointer\n" );
++ return;
++ }
++
++ switch( req->status ) {
++
++ case 0:
++ /* normal completion */
++requeue:
++ if( gs_req == NULL ) {
++ printk( KERN_ERR
++ "gs_write_complete: NULL request pointer\n" );
++ return;
++ }
++
++ spin_lock(&dev->dev_lock );
++ list_add( &gs_req->re_entry, &dev->dev_req_list );
++ spin_unlock(&dev->dev_lock );
++
++ gs_send( dev );
++
++ break;
++
++ case -ESHUTDOWN:
++ /* disconnect */
++ gs_debug( "gs_write_complete: shutdown\n" );
++ gs_free_req( ep, req );
++ break;
++
++ default:
++ printk( KERN_ERR
++ "gs_write_complete: unexpected status error, status=%d\n",
++ req->status );
++ goto requeue;
++ break;
++
++ }
++
++}
++
++
++/* Gadget Driver */
++
++/*
++ * gs_bind
++ *
++ * Called on module load. Allocates and initializes the device
++ * structure and a control request.
++ */
++static int gs_bind(struct usb_gadget *gadget)
++{
++ int ret;
++ struct usb_ep *ep;
++ struct gs_dev *dev;
++
++ usb_ep_autoconfig_reset(gadget);
++
++ ep = usb_ep_autoconfig(gadget, &gs_fullspeed_in_desc);
++ if (!ep)
++ goto autoconf_fail;
++ EP_IN_NAME = ep->name;
++ ep->driver_data = ep; /* claim the endpoint */
++
++ ep = usb_ep_autoconfig(gadget, &gs_fullspeed_out_desc);
++ if (!ep)
++ goto autoconf_fail;
++ EP_OUT_NAME = ep->name;
++ ep->driver_data = ep; /* claim the endpoint */
++
++ /* device specific bcdDevice value in device descriptor */
++ if (gadget_is_net2280(gadget)) {
++ gs_device_desc.bcdDevice =
++ __constant_cpu_to_le16(GS_VERSION_NUM|0x0001);
++ } else if (gadget_is_pxa(gadget)) {
++ gs_device_desc.bcdDevice =
++ __constant_cpu_to_le16(GS_VERSION_NUM|0x0002);
++ } else if (gadget_is_sh(gadget)) {
++ gs_device_desc.bcdDevice =
++ __constant_cpu_to_le16(GS_VERSION_NUM|0x0003);
++ } else if (gadget_is_sa1100(gadget)) {
++ gs_device_desc.bcdDevice =
++ __constant_cpu_to_le16(GS_VERSION_NUM|0x0004);
++ } else if (gadget_is_goku(gadget)) {
++ gs_device_desc.bcdDevice =
++ __constant_cpu_to_le16(GS_VERSION_NUM|0x0005);
++ } else if (gadget_is_mq11xx(gadget)) {
++ gs_device_desc.bcdDevice =
++ __constant_cpu_to_le16(GS_VERSION_NUM|0x0006);
++ } else if (gadget_is_omap(gadget)) {
++ gs_device_desc.bcdDevice =
++ __constant_cpu_to_le16(GS_VERSION_NUM|0x0007);
++ } else {
++ printk(KERN_WARNING "gs_bind: controller '%s' not recognized\n",
++ gadget->name);
++ /* unrecognized, but safe unless bulk is REALLY quirky */
++ gs_device_desc.bcdDevice =
++ __constant_cpu_to_le16(GS_VERSION_NUM|0x0099);
++ }
++
++ gs_device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ /* assume ep0 uses the same packet size for both speeds */
++ gs_qualifier_desc.bMaxPacketSize0 = gs_device_desc.bMaxPacketSize0;
++ /* assume endpoints are dual-speed */
++ gs_highspeed_in_desc.bEndpointAddress =
++ gs_fullspeed_in_desc.bEndpointAddress;
++ gs_highspeed_out_desc.bEndpointAddress =
++ gs_fullspeed_out_desc.bEndpointAddress;
++#endif /* CONFIG_USB_GADGET_DUALSPEED */
++
++ usb_gadget_set_selfpowered(gadget);
++
++ gs_device = dev = kmalloc(sizeof(struct gs_dev), GFP_KERNEL);
++ if (dev == NULL)
++ return -ENOMEM;
++
++ snprintf (manufacturer, sizeof(manufacturer),
++ UTS_SYSNAME " " UTS_RELEASE " with %s", gadget->name);
++
++ memset(dev, 0, sizeof(struct gs_dev));
++ dev->dev_gadget = gadget;
++ spin_lock_init(&dev->dev_lock);
++ INIT_LIST_HEAD(&dev->dev_req_list);
++ set_gadget_data(gadget, dev);
++
++ if ((ret = gs_alloc_ports(dev, GFP_KERNEL)) != 0) {
++ printk(KERN_ERR "gs_bind: cannot allocate ports\n");
++ gs_unbind(gadget);
++ return ret;
++ }
++
++ /* preallocate control response and buffer */
++ dev->dev_ctrl_req = gs_alloc_req(gadget->ep0, GS_MAX_DESC_LEN,
++ GFP_KERNEL);
++ if (dev->dev_ctrl_req == NULL) {
++ gs_unbind(gadget);
++ return -ENOMEM;
++ }
++ dev->dev_ctrl_req->complete = gs_setup_complete;
++
++ gadget->ep0->driver_data = dev;
++
++ printk(KERN_INFO "gs_bind: %s %s bound\n",
++ GS_LONG_NAME, GS_VERSION_STR);
++
++ return 0;
++
++autoconf_fail:
++ printk(KERN_ERR "gs_bind: cannot autoconfigure on %s\n", gadget->name);
++ return -ENODEV;
++}
++
++
++/*
++ * gs_unbind
++ *
++ * Called on module unload. Frees the control request and device
++ * structure.
++ */
++
++static void gs_unbind( struct usb_gadget *gadget )
++{
++
++ struct gs_dev *dev = get_gadget_data( gadget );
++
++
++ gs_device = NULL;
++
++ /* read/write requests already freed, only control request remains */
++ if( dev != NULL ) {
++ if( dev->dev_ctrl_req != NULL )
++ gs_free_req( gadget->ep0, dev->dev_ctrl_req );
++ gs_free_ports( dev );
++ kfree( dev );
++ set_gadget_data( gadget, NULL );
++ }
++
++ printk( KERN_INFO "gs_unbind: %s %s unbound\n", GS_LONG_NAME,
++ GS_VERSION_STR );
++
++}
++
++
++/*
++ * gs_setup
++ *
++ * Implements all the control endpoint functionality that's not
++ * handled in hardware or the hardware driver.
++ *
++ * Returns the size of the data sent to the host, or a negative
++ * error number.
++ */
++
++static int gs_setup( struct usb_gadget *gadget,
++ const struct usb_ctrlrequest *ctrl )
++{
++
++ int ret = -EOPNOTSUPP;
++ unsigned int sv_config;
++ struct gs_dev *dev = get_gadget_data( gadget );
++ struct usb_request *req = dev->dev_ctrl_req;
++
++
++ switch (ctrl->bRequest) {
++
++ case USB_REQ_GET_DESCRIPTOR:
++
++ if( ctrl->bRequestType != USB_DIR_IN )
++ break;
++
++ switch (ctrl->wValue >> 8) {
++
++ case USB_DT_DEVICE:
++ ret = min( ctrl->wLength,
++ (u16)sizeof(struct usb_device_descriptor) );
++ memcpy( req->buf, &gs_device_desc, ret );
++ break;
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ case USB_DT_DEVICE_QUALIFIER:
++ if (!gadget->is_dualspeed)
++ break;
++ ret = min( ctrl->wLength,
++ (u16)sizeof(struct usb_qualifier_descriptor) );
++ memcpy( req->buf, &gs_qualifier_desc, ret );
++ break;
++
++ case USB_DT_OTHER_SPEED_CONFIG:
++#endif /* CONFIG_USB_GADGET_DUALSPEED */
++ case USB_DT_CONFIG:
++ ret = gs_build_config_desc( req->buf, gadget->speed,
++ ctrl->wValue >> 8, ctrl->wValue & 0xff );
++ if( ret >= 0 )
++ ret = min( ctrl->wLength, (u16)ret );
++ break;
++
++ case USB_DT_STRING:
++ /* wIndex == language code. */
++ ret = usb_gadget_get_string( &gs_string_table,
++ ctrl->wValue & 0xff, req->buf );
++ if( ret >= 0 )
++ ret = min( ctrl->wLength, (u16)ret );
++ break;
++ }
++ break;
++
++ case USB_REQ_SET_CONFIGURATION:
++ if( ctrl->bRequestType != 0 )
++ break;
++ spin_lock( &dev->dev_lock );
++ ret = gs_set_config( dev, ctrl->wValue );
++ spin_unlock( &dev->dev_lock );
++ break;
++
++ case USB_REQ_GET_CONFIGURATION:
++ if( ctrl->bRequestType != USB_DIR_IN )
++ break;
++ *(u8 *)req->buf = dev->dev_config;
++ ret = min( ctrl->wLength, (u16)1 );
++ break;
++
++ case USB_REQ_SET_INTERFACE:
++ if( ctrl->bRequestType != USB_RECIP_INTERFACE )
++ break;
++ spin_lock( &dev->dev_lock );
++ if( dev->dev_config == GS_BULK_CONFIG_ID
++ && ctrl->wIndex == GS_INTERFACE_ID
++ && ctrl->wValue == GS_ALT_INTERFACE_ID ) {
++ sv_config = dev->dev_config;
++ /* since there is only one interface, setting the */
++ /* interface is equivalent to setting the config */
++ gs_reset_config( dev );
++ gs_set_config( dev, sv_config );
++ ret = 0;
++ }
++ spin_unlock( &dev->dev_lock );
++ break;
++
++ case USB_REQ_GET_INTERFACE:
++ if( ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE) )
++ break;
++ if( dev->dev_config == GS_NO_CONFIG_ID )
++ break;
++ if( ctrl->wIndex != GS_INTERFACE_ID ) {
++ ret = -EDOM;
++ break;
++ }
++ *(u8 *)req->buf = GS_ALT_INTERFACE_ID;
++ ret = min( ctrl->wLength, (u16)1 );
++ break;
++
++ default:
++ printk( KERN_ERR "gs_setup: unknown request, type=%02x, request=%02x, value=%04x, index=%04x, length=%d\n",
++ ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
++ ctrl->wIndex, ctrl->wLength );
++ break;
++
++ }
++
++ /* respond with data transfer before status phase? */
++ if( ret >= 0 ) {
++ req->length = ret;
++ ret = usb_ep_queue( gadget->ep0, req, GFP_ATOMIC );
++ if( ret < 0 ) {
++ printk( KERN_ERR
++ "gs_setup: cannot queue response, ret=%d\n",
++ ret );
++ req->status = 0;
++ gs_setup_complete( gadget->ep0, req );
++ }
++ }
++
++ /* device either stalls (ret < 0) or reports success */
++ return( ret );
++
++}
++
++
++/*
++ * gs_setup_complete
++ */
++
++static void gs_setup_complete( struct usb_ep *ep, struct usb_request *req )
++{
++ if( req->status || req->actual != req->length ) {
++ printk( KERN_ERR "gs_setup_complete: status error, status=%d, actual=%d, length=%d\n",
++ req->status, req->actual, req->length );
++ }
++}
++
++
++/*
++ * gs_disconnect
++ *
++ * Called when the device is disconnected. Frees the closed
++ * ports and disconnects open ports. Open ports will be freed
++ * on close. Then reallocates the ports for the next connection.
++ */
++
++static void gs_disconnect( struct usb_gadget *gadget )
++{
++
++ unsigned long flags;
++ struct gs_dev *dev = get_gadget_data( gadget );
++
++
++ spin_lock_irqsave( &dev->dev_lock, flags );
++
++ gs_reset_config( dev );
++
++ /* free closed ports and disconnect open ports */
++ /* (open ports will be freed when closed) */
++ gs_free_ports( dev );
++
++ /* re-allocate ports for the next connection */
++ if( gs_alloc_ports( dev, GFP_ATOMIC ) != 0 )
++ printk( KERN_ERR "gs_disconnect: cannot re-allocate ports\n" );
++
++ spin_unlock_irqrestore( &dev->dev_lock, flags );
++
++ printk( KERN_INFO "gs_disconnect: %s disconnected\n", GS_LONG_NAME );
++
++}
++
++
++/*
++ * gs_set_config
++ *
++ * Configures the device by enabling device specific
++ * optimizations, setting up the endpoints, allocating
++ * read and write requests and queuing read requests.
++ *
++ * The device lock must be held when calling this function.
++ */
++
++static int gs_set_config( struct gs_dev *dev, unsigned config )
++{
++
++ int i;
++ int ret = 0;
++ struct usb_gadget *gadget = dev->dev_gadget;
++ struct usb_ep *ep;
++ struct usb_request *req;
++ struct gs_req_entry *req_entry;
++
++
++ if( dev == NULL ) {
++ printk( KERN_ERR "gs_set_config: NULL device pointer\n" );
++ return( 0 );
++ }
++
++ if( config == dev->dev_config )
++ return( 0 );
++
++ gs_reset_config( dev );
++
++ if( config == GS_NO_CONFIG_ID )
++ return( 0 );
++
++ if( config != GS_BULK_CONFIG_ID )
++ return( -EINVAL );
++
++ /* device specific optimizations */
++ if (gadget_is_net2280(gadget))
++ net2280_set_fifo_mode(gadget, 1);
++
++ gadget_for_each_ep( ep, gadget ) {
++
++ if( strcmp( ep->name, EP_IN_NAME ) == 0 ) {
++ ret = usb_ep_enable( ep,
++ gadget->speed == USB_SPEED_HIGH ?
++ &gs_highspeed_in_desc : &gs_fullspeed_in_desc );
++ if( ret == 0 ) {
++ ep->driver_data = dev;
++ dev->dev_in_ep = ep;
++ } else {
++ printk( KERN_ERR "gs_set_config: cannot enable in endpoint %s, ret=%d\n",
++ ep->name, ret );
++ gs_reset_config( dev );
++ return( ret );
++ }
++ }
++
++ else if( strcmp( ep->name, EP_OUT_NAME ) == 0 ) {
++ ret = usb_ep_enable( ep,
++ gadget->speed == USB_SPEED_HIGH ?
++ &gs_highspeed_out_desc :
++ &gs_fullspeed_out_desc );
++ if( ret == 0 ) {
++ ep->driver_data = dev;
++ dev->dev_out_ep = ep;
++ } else {
++ printk( KERN_ERR "gs_set_config: cannot enable out endpoint %s, ret=%d\n",
++ ep->name, ret );
++ gs_reset_config( dev );
++ return( ret );
++ }
++ }
++
++ }
++
++ if( dev->dev_in_ep == NULL || dev->dev_out_ep == NULL ) {
++ gs_reset_config( dev );
++ printk( KERN_ERR "gs_set_config: cannot find endpoints\n" );
++ return( -ENODEV );
++ }
++
++ /* allocate and queue read requests */
++ ep = dev->dev_out_ep;
++ for( i=0; i<read_q_size && ret == 0; i++ ) {
++ if( (req=gs_alloc_req( ep, ep->maxpacket, GFP_ATOMIC )) ) {
++ req->complete = gs_read_complete;
++ if( (ret=usb_ep_queue( ep, req, GFP_ATOMIC )) ) {
++ printk( KERN_ERR "gs_set_config: cannot queue read request, ret=%d\n",
++ ret );
++ }
++ } else {
++ gs_reset_config( dev );
++ printk( KERN_ERR
++ "gs_set_config: cannot allocate read requests\n" );
++ return( -ENOMEM );
++ }
++ }
++
++ /* allocate write requests, and put on free list */
++ ep = dev->dev_in_ep;
++ for( i=0; i<write_q_size; i++ ) {
++ if( (req_entry=gs_alloc_req_entry( ep, ep->maxpacket,
++ GFP_ATOMIC )) ) {
++ req_entry->re_req->complete = gs_write_complete;
++ list_add( &req_entry->re_entry, &dev->dev_req_list );
++ } else {
++ gs_reset_config( dev );
++ printk( KERN_ERR
++ "gs_set_config: cannot allocate write requests\n" );
++ return( -ENOMEM );
++ }
++ }
++
++ dev->dev_config = config;
++
++ printk( KERN_INFO "gs_set_config: %s configured for %s speed\n",
++ GS_LONG_NAME,
++ gadget->speed == USB_SPEED_HIGH ? "high" : "full" );
++
++ return( 0 );
++
++}
++
++
++/*
++ * gs_reset_config
++ *
++ * Mark the device as not configured, disable all endpoints,
++ * which forces completion of pending I/O and frees queued
++ * requests, and free the remaining write requests on the
++ * free list.
++ *
++ * The device lock must be held when calling this function.
++ */
++
++static void gs_reset_config( struct gs_dev *dev )
++{
++
++ struct gs_req_entry *req_entry;
++
++
++ if( dev == NULL ) {
++ printk( KERN_ERR "gs_reset_config: NULL device pointer\n" );
++ return;
++ }
++
++ if( dev->dev_config == GS_NO_CONFIG_ID )
++ return;
++
++ dev->dev_config = GS_NO_CONFIG_ID;
++
++ /* free write requests on the free list */
++ while( !list_empty( &dev->dev_req_list ) ) {
++ req_entry = list_entry( dev->dev_req_list.next,
++ struct gs_req_entry, re_entry );
++ list_del( &req_entry->re_entry );
++ gs_free_req_entry( dev->dev_in_ep, req_entry );
++ }
++
++ /* disable endpoints, forcing completion of pending i/o; */
++ /* completion handlers free their requests in this case */
++ if( dev->dev_in_ep ) {
++ usb_ep_disable( dev->dev_in_ep );
++ dev->dev_in_ep = NULL;
++ }
++ if( dev->dev_out_ep ) {
++ usb_ep_disable( dev->dev_out_ep );
++ dev->dev_out_ep = NULL;
++ }
++
++}
++
++
++/*
++ * gs_build_config_desc
++ *
++ * Builds a config descriptor in the given buffer and returns the
++ * length, or a negative error number.
++ */
++
++static int gs_build_config_desc( u8 *buf, enum usb_device_speed speed,
++ u8 type, unsigned int index )
++{
++
++ int high_speed;
++ int len = USB_DT_CONFIG_SIZE + USB_DT_INTERFACE_SIZE
++ + GS_NUM_ENDPOINTS * USB_DT_ENDPOINT_SIZE;
++
++
++ /* only one config */
++ if( index != 0 )
++ return( -EINVAL );
++
++ memcpy( buf, &gs_config_desc, USB_DT_CONFIG_SIZE );
++ ((struct usb_config_descriptor *)buf)->bDescriptorType = type;
++ ((struct usb_config_descriptor *)buf)->wTotalLength =
++ __constant_cpu_to_le16( len );
++ buf += USB_DT_CONFIG_SIZE;
++
++ memcpy( buf, &gs_interface_desc, USB_DT_INTERFACE_SIZE );
++ buf += USB_DT_INTERFACE_SIZE;
++
++ /* other speed switches high and full speed */
++ high_speed = (speed == USB_SPEED_HIGH);
++ if( type == USB_DT_OTHER_SPEED_CONFIG )
++ high_speed = !high_speed;
++
++ memcpy( buf,
++ high_speed ? &gs_highspeed_in_desc : &gs_fullspeed_in_desc,
++ USB_DT_ENDPOINT_SIZE );
++ buf += USB_DT_ENDPOINT_SIZE;
++ memcpy( buf,
++ high_speed ? &gs_highspeed_out_desc : &gs_fullspeed_out_desc,
++ USB_DT_ENDPOINT_SIZE );
++
++ return( len );
++
++}
++
++
++/*
++ * gs_alloc_req
++ *
++ * Allocate a usb_request and its buffer. Returns a pointer to the
++ * usb_request or NULL if there is an error.
++ */
++
++static struct usb_request *gs_alloc_req( struct usb_ep *ep, unsigned int len,
++ int kmalloc_flags )
++{
++
++ struct usb_request *req;
++
++
++ if( ep == NULL )
++ return( NULL );
++
++ req = usb_ep_alloc_request( ep, kmalloc_flags );
++
++ if( req != NULL ) {
++ req->length = len;
++ req->buf = usb_ep_alloc_buffer( ep, len, &req->dma,
++ kmalloc_flags );
++ if( req->buf == NULL ) {
++ usb_ep_free_request( ep, req );
++ return( NULL );
++ }
++ }
++
++ return( req );
++
++}
++
++
++/*
++ * gs_free_req
++ *
++ * Free a usb_request and its buffer.
++ */
++
++static void gs_free_req( struct usb_ep *ep, struct usb_request *req )
++{
++ if( ep != NULL && req != NULL ) {
++ if( req->buf != NULL )
++ usb_ep_free_buffer( ep, req->buf, req->dma,
++ req->length );
++ usb_ep_free_request( ep, req );
++ }
++}
++
++
++/*
++ * gs_alloc_req_entry
++ *
++ * Allocates a request and its buffer, using the given
++ * endpoint, buffer len, and kmalloc flags.
++ */
++
++static struct gs_req_entry *gs_alloc_req_entry( struct usb_ep *ep,
++ unsigned len, int kmalloc_flags )
++{
++
++ struct gs_req_entry *req;
++
++
++ req = kmalloc( sizeof(struct gs_req_entry), kmalloc_flags );
++ if( req == NULL )
++ return( NULL );
++
++ req->re_req = gs_alloc_req( ep, len, kmalloc_flags );
++ if( req->re_req == NULL ) {
++ kfree( req );
++ return( NULL );
++ }
++
++ req->re_req->context = req;
++
++ return( req );
++
++}
++
++
++/*
++ * gs_free_req_entry
++ *
++ * Frees a request and its buffer.
++ */
++
++static void gs_free_req_entry( struct usb_ep *ep, struct gs_req_entry *req )
++{
++ if( ep != NULL && req != NULL ) {
++ if( req->re_req != NULL )
++ gs_free_req( ep, req->re_req );
++ kfree( req );
++ }
++}
++
++
++/*
++ * gs_alloc_ports
++ *
++ * Allocate all ports and set the gs_dev struct to point to them.
++ * Return 0 if successful, or a negative error number.
++ *
++ * The device lock is normally held when calling this function.
++ */
++
++static int gs_alloc_ports( struct gs_dev *dev, int kmalloc_flags )
++{
++
++ int i;
++ struct gs_port *port;
++
++
++ if( dev == NULL )
++ return( -EIO );
++
++ for( i=0; i<GS_NUM_PORTS; i++ ) {
++
++ if( (port=(struct gs_port *)kmalloc( sizeof(struct gs_port),
++ kmalloc_flags )) == NULL )
++ return( -ENOMEM );
++
++ memset( port, 0, sizeof( struct gs_port ) );
++ port->port_dev = dev;
++ port->port_num = i;
++ spin_lock_init( &port->port_lock );
++ init_waitqueue_head( &port->port_write_wait );
++
++ dev->dev_port[i] = port;
++
++ }
++
++ return( 0 );
++
++}
++
++
++/*
++ * gs_free_ports
++ *
++ * Free all closed ports. Open ports are disconnected by
++ * freeing their write buffers, setting their device pointers
++ * and the pointers to them in the device to NULL. These
++ * ports will be freed when closed.
++ *
++ * The device lock is normally held when calling this function.
++ */
++
++static void gs_free_ports( struct gs_dev *dev )
++{
++
++ int i;
++ unsigned long flags;
++ struct gs_port *port;
++
++
++ if( dev == NULL )
++ return;
++
++ for( i=0; i<GS_NUM_PORTS; i++ ) {
++
++ if( (port=dev->dev_port[i]) != NULL ) {
++
++ dev->dev_port[i] = NULL;
++
++ spin_lock_irqsave(&port->port_lock, flags );
++
++ if( port->port_write_buf != NULL ) {
++ gs_buf_free( port->port_write_buf );
++ port->port_write_buf = NULL;
++ }
++
++ if( port->port_open_count > 0 || port->port_in_use ) {
++ port->port_dev = NULL;
++ wake_up_interruptible( &port->port_write_wait );
++ wake_up_interruptible( &port->port_tty->read_wait );
++ wake_up_interruptible( &port->port_tty->write_wait );
++ } else {
++ kfree( port );
++ }
++
++ spin_unlock_irqrestore(&port->port_lock, flags );
++
++ }
++
++ }
++
++}
++
++
++/* Circular Buffer */
++
++/*
++ * gs_buf_alloc
++ *
++ * Allocate a circular buffer and all associated memory.
++ */
++
++static struct gs_buf *gs_buf_alloc( unsigned int size, int kmalloc_flags )
++{
++
++ struct gs_buf *gb;
++
++
++ if( size == 0 )
++ return( NULL );
++
++ gb = (struct gs_buf *)kmalloc( sizeof(struct gs_buf), kmalloc_flags );
++ if( gb == NULL )
++ return( NULL );
++
++ gb->buf_buf = kmalloc( size, kmalloc_flags );
++ if( gb->buf_buf == NULL ) {
++ kfree( gb );
++ return( NULL );
++ }
++
++ gb->buf_size = size;
++ gb->buf_get = gb->buf_put = gb->buf_buf;
++
++ return( gb );
++
++}
++
++
++/*
++ * gs_buf_free
++ *
++ * Free the buffer and all associated memory.
++ */
++
++void gs_buf_free( struct gs_buf *gb )
++{
++ if( gb != NULL ) {
++ if( gb->buf_buf != NULL )
++ kfree( gb->buf_buf );
++ kfree( gb );
++ }
++}
++
++
++/*
++ * gs_buf_clear
++ *
++ * Clear out all data in the circular buffer.
++ */
++
++void gs_buf_clear( struct gs_buf *gb )
++{
++ if( gb != NULL )
++ gb->buf_get = gb->buf_put;
++ /* equivalent to a get of all data available */
++}
++
++
++/*
++ * gs_buf_data_avail
++ *
++ * Return the number of bytes of data available in the circular
++ * buffer.
++ */
++
++unsigned int gs_buf_data_avail( struct gs_buf *gb )
++{
++ if( gb != NULL )
++ return( (gb->buf_size + gb->buf_put - gb->buf_get)
++ % gb->buf_size );
++ else
++ return( 0 );
++}
++
++
++/*
++ * gs_buf_space_avail
++ *
++ * Return the number of bytes of space available in the circular
++ * buffer.
++ */
++
++unsigned int gs_buf_space_avail( struct gs_buf *gb )
++{
++ if( gb != NULL )
++ return( (gb->buf_size + gb->buf_get - gb->buf_put - 1)
++ % gb->buf_size );
++ else
++ return( 0 );
++}
++
++
++/*
++ * gs_buf_put
++ *
++ * Copy data data from a user buffer and put it into the circular buffer.
++ * Restrict to the amount of space available.
++ *
++ * Return the number of bytes copied.
++ */
++
++unsigned int gs_buf_put( struct gs_buf *gb, const char *buf,
++ unsigned int count )
++{
++
++ unsigned int len;
++
++
++ if( gb == NULL )
++ return( 0 );
++
++ len = gs_buf_space_avail( gb );
++ if( count > len )
++ count = len;
++
++ if( count == 0 )
++ return( 0 );
++
++ len = gb->buf_buf + gb->buf_size - gb->buf_put;
++ if( count > len ) {
++ memcpy( gb->buf_put, buf, len );
++ memcpy( gb->buf_buf, buf+len, count - len );
++ gb->buf_put = gb->buf_buf + count - len;
++ } else {
++ memcpy( gb->buf_put, buf, count );
++ if( count < len )
++ gb->buf_put += count;
++ else /* count == len */
++ gb->buf_put = gb->buf_buf;
++ }
++
++ return( count );
++
++}
++
++
++/*
++ * gs_buf_get
++ *
++ * Get data from the circular buffer and copy to the given buffer.
++ * Restrict to the amount of data available.
++ *
++ * Return the number of bytes copied.
++ */
++
++unsigned int gs_buf_get( struct gs_buf *gb, char *buf, unsigned int count )
++{
++
++ unsigned int len;
++
++
++ if( gb == NULL )
++ return( 0 );
++
++ len = gs_buf_data_avail( gb );
++ if( count > len )
++ count = len;
++
++ if( count == 0 )
++ return( 0 );
++
++ len = gb->buf_buf + gb->buf_size - gb->buf_get;
++ if( count > len ) {
++ memcpy( buf, gb->buf_get, len );
++ memcpy( buf+len, gb->buf_buf, count - len );
++ gb->buf_get = gb->buf_buf + count - len;
++ } else {
++ memcpy( buf, gb->buf_get, count );
++ if( count < len )
++ gb->buf_get += count;
++ else /* count == len */
++ gb->buf_get = gb->buf_buf;
++ }
++
++ return( count );
++
++}
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/inode.c kernel/drivers/usb/gadget/inode.c
+--- /tmp/kernel/drivers/usb/gadget/inode.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/inode.c 2005-04-22 17:53:19.456535934 +0200
+@@ -0,0 +1,1807 @@
++/*
++ * inode.c -- user mode filesystem api for usb gadget controllers
++ *
++ * Copyright (C) 2003 David Brownell
++ * Copyright (C) 2003 Agilent Technologies
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++
++#define DEBUG 1 /* data to help fault diagnosis */
++// #define VERBOSE /* extra debug messages (success too) */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/pagemap.h>
++#include <linux/uts.h>
++#include <linux/version.h>
++#include <linux/wait.h>
++#include <linux/compiler.h>
++#include <asm/uaccess.h>
++#include <linux/slab.h>
++
++#ifndef BUG_ON
++#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
++#endif
++
++#include <linux/usb_gadgetfs.h>
++#include <linux/usb_gadget.h>
++
++
++/*
++ * The gadgetfs API maps each endpoint to a file descriptor so that you
++ * can use standard synchronous read/write calls for I/O. There's some
++ * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
++ * drivers show how this works in practice.
++ *
++ * Key parts that must be USB-specific are protocols defining how the
++ * read/write operations relate to the hardware state machines. There
++ * are two types of files. One type is for the device, implementing ep0.
++ * The other type is for each IN or OUT endpoint. In both cases, the
++ * user mode driver must configure the hardware before using it.
++ *
++ * - First, dev_config() is called when /dev/gadget/$CHIP is configured
++ * (by writing configuration and device descriptors). Afterwards it
++ * may serve as a source of device events, used to handle all control
++ * requests other than basic enumeration.
++ *
++ * - Then either immediately, or after a SET_CONFIGURATION control request,
++ * ep_config() is called when each /dev/gadget/ep* file is configured
++ * (by writing endpoint descriptors). Afterwards these files are used
++ * to write() IN data or to read() OUT data. To halt the endpoint, a
++ * "wrong direction" request is issued (like reading an IN endpoint).
++ *
++ * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
++ * not possible on all hardware. For example, precise fault handling with
++ * respect to data left in endpoint fifos after aborted operations; or
++ * selective clearing of endpoint halts, to implement SET_INTERFACE.
++ */
++
++#define DRIVER_DESC "USB Gadget filesystem"
++#define DRIVER_VERSION "20 Aug 2003"
++
++static const char driver_desc [] = DRIVER_DESC;
++static const char shortname [] = "gadgetfs";
++
++MODULE_DESCRIPTION (DRIVER_DESC);
++MODULE_AUTHOR ("David Brownell");
++MODULE_LICENSE ("GPL");
++
++
++/*----------------------------------------------------------------------*/
++
++#define GADGETFS_MAGIC 0xaee71ee7
++#define DMA_ADDR_INVALID (~(dma_addr_t)0)
++
++/* /dev/gadget/$CHIP represents ep0 and the whole device */
++enum ep0_state {
++ /* DISBLED is the initial state.
++ */
++ STATE_DEV_DISABLED = 0,
++
++ /* Only one open() of /dev/gadget/$CHIP; only one file tracks
++ * ep0/device i/o modes and binding to the controller. Driver
++ * must always write descriptors to initialize the device, then
++ * the device becomes UNCONNECTED until enumeration.
++ */
++ STATE_OPENED,
++
++ /* From then on, ep0 fd is in either of two basic modes:
++ * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
++ * - SETUP: read/write will transfer control data and succeed;
++ * or if "wrong direction", performs protocol stall
++ */
++ STATE_UNCONNECTED,
++ STATE_CONNECTED,
++ STATE_SETUP,
++
++ /* UNBOUND means the driver closed ep0, so the device won't be
++ * accessible again (DEV_DISABLED) until all fds are closed.
++ */
++ STATE_DEV_UNBOUND,
++};
++
++/* enough for the whole queue: most events invalidate others */
++#define N_EVENT 5
++
++struct dev_data {
++ spinlock_t lock;
++ atomic_t count;
++ enum ep0_state state;
++ struct usb_gadgetfs_event event [N_EVENT];
++ unsigned ev_next;
++ struct fasync_struct *fasync;
++ u8 current_config;
++
++ /* drivers reading ep0 MUST handle control requests (SETUP)
++ * reported that way; else the host will time out.
++ */
++ unsigned usermode_setup : 1,
++ setup_in : 1,
++ setup_can_stall : 1,
++ setup_out_ready : 1,
++ setup_out_error : 1,
++ setup_abort : 1;
++
++ /* the rest is basically write-once */
++ struct usb_config_descriptor *config, *hs_config;
++ struct usb_device_descriptor *dev;
++ struct usb_request *req;
++ struct usb_gadget *gadget;
++ struct list_head epfiles;
++ void *buf;
++ wait_queue_head_t wait;
++ struct super_block *sb;
++ struct dentry *dentry;
++
++ /* except this scratch i/o buffer for ep0 */
++ u8 rbuf [256];
++};
++
++static inline void get_dev (struct dev_data *data)
++{
++ atomic_inc (&data->count);
++}
++
++static void put_dev (struct dev_data *data)
++{
++ if (likely (!atomic_dec_and_test (&data->count)))
++ return;
++ /* needs no more cleanup */
++ BUG_ON (waitqueue_active (&data->wait));
++ kfree (data);
++}
++
++static struct dev_data *dev_new (void)
++{
++ struct dev_data *dev;
++
++ dev = kmalloc (sizeof *dev, GFP_KERNEL);
++ if (!dev)
++ return 0;
++ memset (dev, 0, sizeof *dev);
++ dev->state = STATE_DEV_DISABLED;
++ atomic_set (&dev->count, 1);
++ spin_lock_init (&dev->lock);
++ INIT_LIST_HEAD (&dev->epfiles);
++ init_waitqueue_head (&dev->wait);
++ return dev;
++}
++
++/*----------------------------------------------------------------------*/
++
++/* other /dev/gadget/$ENDPOINT files represent endpoints */
++enum ep_state {
++ STATE_EP_DISABLED = 0,
++ STATE_EP_READY,
++ STATE_EP_DEFER_ENABLE,
++ STATE_EP_ENABLED,
++ STATE_EP_UNBOUND,
++};
++
++struct ep_data {
++ struct semaphore lock;
++ enum ep_state state;
++ atomic_t count;
++ struct dev_data *dev;
++ /* must hold dev->lock before accessing ep or req */
++ struct usb_ep *ep;
++ struct usb_request *req;
++ ssize_t status;
++ char name [16];
++ struct usb_endpoint_descriptor desc, hs_desc;
++ struct list_head epfiles;
++ wait_queue_head_t wait;
++ struct dentry *dentry;
++ struct inode *inode;
++};
++
++static inline void get_ep (struct ep_data *data)
++{
++ atomic_inc (&data->count);
++}
++
++static void put_ep (struct ep_data *data)
++{
++ if (likely (!atomic_dec_and_test (&data->count)))
++ return;
++ put_dev (data->dev);
++ /* needs no more cleanup */
++ BUG_ON (!list_empty (&data->epfiles));
++ BUG_ON (waitqueue_active (&data->wait));
++ BUG_ON (down_trylock (&data->lock) != 0);
++ kfree (data);
++}
++
++/*----------------------------------------------------------------------*/
++
++/* most "how to use the hardware" policy choices are in userspace:
++ * mapping endpoint roles the driver needs to the capabilities that
++ * the usb controller exposes.
++ */
++
++ // FIXME the 2.6 version just probes the controller
++ // driver to find out the chip name; we should too.
++
++#ifdef CONFIG_USB_GADGET_NET2280
++#define CHIP "net2280"
++#define HIGHSPEED
++#endif
++
++#ifdef CONFIG_USB_GADGET_PXA2XX
++#define CHIP "pxa2xx_udc"
++/* earlier hardware doesn't have UDCCFR, races set_{config,interface} */
++#warning works best with pxa255 or newer
++#endif
++
++#ifdef CONFIG_USB_GADGET_GOKU
++#define CHIP "goku_udc"
++#endif
++
++#ifdef CONFIG_USB_GADGET_SA1100
++#define CHIP "sa1100"
++#endif
++
++#ifdef CONFIG_USB_GADGET_SUPERH
++#define CHIP "superh_udc"
++#endif
++
++#ifdef CONFIG_USB_GADGET_N9604
++#define CHIP "n9604_udc"
++#endif
++
++
++/*----------------------------------------------------------------------*/
++
++/* NOTE: don't use dev_printk calls before binding to the gadget
++ * at the end of ep0 configuration, or after unbind.
++ */
++
++/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
++#define xprintk(d,level,fmt,args...) \
++ printk(level "%s: " fmt , shortname , ## args)
++
++#ifdef DEBUG
++#define DBG(dev,fmt,args...) \
++ xprintk(dev , KERN_DEBUG , fmt , ## args)
++#else
++#define DBG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* DEBUG */
++
++#ifdef VERBOSE
++#define VDEBUG DBG
++#else
++#define VDEBUG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* DEBUG */
++
++#define ERROR(dev,fmt,args...) \
++ xprintk(dev , KERN_ERR , fmt , ## args)
++#define WARN(dev,fmt,args...) \
++ xprintk(dev , KERN_WARNING , fmt , ## args)
++#define INFO(dev,fmt,args...) \
++ xprintk(dev , KERN_INFO , fmt , ## args)
++
++
++/*----------------------------------------------------------------------*/
++
++/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
++ *
++ * After opening, configure non-control endpoints. Then use normal
++ * stream read() and write() requests; and maybe ioctl() to get more
++ * precise FIFO status when recovering from cancelation.
++ */
++
++static void epio_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ struct ep_data *epdata = ep->driver_data;
++
++ if (!req->context)
++ return;
++ if (req->status)
++ epdata->status = req->status;
++ else
++ epdata->status = req->actual;
++ complete ((struct completion *)req->context);
++}
++
++/* tasklock endpoint, returning when it's connected.
++ * still need dev->lock to use epdata->ep.
++ */
++static int
++get_ready_ep (unsigned f_flags, struct ep_data *epdata)
++{
++ int val;
++
++ if (f_flags & O_NONBLOCK) {
++ if (down_trylock (&epdata->lock) != 0)
++ goto nonblock;
++ if (epdata->state != STATE_EP_ENABLED) {
++ up (&epdata->lock);
++nonblock:
++ val = -EAGAIN;
++ } else
++ val = 0;
++ return val;
++ }
++
++ if ((val = down_interruptible (&epdata->lock)) < 0)
++ return val;
++newstate:
++ switch (epdata->state) {
++ case STATE_EP_ENABLED:
++ break;
++ case STATE_EP_DEFER_ENABLE:
++ DBG (epdata->dev, "%s wait for host\n", epdata->name);
++ if ((val = wait_event_interruptible (epdata->wait,
++ epdata->state != STATE_EP_DEFER_ENABLE
++ || epdata->dev->state == STATE_DEV_UNBOUND
++ )) < 0)
++ goto fail;
++ goto newstate;
++ // case STATE_EP_DISABLED: /* "can't happen" */
++ // case STATE_EP_READY: /* "can't happen" */
++ default: /* error! */
++ pr_debug ("%s: ep %p not available, state %d\n",
++ shortname, epdata, epdata->state);
++ // FALLTHROUGH
++ case STATE_EP_UNBOUND: /* clean disconnect */
++ val = -ENODEV;
++fail:
++ up (&epdata->lock);
++ }
++ return val;
++}
++
++static ssize_t
++ep_io (struct ep_data *epdata, void *buf, unsigned len)
++{
++ DECLARE_COMPLETION (done);
++ int value;
++
++ spin_lock_irq (&epdata->dev->lock);
++ if (likely (epdata->ep != NULL)) {
++ struct usb_request *req = epdata->req;
++
++ req->context = &done;
++ req->complete = epio_complete;
++ req->buf = buf;
++ req->length = len;
++ value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
++ } else
++ value = -ENODEV;
++ spin_unlock_irq (&epdata->dev->lock);
++
++ if (likely (value == 0)) {
++ value = wait_event_interruptible (done.wait, done.done);
++ if (value != 0) {
++ spin_lock_irq (&epdata->dev->lock);
++ if (likely (epdata->ep != NULL)) {
++ DBG (epdata->dev, "%s i/o interrupted\n",
++ epdata->name);
++ usb_ep_dequeue (epdata->ep, epdata->req);
++ spin_unlock_irq (&epdata->dev->lock);
++
++ wait_event (done.wait, done.done);
++ if (epdata->status == -ECONNRESET)
++ epdata->status = -EINTR;
++ } else {
++ spin_unlock_irq (&epdata->dev->lock);
++
++ DBG (epdata->dev, "endpoint gone\n");
++ epdata->status = -ENODEV;
++ }
++ }
++ return epdata->status;
++ }
++ return value;
++}
++
++
++/* handle a synchronous OUT bulk/intr/iso transfer */
++static ssize_t
++ep_read (struct file *fd, char *buf, size_t len, loff_t *ptr)
++{
++ struct ep_data *data = fd->private_data;
++ void *kbuf;
++ ssize_t value;
++
++ if ((value = get_ready_ep (fd->f_flags, data)) < 0)
++ return value;
++
++ /* halt any endpoint by doing a "wrong direction" i/o call */
++ if (data->desc.bEndpointAddress & USB_DIR_IN) {
++ if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
++ == USB_ENDPOINT_XFER_ISOC)
++ return -EINVAL;
++ DBG (data->dev, "%s halt\n", data->name);
++ spin_lock_irq (&data->dev->lock);
++ if (likely (data->ep != NULL))
++ usb_ep_set_halt (data->ep);
++ spin_unlock_irq (&data->dev->lock);
++ up (&data->lock);
++ return -EBADMSG;
++ }
++
++ /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
++
++ value = -ENOMEM;
++ kbuf = kmalloc (len, SLAB_KERNEL);
++ if (unlikely (!kbuf))
++ goto free1;
++
++ value = ep_io (data, kbuf, len);
++ VDEBUG (data->dev, "%s read %d OUT, status %d\n",
++ data->name, len, value);
++ if (value >= 0 && copy_to_user (buf, kbuf, value))
++ value = -EFAULT;
++
++free1:
++ up (&data->lock);
++ kfree (kbuf);
++ return value;
++}
++
++/* handle a synchronous IN bulk/intr/iso transfer */
++static ssize_t
++ep_write (struct file *fd, const char *buf, size_t len, loff_t *ptr)
++{
++ struct ep_data *data = fd->private_data;
++ void *kbuf;
++ ssize_t value;
++
++ if ((value = get_ready_ep (fd->f_flags, data)) < 0)
++ return value;
++
++ /* halt any endpoint by doing a "wrong direction" i/o call */
++ if (!(data->desc.bEndpointAddress & USB_DIR_IN)) {
++ if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
++ == USB_ENDPOINT_XFER_ISOC)
++ return -EINVAL;
++ DBG (data->dev, "%s halt\n", data->name);
++ spin_lock_irq (&data->dev->lock);
++ if (likely (data->ep != NULL))
++ usb_ep_set_halt (data->ep);
++ spin_unlock_irq (&data->dev->lock);
++ up (&data->lock);
++ return -EBADMSG;
++ }
++
++ /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
++
++ value = -ENOMEM;
++ kbuf = kmalloc (len, SLAB_KERNEL);
++ if (!kbuf)
++ goto free1;
++ if (copy_from_user (kbuf, buf, len)) {
++ value = -EFAULT;
++ goto free1;
++ }
++
++ value = ep_io (data, kbuf, len);
++ VDEBUG (data->dev, "%s write %d IN, status %d\n",
++ data->name, len, value);
++free1:
++ up (&data->lock);
++ kfree (kbuf);
++ return value;
++}
++
++static int
++ep_release (struct inode *inode, struct file *fd)
++{
++ struct ep_data *data = fd->private_data;
++
++ /* clean up if this can be reopened */
++ if (data->state != STATE_EP_UNBOUND) {
++ data->state = STATE_EP_DISABLED;
++ data->desc.bDescriptorType = 0;
++ data->hs_desc.bDescriptorType = 0;
++ }
++ put_ep (data);
++ return 0;
++}
++
++static int ep_ioctl (struct inode *inode, struct file *fd,
++ unsigned code, unsigned long value)
++{
++ struct ep_data *data = fd->private_data;
++ int status;
++
++ if ((status = get_ready_ep (fd->f_flags, data)) < 0)
++ return status;
++
++ spin_lock_irq (&data->dev->lock);
++ if (likely (data->ep != NULL)) {
++ switch (code) {
++ case GADGETFS_FIFO_STATUS:
++ status = usb_ep_fifo_status (data->ep);
++ break;
++ case GADGETFS_FIFO_FLUSH:
++ usb_ep_fifo_flush (data->ep);
++ break;
++ case GADGETFS_CLEAR_HALT:
++ status = usb_ep_clear_halt (data->ep);
++ break;
++ default:
++ status = -ENOTTY;
++ }
++ } else
++ status = -ENODEV;
++ spin_unlock_irq (&data->dev->lock);
++ up (&data->lock);
++ return status;
++}
++
++/* used after endpoint configuration */
++static struct file_operations ep_io_operations = {
++ .owner = THIS_MODULE,
++ .read = ep_read,
++ .write = ep_write,
++ .ioctl = ep_ioctl,
++ .release = ep_release,
++
++ // .aio_read = ep_aio_read,
++ // .aio_write = ep_aio_write,
++};
++
++/* ENDPOINT INITIALIZATION
++ *
++ * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
++ * status = write (fd, descriptors, sizeof descriptors)
++ *
++ * That write establishes the endpoint configuration, configuring
++ * the controller to process bulk, interrupt, or isochronous transfers
++ * at the right maxpacket size, and so on.
++ *
++ * The descriptors are message type 1, identified by a host order u32
++ * at the beginning of what's written. Descriptor order is: full/low
++ * speed descriptor, then optional high speed descriptor.
++ */
++static ssize_t
++ep_config (struct file *fd, const char *buf, size_t len, loff_t *ptr)
++{
++ struct ep_data *data = fd->private_data;
++ struct usb_ep *ep;
++ u32 tag;
++ int value;
++
++ if ((value = down_interruptible (&data->lock)) < 0)
++ return value;
++
++ if (data->state != STATE_EP_READY) {
++ value = -EL2HLT;
++ goto fail;
++ }
++
++ value = len;
++ if (len < USB_DT_ENDPOINT_SIZE + 4)
++ goto fail0;
++
++ /* we might need to change message format someday */
++ if (copy_from_user (&tag, buf, 4)) {
++ goto fail1;
++ }
++ if (tag != 1) {
++ DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
++ goto fail0;
++ }
++ buf += 4;
++ len -= 4;
++
++ /* NOTE: audio endpoint extensions not accepted here;
++ * just don't include the extra bytes.
++ */
++
++ /* full/low speed descriptor, then high speed */
++ if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
++ goto fail1;
++ }
++ if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
++ || data->desc.bDescriptorType != USB_DT_ENDPOINT)
++ goto fail0;
++ if (len != USB_DT_ENDPOINT_SIZE) {
++ if (len != 2 * USB_DT_ENDPOINT_SIZE)
++ goto fail0;
++ if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
++ USB_DT_ENDPOINT_SIZE)) {
++ goto fail1;
++ }
++ if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
++ || data->hs_desc.bDescriptorType
++ != USB_DT_ENDPOINT) {
++ DBG(data->dev, "config %s, bad hs length or type\n",
++ data->name);
++ goto fail0;
++ }
++ }
++ value = len;
++
++ spin_lock_irq (&data->dev->lock);
++ if (data->dev->state == STATE_DEV_UNBOUND) {
++ value = -ENOENT;
++ goto gone;
++ } else if ((ep = data->ep) == NULL) {
++ value = -ENODEV;
++ goto gone;
++ }
++ switch (data->dev->gadget->speed) {
++ case USB_SPEED_LOW:
++ case USB_SPEED_FULL:
++ value = usb_ep_enable (ep, &data->desc);
++ if (value == 0)
++ data->state = STATE_EP_ENABLED;
++ break;
++#ifdef HIGHSPEED
++ case USB_SPEED_HIGH:
++ /* fails if caller didn't provide that descriptor... */
++ value = usb_ep_enable (ep, &data->hs_desc);
++ if (value == 0)
++ data->state = STATE_EP_ENABLED;
++ break;
++#endif
++ default:
++ DBG (data->dev, "unconnected, %s init deferred\n",
++ data->name);
++ data->state = STATE_EP_DEFER_ENABLE;
++ }
++ if (value == 0)
++ fd->f_op = &ep_io_operations;
++gone:
++ spin_unlock_irq (&data->dev->lock);
++ if (value < 0) {
++fail:
++ data->desc.bDescriptorType = 0;
++ data->hs_desc.bDescriptorType = 0;
++ }
++ up (&data->lock);
++ return value;
++fail0:
++ value = -EINVAL;
++ goto fail;
++fail1:
++ value = -EFAULT;
++ goto fail;
++}
++
++static int
++ep_open (struct inode *inode, struct file *fd)
++{
++ struct ep_data *data = inode->u.generic_ip;
++ int value = -EBUSY;
++
++ if (down_interruptible (&data->lock) != 0)
++ return -EINTR;
++ spin_lock_irq (&data->dev->lock);
++ if (data->dev->state == STATE_DEV_UNBOUND)
++ value = -ENOENT;
++ else if (data->state == STATE_EP_DISABLED) {
++ value = 0;
++ data->state = STATE_EP_READY;
++ get_ep (data);
++ fd->private_data = data;
++ VDEBUG (data->dev, "%s ready\n", data->name);
++ } else
++ DBG (data->dev, "%s state %d\n",
++ data->name, data->state);
++ spin_unlock_irq (&data->dev->lock);
++ up (&data->lock);
++ return value;
++}
++
++/* used before endpoint configuration */
++static struct file_operations ep_config_operations = {
++ .owner = THIS_MODULE,
++ .open = ep_open,
++ .write = ep_config,
++ .release = ep_release,
++};
++
++/*----------------------------------------------------------------------*/
++
++/* EP0 IMPLEMENTATION can be partly in userspace.
++ *
++ * Drivers that use this facility receive various events, including
++ * control requests the kernel doesn't handle. Drivers that don't
++ * use this facility may be too simple-minded for real applications.
++ */
++
++static inline void ep0_readable (struct dev_data *dev)
++{
++ wake_up (&dev->wait);
++ kill_fasync (&dev->fasync, SIGIO, POLL_IN);
++}
++
++static void clean_req (struct usb_ep *ep, struct usb_request *req)
++{
++ struct dev_data *dev = ep->driver_data;
++
++ if (req->buf != dev->rbuf) {
++ usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
++ req->buf = dev->rbuf;
++ req->dma = DMA_ADDR_INVALID;
++ }
++ req->complete = epio_complete;
++ dev->setup_out_ready = 0;
++}
++
++static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ struct dev_data *dev = ep->driver_data;
++ int free = 1;
++
++ /* for control OUT, data must still get to userspace */
++ if (!dev->setup_in) {
++ dev->setup_out_error = (req->status != 0);
++ if (!dev->setup_out_error)
++ free = 0;
++ dev->setup_out_ready = 1;
++ ep0_readable (dev);
++ } else if (dev->state == STATE_SETUP)
++ dev->state = STATE_CONNECTED;
++
++ /* clean up as appropriate */
++ if (free && req->buf != &dev->rbuf)
++ clean_req (ep, req);
++ req->complete = epio_complete;
++}
++
++static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
++{
++ struct dev_data *dev = ep->driver_data;
++
++ if (dev->setup_out_ready) {
++ DBG (dev, "ep0 request busy!\n");
++ return -EBUSY;
++ }
++ if (len > sizeof (dev->rbuf))
++ req->buf = usb_ep_alloc_buffer (ep, len, &req->dma, GFP_ATOMIC);
++ if (req->buf == 0) {
++ req->buf = dev->rbuf;
++ return -ENOMEM;
++ }
++ req->complete = ep0_complete;
++ req->length = len;
++ return 0;
++}
++
++static ssize_t
++ep0_read (struct file *fd, char *buf, size_t len, loff_t *ptr)
++{
++ struct dev_data *dev = fd->private_data;
++ ssize_t retval;
++ enum ep0_state state;
++
++ spin_lock_irq (&dev->lock);
++
++ /* report fd mode change before acting on it */
++ if (dev->setup_abort) {
++ dev->setup_abort = 0;
++ retval = -EIDRM;
++ goto done;
++ }
++
++ /* control DATA stage */
++ if ((state = dev->state) == STATE_SETUP) {
++
++ if (dev->setup_in) { /* stall IN */
++ VDEBUG(dev, "ep0in stall\n");
++ (void) usb_ep_set_halt (dev->gadget->ep0);
++ retval = -EL2HLT;
++ dev->state = STATE_CONNECTED;
++
++ } else if (len == 0) { /* ack SET_CONFIGURATION etc */
++ struct usb_ep *ep = dev->gadget->ep0;
++ struct usb_request *req = dev->req;
++
++ if ((retval = setup_req (ep, req, 0)) == 0)
++ retval = usb_ep_queue (ep, req, GFP_ATOMIC);
++ dev->state = STATE_CONNECTED;
++
++ } else { /* collect OUT data */
++ if ((fd->f_flags & O_NONBLOCK) != 0
++ && !dev->setup_out_ready) {
++ retval = -EAGAIN;
++ goto done;
++ }
++ spin_unlock_irq (&dev->lock);
++ retval = wait_event_interruptible (dev->wait,
++ dev->setup_out_ready != 0);
++
++ /* FIXME state could change from under us */
++ spin_lock_irq (&dev->lock);
++ if (retval)
++ goto done;
++ if (dev->setup_out_error)
++ retval = -EIO;
++ else {
++ len = min (len, dev->req->actual);
++// FIXME don't call this with the spinlock held ...
++ if (copy_to_user (buf, &dev->req->buf, len))
++ retval = -EFAULT;
++ clean_req (dev->gadget->ep0, dev->req);
++ /* NOTE userspace can't yet choose to stall */
++ }
++ }
++ goto done;
++ }
++
++ /* else normal: return event data */
++ if (len < sizeof dev->event [0]) {
++ retval = -EINVAL;
++ goto done;
++ }
++ len -= len % sizeof (struct usb_gadgetfs_event);
++ dev->usermode_setup = 1;
++
++scan:
++ /* return queued events right away */
++ if (dev->ev_next != 0) {
++ unsigned i, n;
++ int tmp = dev->ev_next;
++
++ len = min (len, tmp * sizeof (struct usb_gadgetfs_event));
++ n = len / sizeof (struct usb_gadgetfs_event);
++
++ /* ep0 can't deliver events when STATE_SETUP */
++ for (i = 0; i < n; i++) {
++ if (dev->event [i].type == GADGETFS_SETUP) {
++ len = n = i + 1;
++ len *= sizeof (struct usb_gadgetfs_event);
++ n = 0;
++ break;
++ }
++ }
++ spin_unlock_irq (&dev->lock);
++ if (copy_to_user (buf, &dev->event, len))
++ retval = -EFAULT;
++ else
++ retval = len;
++ if (len > 0) {
++ len /= sizeof (struct usb_gadgetfs_event);
++
++ /* NOTE this doesn't guard against broken drivers;
++ * concurrent ep0 readers may lose events.
++ */
++ spin_lock_irq (&dev->lock);
++ dev->ev_next -= len;
++ if (dev->ev_next != 0)
++ memmove (&dev->event, &dev->event [len],
++ sizeof (struct usb_gadgetfs_event)
++ * (tmp - len));
++ if (n == 0)
++ dev->state = STATE_SETUP;
++ spin_unlock_irq (&dev->lock);
++ }
++ return retval;
++ }
++ if (fd->f_flags & O_NONBLOCK) {
++ retval = -EAGAIN;
++ goto done;
++ }
++
++ switch (state) {
++ default:
++ DBG (dev, "fail %s, state %d\n", __FUNCTION__, state);
++ retval = -ESRCH;
++ break;
++ case STATE_UNCONNECTED:
++ case STATE_CONNECTED:
++ spin_unlock_irq (&dev->lock);
++ DBG (dev, "%s wait\n", __FUNCTION__);
++
++ /* wait for events */
++ retval = wait_event_interruptible (dev->wait,
++ dev->ev_next != 0);
++ if (retval < 0)
++ return retval;
++ spin_lock_irq (&dev->lock);
++ goto scan;
++ }
++
++done:
++ spin_unlock_irq (&dev->lock);
++ return retval;
++}
++
++static struct usb_gadgetfs_event *
++next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
++{
++ struct usb_gadgetfs_event *event;
++ unsigned i;
++
++ switch (type) {
++ /* these events purge the queue */
++ case GADGETFS_DISCONNECT:
++ if (dev->state == STATE_SETUP)
++ dev->setup_abort = 1;
++ // FALL THROUGH
++ case GADGETFS_CONNECT:
++ dev->ev_next = 0;
++ break;
++ case GADGETFS_SETUP: /* previous request timed out */
++ case GADGETFS_SUSPEND: /* same effect */
++ /* these events can't be repeated */
++ for (i = 0; i != dev->ev_next; i++) {
++ if (dev->event [i].type != type)
++ continue;
++ DBG (dev, "discard old event %d\n", type);
++ dev->ev_next--;
++ if (i == dev->ev_next)
++ break;
++ /* indices start at zero, for simplicity */
++ memmove (&dev->event [i], &dev->event [i + 1],
++ sizeof (struct usb_gadgetfs_event)
++ * (dev->ev_next - i));
++ }
++ break;
++ default:
++ BUG ();
++ }
++ event = &dev->event [dev->ev_next++];
++ BUG_ON (dev->ev_next > N_EVENT);
++ VDEBUG (dev, "ev %d, next %d\n", type, dev->ev_next);
++ memset (event, 0, sizeof *event);
++ event->type = type;
++ return event;
++}
++
++static ssize_t
++ep0_write (struct file *fd, const char *buf, size_t len, loff_t *ptr)
++{
++ struct dev_data *dev = fd->private_data;
++ ssize_t retval = -ESRCH;
++
++ spin_lock_irq (&dev->lock);
++
++ /* report fd mode change before acting on it */
++ if (dev->setup_abort) {
++ dev->setup_abort = 0;
++ retval = -EIDRM;
++
++ /* data and/or status stage for control request */
++ } else if (dev->state == STATE_SETUP) {
++
++ /* IN DATA+STATUS caller makes len <= wLength */
++ if (dev->setup_in) {
++ retval = setup_req (dev->gadget->ep0, dev->req, len);
++ if (retval == 0) {
++ spin_unlock_irq (&dev->lock);
++ if (copy_from_user (dev->req->buf, buf, len))
++ retval = -EFAULT;
++ else
++ retval = usb_ep_queue (
++ dev->gadget->ep0, dev->req,
++ GFP_KERNEL);
++ if (retval < 0) {
++ spin_lock_irq (&dev->lock);
++ clean_req (dev->gadget->ep0, dev->req);
++ spin_unlock_irq (&dev->lock);
++ } else
++ retval = len;
++
++ return retval;
++ }
++
++ /* can stall some OUT transfers */
++ } else if (dev->setup_can_stall) {
++ VDEBUG(dev, "ep0out stall\n");
++ (void) usb_ep_set_halt (dev->gadget->ep0);
++ retval = -EL2HLT;
++ dev->state = STATE_CONNECTED;
++ } else {
++ DBG(dev, "bogus ep0out stall!\n");
++ }
++ } else
++ DBG (dev, "fail %s, state %d\n", __FUNCTION__, dev->state);
++
++ spin_unlock_irq (&dev->lock);
++ return retval;
++}
++
++static int
++ep0_fasync (int f, struct file *fd, int on)
++{
++ struct dev_data *dev = fd->private_data;
++ // caller must F_SETOWN before signal delivery happens
++ VDEBUG (dev, "%s %s\n", __FUNCTION__, on ? "on" : "off");
++ return fasync_helper (f, fd, on, &dev->fasync);
++}
++
++static struct usb_gadget_driver gadgetfs_driver;
++
++static int
++dev_release (struct inode *inode, struct file *fd)
++{
++ struct dev_data *dev = fd->private_data;
++
++ /* closing ep0 === shutdown all */
++
++ usb_gadget_unregister_driver (&gadgetfs_driver);
++
++ /* at this point "good" hardware has disconnected the
++ * device from USB; the host won't see it any more.
++ * alternatively, all host requests will time out.
++ */
++
++ fasync_helper (-1, fd, 0, &dev->fasync);
++ kfree (dev->buf);
++ dev->buf = 0;
++ put_dev (dev);
++
++ /* other endpoints were all decoupled from this device */
++ dev->state = STATE_DEV_DISABLED;
++ return 0;
++}
++
++static int dev_ioctl (struct inode *inode, struct file *fd,
++ unsigned code, unsigned long value)
++{
++ struct dev_data *dev = fd->private_data;
++ struct usb_gadget *gadget = dev->gadget;
++
++ if (gadget->ops->ioctl)
++ return gadget->ops->ioctl (gadget, code, value);
++ return -ENOTTY;
++}
++
++/* used after device configuration */
++static struct file_operations ep0_io_operations = {
++ .owner = THIS_MODULE,
++ .read = ep0_read,
++ .write = ep0_write,
++ .fasync = ep0_fasync,
++ // .poll = ep0_poll,
++ .ioctl = dev_ioctl,
++ .release = dev_release,
++};
++
++/*----------------------------------------------------------------------*/
++
++/* The in-kernel gadget driver handles most ep0 issues, in particular
++ * enumerating the single configuration (as provided from user space).
++ *
++ * Unrecognized ep0 requests may be handled in user space.
++ */
++
++#ifdef HIGHSPEED
++static void make_qualifier (struct dev_data *dev)
++{
++ struct usb_qualifier_descriptor qual;
++ struct usb_device_descriptor *desc;
++
++ qual.bLength = sizeof qual;
++ qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
++ qual.bcdUSB = __constant_cpu_to_le16 (0x0200);
++
++ desc = dev->dev;
++ qual.bDeviceClass = desc->bDeviceClass;
++ qual.bDeviceSubClass = desc->bDeviceSubClass;
++ qual.bDeviceProtocol = desc->bDeviceProtocol;
++
++ /* assumes ep0 uses the same value for both speeds ... */
++ qual.bMaxPacketSize0 = desc->bMaxPacketSize0;
++
++ qual.bNumConfigurations = 1;
++ qual.bRESERVED = 0;
++
++ memcpy (dev->rbuf, &qual, sizeof qual);
++}
++#endif
++
++static int
++config_buf (struct dev_data *dev, u8 type, unsigned index)
++{
++ int len;
++#ifdef HIGHSPEED
++ int hs;
++#endif
++
++ /* only one configuration */
++ if (index > 0)
++ return -EINVAL;
++
++#ifdef HIGHSPEED
++ hs = (dev->gadget->speed == USB_SPEED_HIGH);
++ if (type == USB_DT_OTHER_SPEED_CONFIG)
++ hs = !hs;
++ if (hs) {
++ dev->req->buf = dev->hs_config;
++ len = le16_to_cpup (&dev->hs_config->wTotalLength);
++ } else
++#endif
++ {
++ dev->req->buf = dev->config;
++ len = le16_to_cpup (&dev->config->wTotalLength);
++ }
++ ((u8 *)dev->req->buf) [1] = type;
++ return len;
++}
++
++static int
++gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
++{
++ struct dev_data *dev = get_gadget_data (gadget);
++ struct usb_request *req = dev->req;
++ int value = -EOPNOTSUPP;
++ struct usb_gadgetfs_event *event;
++
++ spin_lock (&dev->lock);
++ dev->setup_abort = 0;
++ if (dev->state == STATE_UNCONNECTED) {
++ struct usb_ep *ep;
++ struct ep_data *data;
++
++ dev->state = STATE_CONNECTED;
++ dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
++
++#ifdef HIGHSPEED
++ if (gadget->speed == USB_SPEED_HIGH && dev->hs_config == 0) {
++ ERROR (dev, "no high speed config??\n");
++ return -EINVAL;
++ }
++#endif /* HIGHSPEED */
++
++ INFO (dev, "connected\n");
++ event = next_event (dev, GADGETFS_CONNECT);
++ event->u.speed = gadget->speed;
++ ep0_readable (dev);
++
++ list_for_each_entry (ep, &gadget->ep_list, ep_list) {
++ data = ep->driver_data;
++ /* ... down_trylock (&data->lock) ... */
++ if (data->state != STATE_EP_DEFER_ENABLE)
++ continue;
++#ifdef HIGHSPEED
++ if (gadget->speed == USB_SPEED_HIGH)
++ value = usb_ep_enable (ep, &data->hs_desc);
++ else
++#endif /* HIGHSPEED */
++ value = usb_ep_enable (ep, &data->desc);
++ if (value) {
++ ERROR (dev, "deferred %s enable --> %d\n",
++ data->name, value);
++ continue;
++ }
++ data->state = STATE_EP_ENABLED;
++ wake_up (&data->wait);
++ DBG (dev, "woke up %s waiters\n", data->name);
++ }
++
++ /* host may have given up waiting for response. we can miss control
++ * requests handled lower down (device/endpoint status and features);
++ * then ep0_{read,write} will report the wrong status. controller
++ * driver will have aborted pending i/o.
++ */
++ } else if (dev->state == STATE_SETUP)
++ dev->setup_abort = 1;
++
++ req->buf = dev->rbuf;
++ req->dma = DMA_ADDR_INVALID;
++ req->context = 0;
++ value = -EOPNOTSUPP;
++ switch (ctrl->bRequest) {
++
++ case USB_REQ_GET_DESCRIPTOR:
++ if (ctrl->bRequestType != USB_DIR_IN)
++ goto unrecognized;
++ switch (ctrl->wValue >> 8) {
++
++ case USB_DT_DEVICE:
++ value = min (ctrl->wLength, (u16) sizeof *dev->dev);
++ req->buf = dev->dev;
++ break;
++#ifdef HIGHSPEED
++ case USB_DT_DEVICE_QUALIFIER:
++ if (!dev->hs_config)
++ break;
++ value = min (ctrl->wLength, (u16)
++ sizeof (struct usb_qualifier_descriptor));
++ make_qualifier (dev);
++ break;
++ case USB_DT_OTHER_SPEED_CONFIG:
++ // FALLTHROUGH
++#endif
++ case USB_DT_CONFIG:
++ value = config_buf (dev,
++ ctrl->wValue >> 8,
++ ctrl->wValue & 0xff);
++ if (value >= 0)
++ value = min (ctrl->wLength, (u16) value);
++ break;
++ case USB_DT_STRING:
++ goto unrecognized;
++
++ default: // all others are errors
++ break;
++ }
++ break;
++
++ /* currently one config, two speeds */
++ case USB_REQ_SET_CONFIGURATION:
++ if (ctrl->bRequestType != 0)
++ break;
++ if (0 == (u8) ctrl->wValue) {
++ value = 0;
++ dev->current_config = 0;
++ // user mode expected to disable endpoints
++ } else {
++ u8 config;
++#ifdef HIGHSPEED
++ if (gadget->speed == USB_SPEED_HIGH)
++ config = dev->hs_config->bConfigurationValue;
++ else
++#endif
++ config = dev->config->bConfigurationValue;
++
++ if (config == (u8) ctrl->wValue) {
++ value = 0;
++ dev->current_config = config;
++ }
++ }
++
++ /* report SET_CONFIGURATION like any other control request,
++ * except that usermode may not stall this. the next
++ * request mustn't be allowed start until this finishes:
++ * endpoints and threads set up, etc.
++ *
++ * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
++ * has bad/racey automagic that prevents synchronizing here.
++ * even kernel mode drivers often miss them.
++ */
++ if (value == 0) {
++ INFO (dev, "configuration #%d\n", dev->current_config);
++ if (dev->usermode_setup) {
++ dev->setup_can_stall = 0;
++ goto delegate;
++ }
++ }
++ break;
++
++#ifndef CONFIG_USB_GADGETFS_PXA2XX
++ /* PXA automagically handles this request too */
++ case USB_REQ_GET_CONFIGURATION:
++ if (ctrl->bRequestType != 0x80)
++ break;
++ *(u8 *)req->buf = dev->current_config;
++ value = min (ctrl->wLength, (u16) 1);
++ break;
++#endif
++
++ default:
++unrecognized:
++ VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
++ dev->usermode_setup ? "delegate" : "fail",
++ ctrl->bRequestType, ctrl->bRequest,
++ ctrl->wValue, ctrl->wIndex, ctrl->wLength);
++
++ /* if there's an ep0 reader, don't stall */
++ if (dev->usermode_setup) {
++ dev->setup_can_stall = 1;
++delegate:
++ dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
++ ? 1 : 0;
++ dev->setup_out_ready = 0;
++ dev->setup_out_error = 0;
++ value = 0;
++
++ /* read DATA stage for OUT right away */
++ if (unlikely (!dev->setup_in && ctrl->wLength)) {
++ value = setup_req (gadget->ep0, dev->req,
++ ctrl->wLength);
++ if (value < 0)
++ break;
++ value = usb_ep_queue (gadget->ep0, dev->req,
++ GFP_ATOMIC);
++ if (value < 0) {
++ clean_req (gadget->ep0, dev->req);
++ break;
++ }
++
++ /* we can't currently stall these */
++ dev->setup_can_stall = 0;
++ }
++
++ /* state changes when reader collects event */
++ event = next_event (dev, GADGETFS_SETUP);
++ event->u.setup = *ctrl;
++ ep0_readable (dev);
++ spin_unlock (&dev->lock);
++ return 0;
++ }
++ }
++
++ /* proceed with data transfer and status phases? */
++ if (value >= 0 && dev->state != STATE_SETUP) {
++ req->length = value;
++ value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
++ if (value < 0) {
++ DBG (dev, "ep_queue --> %d\n", value);
++ req->status = 0;
++ }
++ }
++
++ /* device stalls when value < 0 */
++ spin_unlock (&dev->lock);
++ return value;
++}
++
++static void destroy_ep_files (struct dev_data *dev)
++{
++ struct list_head *entry, *tmp;
++
++ DBG (dev, "%s %d\n", __FUNCTION__, dev->state);
++
++ /* dev->state must prevent interference */
++restart:
++ spin_lock_irq (&dev->lock);
++ list_for_each_safe (entry, tmp, &dev->epfiles) {
++ struct ep_data *ep;
++
++ /* break link to FS */
++ ep = list_entry (entry, struct ep_data, epfiles);
++ list_del_init (&ep->epfiles);
++
++ /* break link to controller */
++ if (ep->state == STATE_EP_ENABLED)
++ (void) usb_ep_disable (ep->ep);
++ ep->state = STATE_EP_UNBOUND;
++ usb_ep_free_request (ep->ep, ep->req);
++ ep->ep = 0;
++ wake_up (&ep->wait);
++ put_ep (ep);
++
++ spin_unlock_irq (&dev->lock);
++
++ /* fds may still be open */
++ goto restart;
++ }
++ spin_unlock_irq (&dev->lock);
++}
++
++
++static int activate_ep_files (struct dev_data *dev)
++{
++ struct usb_ep *ep;
++
++ gadget_for_each_ep (ep, dev->gadget) {
++ struct ep_data *data;
++
++ data = kmalloc (sizeof *data, GFP_KERNEL);
++ if (!data)
++ goto enomem;
++ memset (data, 0, sizeof data);
++ data->state = STATE_EP_DISABLED;
++ init_MUTEX (&data->lock);
++ init_waitqueue_head (&data->wait);
++
++ strncpy (data->name, ep->name, sizeof (data->name) - 1);
++ atomic_set (&data->count, 1);
++ data->dev = dev;
++ get_dev (dev);
++
++ data->ep = ep;
++ ep->driver_data = data;
++
++ data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
++ if (!data->req)
++ goto enomem;
++
++ list_add_tail (&data->epfiles, &dev->epfiles);
++ }
++ return 0;
++
++enomem:
++ DBG (dev, "%s enomem\n", __FUNCTION__);
++ destroy_ep_files (dev);
++ return -ENOMEM;
++}
++
++static void
++gadgetfs_unbind (struct usb_gadget *gadget)
++{
++ struct dev_data *dev = get_gadget_data (gadget);
++
++ DBG (dev, "%s\n", __FUNCTION__);
++
++ spin_lock_irq (&dev->lock);
++ dev->state = STATE_DEV_UNBOUND;
++ spin_unlock_irq (&dev->lock);
++
++ destroy_ep_files (dev);
++ gadget->ep0->driver_data = 0;
++ set_gadget_data (gadget, 0);
++
++ /* we've already been disconnected ... no i/o is active */
++ if (dev->req)
++ usb_ep_free_request (gadget->ep0, dev->req);
++ DBG (dev, "%s done\n", __FUNCTION__);
++ put_dev (dev);
++}
++
++static struct dev_data *the_device;
++
++static int
++gadgetfs_bind (struct usb_gadget *gadget)
++{
++ struct dev_data *dev = the_device;
++
++ if (!dev)
++ return -ESRCH;
++ if (0 != strcmp (CHIP, gadget->name)) {
++ printk (KERN_ERR "%s expected " CHIP " controller not %s\n",
++ shortname, gadget->name);
++ return -ENODEV;
++ }
++
++ set_gadget_data (gadget, dev);
++ dev->gadget = gadget;
++ gadget->ep0->driver_data = dev;
++ dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
++
++ /* preallocate control response and buffer */
++ dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
++ if (!dev->req)
++ goto enomem;
++ dev->req->context = 0;
++ dev->req->complete = epio_complete;
++
++ if (activate_ep_files (dev) < 0)
++ goto enomem;
++
++ INFO (dev, "bound to %s driver\n", gadget->name);
++ dev->state = STATE_UNCONNECTED;
++ get_dev (dev);
++ return 0;
++
++enomem:
++ gadgetfs_unbind (gadget);
++ return -ENOMEM;
++}
++
++static void
++gadgetfs_disconnect (struct usb_gadget *gadget)
++{
++ struct dev_data *dev = get_gadget_data (gadget);
++
++ if (dev->state == STATE_UNCONNECTED) {
++ DBG (dev, "already unconnected\n");
++ return;
++ }
++ dev->state = STATE_UNCONNECTED;
++
++ INFO (dev, "disconnected\n");
++ spin_lock (&dev->lock);
++ next_event (dev, GADGETFS_DISCONNECT);
++ ep0_readable (dev);
++ spin_unlock (&dev->lock);
++}
++
++static void
++gadgetfs_suspend (struct usb_gadget *gadget)
++{
++ struct dev_data *dev = get_gadget_data (gadget);
++
++ INFO (dev, "suspended from state %d\n", dev->state);
++ spin_lock (&dev->lock);
++ switch (dev->state) {
++ case STATE_SETUP: // VERY odd... host died??
++ case STATE_CONNECTED:
++ case STATE_UNCONNECTED:
++ next_event (dev, GADGETFS_SUSPEND);
++ ep0_readable (dev);
++ /* FALLTHROUGH */
++ default:
++ break;
++ }
++ spin_unlock (&dev->lock);
++}
++
++static struct usb_gadget_driver gadgetfs_driver = {
++#ifdef HIGHSPEED
++ .speed = USB_SPEED_HIGH,
++#else
++ .speed = USB_SPEED_FULL,
++#endif
++ .function = (char *) driver_desc,
++ .bind = gadgetfs_bind,
++ .unbind = gadgetfs_unbind,
++ .setup = gadgetfs_setup,
++ .disconnect = gadgetfs_disconnect,
++ .suspend = gadgetfs_suspend,
++
++ .driver = {
++ .name = (char *) shortname,
++ // .shutdown = ...
++ // .suspend = ...
++ // .resume = ...
++ },
++};
++
++/*----------------------------------------------------------------------*/
++
++/* DEVICE INITIALIZATION
++ *
++ * fd = open ("/dev/gadget/$CHIP", O_RDWR)
++ * status = write (fd, descriptors, sizeof descriptors)
++ *
++ * That write establishes the device configuration, so the kernel can
++ * bind to the controller ... guaranteeing it can handle enumeration
++ * at all necessary speeds. Descriptor order is:
++ *
++ * . message tag (u32, host order) ... for now, must be zero; it
++ * would change to support features like multi-config devices
++ * . full/low speed config ... all wTotalLength bytes (with interface,
++ * class, altsetting, endpoint, and other descriptors)
++ * . high speed config ... all descriptors, for high speed operation;
++ * this one's optional except for high-speed hardware
++ * . device descriptor
++ *
++ * Endpoints are not yet enabled. Drivers may want to immediately
++ * initialize them, using the /dev/gadget/ep* files that are available
++ * as soon as the kernel sees the configuration, or they can wait
++ * until device configuration and interface altsetting changes create
++ * the need to configure (or unconfigure) them.
++ *
++ * After initialization, the device stays active for as long as that
++ * $CHIP file is open. Events may then be read from that descriptor,
++ * such configuration notifications. More complex drivers will handle
++ * some control requests in user space.
++ */
++
++static int is_valid_config (struct usb_config_descriptor *config)
++{
++ return config->bDescriptorType == USB_DT_CONFIG
++ && config->bLength == USB_DT_CONFIG_SIZE
++ && config->bConfigurationValue != 0
++ && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
++ && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
++ /* FIXME check lengths: walk to end */
++}
++
++static ssize_t
++dev_config (struct file *fd, const char *buf, size_t len, loff_t *ptr)
++{
++ struct dev_data *dev = fd->private_data;
++ ssize_t value = len, length = len;
++ unsigned total;
++ u32 tag;
++ char *kbuf;
++
++ if (dev->state != STATE_OPENED)
++ return -EEXIST;
++
++ if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
++ return -EINVAL;
++
++ /* we might need to change message format someday */
++ if (copy_from_user (&tag, buf, 4))
++ return -EFAULT;
++ if (tag != 0)
++ return -EINVAL;
++ buf += 4;
++ length -= 4;
++
++ kbuf = kmalloc (length, SLAB_KERNEL);
++ if (!kbuf)
++ return -ENOMEM;
++ if (copy_from_user (kbuf, buf, length)) {
++ kfree (kbuf);
++ return -EFAULT;
++ }
++
++ spin_lock_irq (&dev->lock);
++ value = -EINVAL;
++ if (dev->buf)
++ goto fail;
++ dev->buf = kbuf;
++
++ /* full or low speed config */
++ dev->config = (void *) kbuf;
++ total = le16_to_cpup (&dev->config->wTotalLength);
++ if (!is_valid_config (dev->config) || total >= length)
++ goto fail;
++ kbuf += total;
++ length -= total;
++
++ /* optional high speed config */
++ if (kbuf [1] == USB_DT_CONFIG) {
++ dev->hs_config = (void *) kbuf;
++ total = le16_to_cpup (&dev->hs_config->wTotalLength);
++ if (!is_valid_config (dev->hs_config) || total >= length)
++ goto fail;
++ kbuf += total;
++ length -= total;
++ }
++
++ /* could support multiple configs, using another encoding! */
++
++ /* device descriptor (tweaked for paranoia) */
++ if (length != USB_DT_DEVICE_SIZE)
++ goto fail;
++ dev->dev = (void *)kbuf;
++ if (dev->dev->bLength != USB_DT_DEVICE_SIZE
++ || dev->dev->bDescriptorType != USB_DT_DEVICE
++ || dev->dev->bNumConfigurations != 1)
++ goto fail;
++ dev->dev->bNumConfigurations = 1;
++ dev->dev->bcdUSB = __constant_cpu_to_le16 (0x0200);
++
++ /* triggers gadgetfs_bind(); then we can enumerate. */
++ spin_unlock_irq (&dev->lock);
++ value = usb_gadget_register_driver (&gadgetfs_driver);
++ if (value != 0) {
++ kfree (dev->buf);
++ dev->buf = 0;
++ } else {
++ /* at this point "good" hardware has for the first time
++ * let the USB the host see us. alternatively, if users
++ * unplug/replug that will clear all the error state.
++ *
++ * note: everything running before here was guaranteed
++ * to choke driver model style diagnostics. from here
++ * on, they can work ... except in cleanup paths that
++ * kick in after the ep0 descriptor is closed.
++ */
++ fd->f_op = &ep0_io_operations;
++ value = len;
++ }
++ return value;
++
++fail:
++ spin_unlock_irq (&dev->lock);
++ pr_debug ("%s: %s fail %d, %p\n", shortname, __FUNCTION__, value, dev);
++ kfree (dev->buf);
++ dev->buf = 0;
++ return value;
++}
++
++static int
++dev_open (struct inode *inode, struct file *fd)
++{
++ struct dev_data *dev = inode->u.generic_ip;
++ int value = -EBUSY;
++
++ if (dev->state == STATE_DEV_DISABLED) {
++ dev->ev_next = 0;
++ dev->state = STATE_OPENED;
++ fd->private_data = dev;
++ get_dev (dev);
++ value = 0;
++ }
++ return value;
++}
++
++static struct file_operations dev_init_operations = {
++ .owner = THIS_MODULE,
++ .open = dev_open,
++ .write = dev_config,
++ .fasync = ep0_fasync,
++ .ioctl = dev_ioctl,
++ .release = dev_release,
++};
++
++/*----------------------------------------------------------------------*/
++
++/*
++ * implementation for 2.4 uses character special files
++ * ep0/device file MKDEV (c_major, 0)
++ * first data ep MKDEV (c_major, 1)
++ * second data ep MKDEV (c_major, 2)
++ * ...
++ *
++ * FIXME can do it as a real filesystem on 2.4 too, without libfs
++ */
++static int c_major = 240; /* 240 is local/experimental */
++MODULE_PARM (c_major, "i");
++MODULE_PARM_DESC (c_major, "major number for char special files");
++
++static int gadget_open (struct inode *ino, struct file *fp)
++{
++ int num = minor (ino->i_rdev);
++ struct dev_data *dev;
++ struct file_operations *ops;
++
++ /* ep0 file, "/dev/gadget/$CHIP" */
++ if (num == 0) {
++ int status;
++
++ if (the_device != 0)
++ return -EBUSY;
++ the_device = dev_new ();
++ if (the_device == 0)
++ return -ENOMEM;
++
++ dev = the_device;
++ ino->u.generic_ip = dev;
++ ops = &dev_init_operations;
++ fp->f_op = ops;
++
++ status = ops->open (ino, fp);
++ if (status < 0) {
++ put_dev (dev);
++ the_device = 0;
++ }
++ return status;
++
++ /* ep files, "/dev/gadget/$ENDPOINT" */
++ } else {
++ struct list_head *entry;
++ struct ep_data *data;
++
++ /* unavailable till device is initted */
++ dev = the_device;
++ if (dev == 0)
++ return -ENODEV;
++
++ /* order in controller's name listing matters! */
++ list_for_each (entry, &dev->epfiles) {
++ if (--num == 0)
++ goto found;
++ }
++ return -ENODEV;
++found:
++ data = list_entry (entry, struct ep_data, epfiles);
++ ino->u.generic_ip = data;
++ ops = &ep_config_operations;
++ fp->f_op = ops;
++
++ return ops->open (ino, fp);
++ }
++}
++
++static struct file_operations gadget_fops = {
++ .owner = THIS_MODULE,
++ .open = gadget_open,
++};
++
++/*----------------------------------------------------------------------*/
++
++static int __init init (void)
++{
++ int status;
++
++ status = register_chrdev (c_major, shortname, &gadget_fops);
++ if (status < 0) {
++ printk (KERN_WARNING "%s: can't get major %d\n",
++ shortname, c_major);
++ return status;
++ }
++
++ /* dynamic assignment */
++ if (c_major == 0)
++ c_major = status;
++ status = 0;
++
++ pr_info ("%s: using char major %d\n", shortname, c_major);
++
++ if (status == 0)
++ pr_info ("%s: %s, version " DRIVER_VERSION "\n",
++ shortname, driver_desc);
++ return status;
++}
++module_init (init);
++
++static void __exit cleanup (void)
++{
++ pr_debug ("unregister %s\n", shortname);
++ unregister_chrdev (c_major, shortname);
++}
++module_exit (cleanup);
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/n9604.c kernel/drivers/usb/gadget/n9604.c
+--- /tmp/kernel/drivers/usb/gadget/n9604.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/n9604.c 2005-04-22 17:53:19.461535120 +0200
+@@ -0,0 +1,1088 @@
++/*
++ * National 9603/4 USB Device Controller driver
++ * Copyright (C) 2004 Technical Solutions Inc. (support@techsol.ca)
++ * ported from : The Goku-S driver
++ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++/*
++ * This device has ep0 and six semi-configurable bulk/interrupt endpoints.
++ *
++ * - Endpoint numbering is fixed:
++ * Endpoint 0: ep0
++ * Endpoint 1: ep1in (tx)
++ * Endpoint 2: ep2out (rx)
++ * Endpoint 3: ep3in (tx)
++ * Endpoint 4: ep4out (rx)
++ * Endpoint 5: ep5in (tx)
++ * Endpoint 6: ep6out (rx)
++ */
++
++/*
++ * The ep->stage information refers to the state of a setup transaction
++ *
++ * state 0: no setup packet has been received
++ * state 1: setup packet has been received
++ * state 2: data has been sent/received
++ * state 3: ZLP has been received/sent
++ */
++
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/smp_lock.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++#include <asm/byteorder.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++#include <asm/unaligned.h>
++
++#include "n9604.h"
++#include "n9604regs.h"
++
++inline void Flush_and_enable(u8 control_reg) {
++ write_9604(RXC_FLUSH, control_reg);
++ while (read_9604(control_reg) & RXC_FLUSH);
++ write_9604(RXC_RX_EN, control_reg);
++}
++inline void Flush(u8 control_reg) {
++ write_9604(RXC_FLUSH, control_reg);
++ while (read_9604(control_reg) & RXC_FLUSH);
++}
++
++#define DRIVER_DESC "N9604 USB Device Controller"
++#define DRIVER_VERSION "29-Oct 2004"
++
++static const char driver_name [] = "n9604_udc";
++static const char driver_desc [] = DRIVER_DESC;
++
++MODULE_AUTHOR("support@techsol.ca");
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
++
++static void nuke(struct n9604_ep *ep, int status);
++inline void send_zero_length(int endpoint, struct n9604_udc *dev);
++
++u8 * USBN9604_Offset; //device virtual address
++
++/* FIXME all the IRQ stuff is board-specific
++ */
++
++#define h7201_readl readl
++#define h7201_writel writel
++
++#define ETHER_IRQ_IP_OFFSET 0
++#define ETHER_IRQ_BIT_POS 0
++#define ETHER_IRQ_IM_OFFSET 0
++
++#define IRQ_GPIOC -1
++
++#define USBD_ENABLE_IRQ {h7201_writel( h7201_readl(ETHER_IRQ_IP_OFFSET) | (1 << ETHER_IRQ_BIT_POS), ETHER_IRQ_IP_OFFSET); h7201_writel( h7201_readl(ETHER_IRQ_IM_OFFSET) | (1 << ETHER_IRQ_BIT_POS), ETHER_IRQ_IM_OFFSET);}
++#define USBD_DISABLE_IRQ h7201_writel( h7201_readl(ETHER_IRQ_IM_OFFSET) & ~(1 << ETHER_IRQ_BIT_POS), ETHER_IRQ_IM_OFFSET);
++
++
++/*-------------------------------------------------------------------------*/
++
++//enable an end point, of description desc
++static int n9604_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) {
++ struct n9604_udc *dev;
++ struct n9604_ep *ep;
++ u16 max;
++
++ ep = container_of(_ep, struct n9604_ep, ep);
++
++ if (!_ep || !desc || ep->desc || desc->bDescriptorType != USB_DT_ENDPOINT)
++ return -EINVAL;
++
++ dev = ep->dev;
++ if (!ep->num)
++ return -EINVAL;
++ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
++ return -ESHUTDOWN;
++ if (ep->num && !(desc->bEndpointAddress & 0x0f))
++ return -EINVAL;
++
++ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
++ case USB_ENDPOINT_XFER_BULK:
++ case USB_ENDPOINT_XFER_INT:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ write_9604((ep->numActual & EPC_EP_MASK) | EPC_EP_EN | (EPC_ISO * ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC)), ep->control);
++ if (ep->is_in)
++ Flush(ep->command);
++ else
++ Flush_and_enable(ep->command);
++
++ max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
++ ep->ep.maxpacket = min_t(u16, max, MAX_FIFO_SIZE);
++ ep->desc = desc;
++
++ return 0;
++}
++
++static int n9604_ep_disable(struct usb_ep *_ep)//ep > 0
++{
++ struct n9604_ep *ep;
++ struct n9604_udc *dev;
++ unsigned long flags;
++
++ ep = container_of(_ep, struct n9604_ep, ep);
++
++ if (!_ep || !ep->desc)
++ return -ENODEV;
++ dev = ep->dev;
++
++ spin_lock_irqsave(&dev->lock, flags);
++ nuke(ep, -ESHUTDOWN);
++ write_9604(0, ep->command);
++ ep->desc = NULL;
++ spin_unlock_irqrestore(&dev->lock, flags);
++
++ return 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_request *
++n9604_alloc_request(struct usb_ep *_ep, int gfp_flags)
++{
++ struct n9604_request *req;
++
++ if (!_ep)
++ return 0;
++ req = kmalloc(sizeof *req, gfp_flags);
++ if (!req)
++ return 0;
++
++ memset(req, 0, sizeof *req);
++ INIT_LIST_HEAD(&req->queue);
++ return &req->req;
++}
++
++static void
++n9604_free_request(struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct n9604_request *req;
++
++ if (!_ep || !_req)
++ return;
++
++ req = container_of(_req, struct n9604_request, req);
++ WARN_ON(!list_empty(&req->queue));
++ kfree(req);
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void done(struct n9604_ep *ep, struct n9604_request *req, int status);
++
++static inline int
++write_packet(struct n9604_ep *ep, u8 *buf, struct n9604_request *req)
++{
++ unsigned written_length, desired_length, available_length, maximum_length, flags, loop_length;
++
++ u8 fifo = ep->fifo;
++ u8 command = ep->command;
++ u8 status = ep->status;
++ if (!ep->num) {
++ fifo = TXD0;
++ command = TXC0;
++ status = TXS0;
++ }
++
++ if (read_9604(command) & TXC_TX_EN)
++ return -EBUSY;
++ ep->packets++;
++
++ desired_length = req->req.length - req->req.actual;
++ available_length = read_9604(status) & TXS_TCOUNT_MASK;//might be greater
++ written_length = 0;
++ if (ep->num)
++ maximum_length = MAX_FIFO_SIZE;
++ else
++ maximum_length = MAX_EP0_SIZE;
++
++ while ((loop_length = min(min(available_length, desired_length), maximum_length))) {
++ int i = loop_length;
++ while (i) { write_9604(*buf++,fifo); i--; }
++ written_length += loop_length;
++ desired_length -= loop_length;
++ maximum_length -= loop_length;
++ if (desired_length && maximum_length)//check if really need to read the chip again
++ available_length = (read_9604(status) & TXS_TCOUNT_MASK);
++ }
++
++ req->req.actual += written_length;
++
++ flags = TXC_TX_EN;
++ if (ep->num)
++ flags |= TXC_LAST;
++ if (ep->toggle)
++ flags |= TXC_TOGGLE;
++ write_9604(flags, command);
++ ep->toggle = !(ep->toggle);
++ if (!written_length) req->req.zero = 0;//just wrote zero bytes, there is no more need for req.zero
++ return written_length;
++}
++
++// return: 0 = still running, 1 = completed, negative = errno
++static int write_fifo(struct n9604_ep *ep, struct n9604_request *req)
++{
++ struct n9604_udc *dev = ep->dev;
++ u8 *buf;
++ int count;//must be signed: write_packet() can return -EBUSY, and (unsigned)count < 0 is never true
++ int is_last;
++
++ buf = req->req.buf + req->req.actual;
++ prefetch(buf);
++
++ dev = ep->dev;
++
++ count = write_packet(ep, buf, req);
++ if (count < 0)
++ return count;
++
++ /* last packet often short (sometimes a zlp, especially on ep0) */
++ if ((req->req.length != req->req.actual) || req->req.zero)
++ is_last = 0;
++ else
++ is_last = 1;
++
++ /* requests complete when all IN data is in the FIFO,
++ * or sometimes later, if a zlp was needed.
++ */
++ if (is_last) {
++ done(ep, req, 0);
++ return 1;
++ }
++ return 0;
++}
++
++static inline void pio_irq_enable(struct n9604_ep *ep);
++
++static int read_fifo(struct n9604_ep *ep, struct n9604_request *req)
++{
++ u32 size;
++ u8 *buf;
++ int bufferspace_available, fifospace_left, num_bytes_read;
++ int fifo, status;
++ ep->packets++;
++ if (!ep->num) {
++ fifo = RXD0;
++ status = RXS0;
++ } else {
++ fifo = ep->fifo;
++ status = ep->status;
++ }
++ num_bytes_read = 0;
++ buf = req->req.buf + req->req.actual;
++ bufferspace_available = req->req.length - req->req.actual;
++ size = read_9604(status) & (RXS_RCOUNTMASK | RXS_RX_ERR);//number of bytes ready to be read (15 if greater than 15)
++ if (ep->num && (size & RXS_RX_ERR)) {
++ ERROR(ep->dev, "DATA ERROR!!!! on ep%d\nFlushing Fifo", ep->num);
++ Flush_and_enable(ep->command);
++ goto leave;
++ }
++ size = size & ~RXS_RX_ERR;//clear the bit
++ if (ep->num) fifospace_left = MAX_FIFO_SIZE;
++ else fifospace_left = MAX_EP0_SIZE;
++loop:
++ /* read all bytes from this packet */
++ while (size-- != 0) {
++ u8 byte = read_9604(fifo);
++ if (unlikely(bufferspace_available == 0)) {
++ /* this happens when the driver's buffer
++ * is smaller than what the host sent.
++ * discard the extra data in this packet.
++ */
++ done(ep, req, -EOVERFLOW);
++ return 1;
++ } else {
++ *buf++ = byte;
++ bufferspace_available--;
++ fifospace_left--;
++ num_bytes_read++;
++ }
++ }
++ if ((size = (read_9604(status) & RXS_RCOUNTMASK))) {
++ goto loop;//since there is more data
++ }
++ /* completion */
++ req->req.actual = req->req.actual + num_bytes_read;
++ if (fifospace_left || req->req.actual == req->req.length) {
++ done(ep, req, 0);
++ return 1;
++ }
++leave:
++ pio_irq_enable(ep);//turn the interrupt back on
++ return 0;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static inline void
++pio_irq_enable(struct n9604_ep *ep)
++{
++ if (ep->is_in)
++ write_9604(read_9604(TXMSK) | 1 << ep->fifoNum | 0x10 << ep->fifoNum, TXMSK);
++ else {
++ u8 command = ep->command;
++ if (!ep->num) command = RXC0;
++ write_9604(read_9604(RXMSK) | 1 << ep->fifoNum | 0x10 << ep->fifoNum, RXMSK);
++ write_9604(RXC_RX_EN | RXC_RFWL0 | RXC_RFWL1, command);
++ }
++}
++
++static inline void
++pio_irq_disable(struct n9604_ep *ep)//epnum != 0
++{
++ if (ep->is_in)
++ write_9604(read_9604(TXMSK) & ~(1 << ep->fifoNum) & ~(0x10 << ep->fifoNum), TXMSK);
++ else
++ write_9604(read_9604(RXMSK) & ~(1 << ep->fifoNum) & ~(0x10 << ep->fifoNum), RXMSK);
++}
++
++static int request_voodoo = 0;//number of bytes the host requested
++
++static inline void
++pio_advance(struct n9604_ep *ep)
++{
++ struct n9604_request *req;
++
++ if (list_empty (&ep->queue)) {
++ if (!ep->num) {
++ if (ep->is_in && (ep->stage == 2)) {
++ ep->is_in = 0;//switch modes
++ Flush_and_enable(RXC0);//needed to receive a ZLP after tx
++ ep->stage++;//and bump the stage number
++ } else if (ep->stage == 3) {
++ ep->stage = 0;
++ }
++ }
++ return;
++ }
++ req = list_entry(ep->queue.next, struct n9604_request, queue);
++ (ep->is_in ? write_fifo : read_fifo)(ep, req);
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void * n9604_alloc_buffer(struct usb_ep *_ep, unsigned bytes, dma_addr_t *dma, int gfp_flags)
++{
++ return kmalloc(bytes, gfp_flags);
++}
++
++static void n9604_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma, unsigned bytes)
++{
++ kfree (buf);
++}
++
++
++
++/*-------------------------------------------------------------------------*/
++
++static void
++done(struct n9604_ep *ep, struct n9604_request *req, int status)
++{
++ struct n9604_udc *dev;
++
++ list_del_init(&req->queue);
++ ep->queue_active--;
++
++ if (req->req.status == -EINPROGRESS)
++ req->req.status = status;
++ else
++ status = req->req.status;
++
++ dev = ep->dev;
++
++ /* don't modify queue heads during completion callback */
++ if (ep->num)
++ pio_irq_disable(ep);
++ else if (!ep->nuking) {
++ ep->stage++;
++ ep->toggle = 1;//other endpoints stay in their flipping mode between transactions
++ if (ep->stage == 2) {//we are in stage 2 now
++ if (!ep->is_in) {
++ ep->is_in = 1;//switch modes
++ request_voodoo = 1;//prevents n9604_queue from calling us again before doing anything
++ send_zero_length(0, dev);
++ } else {//we have to receive a ZLP
++ //this will happen when the tx is complete, the pio_advance fcn will activate it for us
++ }
++ }
++ }
++
++ req->req.complete(&ep->ep, &req->req);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int
++n9604_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
++{
++ struct n9604_request *req;
++ struct n9604_ep *ep;
++ struct n9604_udc *dev;
++ unsigned long flags;
++ int status;
++
++ req = container_of(_req, struct n9604_request, req);
++ if (unlikely(!_req || !_req->complete
++ || !_req->buf || !list_empty(&req->queue)))
++ return -EINVAL;
++ ep = container_of(_ep, struct n9604_ep, ep);
++ if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
++ return -EINVAL;
++ dev = ep->dev;
++ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
++ return -ESHUTDOWN;
++ }
++ if (ep->nuking)
++ return -ESHUTDOWN;
++
++ spin_lock_irqsave(&dev->lock, flags);
++
++ ep->queue_reqs++;
++ ep->queue_active++;
++
++ _req->status = -EINPROGRESS;
++ _req->actual = 0;
++
++ /* for ep0 IN without premature status, zlp is required and
++ * writing EOP starts the status stage (OUT).
++ */
++ if (ep->num == 0) {
++ if ((request_voodoo > _req->length) && !(_req->length % MAX_EP0_SIZE) && (_req->length != 0)) {
++ _req->zero = 1;
++ }
++ if (!request_voodoo && !ep->is_in) {//this is a zero length request
++ spin_unlock_irqrestore(&dev->lock, flags);//David
++ done(ep, req, 0);//this doesn't check if the list is empty (probably not an issue)
++ return 0; //shouldn't this be handled by the rx irq fcn, and passed to pio_advance
++ }//that may conflict with the voodoo stuff, maybe best to leave it
++ }
++
++ /* kickstart this i/o queue? */
++ status = 0;
++ if (list_empty(&ep->queue) && ep->is_in) {
++ status = write_fifo(ep, req);
++ if (status == -EBUSY)
++ ;//we should queue up the request then
++ else {
++ if (status != 0) {
++ if (status > 0)
++ status = 0;
++ req = 0;
++ }
++ }
++ } /* else pio or dma irq handler advances the queue. */
++
++ if (req != 0) {
++ list_add_tail(&req->queue, &ep->queue);
++ pio_irq_enable(ep);
++ }
++
++ spin_unlock_irqrestore(&dev->lock, flags);
++ return status;
++}
++
++/* dequeue ALL requests */
++static void nuke(struct n9604_ep *ep, int status)
++{
++ struct n9604_request *req;
++
++ if (list_empty(&ep->queue))
++ return;
++ ep->nuking = 1;
++ while (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next, struct n9604_request, queue);
++ done(ep, req, status);
++ }
++ ep->nuking = 0;
++}
++
++/* dequeue JUST ONE request */
++static int n9604_dequeue(struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct n9604_request *req;
++ struct n9604_ep *ep;
++ struct n9604_udc *dev;
++ unsigned long flags;
++
++ ep = container_of(_ep, struct n9604_ep, ep);
++ if (!_ep || !_req || (!ep->desc && ep->num != 0))
++ return -EINVAL;
++ dev = ep->dev;
++
++ if (!dev->driver)
++ return -ESHUTDOWN;
++
++ spin_lock_irqsave(&dev->lock, flags);
++
++ /* make sure it's actually queued on this endpoint */
++ list_for_each_entry (req, &ep->queue, queue) {
++ if (&req->req == _req)
++ break;
++ }
++ if (&req->req != _req) {
++ spin_unlock_irqrestore (&dev->lock, flags);
++ return -EINVAL;
++ }
++
++ spin_unlock_irqrestore(&dev->lock, flags);
++
++ return req ? 0 : -EOPNOTSUPP;
++}
++
++static int n9604_clear_halt(struct usb_ep *_ep) {
++ struct n9604_ep *ep;
++ ep = container_of (_ep, struct n9604_ep, ep);
++
++ write_9604(read_9604(ep->control) & ~EPC_STALL, ep->control);
++ pio_advance(ep);
++ return 0;
++}
++
++static int n9604_set_halt(struct usb_ep *_ep, int value) {
++ struct n9604_ep *ep;
++ unsigned long flags;
++ int retval = 0;
++
++ if (!_ep) {
++ retval = -ENODEV; goto exit;
++ }
++ ep = container_of (_ep, struct n9604_ep, ep);
++
++ if (ep->num == 0) {//is this valid?
++ if (!value) {
++ retval = -EINVAL; goto exit; }
++
++ /* don't change EPxSTATUS_EP_INVALID to READY */
++ } else if (!ep->desc) {
++ retval = -EINVAL; goto exit;
++ }
++
++ spin_lock_irqsave(&ep->dev->lock, flags);
++ if (!list_empty(&ep->queue))
++ retval = -EAGAIN;
++ else if (!value)
++ n9604_clear_halt(_ep);
++ else {
++ write_9604(read_9604(ep->control) | EPC_STALL, ep->control);
++ }
++ spin_unlock_irqrestore(&ep->dev->lock, flags);
++exit:
++ return retval;
++}
++
++static int n9604_fifo_status(struct usb_ep *_ep) {//not implemented
++ return -1;
++}
++
++static void n9604_fifo_flush(struct usb_ep *_ep) {//not implemented
++ struct n9604_ep *ep;
++ ep = container_of (_ep, struct n9604_ep, ep);
++}
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_ep_ops n9604_ep_ops = {
++ .enable = n9604_ep_enable,
++ .disable = n9604_ep_disable,
++
++ .alloc_request = n9604_alloc_request,//io request objects called struct usb_request
++ .free_request = n9604_free_request,
++
++ .alloc_buffer = n9604_alloc_buffer,
++ .free_buffer = n9604_free_buffer,
++
++ .queue = n9604_queue,//submit a struct usb_request object to an endpoint
++ .dequeue = n9604_dequeue,
++
++ .set_halt = n9604_set_halt,//halts an endpoint
++ .fifo_status = n9604_fifo_status,//bytes in FIFO + data ready to go in FIFO
++ .fifo_flush = n9604_fifo_flush,//flush all the data, endpoint is probably been reconfigured
++};
++
++/*-------------------------------------------------------------------------*/
++
++static int n9604_get_frame(struct usb_gadget *_gadget)
++{
++ return -EOPNOTSUPP;
++}
++
++static const struct usb_gadget_ops n9604_ops = {
++ .get_frame = n9604_get_frame,
++};
++
++/*-------------------------------------------------------------------------*/
++
++static void udc_reinit (struct n9604_udc *dev)
++{
++ static char *names [] = { "ep0", "ep1in", "ep2out", "ep3in", "ep4out", "ep5in", "ep6out" };
++ unsigned i;
++
++ INIT_LIST_HEAD (&dev->gadget.ep_list);
++ dev->gadget.ep0 = &dev->ep [0].ep;
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++ dev->irqs = 0;
++ dev->configured = 0;
++
++ //for (i = 0; i < 7; i++) {
++ for (i = 0; i < ARRAY_SIZE(names); i++) {
++ struct n9604_ep *ep = &dev->ep[i];
++ ep->num = i;
++ ep->numActual = i;
++ ep->ep.name = names[i];
++ ep->irqs = 0;
++ if (i) {
++ ep->fifo = (i * 4) + RXD0; //each FIFO address is 4 bytes away. TXD0 is the first
++ ep->control = ep->fifo - 1;
++ ep->status = ep->fifo + 1;
++ ep->command = ep->fifo + 2;
++ Flush(ep->command);//flush any data in the fifo//we don't care about the previous state
++ read_9604(ep->status);
++ ep->ep.maxpacket = MAX_FIFO_SIZE;
++ } else {//we are endpoint 0
++ ep->fifo = ep->control = ep->status = ep->command = 0xff;//this should force an error
++ //we need to do this since we don't know if
++ //this is tx or rx
++ read_9604(TXS0);
++ Flush(TXC0);
++ Flush(RXC0);//we could potentially (probably) be overwriting a pending setup packet
++ if (ep->stage)//if we get a setup packet before we have a chance to finish the reset we have a problem
++ read_9604(RXS0);//fix this by sending stalls or something
++ ep->stage = 0;
++ ep->ep.maxpacket = MAX_EP0_SIZE;
++ }
++ ep->is_in = i % 2;
++ ep->fifoNum = (i + ep->is_in) / 2;//ignored for endpoint 0
++ ep->ep.ops = &n9604_ep_ops;
++ list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
++ ep->dev = dev;
++ INIT_LIST_HEAD (&ep->queue);
++ ep->nuking=0;
++ ep->queue_reqs = 0;
++ ep->queue_active = 0;
++ ep->packets = 0;
++ ep->desc = 0;
++ ep->irqs = 0;
++ }
++
++ list_del_init (&dev->ep[0].ep.ep_list);
++
++ write_9604(~WKUP_PNDUSB & ~WKUP_PNDUC & read_9604(WKUP), WKUP);//clear the bits, we've done a reset
++ write_9604(FAR_AD_EN, FAR);//enable the chip to answer requests//address 0
++ dev->address = 0;
++ write_9604(0, EPC0);//clear the control register
++ write_9604(NFSR_NodeOperational, NFSR);//we're going for gold
++}
++
++static void udc_reset(struct n9604_udc *dev)
++{
++ //USBD_DISABLE_IRQ; This disables all interrupts sharing that line
++ write_9604(MCNTRL_SRST,MCNTRL);//software reset -- this also prevents pullup
++ write_9604(0x00, MAMSK); //disable interrupts
++}
++
++
++
++static void udc_enable(struct n9604_udc *dev)
++{
++ udc_reset(dev); //this is to prevent a pullup resistor
++ udc_reinit (dev);
++
++ dev->gadget.speed = USB_SPEED_FULL;
++
++ // enable ep0 interrupts
++ dev->ep[0].is_in = 0;
++
++ write_9604(MAMSK_WARN | MAMSK_ALT | MAMSK_TX_EV | MAMSK_RX_EV | MAMSK_INTR, MAMSK);//for now we turn it all on, except frames & ULD & NAK
++ write_9604(ALTMSK_RESET, ALTMSK);//just turn on reset
++ write_9604(0x11, TXMSK);
++ write_9604(0x11, RXMSK);
++ write_9604(0x0, NAKMSK);
++ write_9604(0x0, FWMSK);
++ write_9604(MCNTRL_NAT | MCNTRL_INTOC_ActHigh, MCNTRL);//this activates the pull-up and turns on interrupts
++ USBD_ENABLE_IRQ;
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* keeping it simple:
++ * - one bus driver, initted first;
++ * - one function driver, initted second
++ */
++
++static struct n9604_udc *the_controller;
++
++/* when a driver is successfully registered, it will receive
++ * control requests including set_configuration(), which enables
++ * non-control requests. then usb traffic follows until a
++ * disconnect is reported. then a host may connect again, or
++ * the driver might get unbound.
++ */
++int usb_gadget_register_driver(struct usb_gadget_driver *driver)
++{
++ struct n9604_udc *dev = the_controller;
++ int retval;
++
++ if (!driver
++ || driver->speed != USB_SPEED_FULL
++ || !driver->bind
++ || !driver->unbind
++ || !driver->disconnect
++ || !driver->setup)
++ return -EINVAL;
++ if (!dev)
++ return -ENODEV;
++ if (dev->driver)
++ return -EBUSY;
++
++ /* hook up the driver */
++ dev->driver = driver;
++ retval = driver->bind(&dev->gadget);
++ if (retval) {
++ dev->driver = 0;
++ return retval;
++ }
++
++ /* then enable host detection and ep0; and we're ready
++ * for set_configuration as well as eventual disconnect.
++ */
++ udc_enable(dev);
++
++ return 0;
++}
++EXPORT_SYMBOL(usb_gadget_register_driver);
++
++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
++{
++ struct n9604_udc *dev = the_controller;
++ unsigned long flags;
++ int i;
++
++ if (!dev)
++  return -ENODEV;
++ if (!driver || driver != dev->driver)
++  return -EINVAL;
++
++ spin_lock_irqsave(&dev->lock, flags);
++ dev->driver = 0;
++
++ udc_reset(dev);//reset & disable irqs
++ for (i = 0; i < ARRAY_SIZE(dev->ep); i++)
++  nuke(&dev->ep [i], -ESHUTDOWN);
++ spin_unlock_irqrestore(&dev->lock, flags);
++
++ if (dev->gadget.speed != USB_SPEED_UNKNOWN)
++  driver->disconnect(&dev->gadget);
++ driver->unbind(&dev->gadget);
++
++ return 0;
++}
++EXPORT_SYMBOL(usb_gadget_unregister_driver);
++
++
++/*-------------------------------------------------------------------------*/
++
++inline u8 tx_ev_irq(struct n9604_udc *dev) {
++ u8 mask;
++
++ mask = read_9604(TXEV) & read_9604(TXMSK);
++
++ if (mask & TXEV_FIFO0) {
++ write_9604(0, EPC0);//make sure we are not stalled, & not using the default address
++ read_9604(TXS0);//should really check for error conditions
++ dev->ep[0].irqs++;
++ pio_advance(&dev->ep[0]);
++ }
++ if (mask & TXEV_FIFO1) {
++ read_9604(TXS1);
++ dev->ep[1].irqs++;
++ pio_advance(&dev->ep[1]);
++ }
++ if (mask & TXEV_FIFO2) {
++ read_9604(TXS2);
++ dev->ep[3].irqs++;
++ pio_advance(&dev->ep[3]);
++ }
++ if (mask & TXEV_FIFO3) {
++ read_9604(TXS3);
++ dev->ep[5].irqs++;
++ pio_advance(&dev->ep[5]);
++ }
++ return mask;
++}
++
++static void my_req_complete(struct usb_ep *_ep, struct usb_request *req) {//this was for the setup packet, but I guess I could use it for anything
++ n9604_free_buffer(_ep, req->buf, req->dma, req->length);
++ n9604_free_request(_ep, req);
++}
++
++inline void send_dummy_packet(int endpoint, struct n9604_udc *dev, int length) {
++ struct usb_request *my_req;
++ my_req = n9604_alloc_request(&dev->ep[endpoint].ep, GFP_ATOMIC);
++ my_req->length = length;
++ my_req->buf = n9604_alloc_buffer(&dev->ep[endpoint].ep, length, &my_req->dma, GFP_ATOMIC);
++ my_req->complete = my_req_complete;
++ n9604_queue(&dev->ep[endpoint].ep, my_req, GFP_ATOMIC);
++}
++
++inline void send_zero_length(int endpoint, struct n9604_udc *dev) {
++ send_dummy_packet(endpoint, dev, 0);
++}
++
++inline void rx_ev_irq(struct n9604_udc *dev) {
++ u8 mask;
++ struct n9604_ep *ep;
++
++ mask = read_9604(RXEV) & read_9604(RXMSK);
++
++ if (mask & RXEV_FIFO0) {
++ static int read_mode = 0;
++ u8 rxs_mask = read_9604(RXS0);
++ ep = &dev->ep[0];
++ ep->irqs++;
++ if (rxs_mask & RXS_SETUP) {
++ struct usb_ctrlrequest ctrl;
++ ep->packets++;
++ write_9604(0x40, ALTMSK);//someone is talking to us. Make sure we can be reset if we lose this communication
++ ep->stage = 1;
++ rxs_mask = read_9604(RXS0);//2nd read (1st one is for zero length packet)
++ ctrl.bRequestType = read_9604(RXD0);
++ ctrl.bRequest = read_9604(RXD0);
++ ctrl.wValue = read_9604(RXD0) + (read_9604(RXD0) << 8);
++ ctrl.wIndex = read_9604(RXD0) + (read_9604(RXD0) << 8);
++ ctrl.wLength = read_9604(RXD0) + (read_9604(RXD0) << 8);
++ ep->toggle = 1;
++ request_voodoo = ctrl.wLength;
++ if (ctrl.bRequestType & 0x80) {//This is an IN transaction
++ ep->is_in = 1;//David: is this correct for both cases//check with n9604_queue
++ read_mode = 0;
++ if (ctrl.wLength) {//should be followed by ZLP out packet
++ } else {//host expects ZLP out packet
++ ep->stage = 2;
++ }
++ } else {//This is an out transaction
++ if (ctrl.wLength) {
++ ep->is_in = 0;
++ read_mode = 1;
++ } else {//host expects ZLP in packet
++ read_mode = 0;
++ ep->stage = 2;
++ ep->is_in = 1;
++ }
++ }
++ switch (ctrl.bRequest) {
++ case USB_REQ_SET_ADDRESS:
++ write_9604(EPC_DEF, EPC0);//we still want to respond to the default address
++ write_9604(((dev->address = (ctrl.wValue & FAR_AD_MASK))) | FAR_AD_EN, FAR);
++ send_zero_length(0, dev);
++ dev->configured = 1;//we can send longer packets now :)
++ read_9604(ALTEV);
++ write_9604(ALTMSK_RESET, ALTMSK);//we also listen to reset requests too
++ break;
++ case USB_REQ_CLEAR_FEATURE:
++ if (ctrl.wValue == 0 && ctrl.bRequestType == 2) {//endpoint halt
++ int i;
++ for (i = 0; i < ARRAY_SIZE(dev->ep); i++)
++ if ((ctrl.wIndex & 0xF) == dev->ep[i].numActual)
++ n9604_clear_halt(&dev->ep[i].ep);
++ send_zero_length(0, dev);
++ break;
++ }
++ case USB_REQ_SET_DESCRIPTOR:
++ case USB_REQ_SYNCH_FRAME:
++ case USB_REQ_GET_STATUS:
++ case USB_REQ_SET_FEATURE:
++ case USB_REQ_SET_CONFIGURATION:
++ case USB_REQ_GET_DESCRIPTOR:
++ case USB_REQ_GET_CONFIGURATION:
++ case USB_REQ_SET_INTERFACE:
++ case USB_REQ_GET_INTERFACE:
++ default:
++ if (dev->driver->setup(&dev->gadget, &ctrl) < 0)//there was an error
++ if (((ctrl.bRequestType & 0x80) && ctrl.wLength) || (!(ctrl.bRequestType & 0x80) && !ctrl.wLength))
++ send_zero_length(0, dev);
++ }//crtl.bRequest
++ }//setup
++ else if (read_mode)
++ pio_advance(ep);
++ else {
++ ep->stage = 0;
++ ep->packets++;
++ }
++ }//fifo 0
++ if (mask & RXEV_FIFO1) {
++ ep = &dev->ep[2];
++ pio_advance(ep);
++ ep->irqs++;
++ }
++ if (mask & RXEV_FIFO2) {
++ ep = &dev->ep[4];
++ pio_advance(ep);
++ ep->irqs++;
++ }
++ if (mask & RXEV_FIFO3) {
++ ep = &dev->ep[6];
++ pio_advance(ep);
++ ep->irqs++;
++ }
++}
++
++inline void alt_ev_irq(struct n9604_udc *dev) {
++ u8 mask;
++
++ mask = read_9604(ALTEV) & read_9604(ALTMSK);
++
++ if (mask & ALTEV_EOP);//intentionally ignored
++ if (mask & ALTEV_SD3);//intentionally ignored
++ if (mask & ALTEV_SD5);//intentionally ignored
++ if (mask & ALTEV_RESET) {
++ int i;
++ udelay(1200);//no idea why this is needed, but it makes things work
++ write_9604(0x0, FAR);//let's not respond to any packets until we are ready
++ write_9604(NFSR_NodeReset, NFSR);
++ dev->driver->disconnect(&dev->gadget);
++ for (i = 0; i < ARRAY_SIZE(dev->ep); i++)
++ nuke(&dev->ep [i], -ESHUTDOWN);//this should be handled above by disconnect
++ write_9604(0x00, ALTMSK);//make sure reset is turned off, or we will constantly be interrupted
++ write_9604(0x11, TXMSK);
++ write_9604(0x11, RXMSK);
++ udc_reinit(dev);
++ dev->gadget.speed = USB_SPEED_FULL;
++ dev->ep[0].is_in = 0;
++ }
++ if (mask & ALTEV_RESUME); //write_9604(NFSR_NodeOperational, NFSR);
++ if (mask & ALTEV_WKUP);//we don't really sleep
++ if (mask & ALTEV_DMA);//intentionally ignored
++}
++
++static void n9604_irq(int irq, void *_dev, struct pt_regs *r) {
++ struct n9604_udc *dev = _dev;
++ u8 mask;
++
++ mask = read_9604(MAEV) & read_9604(MAMSK);
++ if (!mask)
++ return;
++
++ if (mask & MAEV_ALT) {
++ alt_ev_irq(dev);
++ mask = read_9604(MAEV) & read_9604(MAMSK);//force a re-read of the current pending interrupts
++ }
++ if (mask & MAEV_TX_EV)
++ tx_ev_irq(dev);
++ if (mask & MAEV_RX_EV)
++ rx_ev_irq(dev);
++ dev->irqs++;
++ return;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static int __init init (void)
++{
++ struct n9604_udc *dev;
++ int ret;
++ u8 * addr;
++
++ if (the_controller)
++ return -EBUSY;
++
++ addr = ioremap(USBN9604_PHYS, 0x2);//ioremap will bump this to 1 page size
++ if (!addr) {
++ ERROR(dev, KERN_ERR "Unable to remap address\n");
++ return -EINVAL;
++ }
++
++ USBN9604_Offset = addr;
++
++ if ((read_9604(RID) & 0xF) != 0x2) { //0x2 is the identifier for 9603/4
++ iounmap(addr);
++ return -ENODEV;
++ }
++
++ /* alloc, and start init */
++ dev = kmalloc(sizeof *dev, SLAB_KERNEL);
++ if (dev == NULL){
++ WARN(dev, "No memory");
++ iounmap(addr);
++ return -ENOMEM;
++ }
++ memset(dev, 0, sizeof *dev);
++ spin_lock_init(&dev->lock);
++ dev->gadget.ops = &n9604_ops;
++ dev->gadget.is_dualspeed = 0;
++
++ /* the "gadget" abstracts/virtualizes the controller */
++ dev->gadget.dev.bus_id = "gadget";
++ dev->gadget.name = driver_name;
++
++ /* initialize the hardware */
++
++ udc_reset(dev);
++
++ write_9604(CCONF_CODIS | 11, CCONF);
++
++ udc_reinit(dev);//this is necessary as it sets up the epx functions
++
++ the_controller = dev;
++
++ if ((ret=request_irq(IRQ_GPIOC, n9604_irq, SA_SHIRQ, driver_name,dev))) {
++ WARN(dev, "Can't get IRQ\n");
++ iounmap(addr);
++ return ret;
++ }
++
++ return 0;
++}
++module_init (init);
++
++static void __exit cleanup (void)
++{
++ struct n9604_udc *dev = the_controller;
++
++ //first kill the interrupts
++ udc_reset(dev);
++ free_irq(IRQ_GPIOC, dev);
++
++ /* start with the driver above us */
++ if (dev->driver) {
++ /* should have been done already by driver model core */
++ WARN(dev, "Warning: Driver '%s' is still registered\n",
++ dev->driver->driver.name);
++ usb_gadget_unregister_driver(dev->driver);
++ }
++ kfree(dev);
++ iounmap(USBN9604_Offset);
++ the_controller = 0;
++
++}
++module_exit (cleanup);
++
++MODULE_PARM_DESC (delayTime, "Delays after reads and writes to the USB chip");
++MODULE_PARM (delayTime, "i");
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/n9604.h kernel/drivers/usb/gadget/n9604.h
+--- /tmp/kernel/drivers/usb/gadget/n9604.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/n9604.h 2005-04-22 17:53:19.463534794 +0200
+@@ -0,0 +1,112 @@
++/*
++ * National 9604 USB device controller driver
++ *
++ * Copyright 2003 Technical Solutions Inc.
++ *
++ * ported from:
++ *
++ * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
++ *
++ * Copyright (C) 2000-2002 Lineo
++ * by Stuart Lynne, Tom Rushworth, and Bruce Balden
++ * Copyright (C) 2002 Toshiba Corporation
++ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#define MAX_FIFO_SIZE 64
++#define MAX_EP0_SIZE 8
++
++struct n9604_ep {
++ struct usb_ep ep;
++ struct n9604_udc *dev;
++ unsigned long irqs;
++ int acct_req_lengths[4];
++ int acct_req_dir[4];//direction
++ unsigned long queue_reqs;//how many times has n9604_queue been called
++ unsigned long queue_active;//how many current requests
++ unsigned long packets;//counter of raw packets
++ unsigned num:4,
++ numActual:4,
++ fifoNum:2,
++ is_in:1,
++ stage:2,//for ep0, 0 = unused, 1 = got setup, 2 = done transfer/ready to send/receive ZLP
++ toggle:1,
++ nuking:1;//are we killing off requests on this endpoint//only used for ep0 to help with stages
++ /* analogous to a host-side qh */
++ struct list_head queue;
++ const struct usb_endpoint_descriptor *desc;
++
++ u8 control;
++ u8 fifo;
++ u8 status;
++ u8 command;
++};
++
++struct n9604_request {
++ struct usb_request req;
++ struct list_head queue;
++ int complete;//this is added for tx requests
++ //if set the entire request has been written to the fifo, just waiting for confirmation
++ //from the interrupt that it has been sent
++
++ unsigned mapped:1;
++};
++
++struct n9604_udc {
++ struct usb_gadget gadget;
++ spinlock_t lock;
++ struct n9604_ep ep[7];
++ struct usb_gadget_driver *driver;
++ int configured;
++
++ u8 address;
++
++ /* statistics... */
++ unsigned long irqs;
++};
++
++
++/*-------------------------------------------------------------------------*/
++
++#define xprintk(dev,level,fmt,args...) \
++ printk(level "%s %s: " fmt , driver_name , \
++ "S2410 gadget" , ## args)
++
++#define ERROR(dev,fmt,args...) \
++ xprintk(dev , KERN_ERR , fmt , ## args)
++#define WARN(dev,fmt,args...) \
++ xprintk(dev , KERN_WARNING , fmt , ## args)
++#define INFO(dev,fmt,args...) \
++ xprintk(dev , KERN_INFO , fmt , ## args)
++
++/*-------------------------------------------------------------------------*/
++
++/* 2.5 stuff that's sometimes missing in 2.4 */
++
++#ifndef container_of
++#define container_of list_entry
++#endif
++
++#ifndef likely
++#define likely(x) (x)
++#define unlikely(x) (x)
++#endif
++
++#ifndef BUG_ON
++#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
++#endif
++
++#ifndef WARN_ON
++#define WARN_ON(x) do { } while (0)
++#endif
++
++#ifndef IRQ_NONE
++typedef void irqreturn_t;
++#define IRQ_NONE
++#define IRQ_HANDLED
++#define IRQ_RETVAL(x)
++#endif
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/n9604regs.h kernel/drivers/usb/gadget/n9604regs.h
+--- /tmp/kernel/drivers/usb/gadget/n9604regs.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/n9604regs.h 2005-04-22 17:53:19.466534306 +0200
+@@ -0,0 +1,248 @@
++/* National 9604 registers */
++
++#define USBN9604_PHYS 0x08000000
++
++extern u8 * USBN9604_Offset;
++
++static u8 last_address = 255;//an invalid address
++
++inline u8 read_9604(u8 addr) {
++ u8 tmp;
++ if (addr != last_address) {
++ outb(addr, USBN9604_Offset + 1);
++ last_address = addr;
++ }
++ tmp = inb(USBN9604_Offset);
++ return tmp;
++}
++
++inline void write_9604(u8 value, u8 addr) {
++ if (addr != last_address) {
++ outb(addr, USBN9604_Offset + 1);
++ last_address = addr;
++ }
++ outb(value, USBN9604_Offset);
++}
++
++
++
++#define MCNTRL 0x00
++#define CCONF 0x01
++
++#define RID 0x03
++#define FAR 0x04
++#define NFSR 0x05
++#define MAEV 0x06
++#define MAMSK 0x07
++#define ALTEV 0x08
++#define ALTMSK 0x09
++#define TXEV 0x0A
++#define TXMSK 0x0B
++#define RXEV 0x0C
++#define RXMSK 0x0D
++#define NAKEV 0x0E
++#define NAKMSK 0x0F
++#define FWEV 0x10
++#define FWMSK 0x11
++#define FNH 0x12
++#define FNL 0x13
++#define DMACNTRL 0x14
++#define DMAEV 0x15
++#define DMAMSK 0x16
++#define MIR 0x17
++#define DMACNT 0x18
++#define DMAERR 0x19
++
++#define WKUP 0x1B
++
++
++
++
++#define EPC0 0x20
++#define TXD0 0x21
++#define TXS0 0x22
++#define TXC0 0x23
++
++#define RXD0 0x25
++#define RXS0 0x26
++#define RXC0 0x27
++#define EPC1 0x28
++#define TXD1 0x29
++#define TXS1 0x2A
++#define TXC1 0x2B
++#define EPC2 0x2C
++#define RXD1 0x2D
++#define RXS1 0x2E
++#define RXC1 0x2F
++#define EPC3 0x30
++#define TXD2 0x31
++#define TXS2 0x32
++#define TXC2 0x33
++#define EPC4 0x34
++#define RXD2 0x35
++#define RXS2 0x36
++#define RXC2 0x37
++#define EPC5 0x38
++#define TXD3 0x39
++#define TXS3 0x3A
++#define TXC3 0x3B
++#define EPC6 0x3C
++#define RXD3 0x3D
++#define RXS3 0x3E
++#define RXC3 0x3F
++
++
++/* MCNTRL values */
++#define MCNTRL_SRST (1 << 0)
++#define MCNTRL_VGE (1 << 2)
++#define MCNTRL_NAT (1 << 3)
++#define MCNTRL_INTOC_MASK (3 << 6)
++#define MCNTRL_INTOC_DISABLE 0
++#define MCNTRL_INTOC_ActLowOpen (1 << 6)
++#define MCNTRL_INTOC_ActHigh (2 << 6)
++#define MCNTRL_INTOC_ActLowPP (3 << 6)
++
++/* CCONF values */
++#define CCONF_CLKDIV_MASK 0x0F
++#define CCONF_CODIS (1 << 7)
++
++/* FAR values */
++#define FAR_AD_MASK 0x7F
++#define FAR_AD_EN 0x80
++
++/* NFSR values */
++#define NFSR_NodeReset 0x0
++#define NFSR_NodeResume 0x1
++#define NFSR_NodeOperational 0x2
++#define NFSR_NodeSuspend 0x3
++
++/* MAEV values */
++#define MAEV_WARN (1 << 0)
++#define MAEV_ALT (1 << 1)
++#define MAEV_TX_EV (1 << 2)
++#define MAEV_FRAME (1 << 3)
++#define MAEV_NAK (1 << 4)
++#define MAEV_ULD (1 << 5)
++#define MAEV_RX_EV (1 << 6)
++#define MAEV_INTR (1 << 7)
++
++/* MAMSK values */
++#define MAMSK_WARN (1 << 0)
++#define MAMSK_ALT (1 << 1)
++#define MAMSK_TX_EV (1 << 2)
++#define MAMSK_FRAME (1 << 3)
++#define MAMSK_NAK (1 << 4)
++#define MAMSK_ULD (1 << 5)
++#define MAMSK_RX_EV (1 << 6)
++#define MAMSK_INTR (1 << 7)
++
++/* ALTEV values */
++
++#define ALTEV_WKUP (1 << 1)
++#define ALTEV_DMA (1 << 2)
++#define ALTEV_EOP (1 << 3)
++#define ALTEV_SD3 (1 << 4)
++#define ALTEV_SD5 (1 << 5)
++#define ALTEV_RESET (1 << 6)
++#define ALTEV_RESUME (1 << 7)
++
++/* ALTMSK values */
++
++#define ALTMSK_WKUP (1 << 1)
++#define ALTMSK_DMA (1 << 2)
++#define ALTMSK_EOP (1 << 3)
++#define ALTMSK_SD3 (1 << 4)
++#define ALTMSK_SD5 (1 << 5)
++#define ALTMSK_RESET (1 << 6)
++#define ALTMSK_RESUME (1 << 7)
++
++/* NAKEV values */
++
++#define NAKEV_TXFIFO0 (1 << 0)
++#define NAKEV_TXFIFO1 (1 << 1)
++#define NAKEV_TXFIFO2 (1 << 2)
++#define NAKEV_TXFIFO3 (1 << 3)
++#define NAKEV_RXFIFO0 (1 << 4)
++#define NAKEV_RXFIFO1 (1 << 5)
++#define NAKEV_RXFIFO2 (1 << 6)
++#define NAKEV_RXFIFO3 (1 << 7)
++
++
++/* WKUP values */
++#define WKUP_PNDUSB (1 << 0)
++#define WKUP_PNDUC (1 << 1)
++#define WKUP_ENUSB (1 << 2)
++#define WKUP_ENUC (1 << 3)
++#define WKUP_WKMODE (1 << 5)
++#define WKUP_HOS (1 << 6)
++#define WKUP_FHT (1 << 7)
++
++/* EPC values */
++
++#define EPC_EP_MASK 0x0F //EP0 == 0
++#define EPC_EP_EN (1 << 4)//not EP0
++#define EPC_ISO (1 << 5)//not EP0
++#define EPC_DEF (1 << 6)//EP0 only
++#define EPC_STALL (1 << 7)
++
++/* TXS values */
++
++#define TXS_TCOUNT_MASK 0x1F
++#define TXS_TX_DONE (1 << 5)
++#define TXS_ACK_STAT (1 << 6)
++#define TXS_TX_URUN (1 << 7)
++
++/* TXC values */
++
++#define TXC_TX_EN (1 << 0)
++#define TXC_LAST (1 << 1)//not for endpoint 0
++#define TXC_TOGGLE (1 << 2)//sets DATA1 when set
++#define TXC_FLUSH (1 << 3)
++#define TXC_IGN_IN (1 << 4)//only endpoint 0
++#define TXC_RFF (1 << 4)//not for endpoint 0
++#define TXC_TFWL0 (1 << 5)//"
++#define TXC_TFWL1 (1 << 6)//"
++#define TXC_IGN_ISOMSK (1 << 7)//"
++
++/* TXEV values */
++
++#define TXEV_FIFO0 (1 << 0)
++#define TXEV_FIFO1 (1 << 1)
++#define TXEV_FIFO2 (1 << 2)
++#define TXEV_FIFO3 (1 << 3)
++#define TXEV_UDRRN0 (1 << 4)
++#define TXEV_UDRRN1 (1 << 5)
++#define TXEV_UDRRN2 (1 << 6)
++#define TXEV_UDRRN3 (1 << 7)
++
++
++/* RXEV values */
++
++#define RXEV_FIFO0 (1 << 0)
++#define RXEV_FIFO1 (1 << 1)
++#define RXEV_FIFO2 (1 << 2)
++#define RXEV_FIFO3 (1 << 3)
++#define RXEV_OVRRN0 (1 << 4)
++#define RXEV_OVRRN1 (1 << 5)
++#define RXEV_OVRRN2 (1 << 6)
++#define RXEV_OVRRN3 (1 << 7)
++
++/* RXC values */
++
++#define RXC_RX_EN (1 << 0)
++#define RXC_IGN_OUT (1 << 1)
++#define RXC_IGN_SETUP (1 << 2)
++#define RXC_FLUSH (1 << 3)
++#define RXC_RFWL0 (1 << 5)
++#define RXC_RFWL1 (1 << 6)
++
++/* RXS values */
++
++#define RXS_RCOUNTMASK 0xF
++#define RXS_RX_LAST (1 << 4)
++#define RXS_TOGGLE (1 << 5)
++#define RXS_SETUP (1 << 6)
++#define RXS_RX_ERR (1 << 7)
++
++
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/ndis.h kernel/drivers/usb/gadget/ndis.h
+--- /tmp/kernel/drivers/usb/gadget/ndis.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/ndis.h 2005-04-22 17:53:19.469533817 +0200
+@@ -0,0 +1,217 @@
++/*
++ * ndis.h
++ *
++ * ntddndis.h modified by Benedikt Spranger <b.spranger@pengutronix.de>
++ *
++ * Thanks to the cygwin development team,
++ * espacially to Casper S. Hornstrup <chorns@users.sourceforge.net>
++ *
++ * THIS SOFTWARE IS NOT COPYRIGHTED
++ *
++ * This source code is offered for use in the public domain. You may
++ * use, modify or distribute it freely.
++ *
++ * This code is distributed in the hope that it will be useful but
++ * WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESS OR IMPLIED ARE HEREBY
++ * DISCLAIMED. This includes but is not limited to warranties of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
++ *
++ */
++
++#ifndef _LINUX_NDIS_H
++#define _LINUX_NDIS_H
++
++
++#define NDIS_STATUS_MULTICAST_FULL 0xC0010009
++#define NDIS_STATUS_MULTICAST_EXISTS 0xC001000A
++#define NDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B
++
++enum NDIS_DEVICE_POWER_STATE {
++ NdisDeviceStateUnspecified = 0,
++ NdisDeviceStateD0,
++ NdisDeviceStateD1,
++ NdisDeviceStateD2,
++ NdisDeviceStateD3,
++ NdisDeviceStateMaximum
++};
++
++struct NDIS_PM_WAKE_UP_CAPABILITIES {
++ enum NDIS_DEVICE_POWER_STATE MinMagicPacketWakeUp;
++ enum NDIS_DEVICE_POWER_STATE MinPatternWakeUp;
++ enum NDIS_DEVICE_POWER_STATE MinLinkChangeWakeUp;
++};
++
++/* NDIS_PNP_CAPABILITIES.Flags constants */
++#define NDIS_DEVICE_WAKE_UP_ENABLE 0x00000001
++#define NDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE 0x00000002
++#define NDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE 0x00000004
++
++struct NDIS_PNP_CAPABILITIES {
++ u32 Flags;
++ struct NDIS_PM_WAKE_UP_CAPABILITIES WakeUpCapabilities;
++};
++
++struct NDIS_PM_PACKET_PATTERN {
++ u32 Priority;
++ u32 Reserved;
++ u32 MaskSize;
++ u32 PatternOffset;
++ u32 PatternSize;
++ u32 PatternFlags;
++};
++
++
++/* Required Object IDs (OIDs) */
++#define OID_GEN_SUPPORTED_LIST 0x00010101
++#define OID_GEN_HARDWARE_STATUS 0x00010102
++#define OID_GEN_MEDIA_SUPPORTED 0x00010103
++#define OID_GEN_MEDIA_IN_USE 0x00010104
++#define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
++#define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
++#define OID_GEN_LINK_SPEED 0x00010107
++#define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
++#define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
++#define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
++#define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
++#define OID_GEN_VENDOR_ID 0x0001010C
++#define OID_GEN_VENDOR_DESCRIPTION 0x0001010D
++#define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
++#define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
++#define OID_GEN_DRIVER_VERSION 0x00010110
++#define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
++#define OID_GEN_PROTOCOL_OPTIONS 0x00010112
++#define OID_GEN_MAC_OPTIONS 0x00010113
++#define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
++#define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
++#define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
++#define OID_GEN_SUPPORTED_GUIDS 0x00010117
++#define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
++#define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
++#define OID_GEN_MACHINE_NAME 0x0001021A
++#define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
++#define OID_GEN_VLAN_ID 0x0001021C
++
++/* Optional OIDs */
++#define OID_GEN_MEDIA_CAPABILITIES 0x00010201
++#define OID_GEN_PHYSICAL_MEDIUM 0x00010202
++
++/* Required statistics OIDs */
++#define OID_GEN_XMIT_OK 0x00020101
++#define OID_GEN_RCV_OK 0x00020102
++#define OID_GEN_XMIT_ERROR 0x00020103
++#define OID_GEN_RCV_ERROR 0x00020104
++#define OID_GEN_RCV_NO_BUFFER 0x00020105
++
++/* Optional statistics OIDs */
++#define OID_GEN_DIRECTED_BYTES_XMIT 0x00020201
++#define OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
++#define OID_GEN_MULTICAST_BYTES_XMIT 0x00020203
++#define OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
++#define OID_GEN_BROADCAST_BYTES_XMIT 0x00020205
++#define OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
++#define OID_GEN_DIRECTED_BYTES_RCV 0x00020207
++#define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
++#define OID_GEN_MULTICAST_BYTES_RCV 0x00020209
++#define OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
++#define OID_GEN_BROADCAST_BYTES_RCV 0x0002020B
++#define OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
++#define OID_GEN_RCV_CRC_ERROR 0x0002020D
++#define OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
++#define OID_GEN_GET_TIME_CAPS 0x0002020F
++#define OID_GEN_GET_NETCARD_TIME 0x00020210
++#define OID_GEN_NETCARD_LOAD 0x00020211
++#define OID_GEN_DEVICE_PROFILE 0x00020212
++#define OID_GEN_INIT_TIME_MS 0x00020213
++#define OID_GEN_RESET_COUNTS 0x00020214
++#define OID_GEN_MEDIA_SENSE_COUNTS 0x00020215
++#define OID_GEN_FRIENDLY_NAME 0x00020216
++#define OID_GEN_MINIPORT_INFO 0x00020217
++#define OID_GEN_RESET_VERIFY_PARAMETERS 0x00020218
++
++/* IEEE 802.3 (Ethernet) OIDs */
++#define NDIS_802_3_MAC_OPTION_PRIORITY 0x00000001
++
++#define OID_802_3_PERMANENT_ADDRESS 0x01010101
++#define OID_802_3_CURRENT_ADDRESS 0x01010102
++#define OID_802_3_MULTICAST_LIST 0x01010103
++#define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
++#define OID_802_3_MAC_OPTIONS 0x01010105
++#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
++#define OID_802_3_XMIT_ONE_COLLISION 0x01020102
++#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
++#define OID_802_3_XMIT_DEFERRED 0x01020201
++#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
++#define OID_802_3_RCV_OVERRUN 0x01020203
++#define OID_802_3_XMIT_UNDERRUN 0x01020204
++#define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
++#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
++#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
++
++/* OID_GEN_MINIPORT_INFO constants */
++#define NDIS_MINIPORT_BUS_MASTER 0x00000001
++#define NDIS_MINIPORT_WDM_DRIVER 0x00000002
++#define NDIS_MINIPORT_SG_LIST 0x00000004
++#define NDIS_MINIPORT_SUPPORTS_MEDIA_QUERY 0x00000008
++#define NDIS_MINIPORT_INDICATES_PACKETS 0x00000010
++#define NDIS_MINIPORT_IGNORE_PACKET_QUEUE 0x00000020
++#define NDIS_MINIPORT_IGNORE_REQUEST_QUEUE 0x00000040
++#define NDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS 0x00000080
++#define NDIS_MINIPORT_INTERMEDIATE_DRIVER 0x00000100
++#define NDIS_MINIPORT_IS_NDIS_5 0x00000200
++#define NDIS_MINIPORT_IS_CO 0x00000400
++#define NDIS_MINIPORT_DESERIALIZE 0x00000800
++#define NDIS_MINIPORT_REQUIRES_MEDIA_POLLING 0x00001000
++#define NDIS_MINIPORT_SUPPORTS_MEDIA_SENSE 0x00002000
++#define NDIS_MINIPORT_NETBOOT_CARD 0x00004000
++#define NDIS_MINIPORT_PM_SUPPORTED 0x00008000
++#define NDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE 0x00010000
++#define NDIS_MINIPORT_USES_SAFE_BUFFER_APIS 0x00020000
++#define NDIS_MINIPORT_HIDDEN 0x00040000
++#define NDIS_MINIPORT_SWENUM 0x00080000
++#define NDIS_MINIPORT_SURPRISE_REMOVE_OK 0x00100000
++#define NDIS_MINIPORT_NO_HALT_ON_SUSPEND 0x00200000
++#define NDIS_MINIPORT_HARDWARE_DEVICE 0x00400000
++#define NDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS 0x00800000
++#define NDIS_MINIPORT_64BITS_DMA 0x01000000
++
++#define NDIS_MEDIUM_802_3 0x00000000
++#define NDIS_MEDIUM_802_5 0x00000001
++#define NDIS_MEDIUM_FDDI 0x00000002
++#define NDIS_MEDIUM_WAN 0x00000003
++#define NDIS_MEDIUM_LOCAL_TALK 0x00000004
++#define NDIS_MEDIUM_DIX 0x00000005
++#define NDIS_MEDIUM_ARCENT_RAW 0x00000006
++#define NDIS_MEDIUM_ARCENT_878_2 0x00000007
++#define NDIS_MEDIUM_ATM 0x00000008
++#define NDIS_MEDIUM_WIRELESS_LAN 0x00000009
++#define NDIS_MEDIUM_IRDA 0x0000000A
++#define NDIS_MEDIUM_BPC 0x0000000B
++#define NDIS_MEDIUM_CO_WAN 0x0000000C
++#define NDIS_MEDIUM_1394 0x0000000D
++
++#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
++#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
++#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
++#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
++#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
++#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
++#define NDIS_PACKET_TYPE_SMT 0x00000040
++#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
++#define NDIS_PACKET_TYPE_GROUP 0x00000100
++#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00000200
++#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
++#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
++
++#define NDIS_MEDIA_STATE_CONNECTED 0x00000000
++#define NDIS_MEDIA_STATE_DISCONNECTED 0x00000001
++
++#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA 0x00000001
++#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED 0x00000002
++#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND 0x00000004
++#define NDIS_MAC_OPTION_NO_LOOPBACK 0x00000008
++#define NDIS_MAC_OPTION_FULL_DUPLEX 0x00000010
++#define NDIS_MAC_OPTION_EOTX_INDICATION 0x00000020
++#define NDIS_MAC_OPTION_8021P_PRIORITY 0x00000040
++#define NDIS_MAC_OPTION_RESERVED 0x80000000
++
++#endif /* _LINUX_NDIS_H */
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/net2280.c kernel/drivers/usb/gadget/net2280.c
+--- /tmp/kernel/drivers/usb/gadget/net2280.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/net2280.c 2005-04-22 17:53:19.478532352 +0200
+@@ -0,0 +1,2918 @@
++/*
++ * Driver for the NetChip 2280 USB device controller.
++ * Specs and errata are available from <http://www.netchip.com>.
++ *
++ * NetChip Technology Inc. supported the development of this driver.
++ *
++ *
++ * CODE STATUS HIGHLIGHTS
++ *
++ * This driver should work well with most "gadget" drivers, including
++ * the File Storage, Serial, and Ethernet/RNDIS gadget drivers
++ * as well as Gadget Zero and Gadgetfs.
++ *
++ * DMA is enabled by default. Drivers using transfer queues might use
++ * DMA chaining to remove IRQ latencies between transfers. (Except when
++ * short OUT transfers happen.) Drivers can use the req->no_interrupt
++ * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
++ * and DMA chaining is enabled.
++ *
++ * Note that almost all the errata workarounds here are only needed for
++ * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
++ */
++
++/*
++ * Copyright (C) 2003 David Brownell
++ * Copyright (C) 2003 NetChip Technologies
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#undef DEBUG /* messages on error and most fault paths */
++#undef VERBOSE /* extra debug messages (success too) */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/smp_lock.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++#include <asm/byteorder.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++#include <asm/unaligned.h>
++
++
++#define DRIVER_DESC "NetChip 2280 USB Peripheral Controller"
++#define DRIVER_VERSION "2004 Jan 14"
++
++#define DMA_ADDR_INVALID (~(dma_addr_t)0)
++#define EP_DONTUSE 13 /* nonzero */
++
++#define USE_RDK_LEDS /* GPIO pins control three LEDs */
++
++
++static const char driver_name [] = "net2280";
++static const char driver_desc [] = DRIVER_DESC;
++
++static const char ep0name [] = "ep0";
++static const char *ep_name [] = {
++ ep0name,
++ "ep-a", "ep-b", "ep-c", "ep-d",
++ "ep-e", "ep-f",
++};
++
++/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
++ * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
++ *
++ * The net2280 DMA engines are not tightly integrated with their FIFOs;
++ * not all cases are (yet) handled well in this driver or the silicon.
++ * Some gadget drivers work better with the dma support here than others.
++ * These two parameters let you use PIO or more aggressive DMA.
++ */
++static int use_dma = 1;
++static int use_dma_chaining = 0;
++
++MODULE_PARM (use_dma, "i");
++MODULE_PARM_DESC (use_dma, "true to use dma controllers");
++
++MODULE_PARM (use_dma_chaining, "i");
++MODULE_PARM_DESC (use_dma_chaining, "true to use dma descriptor queues");
++
++
++/* mode 0 == ep-{a,b,c,d} 1K fifo each
++ * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
++ * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
++ */
++static ushort fifo_mode = 0;
++
++MODULE_PARM (fifo_mode, "h");
++MODULE_PARM_DESC (fifo_mode, "net2280 fifo mode");
++
++
++#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
++
++#if defined(USE_SYSFS_DEBUG_FILES) || defined (DEBUG)
++static char *type_string (u8 bmAttributes)
++{
++ switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
++ case USB_ENDPOINT_XFER_BULK: return "bulk";
++ case USB_ENDPOINT_XFER_ISOC: return "iso";
++ case USB_ENDPOINT_XFER_INT: return "intr";
++ };
++ return "control";
++}
++#endif
++
++#include "net2280.h"
++
++#define valid_bit __constant_cpu_to_le32 (1 << VALID_BIT)
++#define dma_done_ie __constant_cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE)
++
++/*-------------------------------------------------------------------------*/
++
++static int
++net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
++{
++ struct net2280 *dev;
++ struct net2280_ep *ep;
++ u32 max, tmp;
++ unsigned long flags;
++
++ ep = container_of (_ep, struct net2280_ep, ep);
++ if (!_ep || !desc || ep->desc || _ep->name == ep0name
++ || desc->bDescriptorType != USB_DT_ENDPOINT)
++ return -EINVAL;
++ dev = ep->dev;
++ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
++ return -ESHUTDOWN;
++
++ /* erratum 0119 workaround ties up an endpoint number */
++ if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
++ return -EDOM;
++
++ /* sanity check ep-e/ep-f since their fifos are small */
++ max = le16_to_cpu (desc->wMaxPacketSize) & 0x1fff;
++ if (ep->num > 4 && max > 64)
++ return -ERANGE;
++
++ spin_lock_irqsave (&dev->lock, flags);
++ _ep->maxpacket = max & 0x7ff;
++ ep->desc = desc;
++
++ /* ep_reset() has already been called */
++ ep->stopped = 0;
++ ep->out_overflow = 0;
++
++ /* set speed-dependent max packet; may kick in high bandwidth */
++ set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max);
++
++ /* FIFO lines can't go to different packets. PIO is ok, so
++ * use it instead of troublesome (non-bulk) multi-packet DMA.
++ */
++ if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
++ DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
++ ep->ep.name, ep->ep.maxpacket);
++ ep->dma = NULL;
++ }
++
++ /* set type, direction, address; reset fifo counters */
++ writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
++ tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
++ if (tmp == USB_ENDPOINT_XFER_INT) {
++ /* erratum 0105 workaround prevents hs NYET */
++ if (dev->chiprev == 0100
++ && dev->gadget.speed == USB_SPEED_HIGH
++ && !(desc->bEndpointAddress & USB_DIR_IN))
++ writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE),
++ &ep->regs->ep_rsp);
++ } else if (tmp == USB_ENDPOINT_XFER_BULK) {
++ /* catch some particularly blatant driver bugs */
++ if ((dev->gadget.speed == USB_SPEED_HIGH
++ && max != 512)
++ || (dev->gadget.speed == USB_SPEED_FULL
++ && max > 64)) {
++ spin_unlock_irqrestore (&dev->lock, flags);
++ return -ERANGE;
++ }
++ }
++ ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
++ tmp <<= ENDPOINT_TYPE;
++ tmp |= desc->bEndpointAddress;
++ tmp |= (4 << ENDPOINT_BYTE_COUNT); /* default full fifo lines */
++ tmp |= 1 << ENDPOINT_ENABLE;
++ wmb ();
++
++ /* for OUT transfers, block the rx fifo until a read is posted */
++ ep->is_in = (tmp & USB_DIR_IN) != 0;
++ if (!ep->is_in)
++ writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
++
++ writel (tmp, &ep->regs->ep_cfg);
++
++ /* enable irqs */
++ if (!ep->dma) { /* pio, per-packet */
++ tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
++ writel (tmp, &dev->regs->pciirqenb0);
++
++ tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
++ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
++ | readl (&ep->regs->ep_irqenb);
++ writel (tmp, &ep->regs->ep_irqenb);
++ } else { /* dma, per-request */
++ tmp = (1 << (8 + ep->num)); /* completion */
++ tmp |= readl (&dev->regs->pciirqenb1);
++ writel (tmp, &dev->regs->pciirqenb1);
++
++ /* for short OUT transfers, dma completions can't
++ * advance the queue; do it pio-style, by hand.
++ * NOTE erratum 0112 workaround #2
++ */
++ if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
++ tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
++ writel (tmp, &ep->regs->ep_irqenb);
++
++ tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
++ writel (tmp, &dev->regs->pciirqenb0);
++ }
++ }
++
++ tmp = desc->bEndpointAddress;
++ DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
++ _ep->name, tmp & 0x0f, DIR_STRING (tmp),
++ type_string (desc->bmAttributes),
++ ep->dma ? "dma" : "pio", max);
++
++ /* pci writes may still be posted */
++ spin_unlock_irqrestore (&dev->lock, flags);
++ return 0;
++}
++
++static int handshake (u32 *ptr, u32 mask, u32 done, int usec)
++{
++ u32 result;
++
++ do {
++ result = readl (ptr);
++ if (result == ~(u32)0) /* "device unplugged" */
++ return -ENODEV;
++ result &= mask;
++ if (result == done)
++ return 0;
++ udelay (1);
++ usec--;
++ } while (usec > 0);
++ return -ETIMEDOUT;
++}
++
++static struct usb_ep_ops net2280_ep_ops;
++
++static void ep_reset (struct net2280_regs *regs, struct net2280_ep *ep)
++{
++ u32 tmp;
++
++ ep->desc = NULL;
++ INIT_LIST_HEAD (&ep->queue);
++
++ ep->ep.maxpacket = ~0;
++ ep->ep.ops = &net2280_ep_ops;
++
++ /* disable the dma, irqs, endpoint... */
++ if (ep->dma) {
++ writel (0, &ep->dma->dmactl);
++ writel ( (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
++ | (1 << DMA_TRANSACTION_DONE_INTERRUPT)
++ | (1 << DMA_ABORT)
++ , &ep->dma->dmastat);
++
++ tmp = readl (&regs->pciirqenb0);
++ tmp &= ~(1 << ep->num);
++ writel (tmp, &regs->pciirqenb0);
++ } else {
++ tmp = readl (&regs->pciirqenb1);
++ tmp &= ~(1 << (8 + ep->num)); /* completion */
++ writel (tmp, &regs->pciirqenb1);
++ }
++ writel (0, &ep->regs->ep_irqenb);
++
++ /* init to our chosen defaults, notably so that we NAK OUT
++ * packets until the driver queues a read (+note erratum 0112)
++ */
++ writel ( (1 << SET_NAK_OUT_PACKETS_MODE)
++ | (1 << SET_NAK_OUT_PACKETS)
++ | (1 << CLEAR_EP_HIDE_STATUS_PHASE)
++ | (1 << CLEAR_INTERRUPT_MODE)
++ | (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
++ | (1 << CLEAR_ENDPOINT_TOGGLE)
++ | (1 << CLEAR_ENDPOINT_HALT)
++ , &ep->regs->ep_rsp);
++
++ /* scrub most status bits, and flush any fifo state */
++ writel ( (1 << TIMEOUT)
++ | (1 << USB_STALL_SENT)
++ | (1 << USB_IN_NAK_SENT)
++ | (1 << USB_IN_ACK_RCVD)
++ | (1 << USB_OUT_PING_NAK_SENT)
++ | (1 << USB_OUT_ACK_SENT)
++ | (1 << FIFO_OVERFLOW)
++ | (1 << FIFO_UNDERFLOW)
++ | (1 << FIFO_FLUSH)
++ | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
++ | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
++ | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
++ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
++ | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
++ | (1 << DATA_IN_TOKEN_INTERRUPT)
++ , &ep->regs->ep_stat);
++
++ /* fifo size is handled separately */
++}
++
++static void nuke (struct net2280_ep *);
++
++static int net2280_disable (struct usb_ep *_ep)
++{
++ struct net2280_ep *ep;
++ unsigned long flags;
++
++ ep = container_of (_ep, struct net2280_ep, ep);
++ if (!_ep || !ep->desc || _ep->name == ep0name)
++ return -EINVAL;
++
++ spin_lock_irqsave (&ep->dev->lock, flags);
++ nuke (ep);
++ ep_reset (ep->dev->regs, ep);
++
++ VDEBUG (ep->dev, "disabled %s %s\n",
++ ep->dma ? "dma" : "pio", _ep->name);
++
++ /* synch memory views with the device */
++ (void) readl (&ep->regs->ep_cfg);
++
++ if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
++ ep->dma = &ep->dev->dma [ep->num - 1];
++
++ spin_unlock_irqrestore (&ep->dev->lock, flags);
++ return 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_request *
++net2280_alloc_request (struct usb_ep *_ep, int gfp_flags)
++{
++ struct net2280_ep *ep;
++ struct net2280_request *req;
++
++ if (!_ep)
++ return NULL;
++ ep = container_of (_ep, struct net2280_ep, ep);
++
++ req = kmalloc (sizeof *req, gfp_flags);
++ if (!req)
++ return NULL;
++
++ memset (req, 0, sizeof *req);
++ req->req.dma = DMA_ADDR_INVALID;
++ INIT_LIST_HEAD (&req->queue);
++
++ /* this dma descriptor may be swapped with the previous dummy */
++ if (ep->dma) {
++ struct net2280_dma *td;
++
++ td = pci_pool_alloc (ep->dev->requests, gfp_flags,
++ &req->td_dma);
++ if (!td) {
++ kfree (req);
++ return NULL;
++ }
++ td->dmacount = 0; /* not VALID */
++ td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
++ td->dmadesc = td->dmaaddr;
++ req->td = td;
++ }
++ return &req->req;
++}
++
++static void
++net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct net2280_ep *ep;
++ struct net2280_request *req;
++
++ ep = container_of (_ep, struct net2280_ep, ep);
++ if (!_ep || !_req)
++ return;
++
++ req = container_of (_req, struct net2280_request, req);
++ WARN_ON (!list_empty (&req->queue));
++ if (req->td)
++ pci_pool_free (ep->dev->requests, req->td, req->td_dma);
++ kfree (req);
++}
++
++/*-------------------------------------------------------------------------*/
++
++#undef USE_KMALLOC
++
++/* many common platforms have dma-coherent caches, which means that it's
++ * safe to use kmalloc() memory for all i/o buffers without using any
++ * cache flushing calls. (unless you're trying to share cache lines
++ * between dma and non-dma activities, which is a slow idea in any case.)
++ *
++ * other platforms need more care, with 2.5 having a moderately general
++ * solution (which falls down for allocations smaller than one page)
++ * that improves significantly on the 2.4 PCI allocators by removing
++ * the restriction that memory never be freed in_interrupt().
++ */
++#if defined(CONFIG_X86)
++#define USE_KMALLOC
++
++#elif defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
++#define USE_KMALLOC
++
++#elif defined(CONFIG_MIPS) && !defined(CONFIG_NONCOHERENT_IO)
++#define USE_KMALLOC
++
++/* FIXME there are other cases, including an x86-64 one ... */
++#endif
++
++/* allocating buffers this way eliminates dma mapping overhead, which
++ * on some platforms will mean eliminating a per-io buffer copy. with
++ * some kinds of system caches, further tweaks may still be needed.
++ */
++static void *
++net2280_alloc_buffer (
++ struct usb_ep *_ep,
++ unsigned bytes,
++ dma_addr_t *dma,
++ int gfp_flags
++)
++{
++ void *retval;
++ struct net2280_ep *ep;
++
++ ep = container_of (_ep, struct net2280_ep, ep);
++ if (!_ep)
++ return NULL;
++ *dma = DMA_ADDR_INVALID;
++
++#if defined(USE_KMALLOC)
++ retval = kmalloc(bytes, gfp_flags);
++ if (retval)
++ *dma = virt_to_phys(retval);
++#else
++ if (ep->dma) {
++ /* one problem with this call is that it wastes memory on
++ * typical 1/N page allocations: it allocates 1..N pages.
++ * another is that it always uses GFP_ATOMIC.
++ */
++#warning Using pci_alloc_consistent even with buffers smaller than a page.
++ retval = pci_alloc_consistent(ep->dev->pdev, bytes, dma);
++ } else
++ retval = kmalloc(bytes, gfp_flags);
++#endif
++ return retval;
++}
++
++static void
++net2280_free_buffer (
++ struct usb_ep *_ep,
++ void *buf,
++ dma_addr_t dma,
++ unsigned bytes
++) {
++ /* free memory into the right allocator */
++#ifndef USE_KMALLOC
++ if (dma != DMA_ADDR_INVALID) {
++ struct net2280_ep *ep;
++
++ ep = container_of(_ep, struct net2280_ep, ep);
++ if (!_ep)
++ return;
++ /* one problem with this call is that some platforms
++ * don't allow it to be used in_irq().
++ */
++ pci_free_consistent(ep->dev->pdev, bytes, buf, dma);
++ } else
++#endif
++ kfree (buf);
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* load a packet into the fifo we use for usb IN transfers.
++ * works for all endpoints.
++ *
++ * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
++ * at a time, but this code is simpler because it knows it only writes
++ * one packet. ep-a..ep-d should use dma instead.
++ */
++static void
++write_fifo (struct net2280_ep *ep, struct usb_request *req)
++{
++ struct net2280_ep_regs *regs = ep->regs;
++ u8 *buf;
++ u32 tmp;
++ unsigned count, total;
++
++ /* INVARIANT: fifo is currently empty. (testable) */
++
++ if (req) {
++ buf = req->buf + req->actual;
++ prefetch (buf);
++ total = req->length - req->actual;
++ } else {
++ total = 0;
++ buf = NULL;
++ }
++
++ /* write just one packet at a time */
++ count = ep->ep.maxpacket;
++ if (count > total) /* min() cannot be used on a bitfield */
++ count = total;
++
++ VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
++ ep->ep.name, count,
++ (count != ep->ep.maxpacket) ? " (short)" : "",
++ req);
++ while (count >= 4) {
++ /* NOTE be careful if you try to align these. fifo lines
++ * should normally be full (4 bytes) and successive partial
++ * lines are ok only in certain cases.
++ */
++ tmp = get_unaligned ((u32 *)buf);
++ cpu_to_le32s (&tmp);
++ writel (tmp, &regs->ep_data);
++ buf += 4;
++ count -= 4;
++ }
++
++ /* last fifo entry is "short" unless we wrote a full packet.
++ * also explicitly validate last word in (periodic) transfers
++ * when maxpacket is not a multiple of 4 bytes.
++ */
++ if (count || total < ep->ep.maxpacket) {
++ tmp = count ? get_unaligned ((u32 *)buf) : count;
++ cpu_to_le32s (&tmp);
++ set_fifo_bytecount (ep, count & 0x03);
++ writel (tmp, &regs->ep_data);
++ }
++
++ /* pci writes may still be posted */
++}
++
++/* work around erratum 0106: PCI and USB race over the OUT fifo.
++ * caller guarantees chiprev 0100, out endpoint is NAKing, and
++ * there's no real data in the fifo.
++ *
++ * NOTE: also used in cases where that erratum doesn't apply:
++ * where the host wrote "too much" data to us.
++ */
++static void out_flush (struct net2280_ep *ep)
++{
++ u32 *statp, tmp;
++
++ ASSERT_OUT_NAKING (ep);
++
++ statp = &ep->regs->ep_stat;
++ writel ( (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
++ | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
++ , statp);
++ writel ((1 << FIFO_FLUSH), statp);
++ mb ();
++ tmp = readl (statp);
++ if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
++ /* high speed did bulk NYET; fifo isn't filling */
++ && ep->dev->gadget.speed == USB_SPEED_FULL) {
++ unsigned usec;
++
++ usec = 50; /* 64 byte bulk/interrupt */
++ handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
++ (1 << USB_OUT_PING_NAK_SENT), usec);
++ /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
++ }
++}
++
++/* unload packet(s) from the fifo we use for usb OUT transfers.
++ * returns true iff the request completed, because of short packet
++ * or the request buffer having filled with full packets.
++ *
++ * for ep-a..ep-d this will read multiple packets out when they
++ * have been accepted.
++ */
++static int
++read_fifo (struct net2280_ep *ep, struct net2280_request *req)
++{
++ struct net2280_ep_regs *regs = ep->regs;
++ u8 *buf = req->req.buf + req->req.actual;
++ unsigned count, tmp, is_short;
++ unsigned cleanup = 0, prevent = 0;
++
++ /* erratum 0106 ... packets coming in during fifo reads might
++ * be incompletely rejected. not all cases have workarounds.
++ */
++ if (ep->dev->chiprev == 0x0100
++ && ep->dev->gadget.speed == USB_SPEED_FULL) {
++ udelay (1);
++ tmp = readl (&ep->regs->ep_stat);
++ if ((tmp & (1 << NAK_OUT_PACKETS)))
++ cleanup = 1;
++ else if ((tmp & (1 << FIFO_FULL))) {
++ start_out_naking (ep);
++ prevent = 1;
++ }
++ /* else: hope we don't see the problem */
++ }
++
++ /* never overflow the rx buffer. the fifo reads packets until
++ * it sees a short one; we might not be ready for them all.
++ */
++ prefetchw (buf);
++ count = readl (&regs->ep_avail);
++ if (unlikely (count == 0)) {
++ udelay (1);
++ tmp = readl (&ep->regs->ep_stat);
++ count = readl (&regs->ep_avail);
++ /* handled that data already? */
++ if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
++ return 0;
++ }
++
++ tmp = req->req.length - req->req.actual;
++ if (count > tmp) {
++ /* as with DMA, data overflow gets flushed */
++ if ((tmp % ep->ep.maxpacket) != 0) {
++ ERROR (ep->dev,
++ "%s out fifo %d bytes, expected %d\n",
++ ep->ep.name, count, tmp);
++ req->req.status = -EOVERFLOW;
++ cleanup = 1;
++ /* NAK_OUT_PACKETS will be set, so flushing is safe;
++ * the next read will start with the next packet
++ */
++ } /* else it's a ZLP, no worries */
++ count = tmp;
++ }
++ req->req.actual += count;
++
++ is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
++
++ VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
++ ep->ep.name, count, is_short ? " (short)" : "",
++ cleanup ? " flush" : "", prevent ? " nak" : "",
++ req, req->req.actual, req->req.length);
++
++ while (count >= 4) {
++ tmp = readl (&regs->ep_data);
++ cpu_to_le32s (&tmp);
++ put_unaligned (tmp, (u32 *)buf);
++ buf += 4;
++ count -= 4;
++ }
++ if (count) {
++ tmp = readl (&regs->ep_data);
++ /* LE conversion is implicit here: */
++ do {
++ *buf++ = (u8) tmp;
++ tmp >>= 8;
++ } while (--count);
++ }
++ if (cleanup)
++ out_flush (ep);
++ if (prevent) {
++ writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
++ (void) readl (&ep->regs->ep_rsp);
++ }
++
++ return is_short || ((req->req.actual == req->req.length)
++ && !req->req.zero);
++}
++
++/* fill out dma descriptor to match a given request */
++static void
++fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
++{
++ struct net2280_dma *td = req->td;
++ u32 dmacount = req->req.length;
++
++ /* don't let DMA continue after a short OUT packet,
++ * so overruns can't affect the next transfer.
++ * in case of overruns on max-size packets, we can't
++ * stop the fifo from filling but we can flush it.
++ */
++ if (ep->is_in)
++ dmacount |= (1 << DMA_DIRECTION);
++ else if ((dmacount % ep->ep.maxpacket) != 0)
++ dmacount |= (1 << END_OF_CHAIN);
++
++ req->valid = valid;
++ if (valid)
++ dmacount |= (1 << VALID_BIT);
++ if (likely(!req->req.no_interrupt || !use_dma_chaining))
++ dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);
++
++ /* td->dmadesc = previously set by caller */
++ td->dmaaddr = cpu_to_le32p (&req->req.dma);
++
++ /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
++ wmb ();
++ td->dmacount = cpu_to_le32p (&dmacount);
++}
++
++static const u32 dmactl_default =
++ (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
++ | (1 << DMA_CLEAR_COUNT_ENABLE)
++ /* erratum 0116 workaround part 1 (use POLLING) */
++ | (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
++ | (1 << DMA_VALID_BIT_POLLING_ENABLE)
++ | (1 << DMA_VALID_BIT_ENABLE)
++ | (1 << DMA_SCATTER_GATHER_ENABLE)
++ /* erratum 0116 workaround part 2 (no AUTOSTART) */
++ | (1 << DMA_ENABLE);
++
++static inline void spin_stop_dma (struct net2280_dma_regs *dma)
++{
++ handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
++}
++
++static inline void stop_dma (struct net2280_dma_regs *dma)
++{
++ writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
++ spin_stop_dma (dma);
++}
++
++static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
++{
++ struct net2280_dma_regs *dma = ep->dma;
++
++ writel ((1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION),
++ &dma->dmacount);
++ writel (readl (&dma->dmastat), &dma->dmastat);
++
++ writel (td_dma, &dma->dmadesc);
++ writel (dmactl, &dma->dmactl);
++
++ /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
++ (void) readl (&ep->dev->pci->pcimstctl);
++
++ writel ((1 << DMA_START), &dma->dmastat);
++
++ if (!ep->is_in)
++ stop_out_naking (ep);
++}
++
++static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
++{
++ u32 tmp;
++ struct net2280_dma_regs *dma = ep->dma;
++
++ /* FIXME can't use DMA for ZLPs */
++
++ /* on this path we "know" there's no dma active (yet) */
++ WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
++ writel (0, &ep->dma->dmactl);
++
++ /* previous OUT packet might have been short */
++ if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
++ & (1 << NAK_OUT_PACKETS)) != 0) {
++ writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
++ &ep->regs->ep_stat);
++
++ tmp = readl (&ep->regs->ep_avail);
++ if (tmp) {
++ writel (readl (&dma->dmastat), &dma->dmastat);
++
++ /* transfer all/some fifo data */
++ writel (req->req.dma, &dma->dmaaddr);
++ tmp = min (tmp, req->req.length);
++
++ /* dma irq, faking scatterlist status */
++ req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
++ writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
++ | tmp, &dma->dmacount);
++ req->td->dmadesc = 0;
++ req->valid = 1;
++
++ writel ((1 << DMA_ENABLE), &dma->dmactl);
++ writel ((1 << DMA_START), &dma->dmastat);
++ return;
++ }
++ }
++
++ tmp = dmactl_default;
++
++ /* force packet boundaries between dma requests, but prevent the
++ * controller from automagically writing a last "short" packet
++ * (zero length) unless the driver explicitly said to do that.
++ */
++ if (ep->is_in) {
++ if (likely ((req->req.length % ep->ep.maxpacket) != 0
++ || req->req.zero)) {
++ tmp |= (1 << DMA_FIFO_VALIDATE);
++ ep->in_fifo_validate = 1;
++ } else
++ ep->in_fifo_validate = 0;
++ }
++
++ /* init req->td, pointing to the current dummy */
++ req->td->dmadesc = cpu_to_le32 (ep->td_dma);
++ fill_dma_desc (ep, req, 1);
++
++ if (!use_dma_chaining)
++ req->td->dmacount |= __constant_cpu_to_le32 (1 << END_OF_CHAIN);
++
++ start_queue (ep, tmp, req->td_dma);
++}
++
++static inline void
++queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
++{
++ struct net2280_dma *end;
++ dma_addr_t tmp;
++
++ /* swap new dummy for old, link; fill and maybe activate */
++ end = ep->dummy;
++ ep->dummy = req->td;
++ req->td = end;
++
++ tmp = ep->td_dma;
++ ep->td_dma = req->td_dma;
++ req->td_dma = tmp;
++
++ end->dmadesc = cpu_to_le32 (ep->td_dma);
++
++ fill_dma_desc (ep, req, valid);
++}
++
++static void
++done (struct net2280_ep *ep, struct net2280_request *req, int status)
++{
++ struct net2280 *dev;
++ unsigned stopped = ep->stopped;
++
++ list_del_init (&req->queue);
++
++ if (req->req.status == -EINPROGRESS)
++ req->req.status = status;
++ else
++ status = req->req.status;
++
++ dev = ep->dev;
++ if (req->mapped) {
++ pci_unmap_single (dev->pdev, req->req.dma, req->req.length,
++ ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
++ req->req.dma = DMA_ADDR_INVALID;
++ req->mapped = 0;
++ }
++
++ if (status && status != -ESHUTDOWN)
++ VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
++ ep->ep.name, &req->req, status,
++ req->req.actual, req->req.length);
++
++ /* don't modify queue heads during completion callback */
++ ep->stopped = 1;
++ spin_unlock (&dev->lock);
++ req->req.complete (&ep->ep, &req->req);
++ spin_lock (&dev->lock);
++ ep->stopped = stopped;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static int
++net2280_queue (struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
++{
++ struct net2280_request *req;
++ struct net2280_ep *ep;
++ struct net2280 *dev;
++ unsigned long flags;
++
++ /* we always require a cpu-view buffer, so that we can
++ * always use pio (as fallback or whatever).
++ */
++ req = container_of (_req, struct net2280_request, req);
++ if (!_req || !_req->complete || !_req->buf
++ || !list_empty (&req->queue))
++ return -EINVAL;
++ if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
++ return -EDOM;
++ ep = container_of (_ep, struct net2280_ep, ep);
++ if (!_ep || (!ep->desc && ep->num != 0))
++ return -EINVAL;
++ dev = ep->dev;
++ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
++ return -ESHUTDOWN;
++
++ /* FIXME implement PIO fallback for ZLPs with DMA */
++ if (ep->dma && _req->length == 0)
++ return -EOPNOTSUPP;
++
++ /* set up dma mapping in case the caller didn't */
++ if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
++ _req->dma = pci_map_single (dev->pdev, _req->buf, _req->length,
++ ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
++ req->mapped = 1;
++ }
++
++#if 0
++ VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
++ _ep->name, _req, _req->length, _req->buf);
++#endif
++
++ spin_lock_irqsave (&dev->lock, flags);
++
++ _req->status = -EINPROGRESS;
++ _req->actual = 0;
++
++ /* kickstart this i/o queue? */
++ if (list_empty (&ep->queue) && !ep->stopped) {
++ /* use DMA if the endpoint supports it, else pio */
++ if (ep->dma)
++ start_dma (ep, req);
++ else {
++ /* maybe there's no control data, just status ack */
++ if (ep->num == 0 && _req->length == 0) {
++ allow_status (ep);
++ done (ep, req, 0);
++ VDEBUG (dev, "%s status ack\n", ep->ep.name);
++ goto done;
++ }
++
++ /* PIO ... stuff the fifo, or unblock it. */
++ if (ep->is_in)
++ write_fifo (ep, _req);
++ else if (list_empty (&ep->queue)) {
++ u32 s;
++
++ /* OUT FIFO might have packet(s) buffered */
++ s = readl (&ep->regs->ep_stat);
++ if ((s & (1 << FIFO_EMPTY)) == 0) {
++ /* note: _req->short_not_ok is
++ * ignored here since PIO _always_
++ * stops queue advance here, and
++ * _req->status doesn't change for
++ * short reads (only _req->actual)
++ */
++ if (read_fifo (ep, req)) {
++ done (ep, req, 0);
++ if (ep->num == 0)
++ allow_status (ep);
++ /* don't queue it */
++ req = NULL;
++ } else
++ s = readl (&ep->regs->ep_stat);
++ }
++
++ /* don't NAK, let the fifo fill */
++ if (req && (s & (1 << NAK_OUT_PACKETS)))
++ writel ((1 << CLEAR_NAK_OUT_PACKETS),
++ &ep->regs->ep_rsp);
++ }
++ }
++
++ } else if (ep->dma) {
++ int valid = 1;
++
++ if (ep->is_in) {
++ int expect;
++
++ /* preventing magic zlps is per-engine state, not
++ * per-transfer; irq logic must recover hiccups.
++ */
++ expect = likely (req->req.zero
++ || (req->req.length % ep->ep.maxpacket) != 0);
++ if (expect != ep->in_fifo_validate)
++ valid = 0;
++ }
++ queue_dma (ep, req, valid);
++
++ } /* else the irq handler advances the queue. */
++
++ if (req)
++ list_add_tail (&req->queue, &ep->queue);
++done:
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ /* pci writes may still be posted */
++ return 0;
++}
++
++static inline void
++dma_done (
++ struct net2280_ep *ep,
++ struct net2280_request *req,
++ u32 dmacount,
++ int status
++)
++{
++ req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
++ done (ep, req, status);
++}
++
++static void restart_dma (struct net2280_ep *ep);
++
++static void scan_dma_completions (struct net2280_ep *ep)
++{
++ /* only look at descriptors that were "naturally" retired,
++ * so fifo and list head state won't matter
++ */
++ while (!list_empty (&ep->queue)) {
++ struct net2280_request *req;
++ u32 tmp;
++
++ req = list_entry (ep->queue.next,
++ struct net2280_request, queue);
++ if (!req->valid)
++ break;
++ rmb ();
++ tmp = le32_to_cpup (&req->td->dmacount);
++ if ((tmp & (1 << VALID_BIT)) != 0)
++ break;
++
++ /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
++ * cases where DMA must be aborted; this code handles
++ * all non-abort DMA completions.
++ */
++ if (unlikely (req->td->dmadesc == 0)) {
++ /* paranoia */
++ tmp = readl (&ep->dma->dmacount);
++ if (tmp & DMA_BYTE_COUNT_MASK)
++ break;
++ /* single transfer mode */
++ dma_done (ep, req, tmp, 0);
++ break;
++ } else if (!ep->is_in
++ && (req->req.length % ep->ep.maxpacket) != 0) {
++ tmp = readl (&ep->regs->ep_stat);
++
++ /* AVOID TROUBLE HERE by not issuing short reads from
++ * your gadget driver. That helps avoids errata 0121,
++ * 0122, and 0124; not all cases trigger the warning.
++ */
++ if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
++ WARN (ep->dev, "%s lost packet sync!\n",
++ ep->ep.name);
++ req->req.status = -EOVERFLOW;
++ } else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
++ /* fifo gets flushed later */
++ ep->out_overflow = 1;
++ DEBUG (ep->dev, "%s dma, discard %d len %d\n",
++ ep->ep.name, tmp,
++ req->req.length);
++ req->req.status = -EOVERFLOW;
++ }
++ }
++ dma_done (ep, req, tmp, 0);
++ }
++}
++
++static void restart_dma (struct net2280_ep *ep)
++{
++ struct net2280_request *req;
++ u32 dmactl = dmactl_default;
++
++ if (ep->stopped)
++ return;
++ req = list_entry (ep->queue.next, struct net2280_request, queue);
++
++ if (!use_dma_chaining) {
++ start_dma (ep, req);
++ return;
++ }
++
++ /* the 2280 will be processing the queue unless queue hiccups after
++ * the previous transfer:
++ * IN: wanted automagic zlp, head doesn't (or vice versa)
++ * DMA_FIFO_VALIDATE doesn't init from dma descriptors.
++ * OUT: was "usb-short", we must restart.
++ */
++ if (ep->is_in && !req->valid) {
++ struct net2280_request *entry, *prev = NULL;
++ int reqmode, done = 0;
++
++ DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
++ ep->in_fifo_validate = likely (req->req.zero
++ || (req->req.length % ep->ep.maxpacket) != 0);
++ if (ep->in_fifo_validate)
++ dmactl |= (1 << DMA_FIFO_VALIDATE);
++ list_for_each_entry (entry, &ep->queue, queue) {
++ u32 dmacount;
++
++ if (entry == req)
++ continue;
++ dmacount = entry->td->dmacount;
++ if (!done) {
++ reqmode = likely (entry->req.zero
++ || (entry->req.length
++ % ep->ep.maxpacket) != 0);
++ if (reqmode == ep->in_fifo_validate) {
++ entry->valid = 1;
++ dmacount |= valid_bit;
++ entry->td->dmacount = dmacount;
++ prev = entry;
++ continue;
++ } else {
++ /* force a hiccup */
++ prev->td->dmacount |= dma_done_ie;
++ done = 1;
++ }
++ }
++
++ /* walk the rest of the queue so unlinks behave */
++ entry->valid = 0;
++ dmacount &= ~valid_bit;
++ entry->td->dmacount = dmacount;
++ prev = entry;
++ }
++ }
++
++ writel (0, &ep->dma->dmactl);
++ start_queue (ep, dmactl, req->td_dma);
++}
++
++static void abort_dma (struct net2280_ep *ep)
++{
++ /* abort the current transfer */
++ if (likely (!list_empty (&ep->queue))) {
++ /* FIXME work around errata 0121, 0122, 0124 */
++ writel ((1 << DMA_ABORT), &ep->dma->dmastat);
++ spin_stop_dma (ep->dma);
++ } else
++ stop_dma (ep->dma);
++ scan_dma_completions (ep);
++}
++
++/* dequeue ALL requests */
++static void nuke (struct net2280_ep *ep)
++{
++ struct net2280_request *req;
++
++ /* called with spinlock held */
++ ep->stopped = 1;
++ if (ep->dma)
++ abort_dma (ep);
++ while (!list_empty (&ep->queue)) {
++ req = list_entry (ep->queue.next,
++ struct net2280_request,
++ queue);
++ done (ep, req, -ESHUTDOWN);
++ }
++}
++
++/* dequeue JUST ONE request */
++static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct net2280_ep *ep;
++ struct net2280_request *req;
++ unsigned long flags;
++ u32 dmactl;
++ int stopped;
++
++ ep = container_of (_ep, struct net2280_ep, ep);
++ if (!_ep || (!ep->desc && ep->num != 0) || !_req)
++ return -EINVAL;
++
++ spin_lock_irqsave (&ep->dev->lock, flags);
++ stopped = ep->stopped;
++
++ /* quiesce dma while we patch the queue */
++ dmactl = 0;
++ ep->stopped = 1;
++ if (ep->dma) {
++ dmactl = readl (&ep->dma->dmactl);
++ /* WARNING erratum 0127 may kick in ... */
++ stop_dma (ep->dma);
++ scan_dma_completions (ep);
++ }
++
++ /* make sure it's still queued on this endpoint */
++ list_for_each_entry (req, &ep->queue, queue) {
++ if (&req->req == _req)
++ break;
++ }
++ if (&req->req != _req) {
++ spin_unlock_irqrestore (&ep->dev->lock, flags);
++ return -EINVAL;
++ }
++
++ /* queue head may be partially complete. */
++ if (ep->queue.next == &req->queue) {
++ if (ep->dma) {
++ DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
++ _req->status = -ECONNRESET;
++ abort_dma (ep);
++ if (likely (ep->queue.next == &req->queue)) {
++ // NOTE: misreports single-transfer mode
++ req->td->dmacount = 0; /* invalidate */
++ dma_done (ep, req,
++ readl (&ep->dma->dmacount),
++ -ECONNRESET);
++ }
++ } else {
++ DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
++ done (ep, req, -ECONNRESET);
++ }
++ req = NULL;
++
++ /* patch up hardware chaining data */
++ } else if (ep->dma && use_dma_chaining) {
++ if (req->queue.prev == ep->queue.next) {
++ writel (le32_to_cpu (req->td->dmadesc),
++ &ep->dma->dmadesc);
++ if (req->td->dmacount & dma_done_ie)
++ writel (readl (&ep->dma->dmacount)
++ | dma_done_ie,
++ &ep->dma->dmacount);
++ } else {
++ struct net2280_request *prev;
++
++ prev = list_entry (req->queue.prev,
++ struct net2280_request, queue);
++ prev->td->dmadesc = req->td->dmadesc;
++ if (req->td->dmacount & dma_done_ie)
++ prev->td->dmacount |= dma_done_ie;
++ }
++ }
++
++ if (req)
++ done (ep, req, -ECONNRESET);
++ ep->stopped = stopped;
++
++ if (ep->dma) {
++ /* turn off dma on inactive queues */
++ if (list_empty (&ep->queue))
++ stop_dma (ep->dma);
++ else if (!ep->stopped) {
++ /* resume current request, or start new one */
++ if (req)
++ writel (dmactl, &ep->dma->dmactl);
++ else
++ start_dma (ep, list_entry (ep->queue.next,
++ struct net2280_request, queue));
++ }
++ }
++
++ spin_unlock_irqrestore (&ep->dev->lock, flags);
++ return req ? 0 : -EOPNOTSUPP;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static int net2280_fifo_status (struct usb_ep *_ep);
++
++static int
++net2280_set_halt (struct usb_ep *_ep, int value)
++{
++ struct net2280_ep *ep;
++ unsigned long flags;
++ int retval = 0;
++
++ ep = container_of (_ep, struct net2280_ep, ep);
++ if (!_ep || (!ep->desc && ep->num != 0))
++ return -EINVAL;
++ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
++ return -ESHUTDOWN;
++ if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
++ == USB_ENDPOINT_XFER_ISOC)
++ return -EINVAL;
++
++ spin_lock_irqsave (&ep->dev->lock, flags);
++ if (!list_empty (&ep->queue))
++ retval = -EAGAIN;
++ else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
++ retval = -EAGAIN;
++ else {
++ VDEBUG (ep->dev, "%s %s halt\n", _ep->name,
++ value ? "set" : "clear");
++ /* set/clear, then synch memory views with the device */
++ if (value) {
++ if (ep->num == 0)
++ ep->dev->protocol_stall = 1;
++ else
++ set_halt (ep);
++ } else
++ clear_halt (ep);
++ (void) readl (&ep->regs->ep_rsp);
++ }
++ spin_unlock_irqrestore (&ep->dev->lock, flags);
++
++ return retval;
++}
++
++static int
++net2280_fifo_status (struct usb_ep *_ep)
++{
++ struct net2280_ep *ep;
++ u32 avail;
++
++ ep = container_of (_ep, struct net2280_ep, ep);
++ if (!_ep || (!ep->desc && ep->num != 0))
++ return -ENODEV;
++ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
++ return -ESHUTDOWN;
++
++ avail = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
++ if (avail > ep->fifo_size)
++ return -EOVERFLOW;
++ if (ep->is_in)
++ avail = ep->fifo_size - avail;
++ return avail;
++}
++
++static void
++net2280_fifo_flush (struct usb_ep *_ep)
++{
++ struct net2280_ep *ep;
++
++ ep = container_of (_ep, struct net2280_ep, ep);
++ if (!_ep || (!ep->desc && ep->num != 0))
++ return;
++ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
++ return;
++
++ writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
++ (void) readl (&ep->regs->ep_rsp);
++}
++
++static struct usb_ep_ops net2280_ep_ops = {
++ .enable = net2280_enable,
++ .disable = net2280_disable,
++
++ .alloc_request = net2280_alloc_request,
++ .free_request = net2280_free_request,
++
++ .alloc_buffer = net2280_alloc_buffer,
++ .free_buffer = net2280_free_buffer,
++
++ .queue = net2280_queue,
++ .dequeue = net2280_dequeue,
++
++ .set_halt = net2280_set_halt,
++ .fifo_status = net2280_fifo_status,
++ .fifo_flush = net2280_fifo_flush,
++};
++
++/*-------------------------------------------------------------------------*/
++
++static int net2280_get_frame (struct usb_gadget *_gadget)
++{
++ struct net2280 *dev;
++ unsigned long flags;
++ u16 retval;
++
++ if (!_gadget)
++ return -ENODEV;
++ dev = container_of (_gadget, struct net2280, gadget);
++ spin_lock_irqsave (&dev->lock, flags);
++ retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
++ spin_unlock_irqrestore (&dev->lock, flags);
++ return retval;
++}
++
++static int net2280_wakeup (struct usb_gadget *_gadget)
++{
++ struct net2280 *dev;
++ u32 tmp;
++ unsigned long flags;
++
++ if (!_gadget)
++ return 0;
++ dev = container_of (_gadget, struct net2280, gadget);
++
++ spin_lock_irqsave (&dev->lock, flags);
++ tmp = readl (&dev->usb->usbctl);
++ if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
++ writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ /* pci writes may still be posted */
++ return 0;
++}
++
++static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
++{
++ struct net2280 *dev;
++ u32 tmp;
++ unsigned long flags;
++
++ if (!_gadget)
++ return 0;
++ dev = container_of (_gadget, struct net2280, gadget);
++
++ spin_lock_irqsave (&dev->lock, flags);
++ tmp = readl (&dev->usb->usbctl);
++ if (value)
++ tmp |= (1 << SELF_POWERED_STATUS);
++ else
++ tmp &= ~(1 << SELF_POWERED_STATUS);
++ writel (tmp, &dev->usb->usbctl);
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ return 0;
++}
++
++static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
++{
++ struct net2280 *dev;
++ u32 tmp;
++ unsigned long flags;
++
++ if (!_gadget)
++ return -ENODEV;
++ dev = container_of (_gadget, struct net2280, gadget);
++
++ spin_lock_irqsave (&dev->lock, flags);
++ tmp = readl (&dev->usb->usbctl);
++ dev->softconnect = (is_on != 0);
++ if (is_on)
++ tmp |= (1 << USB_DETECT_ENABLE);
++ else
++ tmp &= ~(1 << USB_DETECT_ENABLE);
++ writel (tmp, &dev->usb->usbctl);
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ return 0;
++}
++
++static const struct usb_gadget_ops net2280_ops = {
++ .get_frame = net2280_get_frame,
++ .wakeup = net2280_wakeup,
++ .set_selfpowered = net2280_set_selfpowered,
++ .pullup = net2280_pullup,
++};
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef USE_SYSFS_DEBUG_FILES
++
++/* "function" sysfs attribute */
++static ssize_t
++show_function (struct device *_dev, char *buf)
++{
++ struct net2280 *dev = dev_get_drvdata (_dev);
++
++ if (!dev->driver
++ || !dev->driver->function
++ || strlen (dev->driver->function) > PAGE_SIZE)
++ return 0;
++ return snprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
++}
++static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
++
++static ssize_t
++show_registers (struct device *_dev, char *buf)
++{
++ struct net2280 *dev;
++ char *next;
++ unsigned size, t;
++ unsigned long flags;
++ int i;
++ u32 t1, t2;
++ char *s;
++
++ dev = dev_get_drvdata (_dev);
++ next = buf;
++ size = PAGE_SIZE;
++ spin_lock_irqsave (&dev->lock, flags);
++
++ if (dev->driver)
++ s = dev->driver->driver.name;
++ else
++ s = "(none)";
++
++ /* Main Control Registers */
++ t = snprintf (next, size, "%s version " DRIVER_VERSION
++ ", chiprev %04x, dma %s\n\n"
++ "devinit %03x fifoctl %08x gadget '%s'\n"
++ "pci irqenb0 %02x irqenb1 %08x "
++ "irqstat0 %04x irqstat1 %08x\n",
++ driver_name, dev->chiprev,
++ use_dma
++ ? (use_dma_chaining ? "chaining" : "enabled")
++ : "disabled",
++ readl (&dev->regs->devinit),
++ readl (&dev->regs->fifoctl),
++ s,
++ readl (&dev->regs->pciirqenb0),
++ readl (&dev->regs->pciirqenb1),
++ readl (&dev->regs->irqstat0),
++ readl (&dev->regs->irqstat1));
++ size -= t;
++ next += t;
++
++ /* USB Control Registers */
++ t1 = readl (&dev->usb->usbctl);
++ t2 = readl (&dev->usb->usbstat);
++ if (t1 & (1 << VBUS_PIN)) {
++ if (t2 & (1 << HIGH_SPEED))
++ s = "high speed";
++ else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
++ s = "powered";
++ else
++ s = "full speed";
++ /* full speed bit (6) not working?? */
++ } else
++ s = "not attached";
++ t = snprintf (next, size,
++ "stdrsp %08x usbctl %08x usbstat %08x "
++ "addr 0x%02x (%s)\n",
++ readl (&dev->usb->stdrsp), t1, t2,
++ readl (&dev->usb->ouraddr), s);
++ size -= t;
++ next += t;
++
++ /* PCI Master Control Registers */
++
++ /* DMA Control Registers */
++
++ /* Configurable EP Control Registers */
++ for (i = 0; i < 7; i++) {
++ struct net2280_ep *ep;
++
++ ep = &dev->ep [i];
++ if (i && !ep->desc)
++ continue;
++
++ t1 = readl (&ep->regs->ep_cfg);
++ t2 = readl (&ep->regs->ep_rsp) & 0xff;
++ t = snprintf (next, size,
++ "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
++ "irqenb %02x\n",
++ ep->ep.name, t1, t2,
++ (t2 & (1 << CLEAR_NAK_OUT_PACKETS))
++ ? "NAK " : "",
++ (t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
++ ? "hide " : "",
++ (t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
++ ? "CRC " : "",
++ (t2 & (1 << CLEAR_INTERRUPT_MODE))
++ ? "interrupt " : "",
++ (t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
++ ? "status " : "",
++ (t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
++ ? "NAKmode " : "",
++ (t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
++ ? "DATA1 " : "DATA0 ",
++ (t2 & (1 << CLEAR_ENDPOINT_HALT))
++ ? "HALT " : "",
++ readl (&ep->regs->ep_irqenb));
++ size -= t;
++ next += t;
++
++ t = snprintf (next, size,
++ "\tstat %08x avail %04x "
++ "(ep%d%s-%s)%s\n",
++ readl (&ep->regs->ep_stat),
++ readl (&ep->regs->ep_avail),
++ t1 & 0x0f, DIR_STRING (t1),
++ type_string (t1 >> 8),
++ ep->stopped ? "*" : "");
++ size -= t;
++ next += t;
++
++ if (!ep->dma)
++ continue;
++
++ t = snprintf (next, size,
++ " dma\tctl %08x stat %08x count %08x\n"
++ "\taddr %08x desc %08x\n",
++ readl (&ep->dma->dmactl),
++ readl (&ep->dma->dmastat),
++ readl (&ep->dma->dmacount),
++ readl (&ep->dma->dmaaddr),
++ readl (&ep->dma->dmadesc));
++ size -= t;
++ next += t;
++
++ }
++
++ /* Indexed Registers */
++ // none yet
++
++ /* Statistics */
++ t = snprintf (next, size, "\nirqs: ");
++ size -= t;
++ next += t;
++ for (i = 0; i < 7; i++) {
++ struct net2280_ep *ep;
++
++ ep = &dev->ep [i];
++ if (i && !ep->irqs)
++ continue;
++ t = snprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
++ size -= t;
++ next += t;
++
++ }
++ t = snprintf (next, size, "\n");
++ size -= t;
++ next += t;
++
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ return PAGE_SIZE - size;
++}
++static DEVICE_ATTR (registers, S_IRUGO, show_registers, NULL);
++
++static ssize_t
++show_queues (struct device *_dev, char *buf)
++{
++ struct net2280 *dev;
++ char *next;
++ unsigned size;
++ unsigned long flags;
++ int i;
++
++ dev = dev_get_drvdata (_dev);
++ next = buf;
++ size = PAGE_SIZE;
++ spin_lock_irqsave (&dev->lock, flags);
++
++ for (i = 0; i < 7; i++) {
++ struct net2280_ep *ep = &dev->ep [i];
++ struct net2280_request *req;
++ int t;
++
++ if (i != 0) {
++ const struct usb_endpoint_descriptor *d;
++
++ d = ep->desc;
++ if (!d)
++ continue;
++ t = d->bEndpointAddress;
++ t = snprintf (next, size,
++ "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
++ ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
++ (t & USB_DIR_IN) ? "in" : "out",
++ ({ char *val;
++ switch (d->bmAttributes & 0x03) {
++ case USB_ENDPOINT_XFER_BULK:
++ val = "bulk"; break;
++ case USB_ENDPOINT_XFER_INT:
++ val = "intr"; break;
++ default:
++ val = "iso"; break;
++ }; val; }),
++ le16_to_cpu (d->wMaxPacketSize) & 0x1fff,
++ ep->dma ? "dma" : "pio", ep->fifo_size
++ );
++ } else /* ep0 should only have one transfer queued */
++ t = snprintf (next, size, "ep0 max 64 pio %s\n",
++ ep->is_in ? "in" : "out");
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++
++ if (list_empty (&ep->queue)) {
++ t = snprintf (next, size, "\t(nothing queued)\n");
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++ continue;
++ }
++ list_for_each_entry (req, &ep->queue, queue) {
++ if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
++ t = snprintf (next, size,
++ "\treq %p len %d/%d "
++ "buf %p (dmacount %08x)\n",
++ &req->req, req->req.actual,
++ req->req.length, req->req.buf,
++ readl (&ep->dma->dmacount));
++ else
++ t = snprintf (next, size,
++ "\treq %p len %d/%d buf %p\n",
++ &req->req, req->req.actual,
++ req->req.length, req->req.buf);
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++
++ if (ep->dma) {
++ struct net2280_dma *td;
++
++ td = req->td;
++ t = snprintf (next, size, "\t td %08x "
++ " count %08x buf %08x desc %08x\n",
++ req->td_dma, td->dmacount,
++ td->dmaaddr, td->dmadesc);
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++ }
++ }
++ }
++
++done:
++ spin_unlock_irqrestore (&dev->lock, flags);
++ return PAGE_SIZE - size;
++}
++static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);
++
++
++#else
++
++#define device_create_file(a,b) do {} while (0)
++#define device_remove_file device_create_file
++
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++/* another driver-specific mode might be a request type doing dma
++ * to/from another device fifo instead of to/from memory.
++ */
++
++static void set_fifo_mode (struct net2280 *dev, int mode)
++{
++ /* keeping high bits preserves BAR2 */
++ writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
++
++ /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
++ INIT_LIST_HEAD (&dev->gadget.ep_list);
++ list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
++ list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
++ switch (mode) {
++ case 0:
++ list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
++ list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
++ dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
++ break;
++ case 1:
++ dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
++ break;
++ case 2:
++ list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
++ dev->ep [1].fifo_size = 2048;
++ dev->ep [2].fifo_size = 1024;
++ break;
++ }
++ /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
++ list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
++ list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
++}
++
++/**
++ * net2280_set_fifo_mode - change allocation of fifo buffers
++ * @gadget: access to the net2280 device that will be updated
++ * @mode: 0 for default, four 1kB buffers (ep-a through ep-d);
++ * 1 for two 2kB buffers (ep-a and ep-b only);
++ * 2 for one 2kB buffer (ep-a) and two 1kB ones (ep-b, ep-c).
++ *
++ * returns zero on success, else negative errno. when this succeeds,
++ * the contents of gadget->ep_list may have changed.
++ *
++ * you may only call this function when endpoints a-d are all disabled.
++ * use it whenever extra hardware buffering can help performance, such
++ * as before enabling "high bandwidth" interrupt endpoints that use
++ * maxpacket bigger than 512 (when double buffering would otherwise
++ * be unavailable).
++ */
++int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
++{
++ int i;
++ struct net2280 *dev;
++ int status = 0;
++ unsigned long flags;
++
++ if (!gadget)
++ return -ENODEV;
++ dev = container_of (gadget, struct net2280, gadget);
++
++ spin_lock_irqsave (&dev->lock, flags);
++
++ for (i = 1; i <= 4; i++)
++ if (dev->ep [i].desc) {
++ status = -EINVAL;
++ break;
++ }
++ if (mode < 0 || mode > 2)
++ status = -EINVAL;
++ if (status == 0)
++ set_fifo_mode (dev, mode);
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ if (status == 0) {
++ if (mode == 1)
++ DEBUG (dev, "fifo: ep-a 2K, ep-b 2K\n");
++ else if (mode == 2)
++ DEBUG (dev, "fifo: ep-a 2K, ep-b 1K, ep-c 1K\n");
++ /* else all are 1K */
++ }
++ return status;
++}
++EXPORT_SYMBOL (net2280_set_fifo_mode);
++
++/*-------------------------------------------------------------------------*/
++
++/* keeping it simple:
++ * - one bus driver, initted first;
++ * - one function driver, initted second
++ *
++ * most of the work to support multiple net2280 controllers would
++ * be to associate this gadget driver (yes?) with all of them, or
++ * perhaps to bind specific drivers to specific devices.
++ */
++
++static struct net2280 *the_controller;
++
++static void usb_reset (struct net2280 *dev)
++{
++ u32 tmp;
++
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++ (void) readl (&dev->usb->usbctl);
++
++ net2280_led_init (dev);
++
++ /* disable automatic responses, and irqs */
++ writel (0, &dev->usb->stdrsp);
++ writel (0, &dev->regs->pciirqenb0);
++ writel (0, &dev->regs->pciirqenb1);
++
++ /* clear old dma and irq state */
++ for (tmp = 0; tmp < 4; tmp++) {
++ struct net2280_ep *ep = &dev->ep [tmp + 1];
++
++ if (ep->dma)
++ abort_dma (ep);
++ }
++ writel (~0, &dev->regs->irqstat0),
++ writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
++
++ /* reset, and enable pci */
++ tmp = readl (&dev->regs->devinit)
++ | (1 << PCI_ENABLE)
++ | (1 << FIFO_SOFT_RESET)
++ | (1 << USB_SOFT_RESET)
++ | (1 << M8051_RESET);
++ writel (tmp, &dev->regs->devinit);
++
++ /* standard fifo and endpoint allocations */
++ set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
++}
++
++static void usb_reinit (struct net2280 *dev)
++{
++ u32 tmp;
++ int init_dma;
++
++ /* use_dma changes are ignored till next device re-init */
++ init_dma = use_dma;
++
++ /* basic endpoint init */
++ for (tmp = 0; tmp < 7; tmp++) {
++ struct net2280_ep *ep = &dev->ep [tmp];
++
++ ep->ep.name = ep_name [tmp];
++ ep->dev = dev;
++ ep->num = tmp;
++
++ if (tmp > 0 && tmp <= 4) {
++ ep->fifo_size = 1024;
++ if (init_dma)
++ ep->dma = &dev->dma [tmp - 1];
++ } else
++ ep->fifo_size = 64;
++ ep->regs = &dev->epregs [tmp];
++ ep_reset (dev->regs, ep);
++ }
++ dev->ep [0].ep.maxpacket = 64;
++ dev->ep [5].ep.maxpacket = 64;
++ dev->ep [6].ep.maxpacket = 64;
++
++ dev->gadget.ep0 = &dev->ep [0].ep;
++ dev->ep [0].stopped = 0;
++ INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
++
++ /* we want to prevent lowlevel/insecure access from the USB host,
++ * but erratum 0119 means this enable bit is ignored
++ */
++ for (tmp = 0; tmp < 5; tmp++)
++ writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
++}
++
++static void ep0_start (struct net2280 *dev)
++{
++ writel ( (1 << CLEAR_EP_HIDE_STATUS_PHASE)
++ | (1 << CLEAR_NAK_OUT_PACKETS)
++ | (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
++ , &dev->epregs [0].ep_rsp);
++
++ /*
++ * hardware optionally handles a bunch of standard requests
++ * that the API hides from drivers anyway. have it do so.
++ * endpoint status/features are handled in software, to
++ * help pass tests for some dubious behavior.
++ */
++ writel ( (1 << SET_TEST_MODE)
++ | (1 << SET_ADDRESS)
++ | (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
++ | (1 << GET_DEVICE_STATUS)
++ | (1 << GET_INTERFACE_STATUS)
++ , &dev->usb->stdrsp);
++ writel ( (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
++ | (1 << SELF_POWERED_USB_DEVICE)
++ /* erratum 0102 workaround */
++ | ((dev->chiprev == 0100) ? 0 : 1) << SUSPEND_IMMEDIATELY
++ | (1 << REMOTE_WAKEUP_SUPPORT)
++ | (dev->softconnect << USB_DETECT_ENABLE)
++ | (1 << SELF_POWERED_STATUS)
++ , &dev->usb->usbctl);
++
++ /* enable irqs so we can see ep0 and general operation */
++ writel ( (1 << SETUP_PACKET_INTERRUPT_ENABLE)
++ | (1 << ENDPOINT_0_INTERRUPT_ENABLE)
++ , &dev->regs->pciirqenb0);
++ writel ( (1 << PCI_INTERRUPT_ENABLE)
++ | (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
++ | (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
++ | (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
++ | (1 << VBUS_INTERRUPT_ENABLE)
++ | (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
++ | (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
++ , &dev->regs->pciirqenb1);
++
++ /* don't leave any writes posted */
++ (void) readl (&dev->usb->usbctl);
++}
++
++/* when a driver is successfully registered, it will receive
++ * control requests including set_configuration(), which enables
++ * non-control requests. then usb traffic follows until a
++ * disconnect is reported. then a host may connect again, or
++ * the driver might get unbound.
++ */
++int usb_gadget_register_driver (struct usb_gadget_driver *driver)
++{
++ struct net2280 *dev = the_controller;
++ int retval;
++ unsigned i;
++
++ /* insist on high speed support from the driver, since
++ * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
++ * "must not be used in normal operation"
++ */
++ if (!driver
++ || driver->speed != USB_SPEED_HIGH
++ || !driver->bind
++ || !driver->unbind
++ || !driver->setup)
++ return -EINVAL;
++ if (!dev)
++ return -ENODEV;
++ if (dev->driver)
++ return -EBUSY;
++
++ for (i = 0; i < 7; i++)
++ dev->ep [i].irqs = 0;
++
++ /* hook up the driver ... */
++ dev->softconnect = 1;
++ dev->driver = driver;
++ retval = driver->bind (&dev->gadget);
++ if (retval) {
++ DEBUG (dev, "bind to driver %s --> %d\n",
++ driver->driver.name, retval);
++ dev->driver = 0;
++ return retval;
++ }
++
++ /* ... then enable host detection and ep0; and we're ready
++ * for set_configuration as well as eventual disconnect.
++ */
++ net2280_led_active (dev, 1);
++ ep0_start (dev);
++
++ DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
++ driver->driver.name,
++ readl (&dev->usb->usbctl),
++ readl (&dev->usb->stdrsp));
++
++ /* pci writes may still be posted */
++ return 0;
++}
++EXPORT_SYMBOL (usb_gadget_register_driver);
++
++static void
++stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
++{
++ int i;
++
++ /* don't disconnect if it's not connected */
++ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
++ driver = NULL;
++
++ /* stop hardware; prevent new request submissions;
++ * and kill any outstanding requests.
++ */
++ usb_reset (dev);
++ for (i = 0; i < 7; i++)
++ nuke (&dev->ep [i]);
++
++ /* report disconnect; the driver is already quiesced */
++ if (driver) {
++ spin_unlock (&dev->lock);
++ driver->disconnect (&dev->gadget);
++ spin_lock (&dev->lock);
++ }
++
++ usb_reinit (dev);
++}
++
++int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
++{
++ struct net2280 *dev = the_controller;
++ unsigned long flags;
++
++ if (!dev)
++ return -ENODEV;
++ if (!driver || driver != dev->driver)
++ return -EINVAL;
++
++ spin_lock_irqsave (&dev->lock, flags);
++ stop_activity (dev, driver);
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ driver->unbind (&dev->gadget);
++ dev->driver = 0;
++
++ net2280_led_active (dev, 0);
++
++ DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
++ return 0;
++}
++EXPORT_SYMBOL (usb_gadget_unregister_driver);
++
++
++/*-------------------------------------------------------------------------*/
++
++/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
++ * also works for dma-capable endpoints, in pio mode or just
++ * to manually advance the queue after short OUT transfers.
++ */
++static void handle_ep_small (struct net2280_ep *ep)
++{
++ struct net2280_request *req;
++ u32 t;
++ /* 0 error, 1 mid-data, 2 done */
++ int mode = 1;
++
++ if (!list_empty (&ep->queue))
++ req = list_entry (ep->queue.next,
++ struct net2280_request, queue);
++ else
++ req = NULL;
++
++ /* ack all, and handle what we care about */
++ t = readl (&ep->regs->ep_stat);
++ ep->irqs++;
++#if 0
++ VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
++ ep->ep.name, t, req ? &req->req : 0);
++#endif
++ writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat);
++
++ /* for ep0, monitor token irqs to catch data stage length errors
++ * and to synchronize on status.
++ *
++ * also, to defer reporting of protocol stalls ... here's where
++ * data or status first appears, handling stalls here should never
++ * cause trouble on the host side..
++ *
++ * control requests could be slightly faster without token synch for
++ * status, but status can jam up that way.
++ */
++ if (unlikely (ep->num == 0)) {
++ if (ep->is_in) {
++ /* status; stop NAKing */
++ if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) {
++ if (ep->dev->protocol_stall) {
++ ep->stopped = 1;
++ set_halt (ep);
++ }
++ if (!req)
++ allow_status (ep);
++ mode = 2;
++ /* reply to extra IN data tokens with a zlp */
++ } else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
++ if (ep->dev->protocol_stall) {
++ ep->stopped = 1;
++ set_halt (ep);
++ mode = 2;
++ } else if (!req && ep->stopped)
++ write_fifo (ep, NULL);
++ }
++ } else {
++ /* status; stop NAKing */
++ if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
++ if (ep->dev->protocol_stall) {
++ ep->stopped = 1;
++ set_halt (ep);
++ }
++ mode = 2;
++ /* an extra OUT token is an error */
++ } else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
++ && req
++ && req->req.actual == req->req.length)
++ || !req) {
++ ep->dev->protocol_stall = 1;
++ set_halt (ep);
++ ep->stopped = 1;
++ if (req)
++ done (ep, req, -EOVERFLOW);
++ req = NULL;
++ }
++ }
++ }
++
++ if (unlikely (!req))
++ return;
++
++ /* manual DMA queue advance after short OUT */
++ if (likely (ep->dma != 0)) {
++ if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
++ u32 count;
++ int stopped = ep->stopped;
++
++ /* TRANSFERRED works around OUT_DONE erratum 0112.
++ * we expect (N <= maxpacket) bytes; host wrote M.
++ * iff (M < N) we won't ever see a DMA interrupt.
++ */
++ ep->stopped = 1;
++ for (count = 0; ; t = readl (&ep->regs->ep_stat)) {
++
++ /* any preceding dma transfers must finish.
++ * dma handles (M >= N), may empty the queue
++ */
++ scan_dma_completions (ep);
++ if (unlikely (list_empty (&ep->queue)
++ || ep->out_overflow)) {
++ req = NULL;
++ break;
++ }
++ req = list_entry (ep->queue.next,
++ struct net2280_request, queue);
++
++ /* here either (M < N), a "real" short rx;
++ * or (M == N) and the queue didn't empty
++ */
++ if (likely (t & (1 << FIFO_EMPTY))) {
++ count = readl (&ep->dma->dmacount);
++ count &= DMA_BYTE_COUNT_MASK;
++ if (readl (&ep->dma->dmadesc)
++ != req->td_dma)
++ req = NULL;
++ break;
++ }
++ udelay(1);
++ }
++
++ /* stop DMA, leave ep NAKing */
++ writel ((1 << DMA_ABORT), &ep->dma->dmastat);
++ spin_stop_dma (ep->dma);
++
++ if (likely (req != 0)) {
++ req->td->dmacount = 0;
++ t = readl (&ep->regs->ep_avail);
++ dma_done (ep, req, count, t);
++ }
++
++ /* also flush to prevent erratum 0106 trouble */
++ if (unlikely (ep->out_overflow
++ || (ep->dev->chiprev == 0x0100
++ && ep->dev->gadget.speed
++ == USB_SPEED_FULL))) {
++ out_flush (ep);
++ ep->out_overflow = 0;
++ }
++
++ /* (re)start dma if needed, stop NAKing */
++ ep->stopped = stopped;
++ if (!list_empty (&ep->queue))
++ restart_dma (ep);
++ } else
++ DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
++ ep->ep.name, t);
++ return;
++
++ /* data packet(s) received (in the fifo, OUT) */
++ } else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) {
++ if (read_fifo (ep, req) && ep->num != 0)
++ mode = 2;
++
++ /* data packet(s) transmitted (IN) */
++ } else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) {
++ unsigned len;
++
++ len = req->req.length - req->req.actual;
++ if (len > ep->ep.maxpacket)
++ len = ep->ep.maxpacket;
++ req->req.actual += len;
++
++ /* if we wrote it all, we're usually done */
++ if (req->req.actual == req->req.length) {
++ if (ep->num == 0) {
++ /* wait for control status */
++ if (mode != 2)
++ req = NULL;
++ } else if (!req->req.zero || len != ep->ep.maxpacket)
++ mode = 2;
++ }
++
++ /* there was nothing to do ... */
++ } else if (mode == 1)
++ return;
++
++ /* done */
++ if (mode == 2) {
++ /* stream endpoints often resubmit/unlink in completion */
++ done (ep, req, 0);
++
++ /* maybe advance queue to next request */
++ if (ep->num == 0) {
++ /* NOTE: net2280 could let gadget driver start the
++ * status stage later. since not all controllers let
++ * them control that, the api doesn't (yet) allow it.
++ */
++ if (!ep->stopped)
++ allow_status (ep);
++ req = NULL;
++ } else {
++ if (!list_empty (&ep->queue) && !ep->stopped)
++ req = list_entry (ep->queue.next,
++ struct net2280_request, queue);
++ else
++ req = NULL;
++ if (req && !ep->is_in)
++ stop_out_naking (ep);
++ }
++ }
++
++ /* is there a buffer for the next packet?
++ * for best streaming performance, make sure there is one.
++ */
++ if (req && !ep->stopped) {
++
++ /* load IN fifo with next packet (may be zlp) */
++ if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
++ write_fifo (ep, &req->req);
++ }
++}
++
++static struct net2280_ep *
++get_ep_by_addr (struct net2280 *dev, u16 wIndex)
++{
++ struct net2280_ep *ep;
++
++ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
++ return &dev->ep [0];
++ list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
++ u8 bEndpointAddress;
++
++ if (!ep->desc)
++ continue;
++ bEndpointAddress = ep->desc->bEndpointAddress;
++ if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
++ continue;
++ if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
++ return ep;
++ }
++ return NULL;
++}
++
++static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
++{
++ struct net2280_ep *ep;
++ u32 num, scratch;
++
++ /* most of these don't need individual acks */
++ stat &= ~(1 << INTA_ASSERTED);
++ if (!stat)
++ return;
++ // DEBUG (dev, "irqstat0 %04x\n", stat);
++
++ /* starting a control request? */
++ if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) {
++ union {
++ u32 raw [2];
++ struct usb_ctrlrequest r;
++ } u;
++ int tmp = 0;
++ struct net2280_request *req;
++
++ if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
++ if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED))
++ dev->gadget.speed = USB_SPEED_HIGH;
++ else
++ dev->gadget.speed = USB_SPEED_FULL;
++ net2280_led_speed (dev, dev->gadget.speed);
++ DEBUG (dev, "%s speed\n",
++ (dev->gadget.speed == USB_SPEED_HIGH)
++ ? "high" : "full");
++ }
++
++ ep = &dev->ep [0];
++ ep->irqs++;
++
++ /* make sure any leftover request state is cleared */
++ stat &= ~(1 << ENDPOINT_0_INTERRUPT);
++ while (!list_empty (&ep->queue)) {
++ req = list_entry (ep->queue.next,
++ struct net2280_request, queue);
++ done (ep, req, (req->req.actual == req->req.length)
++ ? 0 : -EPROTO);
++ }
++ ep->stopped = 0;
++ dev->protocol_stall = 0;
++ writel ( (1 << TIMEOUT)
++ | (1 << USB_STALL_SENT)
++ | (1 << USB_IN_NAK_SENT)
++ | (1 << USB_IN_ACK_RCVD)
++ | (1 << USB_OUT_PING_NAK_SENT)
++ | (1 << USB_OUT_ACK_SENT)
++ | (1 << FIFO_OVERFLOW)
++ | (1 << FIFO_UNDERFLOW)
++ | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
++ | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
++ | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
++ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
++ | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
++ | (1 << DATA_IN_TOKEN_INTERRUPT)
++ , &ep->regs->ep_stat);
++ u.raw [0] = readl (&dev->usb->setup0123);
++ u.raw [1] = readl (&dev->usb->setup4567);
++
++ cpu_to_le32s (&u.raw [0]);
++ cpu_to_le32s (&u.raw [1]);
++
++ le16_to_cpus (&u.r.wValue);
++ le16_to_cpus (&u.r.wIndex);
++ le16_to_cpus (&u.r.wLength);
++
++ /* ack the irq */
++ writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0);
++ stat ^= (1 << SETUP_PACKET_INTERRUPT);
++
++ /* watch control traffic at the token level, and force
++ * synchronization before letting the status stage happen.
++ * FIXME ignore tokens we'll NAK, until driver responds.
++ * that'll mean a lot less irqs for some drivers.
++ */
++ ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
++ if (ep->is_in) {
++ scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
++ | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
++ | (1 << DATA_IN_TOKEN_INTERRUPT);
++ stop_out_naking (ep);
++ } else
++ scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT)
++ | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
++ | (1 << DATA_IN_TOKEN_INTERRUPT);
++ writel (scratch, &dev->epregs [0].ep_irqenb);
++
++ /* we made the hardware handle most lowlevel requests;
++ * everything else goes uplevel to the gadget code.
++ */
++ switch (u.r.bRequest) {
++ case USB_REQ_GET_STATUS: {
++ struct net2280_ep *e;
++ u16 status;
++
++ /* hw handles device and interface status */
++ if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
++ goto delegate;
++ if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0
++ || u.r.wLength > 2)
++ goto do_stall;
++
++ if (readl (&e->regs->ep_rsp)
++ & (1 << SET_ENDPOINT_HALT))
++ status = __constant_cpu_to_le16 (1);
++ else
++ status = __constant_cpu_to_le16 (0);
++
++ /* don't bother with a request object! */
++ writel (0, &dev->epregs [0].ep_irqenb);
++ set_fifo_bytecount (ep, u.r.wLength);
++ writel (status, &dev->epregs [0].ep_data);
++ allow_status (ep);
++ VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
++ goto next_endpoints;
++ }
++ break;
++ case USB_REQ_CLEAR_FEATURE: {
++ struct net2280_ep *e;
++
++ /* hw handles device features */
++ if (u.r.bRequestType != USB_RECIP_ENDPOINT)
++ goto delegate;
++ if (u.r.wValue != USB_ENDPOINT_HALT
++ || u.r.wLength != 0)
++ goto do_stall;
++ if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0)
++ goto do_stall;
++ clear_halt (e);
++ allow_status (ep);
++ VDEBUG (dev, "%s clear halt\n", ep->ep.name);
++ goto next_endpoints;
++ }
++ break;
++ case USB_REQ_SET_FEATURE: {
++ struct net2280_ep *e;
++
++ /* hw handles device features */
++ if (u.r.bRequestType != USB_RECIP_ENDPOINT)
++ goto delegate;
++ if (u.r.wValue != USB_ENDPOINT_HALT
++ || u.r.wLength != 0)
++ goto do_stall;
++ if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0)
++ goto do_stall;
++ set_halt (e);
++ allow_status (ep);
++ VDEBUG (dev, "%s set halt\n", ep->ep.name);
++ goto next_endpoints;
++ }
++ break;
++ default:
++delegate:
++ VDEBUG (dev, "setup %02x.%02x v%04x i%04x "
++ "ep_cfg %08x\n",
++ u.r.bRequestType, u.r.bRequest,
++ u.r.wValue, u.r.wIndex,
++ readl (&ep->regs->ep_cfg));
++ spin_unlock (&dev->lock);
++ tmp = dev->driver->setup (&dev->gadget, &u.r);
++ spin_lock (&dev->lock);
++ }
++
++ /* stall ep0 on error */
++ if (tmp < 0) {
++do_stall:
++ VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
++ u.r.bRequestType, u.r.bRequest, tmp);
++ dev->protocol_stall = 1;
++ }
++
++ /* some in/out token irq should follow; maybe stall then.
++ * driver must queue a request (even zlp) or halt ep0
++ * before the host times out.
++ */
++ }
++
++next_endpoints:
++ /* endpoint data irq ? */
++ scratch = stat & 0x7f;
++ stat &= ~0x7f;
++ for (num = 0; scratch; num++) {
++ u32 t;
++
++ /* do this endpoint's FIFO and queue need tending? */
++ t = 1 << num;
++ if ((scratch & t) == 0)
++ continue;
++ scratch ^= t;
++
++ ep = &dev->ep [num];
++ handle_ep_small (ep);
++ }
++
++ if (stat)
++ DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
++}
++
++#define DMA_INTERRUPTS ( \
++ (1 << DMA_D_INTERRUPT) \
++ | (1 << DMA_C_INTERRUPT) \
++ | (1 << DMA_B_INTERRUPT) \
++ | (1 << DMA_A_INTERRUPT))
++#define PCI_ERROR_INTERRUPTS ( \
++ (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \
++ | (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \
++ | (1 << PCI_RETRY_ABORT_INTERRUPT))
++
++static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
++{
++ struct net2280_ep *ep;
++ u32 tmp, num, scratch;
++
++ /* after disconnect there's nothing else to do! */
++ tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
++ if (stat & tmp) {
++ writel (tmp, &dev->regs->irqstat1);
++ if (((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) != 0
++ || (readl (&dev->usb->usbctl) & (1 << VBUS_PIN)) == 0
++ ) && dev->gadget.speed != USB_SPEED_UNKNOWN) {
++ DEBUG (dev, "disconnect %s\n",
++ dev->driver->driver.name);
++ stop_activity (dev, dev->driver);
++ ep0_start (dev);
++ return;
++ }
++ stat &= ~tmp;
++
++ /* vBUS can bounce ... one of many reasons to ignore the
++ * notion of hotplug events on bus connect/disconnect!
++ */
++ if (!stat)
++ return;
++ }
++
++ /* NOTE: chip stays in PCI D0 state for now, but it could
++ * enter D1 to save more power
++ */
++ tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
++ if (stat & tmp) {
++ writel (tmp, &dev->regs->irqstat1);
++ if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
++ if (dev->driver->suspend)
++ dev->driver->suspend (&dev->gadget);
++ /* we use SUSPEND_IMMEDIATELY */
++ stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
++ } else {
++ if (dev->driver->resume)
++ dev->driver->resume (&dev->gadget);
++ /* at high speed, note erratum 0133 */
++ }
++ stat &= ~tmp;
++ }
++
++ /* clear any other status/irqs */
++ if (stat)
++ writel (stat, &dev->regs->irqstat1);
++
++ /* some status we can just ignore */
++ stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
++ | (1 << SUSPEND_REQUEST_INTERRUPT)
++ | (1 << RESUME_INTERRUPT)
++ | (1 << SOF_INTERRUPT));
++ if (!stat)
++ return;
++ // DEBUG (dev, "irqstat1 %08x\n", stat);
++
++ /* DMA status, for ep-{a,b,c,d} */
++ scratch = stat & DMA_INTERRUPTS;
++ stat &= ~DMA_INTERRUPTS;
++ scratch >>= 9;
++ for (num = 0; scratch; num++) {
++ struct net2280_dma_regs *dma;
++
++ tmp = 1 << num;
++ if ((tmp & scratch) == 0)
++ continue;
++ scratch ^= tmp;
++
++ ep = &dev->ep [num + 1];
++ dma = ep->dma;
++
++ if (!dma)
++ continue;
++
++ /* clear ep's dma status */
++ tmp = readl (&dma->dmastat);
++ writel (tmp, &dma->dmastat);
++
++ /* chaining should stop on abort, short OUT from fifo,
++ * or (stat0 codepath) short OUT transfer.
++ */
++ if (!use_dma_chaining) {
++ if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))
++ == 0) {
++ DEBUG (ep->dev, "%s no xact done? %08x\n",
++ ep->ep.name, tmp);
++ continue;
++ }
++ stop_dma (ep->dma);
++ }
++
++ /* OUT transfers terminate when the data from the
++ * host is in our memory. Process whatever's done.
++ * On this path, we know transfer's last packet wasn't
++ * less than req->length. NAK_OUT_PACKETS may be set,
++ * or the FIFO may already be holding new packets.
++ *
++ * IN transfers can linger in the FIFO for a very
++ * long time ... we ignore that for now, accounting
++ * precisely (like PIO does) needs per-packet irqs
++ */
++ scan_dma_completions (ep);
++
++ /* disable dma on inactive queues; else maybe restart */
++ if (list_empty (&ep->queue)) {
++ if (use_dma_chaining)
++ stop_dma (ep->dma);
++ } else {
++ tmp = readl (&dma->dmactl);
++ if (!use_dma_chaining
++ || (tmp & (1 << DMA_ENABLE)) == 0)
++ restart_dma (ep);
++ else if (ep->is_in && use_dma_chaining) {
++ struct net2280_request *req;
++ u32 dmacount;
++
++ /* the descriptor at the head of the chain
++ * may still have VALID_BIT clear; that's
++ * used to trigger changing DMA_FIFO_VALIDATE
++ * (affects automagic zlp writes).
++ */
++ req = list_entry (ep->queue.next,
++ struct net2280_request, queue);
++ dmacount = req->td->dmacount;
++ dmacount &= __constant_cpu_to_le32 (
++ (1 << VALID_BIT)
++ | DMA_BYTE_COUNT_MASK);
++ if (dmacount && (dmacount & valid_bit) == 0)
++ restart_dma (ep);
++ }
++ }
++ ep->irqs++;
++ }
++
++ /* NOTE: there are other PCI errors we might usefully notice.
++ * if they appear very often, here's where to try recovering.
++ */
++ if (stat & PCI_ERROR_INTERRUPTS) {
++ ERROR (dev, "pci dma error; stat %08x\n", stat);
++ stat &= ~PCI_ERROR_INTERRUPTS;
++ /* these are fatal errors, but "maybe" they won't
++ * happen again ...
++ */
++ stop_activity (dev, dev->driver);
++ ep0_start (dev);
++ stat = 0;
++ }
++
++ if (stat)
++ DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
++}
++
++static irqreturn_t net2280_irq (int irq, void *_dev, struct pt_regs * r)
++{
++ struct net2280 *dev = _dev;
++
++ spin_lock (&dev->lock);
++
++ /* handle disconnect, dma, and more */
++ handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
++
++ /* control requests and PIO */
++ handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
++
++ spin_unlock (&dev->lock);
++
++ return IRQ_HANDLED;
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* tear down the binding between this driver and the pci device */
++
++static void net2280_remove (struct pci_dev *pdev)
++{
++ struct net2280 *dev = pci_get_drvdata (pdev);
++
++ /* start with the driver above us */
++ if (dev->driver) {
++ /* should have been done already by driver model core */
++ WARN (dev, "pci remove, driver '%s' is still registered\n",
++ dev->driver->driver.name);
++ usb_gadget_unregister_driver (dev->driver);
++ }
++
++ /* then clean up the resources we allocated during probe() */
++ net2280_led_shutdown (dev);
++ if (dev->requests) {
++ int i;
++ for (i = 1; i < 5; i++) {
++ if (!dev->ep [i].dummy)
++ continue;
++ pci_pool_free (dev->requests, dev->ep [i].dummy,
++ dev->ep [i].td_dma);
++ }
++ pci_pool_destroy (dev->requests);
++ }
++ if (dev->got_irq)
++ free_irq (pdev->irq, dev);
++ if (dev->regs)
++ iounmap (dev->regs);
++ if (dev->region)
++ release_mem_region (pci_resource_start (pdev, 0),
++ pci_resource_len (pdev, 0));
++ if (dev->enabled)
++ pci_disable_device (pdev);
++ pci_set_drvdata (pdev, 0);
++
++ INFO (dev, "unbind from pci %s\n", pdev->slot_name);
++
++ kfree (dev);
++ the_controller = 0;
++}
++
++/* wrap this driver around the specified device, but
++ * don't respond over USB until a gadget driver binds to us.
++ */
++
++static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ struct net2280 *dev;
++ unsigned long resource, len;
++ void *base = NULL;
++ int retval, i;
++ char buf [8], *bufp;
++
++ /* if you want to support more than one controller in a system,
++ * usb_gadget_driver_{register,unregister}() must change.
++ */
++ if (the_controller) {
++ WARN (the_controller, "ignoring %s\n", pdev->slot_name);
++ return -EBUSY;
++ }
++
++ /* alloc, and start init */
++ dev = kmalloc (sizeof *dev, SLAB_KERNEL);
++ if (dev == NULL){
++ retval = -ENOMEM;
++ goto done;
++ }
++
++ memset (dev, 0, sizeof *dev);
++ spin_lock_init (&dev->lock);
++ dev->pdev = pdev;
++ dev->gadget.ops = &net2280_ops;
++ dev->gadget.is_dualspeed = 1;
++
++ dev->gadget.dev.bus_id = pdev->slot_name;
++ dev->gadget.name = driver_name;
++
++ /* now all the pci goodies ... */
++ if (pci_enable_device (pdev) < 0) {
++ retval = -ENODEV;
++ goto done;
++ }
++ dev->enabled = 1;
++
++ /* BAR 0 holds all the registers
++ * BAR 1 is 8051 memory; unused here (note erratum 0103)
++ * BAR 2 is fifo memory; unused here
++ */
++ resource = pci_resource_start (pdev, 0);
++ len = pci_resource_len (pdev, 0);
++ if (!request_mem_region (resource, len, driver_name)) {
++ DEBUG (dev, "controller already in use\n");
++ retval = -EBUSY;
++ goto done;
++ }
++ dev->region = 1;
++
++ base = ioremap_nocache (resource, len);
++ if (base == NULL) {
++ DEBUG (dev, "can't map memory\n");
++ retval = -EFAULT;
++ goto done;
++ }
++ dev->regs = (struct net2280_regs *) base;
++ dev->usb = (struct net2280_usb_regs *) (base + 0x0080);
++ dev->pci = (struct net2280_pci_regs *) (base + 0x0100);
++ dev->dma = (struct net2280_dma_regs *) (base + 0x0180);
++ dev->dep = (struct net2280_dep_regs *) (base + 0x0200);
++ dev->epregs = (struct net2280_ep_regs *) (base + 0x0300);
++
++ /* put into initial config, link up all endpoints */
++ writel (0, &dev->usb->usbctl);
++ usb_reset (dev);
++ usb_reinit (dev);
++
++ /* irq setup after old hardware is cleaned up */
++ if (!pdev->irq) {
++ ERROR (dev, "No IRQ. Check PCI setup!\n");
++ retval = -ENODEV;
++ goto done;
++ }
++#ifndef __sparc__
++ snprintf (buf, sizeof buf, "%d", pdev->irq);
++ bufp = buf;
++#else
++ bufp = __irq_itoa(pdev->irq);
++#endif
++ if (request_irq (pdev->irq, net2280_irq, SA_SHIRQ, driver_name, dev)
++ != 0) {
++ ERROR (dev, "request interrupt %s failed\n", bufp);
++ retval = -EBUSY;
++ goto done;
++ }
++ dev->got_irq = 1;
++
++ /* DMA setup */
++ dev->requests = pci_pool_create ("requests", pdev,
++ sizeof (struct net2280_dma),
++ 0 /* no alignment requirements */,
++ 0 /* or page-crossing issues */,
++ SLAB_KERNEL /* 2.4 only */ );
++ if (!dev->requests) {
++ DEBUG (dev, "can't get request pool\n");
++ retval = -ENOMEM;
++ goto done;
++ }
++ for (i = 1; i < 5; i++) {
++ struct net2280_dma *td;
++
++ td = pci_pool_alloc (dev->requests, GFP_KERNEL,
++ &dev->ep [i].td_dma);
++ if (!td) {
++ DEBUG (dev, "can't get dummy %d\n", i);
++ retval = -ENOMEM;
++ goto done;
++ }
++ td->dmacount = 0; /* not VALID */
++ td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
++ td->dmadesc = td->dmaaddr;
++ dev->ep [i].dummy = td;
++ }
++
++ /* enable lower-overhead pci memory bursts during DMA */
++ writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
++ // 256 write retries may not be enough...
++ // | (1 << PCI_RETRY_ABORT_ENABLE)
++ | (1 << DMA_READ_MULTIPLE_ENABLE)
++ | (1 << DMA_READ_LINE_ENABLE)
++ , &dev->pci->pcimstctl);
++ /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
++ pci_set_master (pdev);
++ pci_set_mwi (pdev);
++
++ /* ... also flushes any posted pci writes */
++ dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;
++
++ /* done */
++ pci_set_drvdata (pdev, dev);
++ INFO (dev, "%s\n", driver_desc);
++ INFO (dev, "irq %s, pci mem %p, chip rev %04x\n",
++ bufp, base, dev->chiprev);
++ INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
++ use_dma
++ ? (use_dma_chaining ? "chaining" : "enabled")
++ : "disabled");
++ the_controller = dev;
++
++ return 0;
++
++done:
++ if (dev)
++ net2280_remove (pdev);
++ return retval;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static struct pci_device_id pci_ids [] = { {
++ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
++ .class_mask = ~0,
++ .vendor = 0x17cc,
++ .device = 0x2280,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++
++}, { /* end: all zeroes */ }
++};
++MODULE_DEVICE_TABLE (pci, pci_ids);
++
++/* pci driver glue; this is a "new style" PCI driver module */
++static struct pci_driver net2280_pci_driver = {
++ .name = (char *) driver_name,
++ .id_table = pci_ids,
++
++ .probe = net2280_probe,
++ .remove = net2280_remove,
++
++ /* FIXME add power management support */
++};
++
++MODULE_DESCRIPTION (DRIVER_DESC);
++MODULE_AUTHOR ("David Brownell");
++MODULE_LICENSE ("GPL");
++
++static int __init init (void)
++{
++ if (!use_dma)
++ use_dma_chaining = 0;
++ return pci_module_init (&net2280_pci_driver);
++}
++module_init (init);
++
++static void __exit cleanup (void)
++{
++ pci_unregister_driver (&net2280_pci_driver);
++}
++module_exit (cleanup);
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/net2280.h kernel/drivers/usb/gadget/net2280.h
+--- /tmp/kernel/drivers/usb/gadget/net2280.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/net2280.h 2005-04-22 17:53:19.483531538 +0200
+@@ -0,0 +1,756 @@
++/*
++ * NetChip 2280 high/full speed USB device controller.
++ * Unlike many such controllers, this one talks PCI.
++ */
++
++/*
++ * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
++ * Copyright (C) 2003 David Brownell
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++/*-------------------------------------------------------------------------*/
++
++/* NET2280 MEMORY MAPPED REGISTERS
++ *
++ * The register layout came from the chip documentation, and the bit
++ * number definitions were extracted from chip specification.
++ *
++ * Use the shift operator ('<<') to build bit masks, with readl/writel
++ * to access the registers through PCI.
++ */
++
++/* main registers, BAR0 + 0x0000 */
++struct net2280_regs {
++ // offset 0x0000
++ u32 devinit;
++#define LOCAL_CLOCK_FREQUENCY 8
++#define FORCE_PCI_RESET 7
++#define PCI_ID 6
++#define PCI_ENABLE 5
++#define FIFO_SOFT_RESET 4
++#define CFG_SOFT_RESET 3
++#define PCI_SOFT_RESET 2
++#define USB_SOFT_RESET 1
++#define M8051_RESET 0
++ u32 eectl;
++#define EEPROM_ADDRESS_WIDTH 23
++#define EEPROM_CHIP_SELECT_ACTIVE 22
++#define EEPROM_PRESENT 21
++#define EEPROM_VALID 20
++#define EEPROM_BUSY 19
++#define EEPROM_CHIP_SELECT_ENABLE 18
++#define EEPROM_BYTE_READ_START 17
++#define EEPROM_BYTE_WRITE_START 16
++#define EEPROM_READ_DATA 8
++#define EEPROM_WRITE_DATA 0
++ u32 eeclkfreq;
++ u32 _unused0;
++ // offset 0x0010
++
++ u32 pciirqenb0; /* interrupt PCI master ... */
++#define SETUP_PACKET_INTERRUPT_ENABLE 7
++#define ENDPOINT_F_INTERRUPT_ENABLE 6
++#define ENDPOINT_E_INTERRUPT_ENABLE 5
++#define ENDPOINT_D_INTERRUPT_ENABLE 4
++#define ENDPOINT_C_INTERRUPT_ENABLE 3
++#define ENDPOINT_B_INTERRUPT_ENABLE 2
++#define ENDPOINT_A_INTERRUPT_ENABLE 1
++#define ENDPOINT_0_INTERRUPT_ENABLE 0
++ u32 pciirqenb1;
++#define PCI_INTERRUPT_ENABLE 31
++#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
++#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
++#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
++#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
++#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
++#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18
++#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
++#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
++#define GPIO_INTERRUPT_ENABLE 13
++#define DMA_D_INTERRUPT_ENABLE 12
++#define DMA_C_INTERRUPT_ENABLE 11
++#define DMA_B_INTERRUPT_ENABLE 10
++#define DMA_A_INTERRUPT_ENABLE 9
++#define EEPROM_DONE_INTERRUPT_ENABLE 8
++#define VBUS_INTERRUPT_ENABLE 7
++#define CONTROL_STATUS_INTERRUPT_ENABLE 6
++#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
++#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
++#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
++#define RESUME_INTERRUPT_ENABLE 1
++#define SOF_INTERRUPT_ENABLE 0
++ u32 cpu_irqenb0; /* ... or onboard 8051 */
++#define SETUP_PACKET_INTERRUPT_ENABLE 7
++#define ENDPOINT_F_INTERRUPT_ENABLE 6
++#define ENDPOINT_E_INTERRUPT_ENABLE 5
++#define ENDPOINT_D_INTERRUPT_ENABLE 4
++#define ENDPOINT_C_INTERRUPT_ENABLE 3
++#define ENDPOINT_B_INTERRUPT_ENABLE 2
++#define ENDPOINT_A_INTERRUPT_ENABLE 1
++#define ENDPOINT_0_INTERRUPT_ENABLE 0
++ u32 cpu_irqenb1;
++#define CPU_INTERRUPT_ENABLE 31
++#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
++#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
++#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
++#define PCI_INTA_INTERRUPT_ENABLE 24
++#define PCI_PME_INTERRUPT_ENABLE 23
++#define PCI_SERR_INTERRUPT_ENABLE 22
++#define PCI_PERR_INTERRUPT_ENABLE 21
++#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
++#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
++#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
++#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
++#define GPIO_INTERRUPT_ENABLE 13
++#define DMA_D_INTERRUPT_ENABLE 12
++#define DMA_C_INTERRUPT_ENABLE 11
++#define DMA_B_INTERRUPT_ENABLE 10
++#define DMA_A_INTERRUPT_ENABLE 9
++#define EEPROM_DONE_INTERRUPT_ENABLE 8
++#define VBUS_INTERRUPT_ENABLE 7
++#define CONTROL_STATUS_INTERRUPT_ENABLE 6
++#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
++#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
++#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
++#define RESUME_INTERRUPT_ENABLE 1
++#define SOF_INTERRUPT_ENABLE 0
++
++ // offset 0x0020
++ u32 _unused1;
++ u32 usbirqenb1;
++#define USB_INTERRUPT_ENABLE 31
++#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
++#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
++#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
++#define PCI_INTA_INTERRUPT_ENABLE 24
++#define PCI_PME_INTERRUPT_ENABLE 23
++#define PCI_SERR_INTERRUPT_ENABLE 22
++#define PCI_PERR_INTERRUPT_ENABLE 21
++#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
++#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
++#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
++#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
++#define GPIO_INTERRUPT_ENABLE 13
++#define DMA_D_INTERRUPT_ENABLE 12
++#define DMA_C_INTERRUPT_ENABLE 11
++#define DMA_B_INTERRUPT_ENABLE 10
++#define DMA_A_INTERRUPT_ENABLE 9
++#define EEPROM_DONE_INTERRUPT_ENABLE 8
++#define VBUS_INTERRUPT_ENABLE 7
++#define CONTROL_STATUS_INTERRUPT_ENABLE 6
++#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
++#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
++#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
++#define RESUME_INTERRUPT_ENABLE 1
++#define SOF_INTERRUPT_ENABLE 0
++ u32 irqstat0;
++#define INTA_ASSERTED 12
++#define SETUP_PACKET_INTERRUPT 7
++#define ENDPOINT_F_INTERRUPT 6
++#define ENDPOINT_E_INTERRUPT 5
++#define ENDPOINT_D_INTERRUPT 4
++#define ENDPOINT_C_INTERRUPT 3
++#define ENDPOINT_B_INTERRUPT 2
++#define ENDPOINT_A_INTERRUPT 1
++#define ENDPOINT_0_INTERRUPT 0
++ u32 irqstat1;
++#define POWER_STATE_CHANGE_INTERRUPT 27
++#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
++#define PCI_PARITY_ERROR_INTERRUPT 25
++#define PCI_INTA_INTERRUPT 24
++#define PCI_PME_INTERRUPT 23
++#define PCI_SERR_INTERRUPT 22
++#define PCI_PERR_INTERRUPT 21
++#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20
++#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19
++#define PCI_RETRY_ABORT_INTERRUPT 17
++#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16
++#define GPIO_INTERRUPT 13
++#define DMA_D_INTERRUPT 12
++#define DMA_C_INTERRUPT 11
++#define DMA_B_INTERRUPT 10
++#define DMA_A_INTERRUPT 9
++#define EEPROM_DONE_INTERRUPT 8
++#define VBUS_INTERRUPT 7
++#define CONTROL_STATUS_INTERRUPT 6
++#define ROOT_PORT_RESET_INTERRUPT 4
++#define SUSPEND_REQUEST_INTERRUPT 3
++#define SUSPEND_REQUEST_CHANGE_INTERRUPT 2
++#define RESUME_INTERRUPT 1
++#define SOF_INTERRUPT 0
++ // offset 0x0030
++ u32 idxaddr;
++ u32 idxdata;
++ u32 fifoctl;
++#define PCI_BASE2_RANGE 16
++#define IGNORE_FIFO_AVAILABILITY 3
++#define PCI_BASE2_SELECT 2
++#define FIFO_CONFIGURATION_SELECT 0
++ u32 _unused2;
++ // offset 0x0040
++ u32 memaddr;
++#define START 28
++#define DIRECTION 27
++#define FIFO_DIAGNOSTIC_SELECT 24
++#define MEMORY_ADDRESS 0
++ u32 memdata0;
++ u32 memdata1;
++ u32 _unused3;
++ // offset 0x0050
++ u32 gpioctl;
++#define GPIO3_LED_SELECT 12
++#define GPIO3_INTERRUPT_ENABLE 11
++#define GPIO2_INTERRUPT_ENABLE 10
++#define GPIO1_INTERRUPT_ENABLE 9
++#define GPIO0_INTERRUPT_ENABLE 8
++#define GPIO3_OUTPUT_ENABLE 7
++#define GPIO2_OUTPUT_ENABLE 6
++#define GPIO1_OUTPUT_ENABLE 5
++#define GPIO0_OUTPUT_ENABLE 4
++#define GPIO3_DATA 3
++#define GPIO2_DATA 2
++#define GPIO1_DATA 1
++#define GPIO0_DATA 0
++ u32 gpiostat;
++#define GPIO3_INTERRUPT 3
++#define GPIO2_INTERRUPT 2
++#define GPIO1_INTERRUPT 1
++#define GPIO0_INTERRUPT 0
++} __attribute__ ((packed));
++
++/* usb control, BAR0 + 0x0080 */
++struct net2280_usb_regs {
++ // offset 0x0080
++ u32 stdrsp;
++#define STALL_UNSUPPORTED_REQUESTS 31
++#define SET_TEST_MODE 16
++#define GET_OTHER_SPEED_CONFIGURATION 15
++#define GET_DEVICE_QUALIFIER 14
++#define SET_ADDRESS 13
++#define ENDPOINT_SET_CLEAR_HALT 12
++#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11
++#define GET_STRING_DESCRIPTOR_2 10
++#define GET_STRING_DESCRIPTOR_1 9
++#define GET_STRING_DESCRIPTOR_0 8
++#define GET_SET_INTERFACE 6
++#define GET_SET_CONFIGURATION 5
++#define GET_CONFIGURATION_DESCRIPTOR 4
++#define GET_DEVICE_DESCRIPTOR 3
++#define GET_ENDPOINT_STATUS 2
++#define GET_INTERFACE_STATUS 1
++#define GET_DEVICE_STATUS 0
++ u32 prodvendid;
++#define PRODUCT_ID 16
++#define VENDOR_ID 0
++ u32 relnum;
++ u32 usbctl;
++#define SERIAL_NUMBER_INDEX 16
++#define PRODUCT_ID_STRING_ENABLE 13
++#define VENDOR_ID_STRING_ENABLE 12
++#define USB_ROOT_PORT_WAKEUP_ENABLE 11
++#define VBUS_PIN 10
++#define TIMED_DISCONNECT 9
++#define SUSPEND_IMMEDIATELY 7
++#define SELF_POWERED_USB_DEVICE 6
++#define REMOTE_WAKEUP_SUPPORT 5
++#define PME_POLARITY 4
++#define USB_DETECT_ENABLE 3
++#define PME_WAKEUP_ENABLE 2
++#define DEVICE_REMOTE_WAKEUP_ENABLE 1
++#define SELF_POWERED_STATUS 0
++ // offset 0x0090
++ u32 usbstat;
++#define HIGH_SPEED 7
++#define FULL_SPEED 6
++#define GENERATE_RESUME 5
++#define GENERATE_DEVICE_REMOTE_WAKEUP 4
++ u32 xcvrdiag;
++#define FORCE_HIGH_SPEED_MODE 31
++#define FORCE_FULL_SPEED_MODE 30
++#define USB_TEST_MODE 24
++#define LINE_STATE 16
++#define TRANSCEIVER_OPERATION_MODE 2
++#define TRANSCEIVER_SELECT 1
++#define TERMINATION_SELECT 0
++ u32 setup0123;
++ u32 setup4567;
++ // offset 0x0090
++ u32 _unused0;
++ u32 ouraddr;
++#define FORCE_IMMEDIATE 7
++#define OUR_USB_ADDRESS 0
++ u32 ourconfig;
++} __attribute__ ((packed));
++
++/* pci control, BAR0 + 0x0100 */
++struct net2280_pci_regs {
++ // offset 0x0100
++ u32 pcimstctl;
++#define PCI_ARBITER_PARK_SELECT 13
++#define PCI_MULTI LEVEL_ARBITER 12
++#define PCI_RETRY_ABORT_ENABLE 11
++#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10
++#define DMA_READ_MULTIPLE_ENABLE 9
++#define DMA_READ_LINE_ENABLE 8
++#define PCI_MASTER_COMMAND_SELECT 6
++#define MEM_READ_OR_WRITE 0
++#define IO_READ_OR_WRITE 1
++#define CFG_READ_OR_WRITE 2
++#define PCI_MASTER_START 5
++#define PCI_MASTER_READ_WRITE 4
++#define PCI_MASTER_WRITE 0
++#define PCI_MASTER_READ 1
++#define PCI_MASTER_BYTE_WRITE_ENABLES 0
++ u32 pcimstaddr;
++ u32 pcimstdata;
++ u32 pcimststat;
++#define PCI_ARBITER_CLEAR 2
++#define PCI_EXTERNAL_ARBITER 1
++#define PCI_HOST_MODE 0
++} __attribute__ ((packed));
++
++/* dma control, BAR0 + 0x0180 ... array of four structs like this,
++ * for channels 0..3. see also struct net2280_dma: descriptor
++ * that can be loaded into some of these registers.
++ */
++struct net2280_dma_regs { /* [11.7] */
++ // offset 0x0180, 0x01a0, 0x01c0, 0x01e0,
++ u32 dmactl;
++#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25
++#define DMA_CLEAR_COUNT_ENABLE 21
++#define DESCRIPTOR_POLLING_RATE 19
++#define POLL_CONTINUOUS 0
++#define POLL_1_USEC 1
++#define POLL_100_USEC 2
++#define POLL_1_MSEC 3
++#define DMA_VALID_BIT_POLLING_ENABLE 18
++#define DMA_VALID_BIT_ENABLE 17
++#define DMA_SCATTER_GATHER_ENABLE 16
++#define DMA_OUT_AUTO_START_ENABLE 4
++#define DMA_PREEMPT_ENABLE 3
++#define DMA_FIFO_VALIDATE 2
++#define DMA_ENABLE 1
++#define DMA_ADDRESS_HOLD 0
++ u32 dmastat;
++#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25
++#define DMA_TRANSACTION_DONE_INTERRUPT 24
++#define DMA_ABORT 1
++#define DMA_START 0
++ u32 _unused0 [2];
++ // offset 0x0190, 0x01b0, 0x01d0, 0x01f0,
++ u32 dmacount;
++#define VALID_BIT 31
++#define DMA_DIRECTION 30
++#define DMA_DONE_INTERRUPT_ENABLE 29
++#define END_OF_CHAIN 28
++#define DMA_BYTE_COUNT_MASK ((1<<24)-1)
++#define DMA_BYTE_COUNT 0
++ u32 dmaaddr;
++ u32 dmadesc;
++ u32 _unused1;
++} __attribute__ ((packed));
++
++/* dedicated endpoint registers, BAR0 + 0x0200 */
++
++struct net2280_dep_regs { /* [11.8] */
++ // offset 0x0200, 0x0210, 0x220, 0x230, 0x240
++ u32 dep_cfg;
++ // offset 0x0204, 0x0214, 0x224, 0x234, 0x244
++ u32 dep_rsp;
++ u32 _unused [2];
++} __attribute__ ((packed));
++
++/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
++ * like this, for ep0 then the configurable endpoints A..F
++ * ep0 reserved for control; E and F have only 64 bytes of fifo
++ */
++struct net2280_ep_regs { /* [11.9] */
++ // offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0
++ u32 ep_cfg;
++#define ENDPOINT_BYTE_COUNT 16
++#define ENDPOINT_ENABLE 10
++#define ENDPOINT_TYPE 8
++#define ENDPOINT_DIRECTION 7
++#define ENDPOINT_NUMBER 0
++ u32 ep_rsp;
++#define SET_NAK_OUT_PACKETS 15
++#define SET_EP_HIDE_STATUS_PHASE 14
++#define SET_EP_FORCE_CRC_ERROR 13
++#define SET_INTERRUPT_MODE 12
++#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11
++#define SET_NAK_OUT_PACKETS_MODE 10
++#define SET_ENDPOINT_TOGGLE 9
++#define SET_ENDPOINT_HALT 8
++#define CLEAR_NAK_OUT_PACKETS 7
++#define CLEAR_EP_HIDE_STATUS_PHASE 6
++#define CLEAR_EP_FORCE_CRC_ERROR 5
++#define CLEAR_INTERRUPT_MODE 4
++#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3
++#define CLEAR_NAK_OUT_PACKETS_MODE 2
++#define CLEAR_ENDPOINT_TOGGLE 1
++#define CLEAR_ENDPOINT_HALT 0
++ u32 ep_irqenb;
++#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6
++#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5
++#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
++#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
++#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1
++#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
++ u32 ep_stat;
++#define FIFO_VALID_COUNT 24
++#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22
++#define TIMEOUT 21
++#define USB_STALL_SENT 20
++#define USB_IN_NAK_SENT 19
++#define USB_IN_ACK_RCVD 18
++#define USB_OUT_PING_NAK_SENT 17
++#define USB_OUT_ACK_SENT 16
++#define FIFO_OVERFLOW 13
++#define FIFO_UNDERFLOW 12
++#define FIFO_FULL 11
++#define FIFO_EMPTY 10
++#define FIFO_FLUSH 9
++#define SHORT_PACKET_OUT_DONE_INTERRUPT 6
++#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5
++#define NAK_OUT_PACKETS 4
++#define DATA_PACKET_RECEIVED_INTERRUPT 3
++#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
++#define DATA_OUT_PING_TOKEN_INTERRUPT 1
++#define DATA_IN_TOKEN_INTERRUPT 0
++ // offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0
++ u32 ep_avail;
++ u32 ep_data;
++ u32 _unused0 [2];
++} __attribute__ ((packed));
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef __KERNEL__
++
++/* indexed registers [11.10] are accessed indirectly
++ * caller must own the device lock.
++ */
++
++static inline u32
++get_idx_reg (struct net2280_regs *regs, u32 index)
++{
++ writel (index, &regs->idxaddr);
++ /* NOTE: synchs device/cpu memory views */
++ return readl (&regs->idxdata);
++}
++
++static inline void
++set_idx_reg (struct net2280_regs *regs, u32 index, u32 value)
++{
++ writel (index, &regs->idxaddr);
++ writel (value, &regs->idxdata);
++ /* posted, may not be visible yet */
++}
++
++#endif /* __KERNEL__ */
++
++
++#define REG_DIAG 0x0
++#define RETRY_COUNTER 16
++#define FORCE_PCI_SERR 11
++#define FORCE_PCI_INTERRUPT 10
++#define FORCE_USB_INTERRUPT 9
++#define FORCE_CPU_INTERRUPT 8
++#define ILLEGAL_BYTE_ENABLES 5
++#define FAST_TIMES 4
++#define FORCE_RECEIVE_ERROR 2
++#define FORCE_TRANSMIT_CRC_ERROR 0
++#define REG_FRAME 0x02 /* from last sof */
++#define REG_CHIPREV 0x03 /* in bcd */
++#define REG_HS_NAK_RATE 0x0a /* NAK per N uframes */
++
++#define CHIPREV_1 0x0100
++#define CHIPREV_1A 0x0110
++
++#ifdef __KERNEL__
++
++/* ep a-f highspeed and fullspeed maxpacket, addresses
++ * computed from ep->num
++ */
++#define REG_EP_MAXPKT(dev,num) (((num) + 1) * 0x10 + \
++ (((dev)->gadget.speed == USB_SPEED_HIGH) ? 0 : 1))
++
++/*-------------------------------------------------------------------------*/
++
++/* [8.3] for scatter/gather i/o
++ * use struct net2280_dma_regs bitfields
++ */
++struct net2280_dma {
++ u32 dmacount;
++ u32 dmaaddr; /* the buffer */
++ u32 dmadesc; /* next dma descriptor */
++ u32 _reserved;
++} __attribute__ ((aligned (16)));
++
++/*-------------------------------------------------------------------------*/
++
++/* DRIVER DATA STRUCTURES and UTILITIES */
++
++struct net2280_ep {
++ struct usb_ep ep;
++ struct net2280_ep_regs *regs;
++ struct net2280_dma_regs *dma;
++ struct net2280_dma *dummy;
++ dma_addr_t td_dma; /* of dummy */
++ struct net2280 *dev;
++ unsigned long irqs;
++
++ /* analogous to a host-side qh */
++ struct list_head queue;
++ const struct usb_endpoint_descriptor *desc;
++ unsigned num : 8,
++ fifo_size : 12,
++ in_fifo_validate : 1,
++ out_overflow : 1,
++ stopped : 1,
++ is_in : 1,
++ is_iso : 1;
++};
++
++static inline void allow_status (struct net2280_ep *ep)
++{
++ /* ep0 only */
++ writel ( (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
++ | (1 << CLEAR_NAK_OUT_PACKETS)
++ | (1 << CLEAR_NAK_OUT_PACKETS_MODE)
++ , &ep->regs->ep_rsp);
++ ep->stopped = 1;
++}
++
++/* count (<= 4) bytes in the next fifo write will be valid */
++static inline void set_fifo_bytecount (struct net2280_ep *ep, unsigned count)
++{
++ writeb (count, 2 + (u8 *) &ep->regs->ep_cfg);
++}
++
++struct net2280_request {
++ struct usb_request req;
++ struct net2280_dma *td;
++ dma_addr_t td_dma;
++ struct list_head queue;
++ unsigned mapped : 1,
++ valid : 1;
++};
++
++struct net2280 {
++ /* each pci device provides one gadget, several endpoints */
++ struct usb_gadget gadget;
++ spinlock_t lock;
++ struct net2280_ep ep [7];
++ struct usb_gadget_driver *driver;
++ unsigned enabled : 1,
++ protocol_stall : 1,
++ softconnect : 1,
++ got_irq : 1,
++ region : 1;
++ u16 chiprev;
++
++ /* pci state used to access those endpoints */
++ struct pci_dev *pdev;
++ struct net2280_regs *regs;
++ struct net2280_usb_regs *usb;
++ struct net2280_pci_regs *pci;
++ struct net2280_dma_regs *dma;
++ struct net2280_dep_regs *dep;
++ struct net2280_ep_regs *epregs;
++
++ struct pci_pool *requests;
++ // statistics...
++};
++
++static inline void set_halt (struct net2280_ep *ep)
++{
++ /* ep0 and bulk/intr endpoints */
++ writel ( (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
++ /* set NAK_OUT for erratum 0114 */
++ | ((ep->dev->chiprev == CHIPREV_1) << SET_NAK_OUT_PACKETS)
++ | (1 << SET_ENDPOINT_HALT)
++ , &ep->regs->ep_rsp);
++}
++
++static inline void clear_halt (struct net2280_ep *ep)
++{
++ /* ep0 and bulk/intr endpoints */
++ writel ( (1 << CLEAR_ENDPOINT_HALT)
++ | (1 << CLEAR_ENDPOINT_TOGGLE)
++ /* unless the gadget driver left a short packet in the
++ * fifo, this reverses the erratum 0114 workaround.
++ */
++ | ((ep->dev->chiprev == CHIPREV_1) << CLEAR_NAK_OUT_PACKETS)
++ , &ep->regs->ep_rsp);
++}
++
++#ifdef USE_RDK_LEDS
++
++static inline void net2280_led_init (struct net2280 *dev)
++{
++ /* LED3 (green) is on during USB activity. note erratum 0113. */
++ writel ((1 << GPIO3_LED_SELECT)
++ | (1 << GPIO3_OUTPUT_ENABLE)
++ | (1 << GPIO2_OUTPUT_ENABLE)
++ | (1 << GPIO1_OUTPUT_ENABLE)
++ | (1 << GPIO0_OUTPUT_ENABLE)
++ , &dev->regs->gpioctl);
++}
++
++/* indicate speed with bi-color LED 0/1 */
++static inline
++void net2280_led_speed (struct net2280 *dev, enum usb_device_speed speed)
++{
++ u32 val = readl (&dev->regs->gpioctl);
++ switch (speed) {
++ case USB_SPEED_HIGH: /* green */
++ val &= ~(1 << GPIO0_DATA);
++ val |= (1 << GPIO1_DATA);
++ break;
++ case USB_SPEED_FULL: /* red */
++ val &= ~(1 << GPIO1_DATA);
++ val |= (1 << GPIO0_DATA);
++ break;
++ default: /* (off/black) */
++ val &= ~((1 << GPIO1_DATA) | (1 << GPIO0_DATA));
++ break;
++ }
++ writel (val, &dev->regs->gpioctl);
++}
++
++/* indicate power with LED 2 */
++static inline void net2280_led_active (struct net2280 *dev, int is_active)
++{
++ u32 val = readl (&dev->regs->gpioctl);
++
++ // FIXME this LED never seems to turn on.
++ if (is_active)
++ val |= GPIO2_DATA;
++ else
++ val &= ~GPIO2_DATA;
++ writel (val, &dev->regs->gpioctl);
++}
++static inline void net2280_led_shutdown (struct net2280 *dev)
++{
++ /* turn off all four GPIO*_DATA bits */
++ writel (readl (&dev->regs->gpioctl) & ~0x0f,
++ &dev->regs->gpioctl);
++}
++
++#else
++
++#define net2280_led_init(dev) do { } while (0)
++#define net2280_led_speed(dev, speed) do { } while (0)
++#define net2280_led_shutdown(dev) do { } while (0)
++
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++#define xprintk(dev,level,fmt,args...) \
++ printk(level "%s %s: " fmt , driver_name , \
++ dev->pdev->slot_name , ## args)
++
++#ifdef DEBUG
++#undef DEBUG
++#define DEBUG(dev,fmt,args...) \
++ xprintk(dev , KERN_DEBUG , fmt , ## args)
++#else
++#define DEBUG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* DEBUG */
++
++#ifdef VERBOSE
++#define VDEBUG DEBUG
++#else
++#define VDEBUG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* VERBOSE */
++
++#define ERROR(dev,fmt,args...) \
++ xprintk(dev , KERN_ERR , fmt , ## args)
++#define WARN(dev,fmt,args...) \
++ xprintk(dev , KERN_WARNING , fmt , ## args)
++#define INFO(dev,fmt,args...) \
++ xprintk(dev , KERN_INFO , fmt , ## args)
++
++/*-------------------------------------------------------------------------*/
++
++static inline void start_out_naking (struct net2280_ep *ep)
++{
++ /* NOTE: hardware races lurk here, and PING protocol issues */
++ writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
++ /* synch with device */
++ readl (&ep->regs->ep_rsp);
++}
++
++#ifdef DEBUG
++static inline void assert_out_naking (struct net2280_ep *ep, const char *where)
++{
++ u32 tmp = readl (&ep->regs->ep_stat);
++
++ if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
++ DEBUG (ep->dev, "%s %s %08x !NAK\n",
++ ep->ep.name, where, tmp);
++ writel ((1 << SET_NAK_OUT_PACKETS),
++ &ep->regs->ep_rsp);
++ }
++}
++#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep,__FUNCTION__)
++#else
++#define ASSERT_OUT_NAKING(ep) do {} while (0)
++#endif
++
++static inline void stop_out_naking (struct net2280_ep *ep)
++{
++ u32 tmp;
++
++ tmp = readl (&ep->regs->ep_stat);
++ if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
++ writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* 2.5 and 2.4.older portability changes ... */
++
++#ifndef container_of
++#define container_of list_entry
++#endif
++
++#ifndef likely
++#define likely(x) (x)
++#define unlikely(x) (x)
++#endif
++
++#ifndef BUG_ON
++#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
++#endif
++
++#ifndef WARN_ON
++#define WARN_ON(x) do { } while (0)
++#endif
++
++#ifndef IRQ_NONE
++typedef void irqreturn_t;
++#define IRQ_NONE
++#define IRQ_HANDLED
++#define IRQ_RETVAL(x)
++#endif
++
++#endif /* __KERNEL__ */
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/pxa2xx_udc.c kernel/drivers/usb/gadget/pxa2xx_udc.c
+--- /tmp/kernel/drivers/usb/gadget/pxa2xx_udc.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/pxa2xx_udc.c 2005-04-22 17:53:19.492530073 +0200
+@@ -0,0 +1,2486 @@
++/*
++ * linux/drivers/usb/gadget/pxa2xx_udc.c
++ * Intel PXA2xx and IXP4xx on-chip full speed USB device controllers
++ *
++ * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
++ * Copyright (C) 2003 Robert Schwebel, Pengutronix
++ * Copyright (C) 2003 Benedikt Spranger, Pengutronix
++ * Copyright (C) 2003 David Brownell
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#define DEBUG 1
++// #define VERBOSE DBG_VERBOSE
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/ioport.h>
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/proc_fs.h>
++#include <linux/mm.h>
++// #include <linux/device.h>
++
++#include <asm/byteorder.h>
++#include <asm/dma.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++#include <asm/unaligned.h>
++#include <asm/proc/cache.h>
++
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++
++/*
++ * This driver handles the USB Device Controller (UDC) in Intel's PXA 2xx
++ * series processors. The UDC for the IXP 4xx series is very similar.
++ * There are fifteen endpoints, in addition to ep0.
++ *
++ * Such controller drivers work with a gadget driver. The gadget driver
++ * returns descriptors, implements configuration and data protocols used
++ * by the host to interact with this device, and allocates endpoints to
++ * the different protocol interfaces. The controller driver virtualizes
++ * usb hardware so that the gadget drivers will be more portable.
++ *
++ * This UDC hardware wants to implement a bit too much USB protocol, so
++ * it constrains the sorts of USB configuration change events that work.
++ * The errata for these chips are misleading; some "fixed" bugs from
++ * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
++ */
++
++/* NOTE: the 2.6 driver is probably the most current version */
++#define DRIVER_VERSION "5-Jan-2004"
++#define DRIVER_DESC "PXA 2xx USB Device Controller driver"
++
++static const char driver_name [] = "pxa2xx_udc";
++
++static const char ep0name [] = "ep0";
++
++
++// #define USE_DMA
++// #define USE_OUT_DMA
++// #define DISABLE_TEST_MODE
++
++#ifdef CONFIG_PROC_FS
++#define UDC_PROC_FILE
++#endif
++
++#ifdef CONFIG_ARCH_IXP425
++#undef USE_DMA
++
++/* cpu-specific register addresses are compiled in to this code */
++#ifdef CONFIG_ARCH_PXA
++#error "Can't configure both IXP and PXA"
++#endif
++
++#endif
++
++#ifdef CONFIG_EMBEDDED
++/* few strings, and little code to use them */
++#undef DEBUG
++#undef UDC_PROC_FILE
++#endif
++
++
++#include "pxa2xx_udc.h"
++
++#ifdef USE_DMA
++static int use_dma = 1;
++MODULE_PARM (use_dma, "i");
++MODULE_PARM_DESC (use_dma, "true to use dma");
++
++static void dma_nodesc_handler (int dmach, void *_ep, struct pt_regs *r);
++static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);
++
++#ifdef USE_OUT_DMA
++#define DMASTR " (dma support)"
++#else
++#define DMASTR " (dma in)"
++#endif
++
++#else /* !USE_DMA */
++#define DMASTR " (pio only)"
++#undef USE_OUT_DMA
++#endif
++
++#ifdef CONFIG_USB_PXA2XX_SMALL
++#define SIZE_STR " (small)"
++#else
++#define SIZE_STR ""
++#endif
++
++#ifdef DISABLE_TEST_MODE
++/* (mode == 0) == no undocumented chip tweaks
++ * (mode & 1) == double buffer bulk IN
++ * (mode & 2) == double buffer bulk OUT
++ * ... so mode = 3 (or 7, 15, etc) does it for both
++ */
++static ushort fifo_mode = 0;
++MODULE_PARM (fifo_mode, "h");
++MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
++#endif
++
++/* ---------------------------------------------------------------------------
++ * endpoint related parts of the api to the usb controller hardware,
++ * used by gadget driver; and the inner talker-to-hardware core.
++ * ---------------------------------------------------------------------------
++ */
++
++static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
++static void nuke (struct pxa2xx_ep *, int status);
++
++static void pio_irq_enable(int bEndpointAddress)
++{
++ bEndpointAddress &= 0xf;
++ if (bEndpointAddress < 8)
++ UICR0 &= ~(1 << bEndpointAddress);
++ else {
++ bEndpointAddress -= 8;
++ UICR1 &= ~(1 << bEndpointAddress);
++ }
++}
++
++static void pio_irq_disable(int bEndpointAddress)
++{
++ bEndpointAddress &= 0xf;
++ if (bEndpointAddress < 8)
++ UICR0 |= 1 << bEndpointAddress;
++ else {
++ bEndpointAddress -= 8;
++ UICR1 |= 1 << bEndpointAddress;
++ }
++}
++
++/* The UDCCR reg contains mask and interrupt status bits,
++ * so using '|=' isn't safe as it may ack an interrupt.
++ */
++#define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
++
++static inline void udc_set_mask_UDCCR(int mask)
++{
++ UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
++}
++
++static inline void udc_clear_mask_UDCCR(int mask)
++{
++ UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
++}
++
++static inline void udc_ack_int_UDCCR(int mask)
++{
++ /* udccr contains the bits we dont want to change */
++ __u32 udccr = UDCCR & UDCCR_MASK_BITS;
++
++ UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
++}
++
++/*
++ * endpoint enable/disable
++ *
++ * we need to verify the descriptors used to enable endpoints. since pxa2xx
++ * endpoint configurations are fixed, and are pretty much always enabled,
++ * there's not a lot to manage here.
++ *
++ * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints,
++ * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
++ * for a single interface (with only the default altsetting) and for gadget
++ * drivers that don't halt endpoints (not reset by set_interface). that also
++ * means that if you use ISO, you must violate the USB spec rule that all
++ * iso endpoints must be in non-default altsettings.
++ */
++static int pxa2xx_ep_enable (struct usb_ep *_ep,
++ const struct usb_endpoint_descriptor *desc)
++{
++ struct pxa2xx_ep *ep;
++ struct pxa2xx_udc *dev;
++
++ ep = container_of (_ep, struct pxa2xx_ep, ep);
++ if (!_ep || !desc || ep->desc || _ep->name == ep0name
++ || desc->bDescriptorType != USB_DT_ENDPOINT
++ || ep->bEndpointAddress != desc->bEndpointAddress
++ || ep->fifo_size < le16_to_cpu
++ (desc->wMaxPacketSize)) {
++ DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
++ return -EINVAL;
++ }
++
++ /* xfer types must match, except that interrupt ~= bulk */
++ if (ep->bmAttributes != desc->bmAttributes
++ && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
++ && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
++ DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
++ return -EINVAL;
++ }
++
++ /* hardware _could_ do smaller, but driver doesn't */
++ if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
++ && le16_to_cpu (desc->wMaxPacketSize)
++ != BULK_FIFO_SIZE)
++ || !desc->wMaxPacketSize) {
++ DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
++ return -ERANGE;
++ }
++
++ dev = ep->dev;
++ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
++ DMSG("%s, bogus device state\n", __FUNCTION__);
++ return -ESHUTDOWN;
++ }
++
++ ep->desc = desc;
++ ep->dma = -1;
++ ep->stopped = 0;
++ ep->pio_irqs = ep->dma_irqs = 0;
++ ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
++
++ /* flush fifo (mostly for OUT buffers) */
++ pxa2xx_ep_fifo_flush (_ep);
++
++ /* ... reset halt state too, if we could ... */
++
++#ifdef USE_DMA
++ /* for (some) bulk and ISO endpoints, try to get a DMA channel and
++ * bind it to the endpoint. otherwise use PIO.
++ */
++ switch (ep->bmAttributes) {
++ case USB_ENDPOINT_XFER_ISOC:
++ if (le16_to_cpu(desc->wMaxPacketSize) % 32)
++ break;
++ // fall through
++ case USB_ENDPOINT_XFER_BULK:
++ if (!use_dma || !ep->reg_drcmr)
++ break;
++ /* no bulk-out dma yet (pointless w/o descriptors) */
++ if ((ep->bmAttributes == USB_ENDPOINT_XFER_BULK)
++ && (ep->bEndpointAddress & USB_DIR_IN) == 0) {
++ DMSG("%s dma-out NYI\n", _ep->name);
++ break;
++ }
++ ep->dma = pxa_request_dma ((char *)_ep->name,
++ (le16_to_cpu(desc->wMaxPacketSize) > 64)
++ ? DMA_PRIO_MEDIUM /* some iso */
++ : DMA_PRIO_LOW,
++ // FIXME or ep_out_dma .. ..
++ dma_nodesc_handler, ep);
++ if (ep->dma >= 0) {
++ *ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
++ DMSG("%s using dma%d\n", _ep->name, ep->dma);
++ }
++ }
++#endif
++
++ DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
++ return 0;
++}
++
++static int pxa2xx_ep_disable (struct usb_ep *_ep)
++{
++ struct pxa2xx_ep *ep;
++
++ ep = container_of (_ep, struct pxa2xx_ep, ep);
++ if (!_ep || !ep->desc) {
++ DMSG("%s, %s not enabled\n", __FUNCTION__,
++ _ep ? ep->ep.name : NULL);
++ return -EINVAL;
++ }
++ nuke (ep, -ESHUTDOWN);
++
++#ifdef USE_DMA
++ if (ep->dma >= 0) {
++ *ep->reg_drcmr = 0;
++ pxa_free_dma (ep->dma);
++ ep->dma = -1;
++ }
++#endif
++
++ /* flush fifo (mostly for IN buffers) */
++ pxa2xx_ep_fifo_flush (_ep);
++
++ ep->desc = 0;
++ ep->stopped = 1;
++
++ DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
++ return 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* for the pxa2xx, these can just wrap kmalloc/kfree. gadget drivers
++ * must still pass correctly initialized endpoints, since other controller
++ * drivers may care about how it's currently set up (dma issues etc).
++ */
++
++/*
++ * pxa2xx_ep_alloc_request - allocate a request data structure
++ */
++static struct usb_request *
++pxa2xx_ep_alloc_request (struct usb_ep *_ep, int gfp_flags)
++{
++ struct pxa2xx_request *req;
++
++ /* FIXME for bulk out-dma endpoints, preallocate a frame's worth of
++ * (aligned) dma descriptors at the end of the request
++ */
++
++ req = kmalloc (sizeof *req, gfp_flags);
++ if (!req)
++ return 0;
++
++ memset (req, 0, sizeof *req);
++ INIT_LIST_HEAD (&req->queue);
++ return &req->req;
++}
++
++
++/*
++ * pxa2xx_ep_free_request - deallocate a request data structure
++ */
++static void
++pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct pxa2xx_request *req;
++
++ req = container_of (_req, struct pxa2xx_request, req);
++ WARN_ON (!list_empty (&req->queue));
++ kfree(req);
++}
++
++
++/* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
++ * no device-affinity and the heap works perfectly well for i/o buffers.
++ */
++static void *
++pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
++ dma_addr_t *dma, int gfp_flags)
++{
++ char *retval;
++
++ retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
++ if (retval)
++ *dma = virt_to_bus (retval);
++ return retval;
++}
++
++static void
++pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
++ unsigned bytes)
++{
++ kfree (buf);
++}
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * done - retire a request; caller blocked irqs
++ */
++static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
++{
++ unsigned stopped = ep->stopped;
++
++ list_del_init(&req->queue);
++
++ if (likely (req->req.status == -EINPROGRESS))
++ req->req.status = status;
++ else
++ status = req->req.status;
++
++ if (status && status != -ESHUTDOWN)
++ DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
++ ep->ep.name, &req->req, status,
++ req->req.actual, req->req.length);
++
++ /* don't modify queue heads during completion callback */
++ ep->stopped = 1;
++ req->req.complete(&ep->ep, &req->req);
++ ep->stopped = stopped;
++}
++
++
++static inline void ep0_idle (struct pxa2xx_udc *dev)
++{
++ dev->ep0state = EP0_IDLE;
++ LED_EP0_OFF;
++}
++
++static int
++write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
++{
++ u8 *buf;
++ unsigned length, count;
++
++ buf = req->req.buf + req->req.actual;
++ prefetch(buf);
++
++ /* how big will this packet be? */
++ length = min(req->req.length - req->req.actual, max);
++ req->req.actual += length;
++
++ count = length;
++ while (likely(count--))
++ *uddr = *buf++;
++
++ return length;
++}
++
++/*
++ * write to an IN endpoint fifo, as many packets as possible.
++ * irqs will use this to write the rest later.
++ * caller guarantees at least one packet buffer is ready (or a zlp).
++ */
++static int
++write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
++{
++ unsigned max;
++
++ max = le16_to_cpu(ep->desc->wMaxPacketSize);
++ do {
++ unsigned count;
++ int is_last, is_short;
++
++ count = write_packet(ep->reg_uddr, req, max);
++
++ /* last packet is usually short (or a zlp) */
++ if (unlikely (count != max))
++ is_last = is_short = 1;
++ else {
++ if (likely(req->req.length != req->req.actual)
++ || req->req.zero)
++ is_last = 0;
++ else
++ is_last = 1;
++ /* interrupt/iso maxpacket may not fill the fifo */
++ is_short = unlikely (max < ep->fifo_size);
++ }
++
++ DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
++ ep->ep.name, count,
++ is_last ? "/L" : "", is_short ? "/S" : "",
++ req->req.length - req->req.actual, req);
++
++ /* let loose that packet. maybe try writing another one,
++ * double buffering might work. TSP, TPC, and TFS
++ * bit values are the same for all normal IN endpoints.
++ */
++ *ep->reg_udccs = UDCCS_BI_TPC;
++ if (is_short)
++ *ep->reg_udccs = UDCCS_BI_TSP;
++
++ /* requests complete when all IN data is in the FIFO */
++ if (is_last) {
++ done (ep, req, 0);
++ if (list_empty(&ep->queue) || unlikely(ep->dma >= 0))
++ pio_irq_disable (ep->bEndpointAddress);
++#ifdef USE_DMA
++ if (unlikely(ep->dma >= 0) && !list_empty(&ep->queue)) {
++DMSG("%s pio2dma\n", ep->ep.name);
++ req = list_entry(ep->queue.next,
++ struct pxa2xx_request, queue);
++ kick_dma(ep,req);
++ return 0;
++ }
++#endif
++ return 1;
++ }
++
++ // TODO experiment: how robust can fifo mode tweaking be?
++ // the double buffering could speed up I/O a bunch.
++
++ } while (*ep->reg_udccs & UDCCS_BI_TFS);
++ return 0;
++}
++
++/* caller asserts req->pending (ep0 irq status nyet cleared); starts
++ * ep0 data stage. these chips want very simple state transitions.
++ */
++static inline
++void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
++{
++ UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
++ USIR0 = USIR0_IR0;
++ dev->req_pending = 0;
++ DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
++ __FUNCTION__, tag, UDCCS0, flags);
++}
++
++static int
++write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
++{
++ unsigned count;
++ int is_short;
++
++ count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
++ ep->dev->stats.write.bytes += count;
++
++ /* last packet "must be" short (or a zlp) */
++ is_short = (count != EP0_FIFO_SIZE);
++
++ DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
++ req->req.length - req->req.actual, req);
++
++ if (unlikely (is_short)) {
++ if (ep->dev->req_pending)
++ ep0start(ep->dev, UDCCS0_IPR, "short IN");
++ else
++ UDCCS0 = UDCCS0_IPR;
++
++ count = req->req.length;
++ done (ep, req, 0);
++ ep0_idle(ep->dev);
++#if 1
++ /* This seems to get rid of lost status irqs in some cases:
++ * host responds quickly, or next request involves config
++ * change automagic, or should have been hidden, or ...
++ *
++ * FIXME get rid of all udelays possible...
++ */
++ if (count >= EP0_FIFO_SIZE) {
++ count = 100;
++ do {
++ if ((UDCCS0 & UDCCS0_OPR) != 0) {
++ /* clear OPR, generate ack */
++ UDCCS0 = UDCCS0_OPR;
++ break;
++ }
++ count--;
++ udelay(1);
++ } while (count);
++ }
++#endif
++ } else if (ep->dev->req_pending)
++ ep0start(ep->dev, 0, "IN");
++ return is_short;
++}
++
++
++/*
++ * read_fifo - unload packet(s) from the fifo we use for usb OUT
++ * transfers and put them into the request. caller should have made
++ * sure there's at least one packet ready.
++ *
++ * returns true if the request completed because of short packet or the
++ * request buffer having filled (and maybe overran till end-of-packet).
++ */
++static int
++read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
++{
++ for (;;) {
++ u32 udccs;
++ u8 *buf;
++ unsigned bufferspace, count, is_short;
++
++ /* make sure there's a packet in the FIFO.
++ * UDCCS_{BO,IO}_RPC are all the same bit value.
++ * UDCCS_{BO,IO}_RNE are all the same bit value.
++ */
++ udccs = *ep->reg_udccs;
++ if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
++ break;
++ buf = req->req.buf + req->req.actual;
++ prefetchw(buf);
++ bufferspace = req->req.length - req->req.actual;
++
++ /* read all bytes from this packet */
++ if (likely (udccs & UDCCS_BO_RNE)) {
++ count = 1 + (0x0ff & *ep->reg_ubcr);
++ req->req.actual += min (count, bufferspace);
++ } else /* zlp */
++ count = 0;
++ is_short = (count < ep->ep.maxpacket);
++ DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
++ ep->ep.name, udccs, count,
++ is_short ? "/S" : "",
++ req, req->req.actual, req->req.length);
++ while (likely (count-- != 0)) {
++ u8 byte = (u8) *ep->reg_uddr;
++
++ if (unlikely (bufferspace == 0)) {
++ /* this happens when the driver's buffer
++ * is smaller than what the host sent.
++ * discard the extra data.
++ */
++ if (req->req.status != -EOVERFLOW)
++ DMSG("%s overflow %d\n",
++ ep->ep.name, count);
++ req->req.status = -EOVERFLOW;
++ } else {
++ *buf++ = byte;
++ bufferspace--;
++ }
++ }
++ *ep->reg_udccs = UDCCS_BO_RPC;
++ /* RPC/RSP/RNE could now reflect the other packet buffer */
++
++ /* iso is one request per packet */
++ if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
++ if (udccs & UDCCS_IO_ROF)
++ req->req.status = -EHOSTUNREACH;
++ /* more like "is_done" */
++ is_short = 1;
++ }
++
++ /* completion */
++ if (is_short || req->req.actual == req->req.length) {
++ done (ep, req, 0);
++ if (list_empty(&ep->queue))
++ pio_irq_disable (ep->bEndpointAddress);
++ return 1;
++ }
++
++ /* finished that packet. the next one may be waiting... */
++ }
++ return 0;
++}
++
++/*
++ * special ep0 version of the above. no UBCR0 or double buffering; status
++ * handshaking is magic. most device protocols don't need control-OUT.
++ * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
++ * protocols do use them.
++ */
++static int
++read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
++{
++ u8 *buf, byte;
++ unsigned bufferspace;
++
++ buf = req->req.buf + req->req.actual;
++ bufferspace = req->req.length - req->req.actual;
++
++ while (UDCCS0 & UDCCS0_RNE) {
++ byte = (u8) UDDR0;
++
++ if (unlikely (bufferspace == 0)) {
++ /* this happens when the driver's buffer
++ * is smaller than what the host sent.
++ * discard the extra data.
++ */
++ if (req->req.status != -EOVERFLOW)
++ DMSG("%s overflow\n", ep->ep.name);
++ req->req.status = -EOVERFLOW;
++ } else {
++ *buf++ = byte;
++ req->req.actual++;
++ bufferspace--;
++ }
++ }
++
++ UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;
++
++ /* completion */
++ if (req->req.actual >= req->req.length)
++ return 1;
++
++ /* finished that packet. the next one may be waiting... */
++ return 0;
++}
++
++#ifdef USE_DMA
++
++static inline void
++start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
++{
++ u32 dcmd = req->req.length;
++ u32 buf = virt_to_bus (req->req.buf);
++ u32 fifo = io_v2p ((u32)ep->reg_uddr);
++
++ /* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
++ DCSR(ep->dma) = DCSR_NODESC;
++ dcmd |= DCMD_BURST32 | DCMD_ENDIRQEN | DCMD_WIDTH1;
++ if (is_in) {
++ DSADR(ep->dma) = buf;
++ DTADR(ep->dma) = fifo;
++ dcmd |= DCMD_FLOWTRG | DCMD_INCSRCADDR;
++ } else {
++ DSADR(ep->dma) = fifo;
++ DTADR(ep->dma) = buf;
++ dcmd |= DCMD_FLOWSRC | DCMD_INCTRGADDR;
++ }
++ DCMD(ep->dma) = dcmd;
++ DCSR(ep->dma) = DCSR_RUN | DCSR_STOPIRQEN | DCSR_NODESC;
++ /* and later the dma handler gets called */
++}
++
++static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
++{
++ if (ep->bEndpointAddress & USB_DIR_IN) {
++ /* docs imply we can't preload with pio */
++ if ((((u32)req->req.buf) & 0x0f) != 0) {
++// VERBOSE
++ DMSG("%s bad DMA align %p\n",
++ ep->ep.name, req->req.buf);
++pio_in:
++// FIXME PIO fallback doesn't work right yet (recovery?)
++DMSG("%s dma2pio\n", ep->ep.name);
++ pio_irq_enable(ep->bEndpointAddress);
++ if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
++ (void) write_fifo(ep, req);
++ return;
++ }
++ /* dmacount 0 means end-of-transfer */
++ if (unlikely((req->req.length - req->req.actual) == 0)) {
++// VERBOSE
++ DMSG("%s zlp dma write...\n", ep->ep.name);
++ goto pio_in;
++ }
++ start_dma_nodesc(ep, req, USB_DIR_IN);
++ } else {
++ // if ISO, use no-descriptor DMA
++ BUG();
++ }
++}
++
++static void cancel_dma(struct pxa2xx_ep *ep)
++{
++ struct pxa2xx_request *req;
++ u32 tmp;
++
++ if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
++ return;
++
++ DCSR(ep->dma) = 0;
++ while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
++ cpu_relax();
++
++ req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
++ tmp = DCMD(ep->dma) & DCMD_LENGTH;
++ req->req.actual = req->req.length - (tmp & DCMD_LENGTH);
++
++ /* the last tx packet may be incomplete, so flush the fifo.
++ * FIXME correct req.actual if we can
++ */
++ if (ep->bEndpointAddress & USB_DIR_IN)
++ *ep->reg_udccs = UDCCS_BI_FTF;
++}
++
++static void dma_nodesc_handler(int dmach, void *_ep, struct pt_regs *r)
++{
++ struct pxa2xx_ep *ep = _ep;
++ struct pxa2xx_request *req;
++ u32 tmp;
++
++ req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
++
++ ep->dev->stats.irqs++;
++ HEX_DISPLAY(ep->dev->stats.irqs);
++
++ /* ack/clear */
++ tmp = DCSR(ep->dma);
++ DCSR(ep->dma) = tmp;
++ if ((tmp & DCSR_STOPSTATE) == 0
++ || (DDADR(ep->dma) & DDADR_STOP) != 0) {
++ DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
++ ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
++ return;
++ }
++ DCSR(ep->dma) = 0; /* clear DCSR_STOPSTATE */
++
++ /* wrap up the transfer, and collect status */
++ if (unlikely(tmp & DCSR_BUSERR))
++ req->req.status = -EIO;
++ tmp = DCMD(ep->dma);
++ req->req.actual = req->req.length - (tmp & DCMD_LENGTH);
++ tmp = 1; /* normally this is the last packet */
++
++ if (ep->bEndpointAddress & USB_DIR_IN) {
++ /* maybe validate final short packet */
++ if ((ep->bmAttributes == USB_ENDPOINT_XFER_BULK
++ && req->req.actual % BULK_FIFO_SIZE)
++ || (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
++ && req->req.actual % ISO_FIFO_SIZE))
++ *ep->reg_udccs = UDCCS_BI_TSP /*|UDCCS_BI_TPC*/;
++
++ /* or force a zlp, with pio ... */
++ else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
++ && req->req.zero) {
++ tmp = 0;
++ }
++ // if iso, maybe report underrun (TUR)
++ } else {
++ BUG();
++ }
++
++ if (likely(tmp != 0))
++ done(ep, req, 0);
++
++ /* maybe re-activate after completion */
++ if (ep->stopped || list_empty(&ep->queue))
++ return;
++ req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
++ kick_dma(ep, req);
++}
++
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++static int
++pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
++{
++ struct pxa2xx_request *req;
++ struct pxa2xx_ep *ep;
++ struct pxa2xx_udc *dev;
++ unsigned long flags;
++
++ req = container_of(_req, struct pxa2xx_request, req);
++ if (unlikely (!_req || !_req->complete || !_req->buf
++ || !list_empty(&req->queue))) {
++ DMSG("%s, bad params\n", __FUNCTION__);
++ return -EINVAL;
++ }
++
++ ep = container_of(_ep, struct pxa2xx_ep, ep);
++ if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
++ DMSG("%s, bad ep\n", __FUNCTION__);
++ return -EINVAL;
++ }
++
++ dev = ep->dev;
++ if (unlikely (!dev->driver
++ || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
++ DMSG("%s, bogus device state\n", __FUNCTION__);
++ return -ESHUTDOWN;
++ }
++
++ /* iso is always one packet per request, that's the only way
++ * we can report per-packet status. that also helps with dma.
++ */
++ if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
++ && req->req.length > le16_to_cpu
++ (ep->desc->wMaxPacketSize)))
++ return -EMSGSIZE;
++
++#ifdef USE_DMA
++ if (ep->dma >= 0) {
++ unsigned long start = (unsigned long) _req->buf;
++
++ clean_dcache_range(start, start + _req->length);
++ /* or for USB_DIR_OUT, invalidate_dcache_range (...) */
++ }
++#endif
++
++ DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
++ _ep->name, _req, _req->length, _req->buf);
++
++ local_irq_save(flags);
++
++ _req->status = -EINPROGRESS;
++ _req->actual = 0;
++
++ /* kickstart this i/o queue? */
++ if (list_empty(&ep->queue) && !ep->stopped) {
++ if (ep->desc == 0 /* ep0 */) {
++ unsigned length = _req->length;
++
++ switch (dev->ep0state) {
++ case EP0_IN_DATA_PHASE:
++ dev->stats.write.ops++;
++ if (write_ep0_fifo(ep, req))
++ req = 0;
++ break;
++
++ case EP0_OUT_DATA_PHASE:
++ dev->stats.read.ops++;
++ /* messy ... */
++ if (dev->req_config) {
++ DBG(DBG_VERBOSE, "ep0 config ack%s\n",
++ dev->has_cfr ? "" : " raced");
++ if (dev->has_cfr)
++ UDCCFR = UDCCFR_AREN|UDCCFR_ACM;
++ done(ep, req, 0);
++ dev->ep0state = EP0_END_XFER;
++ local_irq_restore (flags);
++ return 0;
++ }
++ if (dev->req_pending)
++ ep0start(dev, UDCCS0_IPR, "OUT");
++ if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
++ && read_ep0_fifo(ep, req))) {
++ ep0_idle(dev);
++ done(ep, req, 0);
++ req = 0;
++ }
++ break;
++
++ default:
++ DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
++ local_irq_restore (flags);
++ return -EL2HLT;
++ }
++#ifdef USE_DMA
++ /* either start dma or prime pio pump */
++ } else if (ep->dma >= 0) {
++ kick_dma(ep, req);
++#endif
++ /* can the FIFO can satisfy the request immediately? */
++ } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0
++ && (*ep->reg_udccs & UDCCS_BI_TFS) != 0
++ && write_fifo(ep, req)) {
++ req = 0;
++ } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
++ && read_fifo(ep, req)) {
++ req = 0;
++ }
++
++ if (likely (req && ep->desc) && ep->dma < 0)
++ pio_irq_enable(ep->bEndpointAddress);
++ }
++
++ /* pio or dma irq handler advances the queue. */
++ if (likely (req != 0))
++ list_add_tail(&req->queue, &ep->queue);
++ local_irq_restore(flags);
++
++ return 0;
++}
++
++
++/*
++ * nuke - dequeue ALL requests
++ */
++static void nuke(struct pxa2xx_ep *ep, int status)
++{
++ struct pxa2xx_request *req;
++
++ /* called with irqs blocked */
++#ifdef USE_DMA
++ if (ep->dma >= 0 && !ep->stopped)
++ cancel_dma(ep);
++#endif
++ while (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next,
++ struct pxa2xx_request,
++ queue);
++ done(ep, req, status);
++ }
++ if (ep->desc)
++ pio_irq_disable (ep->bEndpointAddress);
++}
++
++
++/* dequeue JUST ONE request */
++static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct pxa2xx_ep *ep;
++ struct pxa2xx_request *req;
++ unsigned long flags;
++
++ ep = container_of(_ep, struct pxa2xx_ep, ep);
++ if (!_ep || ep->ep.name == ep0name)
++ return -EINVAL;
++
++ local_irq_save(flags);
++
++ /* make sure it's actually queued on this endpoint */
++ list_for_each_entry (req, &ep->queue, queue) {
++ if (&req->req == _req)
++ break;
++ }
++ if (&req->req != _req) {
++ local_irq_restore(flags);
++ return -EINVAL;
++ }
++
++#ifdef USE_DMA
++ if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
++ cancel_dma(ep);
++ done(ep, req, -ECONNRESET);
++ /* restart i/o */
++ if (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next,
++ struct pxa2xx_request, queue);
++ kick_dma(ep, req);
++ }
++ } else
++#endif
++ done(ep, req, -ECONNRESET);
++
++ local_irq_restore(flags);
++ return 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
++{
++ struct pxa2xx_ep *ep;
++ unsigned long flags;
++
++ ep = container_of(_ep, struct pxa2xx_ep, ep);
++ if (unlikely (!_ep
++ || (!ep->desc && ep->ep.name != ep0name))
++ || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
++ DMSG("%s, bad ep\n", __FUNCTION__);
++ return -EINVAL;
++ }
++ if (value == 0) {
++ /* this path (reset toggle+halt) is needed to implement
++ * SET_INTERFACE on normal hardware. but it can't be
++ * done from software on the PXA UDC, and the hardware
++ * forgets to do it as part of SET_INTERFACE automagic.
++ */
++ DMSG("only host can clear %s halt\n", _ep->name);
++ return -EROFS;
++ }
++
++ local_irq_save(flags);
++
++ if ((ep->bEndpointAddress & USB_DIR_IN) != 0
++ && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
++ || !list_empty(&ep->queue))) {
++ local_irq_restore(flags);
++ return -EAGAIN;
++ }
++
++ /* FST bit is the same for control, bulk in, bulk out, interrupt in */
++ *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;
++
++ /* ep0 needs special care */
++ if (!ep->desc) {
++ start_watchdog(ep->dev);
++ ep->dev->req_pending = 0;
++ ep->dev->ep0state = EP0_STALL;
++ LED_EP0_OFF;
++
++ /* and bulk/intr endpoints like dropping stalls too */
++ } else {
++ unsigned i;
++ for (i = 0; i < 1000; i += 20) {
++ if (*ep->reg_udccs & UDCCS_BI_SST)
++ break;
++ udelay(20);
++ }
++ }
++ local_irq_restore(flags);
++
++ DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
++ return 0;
++}
++
++static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
++{
++ struct pxa2xx_ep *ep;
++
++ ep = container_of(_ep, struct pxa2xx_ep, ep);
++ if (!_ep) {
++ DMSG("%s, bad ep\n", __FUNCTION__);
++ return -ENODEV;
++ }
++ /* pxa can't report unclaimed bytes from IN fifos */
++ if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
++ return -EOPNOTSUPP;
++ if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
++ || (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
++ return 0;
++ else
++ return (*ep->reg_ubcr & 0xfff) + 1;
++}
++
++static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
++{
++ struct pxa2xx_ep *ep;
++
++ ep = container_of(_ep, struct pxa2xx_ep, ep);
++ if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
++ DMSG("%s, bad ep\n", __FUNCTION__);
++ return;
++ }
++
++ /* toggle and halt bits stay unchanged */
++
++ /* for OUT, just read and discard the FIFO contents. */
++ if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
++ while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
++ (void) *ep->reg_uddr;
++ return;
++ }
++
++ /* most IN status is the same, but ISO can't stall */
++ *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
++ | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
++ ? 0 : UDCCS_BI_SST;
++}
++
++
++static struct usb_ep_ops pxa2xx_ep_ops = {
++ .enable = pxa2xx_ep_enable,
++ .disable = pxa2xx_ep_disable,
++
++ .alloc_request = pxa2xx_ep_alloc_request,
++ .free_request = pxa2xx_ep_free_request,
++
++ .alloc_buffer = pxa2xx_ep_alloc_buffer,
++ .free_buffer = pxa2xx_ep_free_buffer,
++
++ .queue = pxa2xx_ep_queue,
++ .dequeue = pxa2xx_ep_dequeue,
++
++ .set_halt = pxa2xx_ep_set_halt,
++ .fifo_status = pxa2xx_ep_fifo_status,
++ .fifo_flush = pxa2xx_ep_fifo_flush,
++};
++
++
++/* ---------------------------------------------------------------------------
++ * device-scoped parts of the api to the usb controller hardware
++ * ---------------------------------------------------------------------------
++ */
++
++static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
++{
++ return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
++}
++
++static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
++{
++ /* host may not have enabled remote wakeup */
++ if ((UDCCS0 & UDCCS0_DRWF) == 0)
++ return -EHOSTUNREACH;
++ udc_set_mask_UDCCR(UDCCR_RSM);
++ return 0;
++}
++
++static const struct usb_gadget_ops pxa2xx_udc_ops = {
++ .get_frame = pxa2xx_udc_get_frame,
++ .wakeup = pxa2xx_udc_wakeup,
++ // current versions must always be self-powered
++};
++
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef UDC_PROC_FILE
++
++static const char proc_node_name [] = "driver/udc";
++
++static int
++udc_proc_read(char *page, char **start, off_t off, int count,
++ int *eof, void *_dev)
++{
++ char *buf = page;
++ struct pxa2xx_udc *dev = _dev;
++ char *next = buf;
++ unsigned size = count;
++ unsigned long flags;
++ int i, t;
++ u32 tmp;
++
++ if (off != 0)
++ return 0;
++
++ local_irq_save(flags);
++
++ /* basic device status */
++ t = snprintf(next, size, DRIVER_DESC "\n"
++ "%s version: %s\nGadget driver: %s\nHost %s\n\n",
++ driver_name, DRIVER_VERSION SIZE_STR DMASTR,
++ dev->driver ? dev->driver->driver.name : "(none)",
++ is_usb_connected() ? "full speed" : "disconnected");
++ size -= t;
++ next += t;
++
++ /* registers for device and ep0 */
++ t = snprintf(next, size,
++ "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
++ UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
++ size -= t;
++ next += t;
++
++ tmp = UDCCR;
++ t = snprintf(next, size,
++ "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
++ (tmp & UDCCR_REM) ? " rem" : "",
++ (tmp & UDCCR_RSTIR) ? " rstir" : "",
++ (tmp & UDCCR_SRM) ? " srm" : "",
++ (tmp & UDCCR_SUSIR) ? " susir" : "",
++ (tmp & UDCCR_RESIR) ? " resir" : "",
++ (tmp & UDCCR_RSM) ? " rsm" : "",
++ (tmp & UDCCR_UDA) ? " uda" : "",
++ (tmp & UDCCR_UDE) ? " ude" : "");
++ size -= t;
++ next += t;
++
++ tmp = UDCCS0;
++ t = snprintf(next, size,
++ "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
++ (tmp & UDCCS0_SA) ? " sa" : "",
++ (tmp & UDCCS0_RNE) ? " rne" : "",
++ (tmp & UDCCS0_FST) ? " fst" : "",
++ (tmp & UDCCS0_SST) ? " sst" : "",
++ (tmp & UDCCS0_DRWF) ? " dwrf" : "",
++ (tmp & UDCCS0_FTF) ? " ftf" : "",
++ (tmp & UDCCS0_IPR) ? " ipr" : "",
++ (tmp & UDCCS0_OPR) ? " opr" : "");
++ size -= t;
++ next += t;
++
++ if (dev->has_cfr) {
++ tmp = UDCCFR;
++ t = snprintf(next, size,
++ "udccfr %02X =%s%s\n", tmp,
++ (tmp & UDCCFR_AREN) ? " aren" : "",
++ (tmp & UDCCFR_ACM) ? " acm" : "");
++ size -= t;
++ next += t;
++ }
++
++ if (!is_usb_connected() || !dev->driver)
++ goto done;
++
++ t = snprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
++ dev->stats.write.bytes, dev->stats.write.ops,
++ dev->stats.read.bytes, dev->stats.read.ops,
++ dev->stats.irqs);
++ size -= t;
++ next += t;
++
++ /* dump endpoint queues */
++ for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
++ struct pxa2xx_ep *ep = &dev->ep [i];
++ struct pxa2xx_request *req;
++ int t;
++
++ if (i != 0) {
++ const struct usb_endpoint_descriptor *d;
++
++ d = ep->desc;
++ if (!d)
++ continue;
++ tmp = *dev->ep [i].reg_udccs;
++ t = snprintf(next, size,
++ "%s max %d %s udccs %02x irqs %lu/%lu\n",
++ ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
++ (ep->dma >= 0) ? "dma" : "pio", tmp,
++ ep->pio_irqs, ep->dma_irqs);
++ /* TODO translate all five groups of udccs bits! */
++
++ } else /* ep0 should only have one transfer queued */
++ t = snprintf(next, size, "ep0 max 16 pio irqs %lu\n",
++ ep->pio_irqs);
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++
++ if (list_empty(&ep->queue)) {
++ t = snprintf(next, size, "\t(nothing queued)\n");
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++ continue;
++ }
++ list_for_each_entry(req, &ep->queue, queue) {
++#ifdef USE_DMA
++ if (ep->dma >= 0 && req->queue.prev == &ep->queue)
++ t = snprintf(next, size,
++ "\treq %p len %d/%d "
++ "buf %p (dma%d dcmd %08x)\n",
++ &req->req, req->req.actual,
++ req->req.length, req->req.buf,
++ ep->dma, DCMD(ep->dma)
++ // low 13 bits == bytes-to-go
++ );
++ else
++#endif
++ t = snprintf(next, size,
++ "\treq %p len %d/%d buf %p\n",
++ &req->req, req->req.actual,
++ req->req.length, req->req.buf);
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++ }
++ }
++
++done:
++ local_irq_restore(flags);
++ *eof = 1;
++ return count - size;
++}
++
++#define create_proc_files() \
++ create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
++#define remove_proc_files() \
++ remove_proc_entry(proc_node_name, NULL)
++
++#else /* !UDC_PROC_FILE */
++#define create_proc_files() do {} while (0)
++#define remove_proc_files() do {} while (0)
++
++#endif /* UDC_PROC_FILE */
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * udc_disable - disable USB device controller
++ */
++static void udc_disable(struct pxa2xx_udc *dev)
++{
++ /* block all irqs */
++ udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
++ UICR0 = UICR1 = 0xff;
++ UFNRH = UFNRH_SIM;
++
++ /* if hardware supports it, disconnect from usb */
++ make_usb_disappear();
++
++ udc_clear_mask_UDCCR(UDCCR_UDE);
++
++#ifdef CONFIG_ARCH_PXA
++ /* Disable clock for USB device */
++ CKEN &= ~CKEN11_USB;
++#endif
++
++ ep0_idle (dev);
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++ LED_CONNECTED_OFF;
++}
++
++
++/*
++ * udc_reinit - initialize software state
++ */
++static void udc_reinit(struct pxa2xx_udc *dev)
++{
++ u32 i;
++
++ /* device/ep0 records init */
++ INIT_LIST_HEAD (&dev->gadget.ep_list);
++ INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
++ dev->ep0state = EP0_IDLE;
++
++ /* basic endpoint records init */
++ for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
++ struct pxa2xx_ep *ep = &dev->ep[i];
++
++ if (i != 0)
++ list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
++
++ ep->desc = 0;
++ ep->stopped = 0;
++ INIT_LIST_HEAD (&ep->queue);
++ ep->pio_irqs = ep->dma_irqs = 0;
++ }
++
++ /* the rest was statically initialized, and is read-only */
++}
++
++/* until it's enabled, this UDC should be completely invisible
++ * to any USB host.
++ */
++static void udc_enable (struct pxa2xx_udc *dev)
++{
++ udc_clear_mask_UDCCR(UDCCR_UDE);
++
++#ifdef CONFIG_ARCH_PXA
++ /* Enable clock for USB device */
++ CKEN |= CKEN11_USB;
++#endif
++
++ /* try to clear these bits before we enable the udc */
++ udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
++
++ ep0_idle(dev);
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++ dev->stats.irqs = 0;
++
++ /*
++ * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
++ * - enable UDC
++ * - if RESET is already in progress, ack interrupt
++ * - unmask reset interrupt
++ */
++ udc_set_mask_UDCCR(UDCCR_UDE);
++ if (!(UDCCR & UDCCR_UDA))
++ udc_ack_int_UDCCR(UDCCR_RSTIR);
++
++ if (dev->has_cfr /* UDC_RES2 is defined */) {
++ /* pxa255 (a0+) can avoid a set_config race that could
++ * prevent gadget drivers from configuring correctly
++ */
++ UDCCFR = UDCCFR_ACM;
++ } else {
++ /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
++ * which could result in missing packets and interrupts.
++ * supposedly one bit per endpoint, controlling whether it
++ * double buffers or not; ACM/AREN bits fit into the holes.
++ * zero bits (like USIR0_IRx) disable double buffering.
++ */
++ UDC_RES1 = 0x00;
++ UDC_RES2 = 0x00;
++ }
++
++#ifdef DISABLE_TEST_MODE
++ /* "test mode" seems to have become the default in later chip
++ * revs, preventing double buffering (and invalidating docs).
++ * this EXPERIMENT enables it for bulk endpoints by tweaking
++ * undefined/reserved register bits (that other drivers clear).
++ * Belcarra code comments noted this usage.
++ */
++ if (fifo_mode & 1) { /* IN endpoints */
++ UDC_RES1 |= USIR0_IR1|USIR0_IR6;
++ UDC_RES2 |= USIR1_IR11;
++ }
++ if (fifo_mode & 2) { /* OUT endpoints */
++ UDC_RES1 |= USIR0_IR2|USIR0_IR7;
++ UDC_RES2 |= USIR1_IR12;
++ }
++#endif
++
++ /* caller must be able to sleep in order to cope
++ * with startup transients.
++ */
++ schedule_timeout(HZ/10);
++
++ /* enable suspend/resume and reset irqs */
++ udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);
++
++ /* enable ep0 irqs */
++ UICR0 &= ~UICR0_IM0;
++
++ /* if hardware supports it, connect to usb and wait for host */
++ let_usb_appear();
++}
++
++
++/* when a driver is successfully registered, it will receive
++ * control requests including set_configuration(), which enables
++ * non-control requests. then usb traffic follows until a
++ * disconnect is reported. then a host may connect again, or
++ * the driver might get unbound.
++ */
++int usb_gadget_register_driver(struct usb_gadget_driver *driver)
++{
++ struct pxa2xx_udc *dev = the_controller;
++ int retval;
++
++ if (!driver
++ || driver->speed != USB_SPEED_FULL
++ || !driver->bind
++ || !driver->unbind
++ || !driver->disconnect
++ || !driver->setup)
++ return -EINVAL;
++ if (!dev)
++ return -ENODEV;
++ if (dev->driver)
++ return -EBUSY;
++
++ /* first hook up the driver ... */
++ dev->driver = driver;
++
++ retval = driver->bind(&dev->gadget);
++ if (retval) {
++ DMSG("bind to driver %s --> error %d\n",
++ driver->driver.name, retval);
++ dev->driver = 0;
++ return retval;
++ }
++
++ /* ... then enable host detection and ep0; and we're ready
++ * for set_configuration as well as eventual disconnect.
++ * NOTE: this shouldn't power up until later.
++ */
++ DMSG("registered gadget driver '%s'\n", driver->driver.name);
++ udc_enable(dev);
++ dump_state(dev);
++ return 0;
++}
++EXPORT_SYMBOL(usb_gadget_register_driver);
++
++static void
++stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
++{
++ int i;
++
++ /* don't disconnect drivers more than once */
++ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
++ driver = 0;
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++
++ /* prevent new request submissions, kill any outstanding requests */
++ for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
++ struct pxa2xx_ep *ep = &dev->ep[i];
++
++ ep->stopped = 1;
++ nuke(ep, -ESHUTDOWN);
++ }
++ del_timer_sync(&dev->timer);
++
++ /* report disconnect; the driver is already quiesced */
++ LED_CONNECTED_OFF;
++ if (driver)
++ driver->disconnect(&dev->gadget);
++
++ /* re-init driver-visible data structures */
++ udc_reinit(dev);
++}
++
++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
++{
++ struct pxa2xx_udc *dev = the_controller;
++
++ if (!dev)
++ return -ENODEV;
++ if (!driver || driver != dev->driver)
++ return -EINVAL;
++
++ local_irq_disable();
++ udc_disable(dev);
++ stop_activity(dev, driver);
++ local_irq_enable();
++
++ driver->unbind(&dev->gadget);
++ dev->driver = 0;
++
++ DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
++ dump_state(dev);
++ return 0;
++}
++EXPORT_SYMBOL(usb_gadget_unregister_driver);
++
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef CONFIG_ARCH_LUBBOCK
++#ifdef LUBBOCK_USB_DISC_IRQ
++
++/* Lubbock can report connect or disconnect irqs. Likely more hardware
++ * could support it as a timer callback.
++ *
++ * FIXME for better power management, keep the hardware powered down
++ * until a host is powering the link. means scheduling work later
++ * in some task that can udc_enable().
++ */
++
++#define enable_disconnect_irq() \
++ if (machine_is_lubbock()) { enable_irq(LUBBOCK_USB_DISC_IRQ); }
++#define disable_disconnect_irq() \
++ if (machine_is_lubbock()) { disable_irq(LUBBOCK_USB_DISC_IRQ); }
++
++static irqreturn_t
++usb_connection_irq(int irq, void *_dev, struct pt_regs *r)
++{
++ struct pxa2xx_udc *dev = _dev;
++
++ dev->stats.irqs++;
++ HEX_DISPLAY(dev->stats.irqs);
++
++ if (!is_usb_connected()) {
++ LED_CONNECTED_OFF;
++ disable_disconnect_irq();
++ /* report disconnect just once */
++ if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
++ DMSG("disconnect %s\n",
++ dev->driver ? dev->driver->driver.name : 0);
++ stop_activity(dev, dev->driver);
++
++ // udc_disable (dev);
++ // no more udc irqs
++ // maybe "ACTION=disconnect /sbin/hotplug gadget".
++ }
++ } else if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
++ LED_CONNECTED_ON;
++
++ DMSG("?? connect irq ??\n");
++
++ // if there's no driver bound, ignore; else
++ // udc_enable (dev);
++ // UDC irqs drive the rest.
++ // maybe "ACTION=connect /sbin/hotplug gadget".
++ }
++ return IRQ_HANDLED;
++}
++
++#endif
++#endif
++
++#ifndef enable_disconnect_irq
++#warning USB disconnect() is not yet reported.
++#define enable_disconnect_irq() do {} while (0)
++#define disable_disconnect_irq() do {} while (0)
++#endif
++
++
++/*-------------------------------------------------------------------------*/
++
++static inline void clear_ep_state (struct pxa2xx_udc *dev)
++{
++ unsigned i;
++
++ /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
++ * fifos, and pending transactions mustn't be continued in any case.
++ */
++ for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
++ nuke(&dev->ep[i], -ECONNABORTED);
++}
++
++static void udc_watchdog(unsigned long _dev)
++{
++ struct pxa2xx_udc *dev = (void *)_dev;
++
++ local_irq_disable();
++ if (dev->ep0state == EP0_STALL
++ && (UDCCS0 & UDCCS0_FST) == 0
++ && (UDCCS0 & UDCCS0_SST) == 0) {
++ UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
++ DBG(DBG_VERBOSE, "ep0 re-stall\n");
++ start_watchdog(dev);
++ }
++ local_irq_enable();
++}
++
++static void handle_ep0 (struct pxa2xx_udc *dev)
++{
++ u32 udccs0 = UDCCS0;
++ struct pxa2xx_ep *ep = &dev->ep [0];
++ struct pxa2xx_request *req;
++ union {
++ struct usb_ctrlrequest r;
++ u8 raw [8];
++ u32 word [2];
++ } u;
++
++ if (list_empty(&ep->queue))
++ req = 0;
++ else
++ req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
++
++ /* clear stall status */
++ if (udccs0 & UDCCS0_SST) {
++ nuke(ep, -EPIPE);
++ UDCCS0 = UDCCS0_SST;
++ del_timer(&dev->timer);
++ ep0_idle(dev);
++ }
++
++ /* previous request unfinished? non-error iff back-to-back ... */
++ if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
++ nuke(ep, 0);
++ del_timer(&dev->timer);
++ ep0_idle(dev);
++ }
++
++ switch (dev->ep0state) {
++ case EP0_IDLE:
++ /* late-breaking status? */
++ udccs0 = UDCCS0;
++
++ /* start control request? */
++ if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
++ == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
++ int i;
++
++ nuke (ep, -EPROTO);
++
++ /* read SETUP packet */
++ for (i = 0; i < 8; i++) {
++ if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
++bad_setup:
++ DMSG("SETUP %d!\n", i);
++ goto stall;
++ }
++ u.raw [i] = (u8) UDDR0;
++ }
++ if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
++ goto bad_setup;
++
++got_setup:
++ le16_to_cpus (&u.r.wValue);
++ le16_to_cpus (&u.r.wIndex);
++ le16_to_cpus (&u.r.wLength);
++
++ LED_EP0_ON;
++ DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
++ u.r.bRequestType, u.r.bRequest,
++ u.r.wValue, u.r.wIndex, u.r.wLength);
++
++ /* cope with automagic for some standard requests. */
++ dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
++ == USB_TYPE_STANDARD;
++ dev->req_config = 0;
++ dev->req_pending = 1;
++ switch (u.r.bRequest) {
++ /* hardware restricts gadget drivers here! */
++ case USB_REQ_SET_CONFIGURATION:
++ if (u.r.bRequestType == USB_RECIP_DEVICE) {
++ /* reflect hardware's automagic
++ * up to the gadget driver.
++ */
++config_change:
++ dev->req_config = 1;
++ clear_ep_state(dev);
++ /* if !has_cfr, there's no synch
++ * else use AREN (later) not SA|OPR
++ * USIR0_IR0 acts edge sensitive
++ */
++ }
++ break;
++ /* ... and here, even more ... */
++ case USB_REQ_SET_INTERFACE:
++ if (u.r.bRequestType == USB_RECIP_INTERFACE) {
++ /* udc hardware is broken by design:
++ * - altsetting may only be zero;
++ * - hw resets all interfaces' eps;
++ * - ep reset doesn't include halt(?).
++ */
++ DMSG("broken set_interface (%d/%d)\n",
++ u.r.wIndex, u.r.wValue);
++ goto config_change;
++ }
++ break;
++ /* hardware was supposed to hide this */
++ case USB_REQ_SET_ADDRESS:
++ if (u.r.bRequestType == USB_RECIP_DEVICE) {
++ ep0start(dev, 0, "address");
++ return;
++ }
++ break;
++ }
++
++ if (u.r.bRequestType & USB_DIR_IN)
++ dev->ep0state = EP0_IN_DATA_PHASE;
++ else
++ dev->ep0state = EP0_OUT_DATA_PHASE;
++
++ i = dev->driver->setup(&dev->gadget, &u.r);
++ if (i < 0) {
++ /* hardware automagic preventing STALL... */
++ if (dev->req_config) {
++ /* hardware sometimes neglects to tell
++ * tell us about config change events,
++ * so later ones may fail...
++ */
++ WARN("config change %02x fail %d?\n",
++ u.r.bRequest, i);
++ return;
++ /* TODO experiment: if has_cfr,
++ * hardware didn't ACK; maybe we
++ * could actually STALL!
++ */
++ }
++ DBG(DBG_VERBOSE, "protocol STALL, "
++ "%02x err %d\n", UDCCS0, i);
++stall:
++ /* the watchdog timer helps deal with cases
++ * where udc seems to clear FST wrongly, and
++ * then NAKs instead of STALLing.
++ */
++ ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
++ start_watchdog(dev);
++ dev->ep0state = EP0_STALL;
++ LED_EP0_OFF;
++
++ /* deferred i/o == no response yet */
++ } else if (dev->req_pending) {
++ if (likely(dev->ep0state == EP0_IN_DATA_PHASE
++ || dev->req_std || u.r.wLength))
++ ep0start(dev, 0, "defer");
++ else
++ ep0start(dev, UDCCS0_IPR, "defer/IPR");
++ }
++
++ /* expect at least one data or status stage irq */
++ return;
++
++ } else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
++ == (UDCCS0_OPR|UDCCS0_SA))) {
++ unsigned i;
++
++ /* pxa210/250 erratum 131 for B0/B1 says RNE lies.
++ * still observed on a pxa255 a0.
++ */
++ DBG(DBG_VERBOSE, "e131\n");
++ nuke(ep, -EPROTO);
++
++ /* read SETUP data, but don't trust it too much */
++ for (i = 0; i < 8; i++)
++ u.raw [i] = (u8) UDDR0;
++ if ((u.r.bRequestType & USB_RECIP_MASK)
++ > USB_RECIP_OTHER)
++ goto stall;
++ if (u.word [0] == 0 && u.word [1] == 0)
++ goto stall;
++ goto got_setup;
++ } else {
++ /* some random early IRQ:
++ * - we acked FST
++ * - IPR cleared
++ * - OPR got set, without SA (likely status stage)
++ */
++ UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
++ }
++ break;
++ case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
++ if (udccs0 & UDCCS0_OPR) {
++ UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
++ DBG(DBG_VERBOSE, "ep0in premature status\n");
++ if (req)
++ done(ep, req, 0);
++ ep0_idle(dev);
++ } else /* irq was IPR clearing */ {
++ if (req) {
++ /* this IN packet might finish the request */
++ (void) write_ep0_fifo(ep, req);
++ } /* else IN token before response was written */
++ }
++ break;
++ case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
++ if (udccs0 & UDCCS0_OPR) {
++ if (req) {
++ /* this OUT packet might finish the request */
++ if (read_ep0_fifo(ep, req))
++ done(ep, req, 0);
++ /* else more OUT packets expected */
++ } /* else OUT token before read was issued */
++ } else /* irq was IPR clearing */ {
++ DBG(DBG_VERBOSE, "ep0out premature status\n");
++ if (req)
++ done(ep, req, 0);
++ ep0_idle(dev);
++ }
++ break;
++ case EP0_END_XFER:
++ if (req)
++ done(ep, req, 0);
++ /* ack control-IN status (maybe in-zlp was skipped)
++ * also appears after some config change events.
++ */
++ if (udccs0 & UDCCS0_OPR)
++ UDCCS0 = UDCCS0_OPR;
++ ep0_idle(dev);
++ break;
++ case EP0_STALL:
++ UDCCS0 = UDCCS0_FST;
++ break;
++ }
++ USIR0 = USIR0_IR0;
++}
++
++static void handle_ep(struct pxa2xx_ep *ep)
++{
++ struct pxa2xx_request *req;
++ int is_in = ep->bEndpointAddress & USB_DIR_IN;
++ int completed;
++ u32 udccs, tmp;
++
++ do {
++ completed = 0;
++ if (likely (!list_empty(&ep->queue)))
++ req = list_entry(ep->queue.next,
++ struct pxa2xx_request, queue);
++ else
++ req = 0;
++
++ // TODO check FST handling
++
++ udccs = *ep->reg_udccs;
++ if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
++ tmp = UDCCS_BI_TUR;
++ if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
++ tmp |= UDCCS_BI_SST;
++ tmp &= udccs;
++ if (likely (tmp))
++ *ep->reg_udccs = tmp;
++ if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
++ completed = write_fifo(ep, req);
++
++ } else { /* irq from RPC (or for ISO, ROF) */
++ if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
++ tmp = UDCCS_BO_SST | UDCCS_BO_DME;
++ else
++ tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
++ tmp &= udccs;
++ if (likely(tmp))
++ *ep->reg_udccs = tmp;
++
++ /* fifos can hold packets, ready for reading... */
++ if (likely(req != 0)) {
++#ifdef USE_OUT_DMA
++// TODO didn't yet debug out-dma. this approach assumes
++// the worst about short packets and RPC; it might be better.
++
++ if (likely(ep->dma >= 0)) {
++ if (!(udccs & UDCCS_BO_RSP)) {
++ *ep->reg_udccs = UDCCS_BO_RPC;
++ ep->dma_irqs++;
++ return;
++ }
++ }
++#endif
++ completed = read_fifo(ep, req);
++ } else
++ pio_irq_disable (ep->bEndpointAddress);
++ }
++ ep->pio_irqs++;
++ } while (completed);
++}
++
++/*
++ * pxa2xx_udc_irq - interrupt handler
++ *
++ * avoid delays in ep0 processing. the control handshaking isn't always
++ * under software control (pxa250c0 and the pxa255 are better), and delays
++ * could cause usb protocol errors.
++ */
++static irqreturn_t
++pxa2xx_udc_irq(int irq, void *_dev, struct pt_regs *r)
++{
++ struct pxa2xx_udc *dev = _dev;
++ int handled;
++
++ dev->stats.irqs++;
++ HEX_DISPLAY(dev->stats.irqs);
++ do {
++ u32 udccr = UDCCR;
++
++ handled = 0;
++
++ /* SUSpend Interrupt Request */
++ if (unlikely(udccr & UDCCR_SUSIR)) {
++ udc_ack_int_UDCCR(UDCCR_SUSIR);
++ handled = 1;
++ DBG(DBG_VERBOSE, "USB suspend%s\n", is_usb_connected()
++ ? "" : "+disconnect");
++
++ if (!is_usb_connected())
++ stop_activity(dev, dev->driver);
++ else if (dev->gadget.speed != USB_SPEED_UNKNOWN
++ && dev->driver
++ && dev->driver->suspend)
++ dev->driver->suspend(&dev->gadget);
++ ep0_idle (dev);
++ }
++
++ /* RESume Interrupt Request */
++ if (unlikely(udccr & UDCCR_RESIR)) {
++ udc_ack_int_UDCCR(UDCCR_RESIR);
++ handled = 1;
++ DBG(DBG_VERBOSE, "USB resume\n");
++
++ if (dev->gadget.speed != USB_SPEED_UNKNOWN
++ && dev->driver
++ && dev->driver->resume
++ && is_usb_connected())
++ dev->driver->resume(&dev->gadget);
++ }
++
++ /* ReSeT Interrupt Request - USB reset */
++ if (unlikely(udccr & UDCCR_RSTIR)) {
++ udc_ack_int_UDCCR(UDCCR_RSTIR);
++ handled = 1;
++
++ if ((UDCCR & UDCCR_UDA) == 0) {
++ DBG(DBG_VERBOSE, "USB reset start\n");
++ if (dev->gadget.speed != USB_SPEED_UNKNOWN)
++ disable_disconnect_irq();
++
++ /* reset driver and endpoints,
++ * in case that's not yet done
++ */
++ stop_activity (dev, dev->driver);
++
++ } else {
++ INFO("USB reset\n");
++ dev->gadget.speed = USB_SPEED_FULL;
++ LED_CONNECTED_ON;
++ memset(&dev->stats, 0, sizeof dev->stats);
++ /* driver and endpoints are still reset */
++ enable_disconnect_irq();
++ }
++
++ } else {
++ u32 usir0 = USIR0 & ~UICR0;
++ u32 usir1 = USIR1 & ~UICR1;
++ int i;
++
++ if (unlikely (!usir0 && !usir1))
++ continue;
++
++ DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
++
++ /* control traffic */
++ if (usir0 & USIR0_IR0) {
++ dev->ep[0].pio_irqs++;
++ handle_ep0(dev);
++ handled = 1;
++ }
++
++ /* endpoint data transfers */
++ for (i = 0; i < 8; i++) {
++ u32 tmp = 1 << i;
++
++ if (i && (usir0 & tmp)) {
++ handle_ep(&dev->ep[i]);
++ USIR0 |= tmp;
++ handled = 1;
++ }
++ if (usir1 & tmp) {
++ handle_ep(&dev->ep[i+8]);
++ USIR1 |= tmp;
++ handled = 1;
++ }
++ }
++ }
++
++ /* we could also ask for 1 msec SOF (SIR) interrupts */
++
++ } while (handled);
++ return IRQ_HANDLED;
++}
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * cleanup - free resources allocated during init
++ */
++static void /*__exit and */ __init cleanup(void)
++{
++ struct pxa2xx_udc *dev = the_controller;
++
++ if (!dev)
++ return;
++
++ udc_disable(dev);
++ remove_proc_files();
++ usb_gadget_unregister_driver(dev->driver);
++ if (dev->got_irq) {
++ free_irq(IRQ_USB, dev);
++ dev->got_irq = 0;
++ }
++#ifdef LUBBOCK_USB_DISC_IRQ
++ if (dev->got_disc) {
++ free_irq(LUBBOCK_USB_DISC_IRQ, dev);
++ dev->got_disc = 0;
++ }
++#endif
++ the_controller = 0;
++ release_mem_region(REGISTER_FIRST, REGISTER_LENGTH);
++}
++module_exit (cleanup);
++
++/* this uses load-time allocation and initialization (instead of
++ * doing it at run-time) to save code, eliminate fault paths, and
++ * be more obviously correct.
++ */
++static struct pxa2xx_udc memory = {
++ .gadget = {
++ .ops = &pxa2xx_udc_ops,
++ .ep0 = &memory.ep[0].ep,
++ .name = driver_name,
++ .dev = {
++ .bus_id = "gadget",
++ },
++ },
++
++ /* control endpoint */
++ .ep[0] = {
++ .ep = {
++ .name = ep0name,
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = EP0_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .reg_udccs = &UDCCS0,
++ .reg_uddr = &UDDR0,
++ },
++
++ /* first group of endpoints */
++ .ep[1] = {
++ .ep = {
++ .name = "ep1in-bulk",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = BULK_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = BULK_FIFO_SIZE,
++ .bEndpointAddress = USB_DIR_IN | 1,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .reg_udccs = &UDCCS1,
++ .reg_uddr = &UDDR1,
++ drcmr (25)
++ },
++ .ep[2] = {
++ .ep = {
++ .name = "ep2out-bulk",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = BULK_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = BULK_FIFO_SIZE,
++ .bEndpointAddress = 2,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .reg_udccs = &UDCCS2,
++ .reg_ubcr = &UBCR2,
++ .reg_uddr = &UDDR2,
++ drcmr (26)
++ },
++#ifndef CONFIG_USB_PXA2XX_SMALL
++ .ep[3] = {
++ .ep = {
++ .name = "ep3in-iso",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = ISO_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = ISO_FIFO_SIZE,
++ .bEndpointAddress = USB_DIR_IN | 3,
++ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
++ .reg_udccs = &UDCCS3,
++ .reg_uddr = &UDDR3,
++ drcmr (27)
++ },
++ .ep[4] = {
++ .ep = {
++ .name = "ep4out-iso",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = ISO_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = ISO_FIFO_SIZE,
++ .bEndpointAddress = 4,
++ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
++ .reg_udccs = &UDCCS4,
++ .reg_ubcr = &UBCR4,
++ .reg_uddr = &UDDR4,
++ drcmr (28)
++ },
++ .ep[5] = {
++ .ep = {
++ .name = "ep5in-int",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = INT_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = INT_FIFO_SIZE,
++ .bEndpointAddress = USB_DIR_IN | 5,
++ .bmAttributes = USB_ENDPOINT_XFER_INT,
++ .reg_udccs = &UDCCS5,
++ .reg_uddr = &UDDR5,
++ },
++
++ /* second group of endpoints */
++ .ep[6] = {
++ .ep = {
++ .name = "ep6in-bulk",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = BULK_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = BULK_FIFO_SIZE,
++ .bEndpointAddress = USB_DIR_IN | 6,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .reg_udccs = &UDCCS6,
++ .reg_uddr = &UDDR6,
++ drcmr (30)
++ },
++ .ep[7] = {
++ .ep = {
++ .name = "ep7out-bulk",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = BULK_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = BULK_FIFO_SIZE,
++ .bEndpointAddress = 7,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .reg_udccs = &UDCCS7,
++ .reg_ubcr = &UBCR7,
++ .reg_uddr = &UDDR7,
++ drcmr (31)
++ },
++ .ep[8] = {
++ .ep = {
++ .name = "ep8in-iso",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = ISO_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = ISO_FIFO_SIZE,
++ .bEndpointAddress = USB_DIR_IN | 8,
++ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
++ .reg_udccs = &UDCCS8,
++ .reg_uddr = &UDDR8,
++ drcmr (32)
++ },
++ .ep[9] = {
++ .ep = {
++ .name = "ep9out-iso",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = ISO_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = ISO_FIFO_SIZE,
++ .bEndpointAddress = 9,
++ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
++ .reg_udccs = &UDCCS9,
++ .reg_ubcr = &UBCR9,
++ .reg_uddr = &UDDR9,
++ drcmr (33)
++ },
++ .ep[10] = {
++ .ep = {
++ .name = "ep10in-int",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = INT_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = INT_FIFO_SIZE,
++ .bEndpointAddress = USB_DIR_IN | 10,
++ .bmAttributes = USB_ENDPOINT_XFER_INT,
++ .reg_udccs = &UDCCS10,
++ .reg_uddr = &UDDR10,
++ },
++
++ /* third group of endpoints */
++ .ep[11] = {
++ .ep = {
++ .name = "ep11in-bulk",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = BULK_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = BULK_FIFO_SIZE,
++ .bEndpointAddress = USB_DIR_IN | 11,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .reg_udccs = &UDCCS11,
++ .reg_uddr = &UDDR11,
++ drcmr (35)
++ },
++ .ep[12] = {
++ .ep = {
++ .name = "ep12out-bulk",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = BULK_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = BULK_FIFO_SIZE,
++ .bEndpointAddress = 12,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .reg_udccs = &UDCCS12,
++ .reg_ubcr = &UBCR12,
++ .reg_uddr = &UDDR12,
++ drcmr (36)
++ },
++ .ep[13] = {
++ .ep = {
++ .name = "ep13in-iso",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = ISO_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = ISO_FIFO_SIZE,
++ .bEndpointAddress = USB_DIR_IN | 13,
++ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
++ .reg_udccs = &UDCCS13,
++ .reg_uddr = &UDDR13,
++ drcmr (37)
++ },
++ .ep[14] = {
++ .ep = {
++ .name = "ep14out-iso",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = ISO_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = ISO_FIFO_SIZE,
++ .bEndpointAddress = 14,
++ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
++ .reg_udccs = &UDCCS14,
++ .reg_ubcr = &UBCR14,
++ .reg_uddr = &UDDR14,
++ drcmr (38)
++ },
++ .ep[15] = {
++ .ep = {
++ .name = "ep15in-int",
++ .ops = &pxa2xx_ep_ops,
++ .maxpacket = INT_FIFO_SIZE,
++ },
++ .dev = &memory,
++ .fifo_size = INT_FIFO_SIZE,
++ .bEndpointAddress = USB_DIR_IN | 15,
++ .bmAttributes = USB_ENDPOINT_XFER_INT,
++ .reg_udccs = &UDCCS15,
++ .reg_uddr = &UDDR15,
++ },
++#endif /* !CONFIG_USB_PXA2XX_SMALL */
++};
++
++#define CP15R0_VENDOR_MASK 0xffffe000
++
++#if defined(CONFIG_ARCH_PXA)
++#define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
++
++#elif defined(CONFIG_ARCH_IXP425)
++#define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp425 */
++
++#endif
++
++#define CP15R0_PROD_MASK 0x000003f0
++#define PXA25x 0x00000100 /* and PXA26x */
++#define PXA210 0x00000120
++
++#define CP15R0_REV_MASK 0x0000000f
++
++#define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
++
++#define PXA255_A0 0x00000106 /* or PXA260_B1 */
++#define PXA250_C0 0x00000105 /* or PXA26x_B0 */
++#define PXA250_B2 0x00000104
++#define PXA250_B1 0x00000103 /* or PXA260_A0 */
++#define PXA250_B0 0x00000102
++#define PXA250_A1 0x00000101
++#define PXA250_A0 0x00000100
++
++#define PXA210_C0 0x00000125
++#define PXA210_B2 0x00000124
++#define PXA210_B1 0x00000123
++#define PXA210_B0 0x00000122
++
++#define IXP425_A0 0x000001c1
++
++/*
++ * init - allocate resources
++ */
++static int __init init(void)
++{
++ struct pxa2xx_udc *dev;
++ int retval, out_dma = 1;
++ u32 chiprev;
++
++ printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
++
++ /* insist on Intel/ARM/XScale */
++ asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
++ if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
++ printk(KERN_ERR "%s: not XScale!\n", driver_name);
++ return -ENODEV;
++ }
++
++ /* allocate resources */
++ if (!request_mem_region(REGISTER_FIRST, REGISTER_LENGTH, driver_name))
++ return -EBUSY;
++
++ /* initialize data */
++ dev = &memory;
++
++ init_timer(&dev->timer);
++ dev->timer.function = udc_watchdog;
++ dev->timer.data = (unsigned long) dev;
++
++ /* trigger chiprev-specific logic */
++ switch (chiprev & CP15R0_PRODREV_MASK) {
++#if defined(CONFIG_ARCH_PXA)
++ case PXA255_A0:
++ dev->has_cfr = 1;
++ break;
++ case PXA250_A0:
++ case PXA250_A1:
++ /* A0/A1 "not released"; ep 13, 15 unusable */
++ /* fall through */
++ case PXA250_B2: case PXA210_B2:
++ case PXA250_B1: case PXA210_B1:
++ case PXA250_B0: case PXA210_B0:
++ out_dma = 0;
++ /* fall through */
++ case PXA250_C0: case PXA210_C0:
++ break;
++#elif defined(CONFIG_ARCH_IXP425)
++ case IXP425_A0:
++ out_dma = 0;
++ break;
++#endif
++ default:
++ out_dma = 0;
++ printk(KERN_ERR "%s: unrecognized processor: %08x\n",
++ driver_name, chiprev);
++ return -ENODEV;
++ }
++
++ pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB,
++ dev->has_cfr ? "" : " (!cfr)",
++ out_dma ? "" : " (broken dma-out)",
++ SIZE_STR DMASTR
++ );
++
++#ifdef USE_DMA
++#ifndef USE_OUT_DMA
++ out_dma = 0;
++#endif
++ /* pxa 250 erratum 130 prevents using OUT dma (fixed C0) */
++ if (!out_dma) {
++ DMSG("disabled OUT dma\n");
++ dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
++ dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
++ dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
++ }
++#endif
++
++ the_controller = dev;
++ udc_disable(dev);
++ udc_reinit(dev);
++
++ /* irq setup after old hardware state is cleaned up */
++ retval = request_irq(IRQ_USB, pxa2xx_udc_irq,
++ SA_INTERRUPT, driver_name, dev);
++ if (retval != 0) {
++ printk(KERN_ERR "%s: can't get irq %i, err %d\n",
++ driver_name, IRQ_USB, retval);
++ return -EBUSY;
++ }
++ dev->got_irq = 1;
++
++#ifdef LUBBOCK_USB_DISC_IRQ
++ if (machine_is_lubbock()) {
++ disable_irq(LUBBOCK_USB_DISC_IRQ);
++ retval = request_irq(LUBBOCK_USB_DISC_IRQ,
++ usb_connection_irq,
++ SA_INTERRUPT | SA_SAMPLE_RANDOM,
++ driver_name, dev);
++ if (retval != 0) {
++ enable_irq(LUBBOCK_USB_DISC_IRQ);
++ printk(KERN_ERR "%s: can't get irq %i, err %d\n",
++ driver_name, LUBBOCK_USB_DISC_IRQ, retval);
++ cleanup();
++ return retval;
++ }
++ dev->got_disc = 1;
++ }
++#endif
++
++ create_proc_files();
++ return 0;
++}
++module_init (init);
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
++MODULE_LICENSE("GPL");
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/pxa2xx_udc.h kernel/drivers/usb/gadget/pxa2xx_udc.h
+--- /tmp/kernel/drivers/usb/gadget/pxa2xx_udc.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/pxa2xx_udc.h 2005-04-22 17:53:19.496529422 +0200
+@@ -0,0 +1,528 @@
++/*
++ * linux/drivers/usb/gadget/pxa2xx_udc.h
++ * Intel PXA2xx on-chip full speed USB device controller
++ *
++ * Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix
++ * Copyright (C) 2003 David Brownell
++ * Copyright (C) 2003 Joshua Wise
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef __LINUX_USB_GADGET_PXA2XX_H
++#define __LINUX_USB_GADGET_PXA2XX_H
++
++#include <linux/types.h>
++
++/*-------------------------------------------------------------------------*/
++
++/* pxa2xx has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
++#define UFNRH_SIR (1 << 7) /* SOF interrupt request */
++#define UFNRH_SIM (1 << 6) /* SOF interrupt mask */
++#define UFNRH_IPE14 (1 << 5) /* ISO packet error, ep14 */
++#define UFNRH_IPE9 (1 << 4) /* ISO packet error, ep9 */
++#define UFNRH_IPE4 (1 << 3) /* ISO packet error, ep4 */
++
++/* pxa255 has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
++#define UDCCFR UDC_RES2 /* UDC Control Function Register */
++#define UDCCFR_AREN (1 << 7) /* ACK response enable (now) */
++#define UDCCFR_ACM (1 << 2) /* ACK control mode (wait for AREN) */
++
++/* for address space reservation */
++#define REGISTER_FIRST ((unsigned long)(&UDCCR))
++#define REGISTER_LAST ((unsigned long)(&UDDR14)) /* not UDDR15! */
++#define REGISTER_LENGTH ((REGISTER_LAST - REGISTER_FIRST) + 4)
++
++/*-------------------------------------------------------------------------*/
++
++struct pxa2xx_udc;
++
++struct pxa2xx_ep {
++ struct usb_ep ep;
++ struct pxa2xx_udc *dev;
++
++ const struct usb_endpoint_descriptor *desc;
++ struct list_head queue;
++ unsigned long pio_irqs;
++ unsigned long dma_irqs;
++ short dma;
++
++ unsigned short fifo_size;
++ u8 bEndpointAddress;
++ u8 bmAttributes;
++
++ unsigned stopped : 1;
++ unsigned dma_fixup : 1;
++
++ /* UDCCS = UDC Control/Status for this EP
++ * UBCR = UDC Byte Count Remaining (contents of OUT fifo)
++ * UDDR = UDC Endpoint Data Register (the fifo)
++ * DRCM = DMA Request Channel Map
++ */
++ volatile u32 *reg_udccs;
++ volatile u32 *reg_ubcr;
++ volatile u32 *reg_uddr;
++#ifdef USE_DMA
++ volatile u32 *reg_drcmr;
++#define drcmr(n) .reg_drcmr = & DRCMR ## n ,
++#else
++#define drcmr(n)
++#endif
++};
++
++struct pxa2xx_request {
++ struct usb_request req;
++ struct list_head queue;
++};
++
++enum ep0_state {
++ EP0_IDLE,
++ EP0_IN_DATA_PHASE,
++ EP0_OUT_DATA_PHASE,
++ EP0_END_XFER,
++ EP0_STALL,
++};
++
++#define EP0_FIFO_SIZE ((unsigned)16)
++#define BULK_FIFO_SIZE ((unsigned)64)
++#define ISO_FIFO_SIZE ((unsigned)256)
++#define INT_FIFO_SIZE ((unsigned)8)
++
++struct udc_stats {
++ struct ep0stats {
++ unsigned long ops;
++ unsigned long bytes;
++ } read, write;
++ unsigned long irqs;
++};
++
++#ifdef CONFIG_USB_PXA2XX_SMALL
++/* when memory's tight, SMALL config saves code+data. */
++#undef USE_DMA
++#define PXA_UDC_NUM_ENDPOINTS 3
++#endif
++
++#ifndef PXA_UDC_NUM_ENDPOINTS
++#define PXA_UDC_NUM_ENDPOINTS 16
++#endif
++
++struct pxa2xx_udc {
++ struct usb_gadget gadget;
++ struct usb_gadget_driver *driver;
++
++ enum ep0_state ep0state;
++ struct udc_stats stats;
++ unsigned got_irq : 1,
++ got_disc : 1,
++ has_cfr : 1,
++ req_pending : 1,
++ req_std : 1,
++ req_config : 1;
++
++#define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200))
++ struct timer_list timer;
++
++ struct pxa2xx_ep ep [PXA_UDC_NUM_ENDPOINTS];
++};
++
++/* 2.5 changes ... */
++
++#ifndef container_of
++#define container_of list_entry
++#endif
++
++#ifndef WARN_ON
++#define WARN_ON BUG_ON
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++/* please keep machine-specific defines in alphabetical order. */
++
++// CONFIG_ARCH_ADI_COYOTE behaves
++
++#ifdef CONFIG_ARCH_E7XX
++# include <asm/arch/e7xx-gpio.h>
++#endif
++
++#ifdef CONFIG_ARCH_H1900
++# include <asm/arch/h1900-gpio.h>
++#endif
++
++#ifdef CONFIG_ARCH_H3900
++# include <asm/arch/h3900-gpio.h>
++#endif
++
++#ifdef CONFIG_ARCH_H5400
++# include <asm/arch/h5400-gpio.h>
++#endif
++
++#ifdef CONFIG_ARCH_INNOKOM
++#include <asm/arch/innokom.h>
++#endif
++
++#ifdef CONFIG_ARCH_LUBBOCK
++#include <asm/arch/lubbock.h>
++/* lubbock can also report usb connect/disconnect irqs */
++
++#ifdef DEBUG
++#define HEX_DISPLAY(n) if (machine_is_lubbock()) { LUB_HEXLED = (n); }
++
++#define LED_CONNECTED_ON if (machine_is_lubbock()) { \
++ DISCRETE_LED_ON(D26); }
++#define LED_CONNECTED_OFF if(machine_is_lubbock()) { \
++ DISCRETE_LED_OFF(D26); LUB_HEXLED = 0; }
++#define LED_EP0_ON if (machine_is_lubbock()) { DISCRETE_LED_ON(D25); }
++#define LED_EP0_OFF if (machine_is_lubbock()) { DISCRETE_LED_OFF(D25); }
++#endif /* DEBUG */
++
++#endif
++
++#ifdef CONFIG_ARCH_PXA_CORGI
++/* Sharp Zaurus C-700, C-750, C-760, C-860 */
++#define CORGI_CONNECT_GPIO 45
++/* use the ARM-Linux registered symbol, not a Lineo-private one */
++#define CONFIG_MACH_CORGI
++#endif
++
++#ifdef CONFIG_ARCH_PXA_POODLE
++/* Sharp B-500, SL-5600 */
++#define POODLE_CONNECT_GPIO 20
++/* use the ARM-Linux registered symbol, not a Lineo-private one */
++#define CONFIG_MACH_POODLE
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++/* LEDs are only for debug */
++#ifndef HEX_DISPLAY
++#define HEX_DISPLAY(n) do {} while(0)
++#endif
++
++#ifndef LED_CONNECTED_ON
++#define LED_CONNECTED_ON do {} while(0)
++#define LED_CONNECTED_OFF do {} while(0)
++#endif
++#ifndef LED_EP0_ON
++#define LED_EP0_ON do {} while (0)
++#define LED_EP0_OFF do {} while (0)
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++static struct pxa2xx_udc *the_controller;
++
++/* one GPIO should be used to detect host disconnect */
++static int is_usb_connected(void)
++{
++ static int first = 0;
++
++ // CONFIG_ARCH_ADI_COYOTE cannot detect or force disconnect
++#ifdef CONFIG_ARCH_E7XX
++ if (machine_is_e7xx())
++ return (GPLR(GPIO_E7XX_USB_DISC)
++ & GPIO_bit(GPIO_E7XX_USB_DISC));
++#endif
++#if 0
++#ifdef CONFIG_ARCH_H1900
++ if (machine_is_h1900())
++ return (!(GPLR(GPIO_NR_H1900_USB_DETECT_N)
++ & GPIO_bit(GPIO_NR_H1900_USB_DETECT_N)));
++#endif
++#ifdef CONFIG_ARCH_H3900
++ if (machine_is_h3900())
++ return 1;
++#endif
++#ifdef CONFIG_ARCH_H5400
++ // h5400 ... ?
++#endif
++#endif
++#ifdef CONFIG_ARCH_INNOKOM
++ if (machine_is_innokom())
++ return (GPLR(GPIO_INNOKOM_USB_DISC)
++ & GPIO_bit(GPIO_INNOKOM_USB_DISC));
++#endif
++#ifdef CONFIG_ARCH_LUBBOCK
++ if (machine_is_lubbock())
++ return ((LUB_MISC_RD & (1 << 9)) == 0);
++#endif
++ // Sharp's sources didn't show a corgi or poodle hook
++
++ if (!first) {
++ pr_info("%s: can't check host connect\n", driver_name);
++ first++;
++ }
++ return 1;
++}
++
++static int disc_first = 0;
++
++/* one GPIO should force the host to see this device (or not) */
++static void make_usb_disappear(void)
++{
++ // CONFIG_ARCH_ADI_COYOTE cannot detect or force disconnect
++#ifdef CONFIG_ARCH_E7XX
++ if (machine_is_e7xx()) {
++ GPSR(GPIO_E7XX_USB_PULLUP) = GPIO_bit(GPIO_E7XX_USB_PULLUP);
++ return;
++ }
++#endif
++ // h1900 ... ?
++#ifdef CONFIG_ARCH_H3900
++ if (machine_is_h3900()) {
++ GPDR0 &= ~GPIO_H3900_USBP_PULLUP;
++ return;
++ }
++#endif
++#ifdef CONFIG_ARCH_H5400
++ if (machine_is_h5400()) {
++ GPDR(GPIO_NR_H5400_USB_PULLUP) &=
++ ~GPIO_bit(GPIO_NR_H5400_USB_PULLUP);
++ return;
++ }
++#endif
++#ifdef CONFIG_ARCH_INNOKOM
++ if (machine_is_innokom()) {
++ GPSR(GPIO_INNOKOM_USB_ONOFF) = GPIO_bit(GPIO_INNOKOM_USB_ONOFF);
++ printk("innokom: disappear\n");
++ udelay(5);
++ return;
++ }
++#endif
++#ifdef CONFIG_ARCH_CSB226
++ if (machine_is_csb226()) {
++ GPCR0 |= 0x00000080;
++ printk("csb226: disappear\n");
++ udelay(5);
++ return;
++ }
++#endif
++ // lubbock has no D+ pullup
++#ifdef CONFIG_MACH_CORGI
++ if (machine_is_corgi()) {
++ GPDR(CORGI_CONNECT_GPIO) |= GPIO_bit(CORGI_CONNECT_GPIO);
++ GPCR(CORGI_CONNECT_GPIO) = GPIO_bit(CORGI_CONNECT_GPIO);
++ }
++#endif
++#ifdef CONFIG_MACH_POODLE
++ if (machine_is_poodle()) {
++ GPDR(POODLE_CONNECT_GPIO) |= GPIO_bit(POODLE_CONNECT_GPIO);
++ GPCR(POODLE_CONNECT_GPIO) = GPIO_bit(POODLE_CONNECT_GPIO);
++ }
++#endif
++
++ if (!disc_first) {
++ pr_info("%s: can't force usb disconnect\n", driver_name);
++ disc_first++;
++ }
++}
++
++static void let_usb_appear(void)
++{
++ // CONFIG_ARCH_ADI_COYOTE cannot detect or force disconnect
++#ifdef CONFIG_ARCH_E7XX
++ if (machine_is_e7xx()) {
++ GPCR(GPIO_E7XX_USB_PULLUP) = GPIO_bit(GPIO_E7XX_USB_PULLUP);
++ return;
++ }
++#endif
++ // h1900 ... ?
++#ifdef CONFIG_ARCH_H3900
++ if (machine_is_h3900()) {
++ GPDR0 |= GPIO_H3900_USBP_PULLUP;
++ GPSR0 |= GPIO_H3900_USBP_PULLUP;
++ return;
++ }
++#endif
++#ifdef CONFIG_ARCH_H5400
++ if (machine_is_h5400()) {
++ GPDR(GPIO_NR_H5400_USB_PULLUP) |=
++ GPIO_bit(GPIO_NR_H5400_USB_PULLUP);
++ return;
++ }
++#endif
++#ifdef CONFIG_ARCH_INNOKOM
++ if (machine_is_innokom()) {
++ GPCR(GPIO_INNOKOM_USB_ONOFF) = GPIO_bit(GPIO_INNOKOM_USB_ONOFF);
++ printk("innokom: appear\n");
++ udelay(5);
++ return;
++ }
++#endif
++#ifdef CONFIG_ARCH_CSB226
++ if (machine_is_csb226()) {
++ GPDR0 |= 0x00000080;
++ GPSR0 |= 0x00000080;
++ printk("csb226: appear\n");
++ udelay(5);
++ return;
++ }
++#endif
++ // lubbock has no D+ pullup
++#ifdef CONFIG_MACH_CORGI
++ if (machine_is_corgi()) {
++ GPDR(CORGI_CONNECT_GPIO) |= GPIO_bit(CORGI_CONNECT_GPIO);
++ GPSR(CORGI_CONNECT_GPIO) = GPIO_bit(CORGI_CONNECT_GPIO);
++ }
++#endif
++#ifdef CONFIG_MACH_POODLE
++ if (machine_is_poodle()) {
++ GPDR(POODLE_CONNECT_GPIO) |= GPIO_bit(POODLE_CONNECT_GPIO);
++ GPSR(POODLE_CONNECT_GPIO) = GPIO_bit(POODLE_CONNECT_GPIO);
++ }
++#endif
++
++ if (!disc_first) {
++ pr_info("%s: can't force usb disconnect\n", driver_name);
++ disc_first++;
++ }
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* LEDs are only for debug */
++#ifndef LED_CONNECTED_ON
++#define LED_CONNECTED_ON do {} while(0)
++#define LED_CONNECTED_OFF do {} while(0)
++#endif
++#ifndef LED_EP0_ON
++#define LED_EP0_ON do {} while (0)
++#define LED_EP0_OFF do {} while (0)
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * Debugging support vanishes in non-debug builds. DBG_NORMAL should be
++ * mostly silent during normal use/testing, with no timing side-effects.
++ */
++#define DBG_NORMAL 1 /* error paths, device state transitions */
++#define DBG_VERBOSE 2 /* add some success path trace info */
++#define DBG_NOISY 3 /* ... even more: request level */
++#define DBG_VERY_NOISY 4 /* ... even more: packet level */
++
++#ifdef DEBUG
++
++static const char *state_name[] = {
++ "EP0_IDLE",
++ "EP0_IN_DATA_PHASE", "EP0_OUT_DATA_PHASE",
++ "EP0_END_XFER", "EP0_STALL"
++};
++
++#define DMSG(stuff...) printk(KERN_DEBUG "udc: " stuff)
++
++#ifdef VERBOSE
++# define UDC_DEBUG DBG_VERBOSE
++#else
++# define UDC_DEBUG DBG_NORMAL
++#endif
++
++static void __attribute__ ((__unused__))
++dump_udccr(const char *label)
++{
++ u32 udccr = UDCCR;
++ DMSG("%s %02X =%s%s%s%s%s%s%s%s\n",
++ label, udccr,
++ (udccr & UDCCR_REM) ? " rem" : "",
++ (udccr & UDCCR_RSTIR) ? " rstir" : "",
++ (udccr & UDCCR_SRM) ? " srm" : "",
++ (udccr & UDCCR_SUSIR) ? " susir" : "",
++ (udccr & UDCCR_RESIR) ? " resir" : "",
++ (udccr & UDCCR_RSM) ? " rsm" : "",
++ (udccr & UDCCR_UDA) ? " uda" : "",
++ (udccr & UDCCR_UDE) ? " ude" : "");
++}
++
++static void __attribute__ ((__unused__))
++dump_udccs0(const char *label)
++{
++ u32 udccs0 = UDCCS0;
++
++ DMSG("%s %s %02X =%s%s%s%s%s%s%s%s\n",
++ label, state_name[the_controller->ep0state], udccs0,
++ (udccs0 & UDCCS0_SA) ? " sa" : "",
++ (udccs0 & UDCCS0_RNE) ? " rne" : "",
++ (udccs0 & UDCCS0_FST) ? " fst" : "",
++ (udccs0 & UDCCS0_SST) ? " sst" : "",
++ (udccs0 & UDCCS0_DRWF) ? " dwrf" : "",
++ (udccs0 & UDCCS0_FTF) ? " ftf" : "",
++ (udccs0 & UDCCS0_IPR) ? " ipr" : "",
++ (udccs0 & UDCCS0_OPR) ? " opr" : "");
++}
++
++static void __attribute__ ((__unused__))
++dump_state(struct pxa2xx_udc *dev)
++{
++ u32 tmp;
++ unsigned i;
++
++ DMSG("%s %s, uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
++ is_usb_connected() ? "host " : "disconnected",
++ state_name[dev->ep0state],
++ UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
++ dump_udccr("udccr");
++ if (dev->has_cfr) {
++ tmp = UDCCFR;
++ DMSG("udccfr %02X =%s%s\n", tmp,
++ (tmp & UDCCFR_AREN) ? " aren" : "",
++ (tmp & UDCCFR_ACM) ? " acm" : "");
++ }
++
++ if (!dev->driver) {
++ DMSG("no gadget driver bound\n");
++ return;
++ } else
++ DMSG("ep0 driver '%s'\n", dev->driver->driver.name);
++
++ if (!is_usb_connected())
++ return;
++
++ dump_udccs0 ("udccs0");
++ DMSG("ep0 IN %lu/%lu, OUT %lu/%lu\n",
++ dev->stats.write.bytes, dev->stats.write.ops,
++ dev->stats.read.bytes, dev->stats.read.ops);
++
++ for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) {
++ if (dev->ep [i].desc == 0)
++ continue;
++ DMSG ("udccs%d = %02x\n", i, *dev->ep->reg_udccs);
++ }
++}
++
++#else
++
++#define DMSG(stuff...) do{}while(0)
++
++#define dump_udccr(x) do{}while(0)
++#define dump_udccs0(x) do{}while(0)
++#define dump_state(x) do{}while(0)
++
++#define UDC_DEBUG ((unsigned)0)
++
++#endif
++
++#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
++
++#define WARN(stuff...) printk(KERN_WARNING "udc: " stuff)
++#define INFO(stuff...) printk(KERN_INFO "udc: " stuff)
++
++
++/* 2.4 backport support */
++#define irqreturn_t void
++#define IRQ_HANDLED
++
++
++#endif /* __LINUX_USB_GADGET_PXA2XX_H */
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/rndis.c kernel/drivers/usb/gadget/rndis.c
+--- /tmp/kernel/drivers/usb/gadget/rndis.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/rndis.c 2005-04-22 17:53:19.501528608 +0200
+@@ -0,0 +1,1425 @@
++/*
++ * RNDIS MSG parser
++ *
++ * Version: $Id: rndis.c,v 1.19 2004/03/25 21:33:46 robert Exp $
++ *
++ * Authors: Benedikt Spranger, Pengutronix
++ * Robert Schwebel, Pengutronix
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This software was originally developed in conformance with
++ * Microsoft's Remote NDIS Specification License Agreement.
++ *
++ * 03/12/2004 Kai-Uwe Bloem <linux-development@auerswald.de>
++ * Fixed message length bug in init_response
++ *
++ * 03/25/2004 Kai-Uwe Bloem <linux-development@auerswald.de>
++ * Fixed rndis_rm_hdr length bug.
++ *
++ * Copyright (C) 2004 by David Brownell
++ * updates to merge with Linux 2.6, better match RNDIS spec
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/proc_fs.h>
++#include <linux/netdevice.h>
++
++#include <asm/io.h>
++#include <asm/byteorder.h>
++#include <asm/system.h>
++
++
++#undef RNDIS_PM
++#undef VERBOSE
++
++#include "rndis.h"
++
++
++/* The driver for your USB chip needs to support ep0 OUT to work with
++ * RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional).
++ *
++ * Windows hosts need an INF file like Documentation/usb/linux.inf
++ * and will be happier if you provide the host_addr module parameter.
++ */
++
++#if 0
++#define DEBUG(str,args...) do { \
++ if (rndis_debug) \
++ printk(KERN_DEBUG str , ## args ); \
++ } while (0)
++static int rndis_debug = 0;
++
++module_param (rndis_debug, bool, 0);
++MODULE_PARM_DESC (rndis_debug, "enable debugging");
++
++#else
++
++#define rndis_debug 0
++#define DEBUG(str,args...) do{}while(0)
++#endif
++
++#define RNDIS_MAX_CONFIGS 1
++
++
++static rndis_params rndis_per_dev_params [RNDIS_MAX_CONFIGS];
++
++/* Driver Version */
++static const u32 rndis_driver_version = __constant_cpu_to_le32 (1);
++
++/* Function Prototypes */
++static int rndis_init_response (int configNr, rndis_init_msg_type *buf);
++static int rndis_query_response (int configNr, rndis_query_msg_type *buf);
++static int rndis_set_response (int configNr, rndis_set_msg_type *buf);
++static int rndis_reset_response (int configNr, rndis_reset_msg_type *buf);
++static int rndis_keepalive_response (int configNr,
++ rndis_keepalive_msg_type *buf);
++
++static rndis_resp_t *rndis_add_response (int configNr, u32 length);
++
++
++/* NDIS Functions */
++static int gen_ndis_query_resp (int configNr, u32 OID, rndis_resp_t *r)
++{
++ int retval = -ENOTSUPP;
++ u32 length = 0;
++ u32 *tmp;
++ int i, count;
++ rndis_query_cmplt_type *resp;
++
++ if (!r) return -ENOMEM;
++ resp = (rndis_query_cmplt_type *) r->buf;
++
++ if (!resp) return -ENOMEM;
++
++ switch (OID) {
++
++ /* general oids (table 4-1) */
++
++ /* mandatory */
++ case OID_GEN_SUPPORTED_LIST:
++ DEBUG ("%s: OID_GEN_SUPPORTED_LIST\n", __FUNCTION__);
++ length = sizeof (oid_supported_list);
++ count = length / sizeof (u32);
++ tmp = (u32 *) ((u8 *)resp + 24);
++ for (i = 0; i < count; i++)
++ tmp[i] = cpu_to_le32 (oid_supported_list[i]);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_GEN_HARDWARE_STATUS:
++ DEBUG("%s: OID_GEN_HARDWARE_STATUS\n", __FUNCTION__);
++ length = 4;
++ /* Bogus question!
++ * Hardware must be ready to receive high level protocols.
++ * BTW:
++ * reddite ergo quae sunt Caesaris Caesari
++ * et quae sunt Dei Deo!
++ */
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_GEN_MEDIA_SUPPORTED:
++ DEBUG("%s: OID_GEN_MEDIA_SUPPORTED\n", __FUNCTION__);
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr].medium);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_GEN_MEDIA_IN_USE:
++ DEBUG("%s: OID_GEN_MEDIA_IN_USE\n", __FUNCTION__);
++ length = 4;
++ /* one medium, one transport... (maybe you do it better) */
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr].medium);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_GEN_MAXIMUM_FRAME_SIZE:
++ DEBUG("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].dev) {
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr].dev->mtu);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_GEN_LINK_SPEED:
++ DEBUG("%s: OID_GEN_LINK_SPEED\n", __FUNCTION__);
++ length = 4;
++ if (rndis_per_dev_params [configNr].media_state
++ == NDIS_MEDIA_STATE_DISCONNECTED)
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ else
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr].speed);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_GEN_TRANSMIT_BLOCK_SIZE:
++ DEBUG("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].dev) {
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr].dev->mtu);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_GEN_RECEIVE_BLOCK_SIZE:
++ DEBUG("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].dev) {
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr].dev->mtu);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_GEN_VENDOR_ID:
++ DEBUG("%s: OID_GEN_VENDOR_ID\n", __FUNCTION__);
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr].vendorID);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_GEN_VENDOR_DESCRIPTION:
++ DEBUG("%s: OID_GEN_VENDOR_DESCRIPTION\n", __FUNCTION__);
++ length = strlen (rndis_per_dev_params [configNr].vendorDescr);
++ memcpy ((u8 *) resp + 24,
++ rndis_per_dev_params [configNr].vendorDescr, length);
++ retval = 0;
++ break;
++
++ case OID_GEN_VENDOR_DRIVER_VERSION:
++ DEBUG("%s: OID_GEN_VENDOR_DRIVER_VERSION\n", __FUNCTION__);
++ length = 4;
++ /* Created as LE */
++ *((u32 *) resp + 6) = rndis_driver_version;
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_GEN_CURRENT_PACKET_FILTER:
++ DEBUG("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __FUNCTION__);
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params[configNr].filter);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_GEN_MAXIMUM_TOTAL_SIZE:
++ DEBUG("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __FUNCTION__);
++ length = 4;
++ *((u32 *) resp + 6) = __constant_cpu_to_le32(
++ RNDIS_MAX_TOTAL_SIZE);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_GEN_MEDIA_CONNECT_STATUS:
++ DEBUG("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __FUNCTION__);
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .media_state);
++ retval = 0;
++ break;
++
++ case OID_GEN_PHYSICAL_MEDIUM:
++ DEBUG("%s: OID_GEN_PHYSICAL_MEDIUM\n", __FUNCTION__);
++ length = 4;
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ break;
++
++ /* The RNDIS specification is incomplete/wrong. Some versions
++ * of MS-Windows expect OIDs that aren't specified there. Other
++ * versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
++ */
++ case OID_GEN_MAC_OPTIONS: /* from WinME */
++ DEBUG("%s: OID_GEN_MAC_OPTIONS\n", __FUNCTION__);
++ length = 4;
++ *((u32 *) resp + 6) = __constant_cpu_to_le32(
++ NDIS_MAC_OPTION_RECEIVE_SERIALIZED
++ | NDIS_MAC_OPTION_FULL_DUPLEX);
++ retval = 0;
++ break;
++
++ /* statistics OIDs (table 4-2) */
++
++ /* mandatory */
++ case OID_GEN_XMIT_OK:
++ DEBUG("%s: OID_GEN_XMIT_OK\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr].stats->tx_packets -
++ rndis_per_dev_params [configNr].stats->tx_errors -
++ rndis_per_dev_params [configNr].stats->tx_dropped);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_GEN_RCV_OK:
++ DEBUG("%s: OID_GEN_RCV_OK\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr].stats->rx_packets -
++ rndis_per_dev_params [configNr].stats->rx_errors -
++ rndis_per_dev_params [configNr].stats->rx_dropped);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_GEN_XMIT_ERROR:
++ DEBUG("%s: OID_GEN_XMIT_ERROR\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->tx_errors);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_GEN_RCV_ERROR:
++ DEBUG("%s: OID_GEN_RCV_ERROR\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->rx_errors);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_GEN_RCV_NO_BUFFER:
++ DEBUG("%s: OID_GEN_RCV_NO_BUFFER\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->rx_dropped);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++#ifdef RNDIS_OPTIONAL_STATS
++ case OID_GEN_DIRECTED_BYTES_XMIT:
++ DEBUG("%s: OID_GEN_DIRECTED_BYTES_XMIT\n", __FUNCTION__);
++ /*
++ * Aunt Tilly's size of shoes
++ * minus antarctica count of penguins
++ * divided by weight of Alpha Centauri
++ */
++ if (rndis_per_dev_params [configNr].stats) {
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ (rndis_per_dev_params [configNr]
++ .stats->tx_packets -
++ rndis_per_dev_params [configNr]
++ .stats->tx_errors -
++ rndis_per_dev_params [configNr]
++ .stats->tx_dropped)
++ * 123);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_DIRECTED_FRAMES_XMIT:
++ DEBUG("%s: OID_GEN_DIRECTED_FRAMES_XMIT\n", __FUNCTION__);
++ /* dito */
++ if (rndis_per_dev_params [configNr].stats) {
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ (rndis_per_dev_params [configNr]
++ .stats->tx_packets -
++ rndis_per_dev_params [configNr]
++ .stats->tx_errors -
++ rndis_per_dev_params [configNr]
++ .stats->tx_dropped)
++ / 123);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_MULTICAST_BYTES_XMIT:
++ DEBUG("%s: OID_GEN_MULTICAST_BYTES_XMIT\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->multicast*1234);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_MULTICAST_FRAMES_XMIT:
++ DEBUG("%s: OID_GEN_MULTICAST_FRAMES_XMIT\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->multicast);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_BROADCAST_BYTES_XMIT:
++ DEBUG("%s: OID_GEN_BROADCAST_BYTES_XMIT\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->tx_packets/42*255);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_BROADCAST_FRAMES_XMIT:
++ DEBUG("%s: OID_GEN_BROADCAST_FRAMES_XMIT\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->tx_packets/42);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_DIRECTED_BYTES_RCV:
++ DEBUG("%s: OID_GEN_DIRECTED_BYTES_RCV\n", __FUNCTION__);
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ break;
++
++ case OID_GEN_DIRECTED_FRAMES_RCV:
++ DEBUG("%s: OID_GEN_DIRECTED_FRAMES_RCV\n", __FUNCTION__);
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ break;
++
++ case OID_GEN_MULTICAST_BYTES_RCV:
++ DEBUG("%s: OID_GEN_MULTICAST_BYTES_RCV\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->multicast * 1111);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_MULTICAST_FRAMES_RCV:
++ DEBUG("%s: OID_GEN_MULTICAST_FRAMES_RCV\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->multicast);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_BROADCAST_BYTES_RCV:
++ DEBUG("%s: OID_GEN_BROADCAST_BYTES_RCV\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->rx_packets/42*255);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_BROADCAST_FRAMES_RCV:
++ DEBUG("%s: OID_GEN_BROADCAST_FRAMES_RCV\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->rx_packets/42);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_RCV_CRC_ERROR:
++ DEBUG("%s: OID_GEN_RCV_CRC_ERROR\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats) {
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->rx_crc_errors);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ case OID_GEN_TRANSMIT_QUEUE_LENGTH:
++ DEBUG("%s: OID_GEN_TRANSMIT_QUEUE_LENGTH\n", __FUNCTION__);
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ break;
++#endif /* RNDIS_OPTIONAL_STATS */
++
++ /* ieee802.3 OIDs (table 4-3) */
++
++ /* mandatory */
++ case OID_802_3_PERMANENT_ADDRESS:
++ DEBUG("%s: OID_802_3_PERMANENT_ADDRESS\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].dev) {
++ length = ETH_ALEN;
++ memcpy ((u8 *) resp + 24,
++ rndis_per_dev_params [configNr].host_mac,
++ length);
++ retval = 0;
++ } else {
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_802_3_CURRENT_ADDRESS:
++ DEBUG("%s: OID_802_3_CURRENT_ADDRESS\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].dev) {
++ length = ETH_ALEN;
++ memcpy ((u8 *) resp + 24,
++ rndis_per_dev_params [configNr].host_mac,
++ length);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_802_3_MULTICAST_LIST:
++ DEBUG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
++ length = 4;
++ /* Multicast base address only */
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0xE0000000);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_802_3_MAXIMUM_LIST_SIZE:
++ DEBUG("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __FUNCTION__);
++ length = 4;
++ /* Multicast base address only */
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (1);
++ retval = 0;
++ break;
++
++ case OID_802_3_MAC_OPTIONS:
++ DEBUG("%s: OID_802_3_MAC_OPTIONS\n", __FUNCTION__);
++ break;
++
++ /* ieee802.3 statistics OIDs (table 4-4) */
++
++ /* mandatory */
++ case OID_802_3_RCV_ERROR_ALIGNMENT:
++ DEBUG("%s: OID_802_3_RCV_ERROR_ALIGNMENT\n", __FUNCTION__);
++ if (rndis_per_dev_params [configNr].stats)
++ {
++ length = 4;
++ *((u32 *) resp + 6) = cpu_to_le32 (
++ rndis_per_dev_params [configNr]
++ .stats->rx_frame_errors);
++ retval = 0;
++ }
++ break;
++
++ /* mandatory */
++ case OID_802_3_XMIT_ONE_COLLISION:
++ DEBUG("%s: OID_802_3_XMIT_ONE_COLLISION\n", __FUNCTION__);
++ length = 4;
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ break;
++
++ /* mandatory */
++ case OID_802_3_XMIT_MORE_COLLISIONS:
++ DEBUG("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __FUNCTION__);
++ length = 4;
++ *((u32 *) resp + 6) = __constant_cpu_to_le32 (0);
++ retval = 0;
++ break;
++
++#ifdef RNDIS_OPTIONAL_STATS
++ case OID_802_3_XMIT_DEFERRED:
++ DEBUG("%s: OID_802_3_XMIT_DEFERRED\n", __FUNCTION__);
++ /* TODO */
++ break;
++
++ case OID_802_3_XMIT_MAX_COLLISIONS:
++ DEBUG("%s: OID_802_3_XMIT_MAX_COLLISIONS\n", __FUNCTION__);
++ /* TODO */
++ break;
++
++ case OID_802_3_RCV_OVERRUN:
++ DEBUG("%s: OID_802_3_RCV_OVERRUN\n", __FUNCTION__);
++ /* TODO */
++ break;
++
++ case OID_802_3_XMIT_UNDERRUN:
++ DEBUG("%s: OID_802_3_XMIT_UNDERRUN\n", __FUNCTION__);
++ /* TODO */
++ break;
++
++ case OID_802_3_XMIT_HEARTBEAT_FAILURE:
++ DEBUG("%s: OID_802_3_XMIT_HEARTBEAT_FAILURE\n", __FUNCTION__);
++ /* TODO */
++ break;
++
++ case OID_802_3_XMIT_TIMES_CRS_LOST:
++ DEBUG("%s: OID_802_3_XMIT_TIMES_CRS_LOST\n", __FUNCTION__);
++ /* TODO */
++ break;
++
++ case OID_802_3_XMIT_LATE_COLLISIONS:
++ DEBUG("%s: OID_802_3_XMIT_LATE_COLLISIONS\n", __FUNCTION__);
++ /* TODO */
++ break;
++#endif /* RNDIS_OPTIONAL_STATS */
++
++#ifdef RNDIS_PM
++ /* power management OIDs (table 4-5) */
++ case OID_PNP_CAPABILITIES:
++ DEBUG("%s: OID_PNP_CAPABILITIES\n", __FUNCTION__);
++
++ /* just PM, and remote wakeup on link status change
++ * (not magic packet or pattern match)
++ */
++ length = sizeof (struct NDIS_PNP_CAPABILITIES);
++ memset (resp, 0, length);
++ {
++ struct NDIS_PNP_CAPABILITIES *caps = (void *) resp;
++
++ caps->Flags = NDIS_DEVICE_WAKE_UP_ENABLE;
++ caps->WakeUpCapabilities.MinLinkChangeWakeUp
++ = NdisDeviceStateD3;
++
++ /* FIXME then use usb_gadget_wakeup(), and
++ * set USB_CONFIG_ATT_WAKEUP in config desc
++ */
++ }
++ retval = 0;
++ break;
++ case OID_PNP_QUERY_POWER:
++ DEBUG("%s: OID_PNP_QUERY_POWER\n", __FUNCTION__);
++ /* sure, handle any power state that maps to USB suspend */
++ retval = 0;
++ break;
++#endif
++
++ default:
++ printk (KERN_WARNING "%s: query unknown OID 0x%08X\n",
++ __FUNCTION__, OID);
++ }
++
++ resp->InformationBufferOffset = __constant_cpu_to_le32 (16);
++ resp->InformationBufferLength = cpu_to_le32 (length);
++ resp->MessageLength = cpu_to_le32 (24 + length);
++ r->length = 24 + length;
++ return retval;
++}
++
++static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
++ rndis_resp_t *r)
++{
++ rndis_set_cmplt_type *resp;
++ int i, retval = -ENOTSUPP;
++ struct rndis_params *params;
++
++ if (!r)
++ return -ENOMEM;
++ resp = (rndis_set_cmplt_type *) r->buf;
++ if (!resp)
++ return -ENOMEM;
++
++ DEBUG("set OID %08x value, len %d:\n", OID, buf_len);
++ for (i = 0; i < buf_len; i += 16) {
++ DEBUG ("%03d: "
++ " %02x %02x %02x %02x"
++ " %02x %02x %02x %02x"
++ " %02x %02x %02x %02x"
++ " %02x %02x %02x %02x"
++ "\n",
++ i,
++ buf[i], buf [i+1],
++ buf[i+2], buf[i+3],
++ buf[i+4], buf [i+5],
++ buf[i+6], buf[i+7],
++ buf[i+8], buf [i+9],
++ buf[i+10], buf[i+11],
++ buf[i+12], buf [i+13],
++ buf[i+14], buf[i+15]);
++ }
++
++ switch (OID) {
++ case OID_GEN_CURRENT_PACKET_FILTER:
++ params = &rndis_per_dev_params [configNr];
++ retval = 0;
++
++ /* FIXME use these NDIS_PACKET_TYPE_* bitflags to
++ * filter packets in hard_start_xmit()
++ * NDIS_PACKET_TYPE_x == USB_CDC_PACKET_TYPE_x for x in:
++ * PROMISCUOUS, DIRECTED,
++ * MULTICAST, ALL_MULTICAST, BROADCAST
++ */
++ params->filter = cpu_to_le32p((u32 *)buf);
++ DEBUG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
++ __FUNCTION__, params->filter);
++
++ /* this call has a significant side effect: it's
++ * what makes the packet flow start and stop, like
++ * activating the CDC Ethernet altsetting.
++ */
++ if (params->filter) {
++ params->state = RNDIS_DATA_INITIALIZED;
++ netif_carrier_on(params->dev);
++ if (netif_running(params->dev))
++ netif_wake_queue (params->dev);
++ } else {
++ params->state = RNDIS_INITIALIZED;
++ netif_carrier_off (params->dev);
++ netif_stop_queue (params->dev);
++ }
++ break;
++
++ case OID_802_3_MULTICAST_LIST:
++ /* I think we can ignore this */
++ DEBUG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
++ retval = 0;
++ break;
++#if 0
++ case OID_GEN_RNDIS_CONFIG_PARAMETER:
++ {
++ struct rndis_config_parameter *param;
++ param = (struct rndis_config_parameter *) buf;
++ DEBUG("%s: OID_GEN_RNDIS_CONFIG_PARAMETER '%*s'\n",
++ __FUNCTION__,
++ min(cpu_to_le32(param->ParameterNameLength),80),
++ buf + param->ParameterNameOffset);
++ retval = 0;
++ }
++ break;
++#endif
++
++#ifdef RNDIS_PM
++ case OID_PNP_SET_POWER:
++ DEBUG ("OID_PNP_SET_POWER\n");
++ /* sure, handle any power state that maps to USB suspend */
++ retval = 0;
++ break;
++
++ case OID_PNP_ENABLE_WAKE_UP:
++ /* always-connected ... */
++ DEBUG ("OID_PNP_ENABLE_WAKE_UP\n");
++ retval = 0;
++ break;
++
++ // no PM resume patterns supported (specified where?)
++ // so OID_PNP_{ADD,REMOVE}_WAKE_UP_PATTERN always fails
++#endif
++
++ default:
++ printk (KERN_WARNING "%s: set unknown OID 0x%08X, size %d\n",
++ __FUNCTION__, OID, buf_len);
++ }
++
++ return retval;
++}
++
++/*
++ * Response Functions
++ */
++
++static int rndis_init_response (int configNr, rndis_init_msg_type *buf)
++{
++ rndis_init_cmplt_type *resp;
++ rndis_resp_t *r;
++
++ if (!rndis_per_dev_params [configNr].dev) return -ENOTSUPP;
++
++ r = rndis_add_response (configNr, sizeof (rndis_init_cmplt_type));
++
++ if (!r) return -ENOMEM;
++
++ resp = (rndis_init_cmplt_type *) r->buf;
++
++ if (!resp) return -ENOMEM;
++
++ resp->MessageType = __constant_cpu_to_le32 (
++ REMOTE_NDIS_INITIALIZE_CMPLT);
++ resp->MessageLength = __constant_cpu_to_le32 (52);
++ resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
++ resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
++ resp->MajorVersion = __constant_cpu_to_le32 (RNDIS_MAJOR_VERSION);
++ resp->MinorVersion = __constant_cpu_to_le32 (RNDIS_MINOR_VERSION);
++ resp->DeviceFlags = __constant_cpu_to_le32 (RNDIS_DF_CONNECTIONLESS);
++ resp->Medium = __constant_cpu_to_le32 (RNDIS_MEDIUM_802_3);
++ resp->MaxPacketsPerTransfer = __constant_cpu_to_le32 (1);
++ resp->MaxTransferSize = cpu_to_le32 (
++ rndis_per_dev_params [configNr].dev->mtu
++ + sizeof (struct ethhdr)
++ + sizeof (struct rndis_packet_msg_type)
++ + 22);
++ resp->PacketAlignmentFactor = __constant_cpu_to_le32 (0);
++ resp->AFListOffset = __constant_cpu_to_le32 (0);
++ resp->AFListSize = __constant_cpu_to_le32 (0);
++
++ if (rndis_per_dev_params [configNr].ack)
++ rndis_per_dev_params [configNr].ack (
++ rndis_per_dev_params [configNr].dev);
++
++ return 0;
++}
++
++static int rndis_query_response (int configNr, rndis_query_msg_type *buf)
++{
++ rndis_query_cmplt_type *resp;
++ rndis_resp_t *r;
++
++ // DEBUG("%s: OID = %08X\n", __FUNCTION__, cpu_to_le32(buf->OID));
++ if (!rndis_per_dev_params [configNr].dev) return -ENOTSUPP;
++
++ /*
++ * we need more memory:
++ * oid_supported_list is the largest answer
++ */
++ r = rndis_add_response (configNr, sizeof (oid_supported_list));
++
++ if (!r) return -ENOMEM;
++ resp = (rndis_query_cmplt_type *) r->buf;
++
++ if (!resp) return -ENOMEM;
++
++ resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_QUERY_CMPLT);
++ resp->MessageLength = __constant_cpu_to_le32 (24);
++ resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
++
++ if (gen_ndis_query_resp (configNr, cpu_to_le32 (buf->OID), r)) {
++ /* OID not supported */
++ resp->Status = __constant_cpu_to_le32 (
++ RNDIS_STATUS_NOT_SUPPORTED);
++ resp->InformationBufferLength = __constant_cpu_to_le32 (0);
++ resp->InformationBufferOffset = __constant_cpu_to_le32 (0);
++ } else
++ resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
++
++ if (rndis_per_dev_params [configNr].ack)
++ rndis_per_dev_params [configNr].ack (
++ rndis_per_dev_params [configNr].dev);
++ return 0;
++}
++
++static int rndis_set_response (int configNr, rndis_set_msg_type *buf)
++{
++ u32 BufLength, BufOffset;
++ rndis_set_cmplt_type *resp;
++ rndis_resp_t *r;
++
++ r = rndis_add_response (configNr, sizeof (rndis_set_cmplt_type));
++
++ if (!r) return -ENOMEM;
++ resp = (rndis_set_cmplt_type *) r->buf;
++ if (!resp) return -ENOMEM;
++
++ BufLength = cpu_to_le32 (buf->InformationBufferLength);
++ BufOffset = cpu_to_le32 (buf->InformationBufferOffset);
++
++#ifdef VERBOSE
++ DEBUG("%s: Length: %d\n", __FUNCTION__, BufLength);
++ DEBUG("%s: Offset: %d\n", __FUNCTION__, BufOffset);
++ DEBUG("%s: InfoBuffer: ", __FUNCTION__);
++
++ for (i = 0; i < BufLength; i++) {
++ DEBUG ("%02x ", *(((u8 *) buf) + i + 8 + BufOffset));
++ }
++
++ DEBUG ("\n");
++#endif
++
++ resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_SET_CMPLT);
++ resp->MessageLength = __constant_cpu_to_le32 (16);
++ resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
++ if (gen_ndis_set_resp (configNr, cpu_to_le32 (buf->OID),
++ ((u8 *) buf) + 8 + BufOffset, BufLength, r))
++ resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_NOT_SUPPORTED);
++ else resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
++
++ if (rndis_per_dev_params [configNr].ack)
++ rndis_per_dev_params [configNr].ack (
++ rndis_per_dev_params [configNr].dev);
++
++ return 0;
++}
++
++static int rndis_reset_response (int configNr, rndis_reset_msg_type *buf)
++{
++ rndis_reset_cmplt_type *resp;
++ rndis_resp_t *r;
++
++ r = rndis_add_response (configNr, sizeof (rndis_reset_cmplt_type));
++
++ if (!r) return -ENOMEM;
++ resp = (rndis_reset_cmplt_type *) r->buf;
++ if (!resp) return -ENOMEM;
++
++ resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_RESET_CMPLT);
++ resp->MessageLength = __constant_cpu_to_le32 (16);
++ resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
++ /* resent information */
++ resp->AddressingReset = __constant_cpu_to_le32 (1);
++
++ if (rndis_per_dev_params [configNr].ack)
++ rndis_per_dev_params [configNr].ack (
++ rndis_per_dev_params [configNr].dev);
++
++ return 0;
++}
++
++static int rndis_keepalive_response (int configNr,
++ rndis_keepalive_msg_type *buf)
++{
++ rndis_keepalive_cmplt_type *resp;
++ rndis_resp_t *r;
++
++ /* host "should" check only in RNDIS_DATA_INITIALIZED state */
++
++ r = rndis_add_response (configNr, sizeof (rndis_keepalive_cmplt_type));
++ resp = (rndis_keepalive_cmplt_type *) r->buf;
++ if (!resp) return -ENOMEM;
++
++ resp->MessageType = __constant_cpu_to_le32 (
++ REMOTE_NDIS_KEEPALIVE_CMPLT);
++ resp->MessageLength = __constant_cpu_to_le32 (16);
++ resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
++ resp->Status = __constant_cpu_to_le32 (RNDIS_STATUS_SUCCESS);
++
++ if (rndis_per_dev_params [configNr].ack)
++ rndis_per_dev_params [configNr].ack (
++ rndis_per_dev_params [configNr].dev);
++
++ return 0;
++}
++
++
++/*
++ * Device to Host Communication
++ */
++static int rndis_indicate_status_msg (int configNr, u32 status)
++{
++ rndis_indicate_status_msg_type *resp;
++ rndis_resp_t *r;
++
++ if (rndis_per_dev_params [configNr].state == RNDIS_UNINITIALIZED)
++ return -ENOTSUPP;
++
++ r = rndis_add_response (configNr,
++ sizeof (rndis_indicate_status_msg_type));
++ if (!r) return -ENOMEM;
++
++ resp = (rndis_indicate_status_msg_type *) r->buf;
++ if (!resp) return -ENOMEM;
++
++ resp->MessageType = __constant_cpu_to_le32 (
++ REMOTE_NDIS_INDICATE_STATUS_MSG);
++ resp->MessageLength = __constant_cpu_to_le32 (20);
++ resp->Status = cpu_to_le32 (status);
++ resp->StatusBufferLength = __constant_cpu_to_le32 (0);
++ resp->StatusBufferOffset = __constant_cpu_to_le32 (0);
++
++ if (rndis_per_dev_params [configNr].ack)
++ rndis_per_dev_params [configNr].ack (
++ rndis_per_dev_params [configNr].dev);
++ return 0;
++}
++
++int rndis_signal_connect (int configNr)
++{
++ rndis_per_dev_params [configNr].media_state
++ = NDIS_MEDIA_STATE_CONNECTED;
++ return rndis_indicate_status_msg (configNr,
++ RNDIS_STATUS_MEDIA_CONNECT);
++}
++
++int rndis_signal_disconnect (int configNr)
++{
++ rndis_per_dev_params [configNr].media_state
++ = NDIS_MEDIA_STATE_DISCONNECTED;
++ return rndis_indicate_status_msg (configNr,
++ RNDIS_STATUS_MEDIA_DISCONNECT);
++}
++
++void rndis_set_host_mac (int configNr, const u8 *addr)
++{
++ rndis_per_dev_params [configNr].host_mac = addr;
++}
++
++/*
++ * Message Parser
++ */
++int rndis_msg_parser (u8 configNr, u8 *buf)
++{
++ u32 MsgType, MsgLength, *tmp;
++ struct rndis_params *params;
++
++ if (!buf)
++ return -ENOMEM;
++
++ tmp = (u32 *) buf;
++ MsgType = cpu_to_le32p(tmp++);
++ MsgLength = cpu_to_le32p(tmp++);
++
++ if (configNr >= RNDIS_MAX_CONFIGS)
++ return -ENOTSUPP;
++ params = &rndis_per_dev_params [configNr];
++
++ /* For USB: responses may take up to 10 seconds */
++ switch (MsgType)
++ {
++ case REMOTE_NDIS_INITIALIZE_MSG:
++ DEBUG("%s: REMOTE_NDIS_INITIALIZE_MSG\n",
++ __FUNCTION__ );
++ params->state = RNDIS_INITIALIZED;
++ return rndis_init_response (configNr,
++ (rndis_init_msg_type *) buf);
++
++ case REMOTE_NDIS_HALT_MSG:
++ DEBUG("%s: REMOTE_NDIS_HALT_MSG\n",
++ __FUNCTION__ );
++ params->state = RNDIS_UNINITIALIZED;
++ if (params->dev) {
++ netif_carrier_off (params->dev);
++ netif_stop_queue (params->dev);
++ }
++ return 0;
++
++ case REMOTE_NDIS_QUERY_MSG:
++ return rndis_query_response (configNr,
++ (rndis_query_msg_type *) buf);
++
++ case REMOTE_NDIS_SET_MSG:
++ return rndis_set_response (configNr,
++ (rndis_set_msg_type *) buf);
++
++ case REMOTE_NDIS_RESET_MSG:
++ DEBUG("%s: REMOTE_NDIS_RESET_MSG\n",
++ __FUNCTION__ );
++ return rndis_reset_response (configNr,
++ (rndis_reset_msg_type *) buf);
++
++ case REMOTE_NDIS_KEEPALIVE_MSG:
++ /* For USB: host does this every 5 seconds */
++#ifdef VERBOSE
++ DEBUG("%s: REMOTE_NDIS_KEEPALIVE_MSG\n",
++ __FUNCTION__ );
++#endif
++ return rndis_keepalive_response (configNr,
++ (rndis_keepalive_msg_type *)
++ buf);
++
++ default:
++ /* At least Windows XP emits some undefined RNDIS messages.
++ * In one case those messages seemed to relate to the host
++ * suspending itself.
++ */
++ printk (KERN_WARNING
++ "%s: unknown RNDIS message 0x%08X len %d\n",
++ __FUNCTION__ , MsgType, MsgLength);
++ {
++ unsigned i;
++ for (i = 0; i < MsgLength; i += 16) {
++ DEBUG ("%03d: "
++ " %02x %02x %02x %02x"
++ " %02x %02x %02x %02x"
++ " %02x %02x %02x %02x"
++ " %02x %02x %02x %02x"
++ "\n",
++ i,
++ buf[i], buf [i+1],
++ buf[i+2], buf[i+3],
++ buf[i+4], buf [i+5],
++ buf[i+6], buf[i+7],
++ buf[i+8], buf [i+9],
++ buf[i+10], buf[i+11],
++ buf[i+12], buf [i+13],
++ buf[i+14], buf[i+15]);
++ }
++ }
++ break;
++ }
++
++ return -ENOTSUPP;
++}
++
++int rndis_register (int (* rndis_control_ack) (struct net_device *))
++{
++ u8 i;
++
++ for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
++ if (!rndis_per_dev_params [i].used) {
++ rndis_per_dev_params [i].used = 1;
++ rndis_per_dev_params [i].ack = rndis_control_ack;
++ DEBUG("%s: configNr = %d\n", __FUNCTION__, i);
++ return i;
++ }
++ }
++ DEBUG("failed\n");
++
++ return -1;
++}
++
++void rndis_deregister (int configNr)
++{
++ DEBUG("%s: \n", __FUNCTION__ );
++
++ if (configNr >= RNDIS_MAX_CONFIGS) return;
++ rndis_per_dev_params [configNr].used = 0;
++
++ return;
++}
++
++int rndis_set_param_dev (u8 configNr, struct net_device *dev,
++ struct net_device_stats *stats)
++{
++ DEBUG("%s:\n", __FUNCTION__ );
++ if (!dev || !stats) return -1;
++ if (configNr >= RNDIS_MAX_CONFIGS) return -1;
++
++ rndis_per_dev_params [configNr].dev = dev;
++ rndis_per_dev_params [configNr].stats = stats;
++
++ return 0;
++}
++
++int rndis_set_param_vendor (u8 configNr, u32 vendorID, const char *vendorDescr)
++{
++ DEBUG("%s:\n", __FUNCTION__ );
++ if (!vendorDescr) return -1;
++ if (configNr >= RNDIS_MAX_CONFIGS) return -1;
++
++ rndis_per_dev_params [configNr].vendorID = vendorID;
++ rndis_per_dev_params [configNr].vendorDescr = vendorDescr;
++
++ return 0;
++}
++
++int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed)
++{
++ DEBUG("%s:\n", __FUNCTION__ );
++ if (configNr >= RNDIS_MAX_CONFIGS) return -1;
++
++ rndis_per_dev_params [configNr].medium = medium;
++ rndis_per_dev_params [configNr].speed = speed;
++
++ return 0;
++}
++
++void rndis_add_hdr (struct sk_buff *skb)
++{
++ if (!skb) return;
++ skb_push (skb, sizeof (struct rndis_packet_msg_type));
++ memset (skb->data, 0, sizeof (struct rndis_packet_msg_type));
++ *((u32 *) skb->data) = __constant_cpu_to_le32 (1);
++ *((u32 *) skb->data + 1) = cpu_to_le32(skb->len);
++ *((u32 *) skb->data + 2) = __constant_cpu_to_le32 (36);
++ *((u32 *) skb->data + 3) = cpu_to_le32(skb->len - 44);
++
++ return;
++}
++
++void rndis_free_response (int configNr, u8 *buf)
++{
++ rndis_resp_t *r;
++ struct list_head *act, *tmp;
++
++ list_for_each_safe (act, tmp,
++ &(rndis_per_dev_params [configNr].resp_queue))
++ {
++ r = list_entry (act, rndis_resp_t, list);
++ if (r && r->buf == buf) {
++ list_del (&r->list);
++ kfree (r);
++ }
++ }
++}
++
++u8 *rndis_get_next_response (int configNr, u32 *length)
++{
++ rndis_resp_t *r;
++ struct list_head *act, *tmp;
++
++ if (!length) return NULL;
++
++ list_for_each_safe (act, tmp,
++ &(rndis_per_dev_params [configNr].resp_queue))
++ {
++ r = list_entry (act, rndis_resp_t, list);
++ if (!r->send) {
++ r->send = 1;
++ *length = r->length;
++ return r->buf;
++ }
++ }
++
++ return NULL;
++}
++
++static rndis_resp_t *rndis_add_response (int configNr, u32 length)
++{
++ rndis_resp_t *r;
++
++ r = kmalloc (sizeof (rndis_resp_t) + length, GFP_ATOMIC);
++ if (!r) return NULL;
++
++ r->buf = (u8 *) (r + 1);
++ r->length = length;
++ r->send = 0;
++
++ list_add_tail (&r->list,
++ &(rndis_per_dev_params [configNr].resp_queue));
++ return r;
++}
++
++int rndis_rm_hdr (u8 *buf, u32 *length)
++{
++ u32 i, messageLen, dataOffset, *tmp;
++
++ tmp = (u32 *) buf;
++
++ if (!buf || !length) return -1;
++ if (cpu_to_le32p(tmp++) != 1) return -1;
++
++ messageLen = cpu_to_le32p(tmp++);
++ dataOffset = cpu_to_le32p(tmp++) + 8;
++
++ if (messageLen < dataOffset || messageLen > *length) return -1;
++
++ for (i = dataOffset; i < messageLen; i++)
++ buf [i - dataOffset] = buf [i];
++
++ *length = messageLen - dataOffset;
++
++ return 0;
++}
++
++#ifdef CONFIG_USB_GADGET_DEBUG_FILES
++
++static int rndis_proc_read (char *page, char **start, off_t off, int count, int *eof,
++ void *data)
++{
++ char *out = page;
++ int len;
++ rndis_params *param = (rndis_params *) data;
++
++ out += snprintf (out, count,
++ "Config Nr. %d\n"
++ "used : %s\n"
++ "state : %s\n"
++ "medium : 0x%08X\n"
++ "speed : %d\n"
++ "cable : %s\n"
++ "vendor ID : 0x%08X\n"
++ "vendor : %s\n",
++ param->confignr, (param->used) ? "y" : "n",
++ ({ char *s = "?";
++ switch (param->state) {
++ case RNDIS_UNINITIALIZED:
++ s = "RNDIS_UNINITIALIZED"; break;
++ case RNDIS_INITIALIZED:
++ s = "RNDIS_INITIALIZED"; break;
++ case RNDIS_DATA_INITIALIZED:
++ s = "RNDIS_DATA_INITIALIZED"; break;
++ }; s; }),
++ param->medium,
++ (param->media_state) ? 0 : param->speed*100,
++ (param->media_state) ? "disconnected" : "connected",
++ param->vendorID, param->vendorDescr);
++
++ len = out - page;
++ len -= off;
++
++ if (len < count) {
++ *eof = 1;
++ if (len <= 0)
++ return 0;
++ } else
++ len = count;
++
++ *start = page + off;
++ return len;
++}
++
++static int rndis_proc_write (struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
++{
++ rndis_params *p = data;
++ u32 speed = 0;
++ int i, fl_speed = 0;
++
++ for (i = 0; i < count; i++) {
++ char c;
++ if (get_user(c, buffer))
++ return -EFAULT;
++ switch (c) {
++ case '0':
++ case '1':
++ case '2':
++ case '3':
++ case '4':
++ case '5':
++ case '6':
++ case '7':
++ case '8':
++ case '9':
++ fl_speed = 1;
++ speed = speed*10 + c - '0';
++ break;
++ case 'C':
++ case 'c':
++ rndis_signal_connect (p->confignr);
++ break;
++ case 'D':
++ case 'd':
++ rndis_signal_disconnect(p->confignr);
++ break;
++ default:
++ if (fl_speed) p->speed = speed;
++ else DEBUG ("%c is not valid\n", c);
++ break;
++ }
++
++ buffer++;
++ }
++
++ return count;
++}
++
++#define NAME_TEMPLATE "driver/rndis-%03d"
++
++static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
++
++#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
++
++
++int __init rndis_init (void)
++{
++ u8 i;
++
++ for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
++#ifdef CONFIG_USB_GADGET_DEBUG_FILES
++ char name [20];
++
++ sprintf (name, NAME_TEMPLATE, i);
++ if (!(rndis_connect_state [i]
++ = create_proc_entry (name, 0660, NULL)))
++ {
++ DEBUG ("%s :remove entries", __FUNCTION__);
++ while (i) {
++ sprintf (name, NAME_TEMPLATE, --i);
++ remove_proc_entry (name, NULL);
++ }
++ DEBUG ("\n");
++ return -EIO;
++ }
++
++ rndis_connect_state [i]->nlink = 1;
++ rndis_connect_state [i]->write_proc = rndis_proc_write;
++ rndis_connect_state [i]->read_proc = rndis_proc_read;
++ rndis_connect_state [i]->data = (void *)
++ (rndis_per_dev_params + i);
++#endif
++ rndis_per_dev_params [i].confignr = i;
++ rndis_per_dev_params [i].used = 0;
++ rndis_per_dev_params [i].state = RNDIS_UNINITIALIZED;
++ rndis_per_dev_params [i].media_state
++ = NDIS_MEDIA_STATE_DISCONNECTED;
++ INIT_LIST_HEAD (&(rndis_per_dev_params [i].resp_queue));
++ }
++
++ return 0;
++}
++
++void rndis_exit (void)
++{
++#ifdef CONFIG_USB_GADGET_DEBUG_FILES
++ u8 i;
++ char name [20];
++
++ for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
++ sprintf (name, NAME_TEMPLATE, i);
++ remove_proc_entry (name, NULL);
++ }
++#endif
++}
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/rndis.h kernel/drivers/usb/gadget/rndis.h
+--- /tmp/kernel/drivers/usb/gadget/rndis.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/rndis.h 2005-04-22 17:53:19.504528119 +0200
+@@ -0,0 +1,348 @@
++/*
++ * RNDIS Definitions for Remote NDIS
++ *
++ * Version: $Id: rndis.h,v 1.15 2004/03/25 21:33:46 robert Exp $
++ *
++ * Authors: Benedikt Spranger, Pengutronix
++ * Robert Schwebel, Pengutronix
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This software was originally developed in conformance with
++ * Microsoft's Remote NDIS Specification License Agreement.
++ */
++
++#ifndef _LINUX_RNDIS_H
++#define _LINUX_RNDIS_H
++
++#include "ndis.h"
++
++#define RNDIS_MAXIMUM_FRAME_SIZE 1518
++#define RNDIS_MAX_TOTAL_SIZE 1558
++
++/* Remote NDIS Versions */
++#define RNDIS_MAJOR_VERSION 1
++#define RNDIS_MINOR_VERSION 0
++
++/* Status Values */
++#define RNDIS_STATUS_SUCCESS 0x00000000U /* Success */
++#define RNDIS_STATUS_FAILURE 0xC0000001U /* Unspecified error */
++#define RNDIS_STATUS_INVALID_DATA 0xC0010015U /* Invalid data */
++#define RNDIS_STATUS_NOT_SUPPORTED 0xC00000BBU /* Unsupported request */
++#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000BU /* Device connected */
++#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000CU /* Device disconnected */
++/* For all not specified status messages:
++ * RNDIS_STATUS_Xxx -> NDIS_STATUS_Xxx
++ */
++
++/* Message Set for Connectionless (802.3) Devices */
++#define REMOTE_NDIS_INITIALIZE_MSG 0x00000002U /* Initialize device */
++#define REMOTE_NDIS_HALT_MSG 0x00000003U
++#define REMOTE_NDIS_QUERY_MSG 0x00000004U
++#define REMOTE_NDIS_SET_MSG 0x00000005U
++#define REMOTE_NDIS_RESET_MSG 0x00000006U
++#define REMOTE_NDIS_INDICATE_STATUS_MSG 0x00000007U
++#define REMOTE_NDIS_KEEPALIVE_MSG 0x00000008U
++
++/* Message completion */
++#define REMOTE_NDIS_INITIALIZE_CMPLT 0x80000002U
++#define REMOTE_NDIS_QUERY_CMPLT 0x80000004U
++#define REMOTE_NDIS_SET_CMPLT 0x80000005U
++#define REMOTE_NDIS_RESET_CMPLT 0x80000006U
++#define REMOTE_NDIS_KEEPALIVE_CMPLT 0x80000008U
++
++/* Device Flags */
++#define RNDIS_DF_CONNECTIONLESS 0x00000001U
++#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002U
++
++#define RNDIS_MEDIUM_802_3 0x00000000U
++
++/* from drivers/net/sk98lin/h/skgepnmi.h */
++#define OID_PNP_CAPABILITIES 0xFD010100
++#define OID_PNP_SET_POWER 0xFD010101
++#define OID_PNP_QUERY_POWER 0xFD010102
++#define OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103
++#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104
++#define OID_PNP_ENABLE_WAKE_UP 0xFD010106
++
++
++/* supported OIDs */
++static const u32 oid_supported_list [] =
++{
++ /* the general stuff */
++ OID_GEN_SUPPORTED_LIST,
++ OID_GEN_HARDWARE_STATUS,
++ OID_GEN_MEDIA_SUPPORTED,
++ OID_GEN_MEDIA_IN_USE,
++ OID_GEN_MAXIMUM_FRAME_SIZE,
++ OID_GEN_LINK_SPEED,
++ OID_GEN_TRANSMIT_BLOCK_SIZE,
++ OID_GEN_RECEIVE_BLOCK_SIZE,
++ OID_GEN_VENDOR_ID,
++ OID_GEN_VENDOR_DESCRIPTION,
++ OID_GEN_VENDOR_DRIVER_VERSION,
++ OID_GEN_CURRENT_PACKET_FILTER,
++ OID_GEN_MAXIMUM_TOTAL_SIZE,
++ OID_GEN_MEDIA_CONNECT_STATUS,
++ OID_GEN_PHYSICAL_MEDIUM,
++#if 0
++ OID_GEN_RNDIS_CONFIG_PARAMETER,
++#endif
++
++ /* the statistical stuff */
++ OID_GEN_XMIT_OK,
++ OID_GEN_RCV_OK,
++ OID_GEN_XMIT_ERROR,
++ OID_GEN_RCV_ERROR,
++ OID_GEN_RCV_NO_BUFFER,
++#ifdef RNDIS_OPTIONAL_STATS
++ OID_GEN_DIRECTED_BYTES_XMIT,
++ OID_GEN_DIRECTED_FRAMES_XMIT,
++ OID_GEN_MULTICAST_BYTES_XMIT,
++ OID_GEN_MULTICAST_FRAMES_XMIT,
++ OID_GEN_BROADCAST_BYTES_XMIT,
++ OID_GEN_BROADCAST_FRAMES_XMIT,
++ OID_GEN_DIRECTED_BYTES_RCV,
++ OID_GEN_DIRECTED_FRAMES_RCV,
++ OID_GEN_MULTICAST_BYTES_RCV,
++ OID_GEN_MULTICAST_FRAMES_RCV,
++ OID_GEN_BROADCAST_BYTES_RCV,
++ OID_GEN_BROADCAST_FRAMES_RCV,
++ OID_GEN_RCV_CRC_ERROR,
++ OID_GEN_TRANSMIT_QUEUE_LENGTH,
++#endif /* RNDIS_OPTIONAL_STATS */
++
++ /* mandatory 802.3 */
++ /* the general stuff */
++ OID_802_3_PERMANENT_ADDRESS,
++ OID_802_3_CURRENT_ADDRESS,
++ OID_802_3_MULTICAST_LIST,
++ OID_802_3_MAC_OPTIONS,
++ OID_802_3_MAXIMUM_LIST_SIZE,
++
++ /* the statistical stuff */
++ OID_802_3_RCV_ERROR_ALIGNMENT,
++ OID_802_3_XMIT_ONE_COLLISION,
++ OID_802_3_XMIT_MORE_COLLISIONS,
++#ifdef RNDIS_OPTIONAL_STATS
++ OID_802_3_XMIT_DEFERRED,
++ OID_802_3_XMIT_MAX_COLLISIONS,
++ OID_802_3_RCV_OVERRUN,
++ OID_802_3_XMIT_UNDERRUN,
++ OID_802_3_XMIT_HEARTBEAT_FAILURE,
++ OID_802_3_XMIT_TIMES_CRS_LOST,
++ OID_802_3_XMIT_LATE_COLLISIONS,
++#endif /* RNDIS_OPTIONAL_STATS */
++
++#ifdef RNDIS_PM
++ /* PM and wakeup are mandatory for USB: */
++
++ /* power management */
++ OID_PNP_CAPABILITIES,
++ OID_PNP_QUERY_POWER,
++ OID_PNP_SET_POWER,
++
++ /* wake up host */
++ OID_PNP_ENABLE_WAKE_UP,
++ OID_PNP_ADD_WAKE_UP_PATTERN,
++ OID_PNP_REMOVE_WAKE_UP_PATTERN,
++#endif
++};
++
++
++typedef struct rndis_init_msg_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 RequestID;
++ u32 MajorVersion;
++ u32 MinorVersion;
++ u32 MaxTransferSize;
++} rndis_init_msg_type;
++
++typedef struct rndis_init_cmplt_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 RequestID;
++ u32 Status;
++ u32 MajorVersion;
++ u32 MinorVersion;
++ u32 DeviceFlags;
++ u32 Medium;
++ u32 MaxPacketsPerTransfer;
++ u32 MaxTransferSize;
++ u32 PacketAlignmentFactor;
++ u32 AFListOffset;
++ u32 AFListSize;
++} rndis_init_cmplt_type;
++
++typedef struct rndis_halt_msg_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 RequestID;
++} rndis_halt_msg_type;
++
++typedef struct rndis_query_msg_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 RequestID;
++ u32 OID;
++ u32 InformationBufferLength;
++ u32 InformationBufferOffset;
++ u32 DeviceVcHandle;
++} rndis_query_msg_type;
++
++typedef struct rndis_query_cmplt_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 RequestID;
++ u32 Status;
++ u32 InformationBufferLength;
++ u32 InformationBufferOffset;
++} rndis_query_cmplt_type;
++
++typedef struct rndis_set_msg_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 RequestID;
++ u32 OID;
++ u32 InformationBufferLength;
++ u32 InformationBufferOffset;
++ u32 DeviceVcHandle;
++} rndis_set_msg_type;
++
++typedef struct rndis_set_cmplt_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 RequestID;
++ u32 Status;
++} rndis_set_cmplt_type;
++
++typedef struct rndis_reset_msg_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 Reserved;
++} rndis_reset_msg_type;
++
++typedef struct rndis_reset_cmplt_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 Status;
++ u32 AddressingReset;
++} rndis_reset_cmplt_type;
++
++typedef struct rndis_indicate_status_msg_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 Status;
++ u32 StatusBufferLength;
++ u32 StatusBufferOffset;
++} rndis_indicate_status_msg_type;
++
++typedef struct rndis_keepalive_msg_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 RequestID;
++} rndis_keepalive_msg_type;
++
++typedef struct rndis_keepalive_cmplt_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 RequestID;
++ u32 Status;
++} rndis_keepalive_cmplt_type;
++
++struct rndis_packet_msg_type
++{
++ u32 MessageType;
++ u32 MessageLength;
++ u32 DataOffset;
++ u32 DataLength;
++ u32 OOBDataOffset;
++ u32 OOBDataLength;
++ u32 NumOOBDataElements;
++ u32 PerPacketInfoOffset;
++ u32 PerPacketInfoLength;
++ u32 VcHandle;
++ u32 Reserved;
++};
++
++struct rndis_config_parameter
++{
++ u32 ParameterNameOffset;
++ u32 ParameterNameLength;
++ u32 ParameterType;
++ u32 ParameterValueOffset;
++ u32 ParameterValueLength;
++};
++
++/* implementation specific */
++enum rndis_state
++{
++ RNDIS_UNINITIALIZED,
++ RNDIS_INITIALIZED,
++ RNDIS_DATA_INITIALIZED,
++};
++
++typedef struct rndis_resp_t
++{
++ struct list_head list;
++ u8 *buf;
++ u32 length;
++ int send;
++} rndis_resp_t;
++
++typedef struct rndis_params
++{
++ u8 confignr;
++ int used;
++ enum rndis_state state;
++ u32 filter;
++ u32 medium;
++ u32 speed;
++ u32 media_state;
++ const u8 *host_mac;
++ struct net_device *dev;
++ struct net_device_stats *stats;
++ u32 vendorID;
++ const char *vendorDescr;
++ int (*ack) (struct net_device *);
++ struct list_head resp_queue;
++} rndis_params;
++
++/* RNDIS Message parser and other useless functions */
++int rndis_msg_parser (u8 configNr, u8 *buf);
++int rndis_register (int (*rndis_control_ack) (struct net_device *));
++void rndis_deregister (int configNr);
++int rndis_set_param_dev (u8 configNr, struct net_device *dev,
++ struct net_device_stats *stats);
++int rndis_set_param_vendor (u8 configNr, u32 vendorID,
++ const char *vendorDescr);
++int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
++void rndis_add_hdr (struct sk_buff *skb);
++int rndis_rm_hdr (u8 *buf, u32 *length);
++u8 *rndis_get_next_response (int configNr, u32 *length);
++void rndis_free_response (int configNr, u8 *buf);
++
++int rndis_signal_connect (int configNr);
++int rndis_signal_disconnect (int configNr);
++int rndis_state (int configNr);
++extern void rndis_set_host_mac (int configNr, const u8 *addr);
++
++int __init rndis_init (void);
++void rndis_exit (void);
++
++#endif /* _LINUX_RNDIS_H */
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/superh_udc.c kernel/drivers/usb/gadget/superh_udc.c
+--- /tmp/kernel/drivers/usb/gadget/superh_udc.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/superh_udc.c 2005-04-22 17:53:19.510527142 +0200
+@@ -0,0 +1,1819 @@
++/*
++ * Renesas SuperH USB 1.1 device controller (found on SH7705, SH7727...)
++ *
++ * Copyright (C) 2003 Renesas Technology Europe Limited
++ * Copyright (C) 2003 Julian Back (jback@mpc-data.co.uk), MPC Data Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++/*
++ * This is a driver for the USB Device Controller found on Renesas SH
++ * processors. This is a full-speed controller which has four
++ * endpoints in a single fixed configuration.
++ *
++ * Limitations
++ *
++ * Only tested on SH7705. Mostly tested with Mass Storage gadget
++ * using Bulk-Only Transport. It has been tested with Linux 2.4,
++ * Linux 2.6, Windows 2000 and Windows XP hosts.
++ *
++ * DMA is not (yet) implemented.
++ *
++ * Handling of application stalls is tricky. We set a bit to stall an
++ * endpoint. When the host tries to access the ep it gets a stall and
++ * another stall bit is latched by the device. The host clears the
++ * stall with a clear feature but the hardware doesn't inform us, the
++ * latched bit is cleared but not the bit we have set, so the next
++ * time the host accesses the ep it will get another stall and the
++ * latch will be set again unless we have cleared our stall bit. The
++ * solution adopted in this driver is to use a timer to clear the
++ * application stall bit some time after setting the stall. This
++ * seems to work most of the time but is not 100% reliable. Because
++ * of this it is best to avoid USB protocols that require the USB
++ * device to stall the host. Unfortunately USB mass storage does
++ * require the device to stall when it gets unsupported commands,
++ * Linux hosts don't send any of these unsupported commands but
++ * Windows hosts do.
++ *
++ * Another place where the hardware is too clever is in the handling
++ * of setup packets. Many setup packets including SET_INTERFACE and
++ * SET_CONFIGURATION are handled by the hardware without informing the
++ * driver software. But we need to inform the gadget driver of at
++ * least one of these as it uses this to kick off its data processing.
++ * The solution adopted is that after we have received N setup packets
++ * following a bus reset a fake SET_CONFIGURATION is sent to the
++ * gadget. We also have to arrange things so that the reply to the
++ * fake packet is not sent out.
++ *
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/ioport.h>
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/proc_fs.h>
++#include <linux/mm.h>
++
++#include <asm/atomic.h>
++#include <asm/byteorder.h>
++#include <asm/dma.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++#include <asm/unaligned.h>
++
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++#undef DEBUG
++#undef VERY_NOISY
++
++#define DRIVER_DESC "SuperH USB Peripheral Controller"
++#define DRIVER_VERSION "alpha (11 November 2003)"
++
++#ifdef USE_DMA
++#error "DMA not supported"
++#endif
++
++static const char driver_name [] = "superh_udc";
++static const char driver_desc [] = DRIVER_DESC;
++
++static const char ep0name [] = "ep0";
++static const char *ep_name [] = {
++ ep0name,
++ "ep1out-bulk",
++ "ep2in-bulk",
++ "ep3in-bulk",
++};
++
++static struct superh_udc *the_controller;
++
++#include "superh_udc.h"
++
++/* High priority interrupts */
++#define F0_HIGH (EP1_FULL | EP2_TR | EP2_EMPTY )
++#define F1_HIGH (0)
++
++/* Low priority interrupts */
++#define F0_LOW (BRST | SETUP_TS | EP0o_TS | EP0i_TR | EP0i_TS)
++#define F1_LOW (EP3_TR | EP3_TS | VBUSF)
++
++/* How long to leave the stall bit set - this value is quite critical
++ * to making stalls work. Unfortunately it doesn't seem possible to
++ * get a value that will work reliably with both fast and slow
++ * machines.
++ */
++#define STALL_TIME (HZ/75)
++
++/* Number of endpoints to check in the unstall timer. It should not
++ * be necessary to unstall bulk endpoints using the timer as long as
++ * the gadget code is aware that this device cannot stall properly
++ * (see the file backed storage gadget for an example). But if the
++ * UDC driver stalls ep0 due to a bad SETUP then the timer is still
++ * required otherwise the stall will never get cleared. If it is
++ * necessary to unstall all endpoints using the timer then set this to
++ * 4.
++ */
++#define EP_TO_UNSTALL 1
++
++/* Number of packets to wait for before sending a fake
++ * SET_CONFIGURATION to the gadget driver
++ */
++#define DEFAULT_SETUP_COUNT 7
++#define RESET_SETUP_COUNT 2
++
++/* How long to wait for the number of packets specified above */
++#define SETUP_TIME (HZ/10 )
++
++static void superh_ep_fifo_flush(struct usb_ep *_ep);
++static void stop_activity(struct superh_udc *dev, struct usb_gadget_driver *driver);
++static int superh_ep_set_halt(struct usb_ep *_ep, int value);
++static void udc_timer(unsigned long _dev);
++static struct superh_request* process_ep_req(struct superh_ep *ep,
++ struct superh_request *req);
++static void done(struct superh_ep *ep, struct superh_request *req, int status);
++
++/*
++ * IO
++ */
++
++static inline void and_b(u8 mask, unsigned long addr)
++{
++ ctrl_outb(ctrl_inb(addr) & mask, addr);
++}
++
++
++static inline void or_b(u8 mask, unsigned long addr)
++{
++ ctrl_outb(ctrl_inb(addr) | mask, addr);
++}
++
++
++static inline void ep0_idle (struct superh_udc *dev)
++{
++ DBG(DBG_VERY_NOISY, "ep0_idle\n");
++ dev->ep0state = EP0_IDLE;
++}
++
++
++static void init_udc_timer(struct superh_udc *dev)
++{
++ init_timer(&dev->timer);
++ dev->timer.function = udc_timer;
++ dev->timer.data = (unsigned long) dev;
++ dev->timer.expires = jiffies + STALL_TIME;
++ add_timer(&dev->timer);
++}
++
++/* Send a fake SET_CONFIGURATION to the gadget to start it up.
++ * Needed because the hardware doesn't let us know when the real packet
++ * has arrived.
++ */
++static void send_fake_config(struct superh_udc *dev)
++{
++ struct usb_ctrlrequest r;
++ dev->fake_config = 1;
++ dev->setup_countdown = 0;
++ r.bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD
++ | USB_RECIP_DEVICE;
++ r.bRequest = USB_REQ_SET_CONFIGURATION;
++ r.wValue = 1; /* configuration to select */
++ r.wIndex = 0;
++ r.wLength = 0;
++ if (dev->driver->setup(&dev->gadget, &r) < 0) {
++ DMSG("SET_CONFIGURATION failed.\n");
++ }
++}
++
++/*
++ * Timer function. Clears stall from any stalled endpoints as we
++ * don't get informed when the host has sent a clear feature.
++ */
++static void udc_timer(unsigned long _dev)
++{
++ struct superh_udc *dev = (void *)_dev;
++ int i;
++ unsigned long flags;
++
++ local_irq_save(flags);
++
++ if (atomic_read(&dev->in_interrupt) == 0) {
++
++ /* Check if a bus reset has been done and we haven't faked a SET_CONFIGURATION */
++ if (dev->gadget.speed != USB_SPEED_UNKNOWN
++ && dev->setup_countdown > 0
++ && jiffies - dev->reset_time > SETUP_TIME
++ &&list_empty(&dev->ep[0].queue)) {
++ send_fake_config(dev);
++ }
++
++ /* Check if any end points are halted and restart them */
++ for (i = 0; i < EP_TO_UNSTALL; i++) {
++ struct superh_ep *ep = &dev->ep[i];
++ if (ep->halted) {
++ DBG(DBG_VERBOSE, "unstalling ep %d\n", i);
++ superh_ep_set_halt(&ep->ep, 0);
++ if (likely (!list_empty(&ep->queue))) {
++ struct superh_request *req
++ = list_entry(ep->queue.next,
++ struct superh_request, queue);
++ process_ep_req(ep, req);
++ }
++ }
++ }
++ }
++
++ init_udc_timer(dev);
++
++ local_irq_restore(flags);
++}
++
++/*
++ * done - retire a request; caller blocked irqs
++ */
++static void done(struct superh_ep *ep, struct superh_request *req, int status)
++{
++ unsigned stopped = ep->stopped;
++
++ DBG(DBG_NOISY, "done: %s %p %d\n", ep->ep.name, req, status);
++
++ list_del_init(&req->queue);
++
++ if (likely (req->req.status == -EINPROGRESS))
++ req->req.status = status;
++ else
++ status = req->req.status;
++
++ if (status && status != -ESHUTDOWN)
++ DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
++ ep->ep.name, &req->req, status,
++ req->req.actual, req->req.length);
++
++ /* don't modify queue heads during completion callback */
++ ep->stopped = 1;
++ req->req.complete(&ep->ep, &req->req);
++ ep->stopped = stopped;
++}
++
++/*
++ * Enable interrupts for the specified endpoint
++ */
++static inline void pio_irq_enable(struct superh_ep *ep)
++{
++ or_b(ep->interrupt_mask, ep->interrupt_reg);
++}
++
++/*
++ * Disable interrupts for the specified endpoint
++ */
++static inline void pio_irq_disable(struct superh_ep *ep)
++{
++ and_b(~ep->interrupt_mask, ep->interrupt_reg);
++}
++
++/*
++ * nuke - dequeue ALL requests
++ */
++static void nuke(struct superh_ep *ep, int status)
++{
++ struct superh_request *req;
++
++ DBG(DBG_NOISY, "nuke %s %d\n", ep->ep.name, status);
++
++ /* called with irqs blocked */
++#ifdef USE_DMA
++ if (ep->dma >= 0 && !ep->stopped)
++ cancel_dma(ep);
++#endif
++ while (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next,
++ struct superh_request,
++ queue);
++ done(ep, req, status);
++ }
++
++ if (ep->desc)
++ pio_irq_disable (ep);
++}
++
++static inline void clear_ep_state (struct superh_udc *dev)
++{
++ unsigned i;
++
++ /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
++ * fifos, and pending transactions mustn't be continued in any case.
++ */
++ for (i = 1; i < 4; i++)
++ nuke(&dev->ep[i], -ECONNABORTED);
++}
++
++/*
++ * write a packet to an endpoint data register
++ */
++static int
++write_packet(u32 epdr, struct superh_request *req, unsigned max)
++{
++ u8 *buf;
++ unsigned length, count;
++
++ buf = req->req.buf + req->req.actual;
++ prefetch(buf);
++
++ /* how big will this packet be? */
++ length = min(req->req.length - req->req.actual, max);
++ req->req.actual += length;
++
++ count = length;
++ while (likely(count--))
++ ctrl_outb(*buf++, epdr);
++
++ return length;
++}
++
++static int
++write_ep0_fifo (struct superh_ep *ep, struct superh_request *req)
++{
++ unsigned count;
++ int is_short;
++
++ count = write_packet(USBEPDR0I, req, EP0_FIFO_SIZE);
++ ep->dev->stats.write.bytes += count;
++
++ /* last packet "must be" short (or a zlp) */
++ is_short = (count != EP0_FIFO_SIZE);
++
++ DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
++ req->req.length - req->req.actual, req);
++
++ ctrl_outb(EP0i_PKTE, USBTRG);
++
++ if (unlikely (is_short)) {
++ ep->dev->ep0state = EP0_END_XFER;
++
++ count = req->req.length;
++ done (ep, req, 0);
++ /*
++ * If we have received a specified number of setups
++ * after a bus reset or connect then fake a
++ * SET_CONFIGURATION to the driver (as we don't get
++ * them from the hardware).
++ */
++ if (ep->dev->setup_countdown >= 0)
++ ep->dev->setup_countdown--;
++ if (ep->dev->setup_countdown == 0) {
++ send_fake_config(ep->dev);
++ }
++ }
++
++ return is_short;
++}
++
++/*
++ * handle_ep0_setup
++ *
++ * Handles a SETUP request on EP0
++ */
++static void handle_ep0_setup(struct superh_udc* dev)
++{
++ int i;
++ union { u8 raw [8]; struct usb_ctrlrequest r; } u;
++
++ for (i = 0; i < 8; i++) {
++ u.raw[i] = ctrl_inb(USBEPDR0S);
++ }
++
++ /* Send ACK */
++ ctrl_outb(EP0s_RDFN, USBTRG);
++
++ le16_to_cpus (&u.r.wValue);
++ le16_to_cpus (&u.r.wIndex);
++ le16_to_cpus (&u.r.wLength);
++
++ DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
++ u.r.bRequestType, u.r.bRequest,
++ u.r.wValue, u.r.wIndex, u.r.wLength);
++
++ if (u.r.bRequestType & USB_DIR_IN) {
++ DBG(DBG_VERY_NOISY, "handle_ep0_setup: EP0_IN_DATA_PHASE\n");
++ dev->ep0state = EP0_IN_DATA_PHASE;
++ }
++ else {
++ DBG(DBG_VERY_NOISY, "handle_ep0_setup: EP0_OUT_DATA_PHASE\n");
++ dev->ep0state = EP0_OUT_DATA_PHASE;
++ }
++
++ i = dev->driver->setup(&dev->gadget, &u.r);
++ if (i < 0) {
++ DMSG("SETUP %02x.%02x v%04x i%04x l%04x failed\n",
++ u.r.bRequestType, u.r.bRequest,
++ u.r.wValue, u.r.wIndex, u.r.wLength);
++ superh_ep_set_halt(&dev->ep[0].ep, 1);
++ }
++}
++
++/*
++ * write to an IN endpoint fifo, as many packets as possible.
++ * irqs will use this to write the rest later.
++ * caller guarantees at least one packet buffer is ready.
++ */
++static int
++write_fifo (struct superh_ep *ep, struct superh_request *req)
++{
++ unsigned max;
++
++ DBG(DBG_VERY_NOISY, "write_fifo\n");
++
++ if ((ep->bEndpointAddress & USB_DIR_IN) != USB_DIR_IN) {
++ DMSG("write_fifo from invalid EP (%s)\n", ep->ep.name);
++ return -EINVAL;
++ }
++
++ max = ep->desc->wMaxPacketSize;
++ do {
++ unsigned count;
++ int is_last, is_short;
++
++ count = write_packet(ep->fifo_reg, req, max);
++
++ /* last packet is usually short (or a zlp) */
++ if (unlikely (count != max))
++ is_last = is_short = 1;
++ else {
++ if (likely(req->req.length != req->req.actual)
++ || req->req.zero)
++ is_last = 0;
++ else
++ is_last = 1;
++ /* interrupt/iso maxpacket may not fill the fifo */
++ is_short = unlikely (max < ep->ep.maxpacket);
++
++ /* FIXME ep.maxpacket should be the current size,
++ * modified (for periodic endpoints) when the
++ * ep is enabled. do that, re-init as needed,
++ * and change maxpacket refs accordingly.
++ */
++ }
++
++ DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
++ ep->ep.name, count,
++ is_last ? "/L" : "", is_short ? "/S" : "",
++ req->req.length - req->req.actual, req);
++
++ /* let loose that packet. maybe try writing another one,
++ * double buffering might work.
++ */
++ or_b(ep->packet_enable_mask, USBTRG);
++
++ /* requests complete when all IN data is in the FIFO */
++ if (is_last) {
++ done (ep, req, 0);
++ if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
++ pio_irq_disable (ep);
++ }
++#ifdef USE_DMA
++ /* TODO */
++ if (unlikely(ep->dma >= 0) && !list_empty(&ep->queue)) {
++ DMSG("%s pio2dma\n", ep->ep.name);
++ req = list_entry(ep->queue.next,
++ struct superh_request, queue);
++ kick_dma(ep,req);
++ return 0;
++ }
++#endif
++ return 1;
++ }
++ /* Only loop if on EP2 as it is double buffered */
++ } while (ep->bEndpointAddress == (2|USB_DIR_IN)
++ && ctrl_inb(USBIFR0) & EP2_EMPTY);
++ return 0;
++}
++
++/*
++ * read_ep0_fifo - unload packets from ep0 control-out fifo. caller
++ * should have made sure there's at least one packet ready.
++ *
++ * returns true if the request completed because of short packet or the
++ * request buffer having filled (and maybe overran till end-of-packet).
++ */
++static int
++read_ep0_fifo(struct superh_ep *ep, struct superh_request *req)
++{
++ u8 *buf;
++ unsigned bufferspace, count;
++
++ DBG(DBG_VERY_NOISY, "read_ep0_fifo\n");
++
++ if (!ep) {
++ DMSG("read_ep0_fifo invalid ep\n");
++ return -EINVAL;
++ }
++
++ if (!req) {
++ DMSG("read_ep0_fifo invalid req\n");
++ return -EINVAL;
++ }
++
++ if (ep->desc != 0) {
++ DMSG("read_ep0_fifo from invalid EP (%s)\n", ep->ep.name);
++ return -EINVAL;
++ }
++
++ /* make sure there's a packet in the FIFO.
++ */
++ if (likely ((ctrl_inb(USBIFR0) & EP0o_TS) == 0)) {
++ buf = req->req.buf + req->req.actual;
++ bufferspace = req->req.length - req->req.actual;
++
++ /* read all bytes from this packet */
++ count = ctrl_inb(USBEPSZ0O);
++ req->req.actual += min (count, bufferspace);
++ DBG(DBG_VERY_NOISY, "read %s %d bytes req %p %d/%d\n",
++ ep->ep.name, count,
++ req, req->req.actual, req->req.length);
++ while (likely (count-- != 0)) {
++ u8 byte = ctrl_inb(USBEPDR0O);
++
++ if (unlikely (bufferspace == 0)) {
++ /* this happens when the driver's buffer
++ * is smaller than what the host sent.
++ * discard the extra data.
++ */
++ if (req->req.status != -EOVERFLOW)
++ DMSG("%s overflow %d\n",
++ ep->ep.name, count);
++ req->req.status = -EOVERFLOW;
++ } else {
++ *buf++ = byte;
++ bufferspace--;
++ }
++ }
++
++ /* Send ACK */
++ or_b(EP0o_RDFN, USBTRG);
++
++ /* completion */
++ if (req->req.actual >= req->req.length) {
++ done (ep, req, 0);
++ ep0_idle(ep->dev);
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * read_fifo - unload packet(s) from the fifo we use for usb OUT
++ * transfers and put them into the request. caller should have made
++ * sure there's at least one packet ready.
++ *
++ * returns true if the request completed because of short packet or the
++ * request buffer having filled (and maybe overran till end-of-packet).
++ */
++static int
++read_fifo (struct superh_ep *ep, struct superh_request *req)
++{
++ DBG(DBG_VERY_NOISY, "read_fifo\n");
++
++ if ((ep->bEndpointAddress & 0x0f) != 1) {
++ DMSG("read_fifo from invalid EP (%s)\n", ep->ep.name);
++ return -EINVAL;
++ }
++
++ for (;;) {
++ u8 *buf;
++ unsigned bufferspace, count, is_short;
++
++ /* make sure there's a packet in the FIFO.
++ */
++ if (unlikely ((ctrl_inb(USBIFR0) & EP1_FULL) == 0))
++ break;
++ buf = req->req.buf + req->req.actual;
++ bufferspace = req->req.length - req->req.actual;
++
++ /* read all bytes from this packet */
++ count = ctrl_inb(USBEPSZ1);
++ req->req.actual += min (count, bufferspace);
++ is_short = (count < ep->desc->wMaxPacketSize);
++ DBG(DBG_VERY_NOISY, "read %s %d bytes%s req %p %d/%d\n",
++ ep->ep.name, count,
++ is_short ? "/S" : "",
++ req, req->req.actual, req->req.length);
++ while (likely (count-- != 0)) {
++ u8 byte = ctrl_inb(USBEPDR1);
++
++ if (unlikely (bufferspace == 0)) {
++ /* this happens when the driver's buffer
++ * is smaller than what the host sent.
++ * discard the extra data.
++ */
++ if (req->req.status != -EOVERFLOW)
++ DMSG("%s overflow %d\n",
++ ep->ep.name, count);
++ req->req.status = -EOVERFLOW;
++ } else {
++ *buf++ = byte;
++ bufferspace--;
++ }
++ }
++
++ or_b(EP1_RDFN, USBTRG);
++ /* There could now be another packet because of dual buffer */
++
++ /* completion */
++ if (is_short || req->req.actual == req->req.length) {
++ done (ep, req, 0);
++ if (list_empty(&ep->queue))
++ pio_irq_disable (ep);
++ return 1;
++ }
++
++ /* finished that packet. the next one may be waiting... */
++ }
++ return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* Interrupt Handler(s)
++ */
++
++/*
++ * superh_udc_irq_f0 - high priority interrupt handler
++ * this deals with data to & from the bulk pipes
++ */
++static void superh_udc_irq_f0(int irq, void *_dev, struct pt_regs *regs)
++{
++ unsigned char f0_status;
++ struct superh_udc *dev = (struct superh_udc*) _dev;
++ struct superh_request *req;
++ struct superh_ep *ep;
++
++ DBG(DBG_NOISY, "superh_udc_irq_f0 %p\n", dev);
++
++ atomic_inc(&dev->in_interrupt);
++
++ dev->stats.irqs++;
++ dev->stats.irq0s++;
++ f0_status = ctrl_inb(USBIFR0);
++
++ /* Acknowledge interrupts */
++ ctrl_outb(~(f0_status & F0_HIGH), USBIFR0);
++
++ if (f0_status & EP1_FULL) {
++ DBG(DBG_NOISY, "superh_udc_irq_f0 %p: EP1 FULL\n", dev);
++ ep = &dev->ep[1];
++
++ if (likely (!list_empty(&ep->queue)))
++ req = list_entry(ep->queue.next,
++ struct superh_request, queue);
++ else
++ req = 0;
++
++ if (req)
++ read_fifo(ep, req);
++ else
++ pio_irq_disable(ep);
++ }
++
++ if ( f0_status & (EP2_TR | EP2_EMPTY) ) {
++ DBG(DBG_NOISY, "superh_udc_irq_f0 %p: EP2 TR | EP2_EMPTY\n", dev);
++ ep = &dev->ep[2];
++
++ if (likely (!list_empty(&ep->queue)))
++ req = list_entry(ep->queue.next,
++ struct superh_request, queue);
++ else
++ req = 0;
++
++ if (req) {
++ if ((f0_status & EP2_TR) && (f0_status & EP2_EMPTY))
++ write_fifo(ep, req);
++ else
++ and_b(~EP2_EMPTY, USBIER0);
++
++ }
++ else {
++ pio_irq_disable(ep);
++ }
++ }
++
++ atomic_dec(&dev->in_interrupt);
++}
++
++/**
++ * superh_udc_irq_f1 - low priority interrupt handler
++ *
++ */
++static void superh_udc_irq_f1(int irq, void *_dev, struct pt_regs *regs)
++{
++ unsigned char f0_status;
++ unsigned char f1_status;
++ struct superh_udc *dev = (struct superh_udc*) _dev;
++
++ atomic_inc(&dev->in_interrupt);;
++
++ dev->stats.irqs++;
++ dev->stats.irq1s++;
++
++ f0_status = ctrl_inb(USBIFR0);
++ f1_status = ctrl_inb(USBIFR1);
++
++ /* Acknowledge interrupts */
++ ctrl_outb(~(f0_status & F0_LOW), USBIFR0);
++ ctrl_outb(~(f1_status & F1_LOW), USBIFR1);
++
++ /* VBUSF indicates the USB being connected/disconnected */
++ if (f1_status & VBUSF) {
++ DBG(DBG_VERY_NOISY, "superh_udc_irq_f1[%lx] VBUSF\n", dev->stats.irqs);
++ if (!is_usb_connected) {
++ /* report disconnect just once */
++ if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
++ DMSG("disconnect %s\n",
++ dev->driver ? dev->driver->driver.name : 0);
++ stop_activity(dev, dev->driver);
++ }
++ }
++ else if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
++ DMSG("connect\n");
++ dev->setup_countdown = DEFAULT_SETUP_COUNT;
++ }
++ }
++
++
++
++ /* Bus Reset */
++ if (f0_status & BRST) {
++ int i;
++ DBG(DBG_VERBOSE, "superh_udc_irq_f1[%lx]: BRST bus reset\n", dev->stats.irqs);
++ /* kill any outstanding requests */
++ for (i = 0; i < 4; i++) {
++ struct superh_ep *ep = &dev->ep[i];
++ nuke(ep, -ESHUTDOWN);
++ ep->halted = 0;
++ ep->stopped = 0;
++ }
++
++ /* reset fifo's and stall's */
++ ctrl_outb( EP3_CLEAR | EP1_CLEAR | EP2_CLEAR | EP0o_CLEAR | EP0i_CLEAR, USBFCLR );
++ ctrl_outb( 0, USBEPSTL );
++ DMSG("gadget driver '%s', address zero\n", dev->driver->driver.name);
++ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
++ init_udc_timer(dev);
++ dev->gadget.speed = USB_SPEED_FULL;
++ memset(&dev->stats, 0, sizeof dev->stats);
++ if (dev->setup_countdown < 0)
++ dev->setup_countdown = RESET_SETUP_COUNT;
++ dev->reset_time = jiffies;
++ dev->fake_config = 0;
++ ep0_idle(dev);
++ }
++
++ /* EPOi Transmit Complete - data to host on EP0 ACKed
++ * EP0i Transfer Request - no data in FIFO to send on EP0
++ * either way we send next data if there is any and the FIFO is not busy
++ * it will interrupt again if we later if we don't send anything.
++ */
++ if ((f0_status & EP0i_TR || f0_status & EP0i_TS)
++ && (ctrl_inb(USBDASTS) & EP0i_DE) == 0) {
++ struct superh_ep *ep = &dev->ep[0];
++ struct superh_request *req;
++ DBG(DBG_VERY_NOISY, "superh_udc_irq_f1[%lx]: ep0i TR\n", dev->stats.irqs);
++ if (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next, struct superh_request, queue);
++ write_ep0_fifo(ep, req);
++ }
++ or_b(EP0i_PKTE, USBTRG);
++ }
++
++ /* Setup Command Receive Complete */
++ if (f0_status & SETUP_TS) {
++ DBG(DBG_NOISY, "superh_udc_irq_f1[%lx]: SETUP TS\n", dev->stats.irqs);
++ or_b( EP0o_CLEAR | EP0i_CLEAR, USBFCLR);
++ handle_ep0_setup(dev);
++ }
++
++ /* EPOo Receive Complete - EP0 has received data from host */
++ if (f0_status & EP0o_TS) {
++ struct superh_request *req;
++ struct superh_ep *ep;
++ DBG(DBG_VERY_NOISY, "superh_int_hndlr_f1[%lx]: ep0o TS\n", dev->stats.irqs);
++ ep = &dev->ep[0];
++
++ if (likely (!list_empty(&ep->queue)))
++ req = list_entry(ep->queue.next,
++ struct superh_request, queue);
++ else
++ req = 0;
++
++ if (req)
++ read_ep0_fifo(ep, req);
++ }
++
++ /* EP3 Transmit Request & Transmit Complete */
++ if ( f1_status & (EP3_TR | EP3_TS) ) {
++ struct superh_request *req;
++ struct superh_ep *ep;
++ DBG(DBG_VERY_NOISY, "superh_udc_irq_f1[%lx]: EP3 TR | EP3_TS (%x)\n", dev->stats.irqs, f1_status);
++ ep = &dev->ep[3];
++
++ if (likely (!list_empty(&ep->queue)))
++ req = list_entry(ep->queue.next,
++ struct superh_request, queue);
++ else
++ req = 0;
++
++ if (req) {
++ if ((f1_status & EP3_TR) && (ctrl_inb(USBDASTS) & EP3_DE) == 0)
++ write_fifo(ep, req);
++
++ }
++ else {
++ pio_irq_disable(ep);
++ }
++ }
++
++ atomic_dec(&dev->in_interrupt);;
++}
++
++
++/*--------------------------------------------------------------------------*/
++
++/*
++ * endpoint enable/disable
++ *
++ * we need to verify the descriptors used to enable endpoints. since superh
++ * endpoint configurations are fixed, and are pretty much always enabled,
++ * there's not a lot to manage here.
++ *
++ */
++static int superh_ep_enable (struct usb_ep *_ep,
++ const struct usb_endpoint_descriptor *desc)
++{
++ struct superh_ep *ep;
++ struct superh_udc *dev;
++
++ DBG(DBG_NOISY, "superh_ep_enable\n");
++
++ ep = container_of (_ep, struct superh_ep, ep);
++ if (!_ep || !desc || ep->desc || _ep->name == ep0name
++ || desc->bDescriptorType != USB_DT_ENDPOINT
++ || ep->bEndpointAddress != desc->bEndpointAddress
++ || ep->ep.maxpacket < desc->wMaxPacketSize) {
++ DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
++ return -EINVAL;
++ }
++
++ /* xfer types must match, except that interrupt ~= bulk */
++ if (ep->bmAttributes != desc->bmAttributes
++ && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
++ && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
++ DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
++ return -EINVAL;
++ }
++
++#if 0
++ /* hardware _could_ do smaller, but driver doesn't */
++ if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
++ && desc->wMaxPacketSize != BULK_FIFO_SIZE)
++ || !desc->wMaxPacketSize) {
++ DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
++ return -ERANGE;
++ }
++#endif
++
++ dev = ep->dev;
++ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
++ DMSG("%s, bogus device state\n", __FUNCTION__);
++ return -ESHUTDOWN;
++ }
++
++ ep->desc = desc;
++ ep->dma = -1;
++ ep->stopped = 0;
++
++ /* flush fifo (mostly for OUT buffers), enable irq */
++ superh_ep_fifo_flush (_ep);
++
++ /* ... reset halt state too, if we could ... */
++
++#ifdef USE_DMA
++
++#endif
++
++ DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
++ return 0;
++}
++
++static int superh_ep_disable (struct usb_ep *_ep)
++{
++ struct superh_ep *ep;
++
++ DBG(DBG_NOISY, "superh_ep_disable\n");
++
++ ep = container_of (_ep, struct superh_ep, ep);
++ if (!_ep || !ep->desc) {
++ DMSG("%s, %s not enabled\n", __FUNCTION__,
++ _ep ? ep->ep.name : NULL);
++ return -EINVAL;
++ }
++ nuke (ep, -ESHUTDOWN);
++
++#ifdef USE_DMA
++ /* TODO */
++ if (ep->dma >= 0) {
++ *ep->reg_drcmr = 0;
++ pxa_free_dma (ep->dma);
++ ep->dma = -1;
++ }
++#endif
++
++ /* flush fifo (mostly for IN buffers) */
++ superh_ep_fifo_flush (_ep);
++
++ ep->desc = 0;
++ ep->stopped = 1;
++
++ DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
++ return 0;
++}
++
++/* for the superh, these can just wrap kmalloc/kfree. gadget drivers
++ * must still pass correctly initialized endpoints, since other controller
++ * drivers may care about how it's currently set up (dma issues etc).
++ */
++
++/*
++ * superh_ep_alloc_request - allocate a request data structure
++ */
++static struct usb_request *
++superh_ep_alloc_request (struct usb_ep *_ep, int gfp_flags)
++{
++ struct superh_request *req;
++
++ /* FIXME for bulk out-dma endpoints, preallocate a frame's worth of
++ * (aligned) dma descriptors at the end of the request
++ */
++
++ req = kmalloc (sizeof *req, gfp_flags);
++ if (!req)
++ return 0;
++
++ memset (req, 0, sizeof *req);
++ INIT_LIST_HEAD (&req->queue);
++ DBG(DBG_VERY_NOISY, "superh_ep_alloc_request: %p %d\n", req, list_empty(&req->queue));
++
++ return &req->req;
++}
++
++/*
++ * superh_ep_free_request - deallocate a request data structure
++ */
++static void
++superh_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct superh_request *req;
++
++ req = container_of (_req, struct superh_request, req);
++ WARN_ON (!list_empty (&req->queue));
++ kfree(req);
++}
++
++/* SH cache needs flushing with DMA I/O (it's dma-incoherent), but there's
++ * no device-affinity and the heap works perfectly well for i/o buffers.
++ * TODO: check this
++ */
++static void *
++superh_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
++ dma_addr_t *dma, int gfp_flags)
++{
++ char *retval;
++
++ retval = kmalloc (bytes, gfp_flags);
++ if (retval)
++ *dma = virt_to_bus (retval);
++ return retval;
++}
++
++static void
++superh_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
++ unsigned bytes)
++{
++ kfree (buf);
++}
++
++static struct superh_request*
++process_ep_req(struct superh_ep *ep, struct superh_request *req)
++{
++ struct superh_udc *dev = ep->dev;
++
++ if (ep->desc == 0 /* ep0 */) {
++ switch (dev->ep0state) {
++ case EP0_IN_DATA_PHASE:
++ DBG(DBG_VERY_NOISY, "superh_ep_queue: EP0_IN_DATA_PHASE\n");
++ dev->stats.write.ops++;
++ if (write_ep0_fifo(ep, req))
++ req = 0;
++ break;
++
++ case EP0_OUT_DATA_PHASE:
++ DBG(DBG_VERY_NOISY, "superh_ep_queue: EP0_OUT_DATA_PHASE\n");
++ dev->stats.read.ops++;
++ if (read_ep0_fifo(ep, req))
++ req = 0;
++ break;
++
++ default:
++ DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
++ return 0;
++ }
++#ifdef USE_DMA
++ /* either start dma or prime pio pump */
++ }
++ else if (ep->dma >= 0) {
++ kick_dma(ep, req);
++#endif
++ /* can the FIFO can satisfy the request immediately? */
++ }
++ else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
++ if ((ep->desc->bEndpointAddress & 0x0f) == 2
++ && (ctrl_inb(USBIFR0) & EP2_TR) != 0
++ && write_fifo(ep, req)) {
++ req = 0;
++ }
++ else if ((ep->desc->bEndpointAddress & 0x0f) == 3
++ && (ctrl_inb(USBIFR1) & EP3_TR) != 0
++ && write_fifo(ep, req)) {
++ req = 0;
++ }
++ }
++
++ if (likely (((req && ep->desc) && ep->dma < 0) || ep->desc == 0))
++ pio_irq_enable(ep);
++
++ return req;
++}
++
++
++static int
++superh_ep_queue(struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
++{
++ struct superh_request *req;
++ struct superh_ep *ep;
++ struct superh_udc *dev;
++ unsigned long flags;
++
++ req = container_of(_req, struct superh_request, req);
++ ep = container_of(_ep, struct superh_ep, ep);
++
++ DBG(DBG_VERY_NOISY, "superh_ep_queue\n");
++
++ /* If we have just sent a fake configuration request then
++ * this is the reply. We don't want to send it to the host
++ * so just ignore it.
++ */
++ if (ep->desc == 0 /* ep0 */ && ep->dev->fake_config) {
++ DBG(DBG_NOISY, "Ignoring bogus SET_CONFIGURATION response\n");
++ done(ep, req, 0);
++ ep->dev->fake_config = 0;
++ return 1;
++ }
++
++ if (unlikely (!_req || !_req->complete || !_req->buf
++ || !list_empty(&req->queue))) {
++ DMSG("%s, bad params %s, %p, %p, %p, %d\n", __FUNCTION__,
++ ep->ep.name, _req, _req->complete, _req->buf,
++ list_empty(&req->queue));
++ return -EINVAL;
++ }
++
++ if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
++ DMSG("%s, bad ep\n", __FUNCTION__);
++ return -EINVAL;
++ }
++
++ dev = ep->dev;
++ if (unlikely (!dev->driver
++ || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
++ DMSG("%s, bogus device state\n", __FUNCTION__);
++ return -ESHUTDOWN;
++ }
++
++#ifdef USE_DMA
++ /* TODO */
++ if (ep->dma >= 0) {
++ unsigned long start = (unsigned long) _req->buf;
++
++ clean_dcache_range(start, start + _req->length);
++ /* or for USB_DIR_OUT, invalidate_dcache_range (...) */
++ }
++#endif
++
++ DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
++ _ep->name, _req, _req->length, _req->buf);
++
++ local_irq_save(flags);
++
++ _req->status = -EINPROGRESS;
++ _req->actual = 0;
++
++ /* kickstart this i/o queue? */
++ if (list_empty(&ep->queue) && !ep->stopped && !ep->halted) {
++ req = process_ep_req(ep, req);
++ }
++
++ /* pio or dma irq handler advances the queue. */
++ if (likely (req != 0))
++ list_add_tail(&req->queue, &ep->queue);
++
++ local_irq_restore(flags);
++
++ return 0;
++}
++
++static int
++superh_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
++{
++ struct superh_ep *ep;
++ struct superh_request *req;
++ unsigned long flags;
++
++ DBG(DBG_NOISY, "superh_ep_dequeue %s\n", _ep->name);
++
++ ep = container_of(_ep, struct superh_ep, ep);
++ req = container_of(_req, struct superh_request, req);
++ if (!_ep || !_req || ep->ep.name == ep0name)
++ return -EINVAL;
++
++ local_irq_save(flags);
++#ifdef USE_DMA
++ if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
++ cancel_dma(ep);
++ done(ep, req, -ECONNRESET);
++ /* restart i/o */
++ if (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next,
++ struct superh_request, queue);
++ kick_dma(ep, req);
++ }
++ } else
++#endif
++ if (!list_empty(&req->queue))
++ done(ep, req, -ECONNRESET);
++ else
++ req = 0;
++ local_irq_restore(flags);
++
++ return req ? 0 : -EOPNOTSUPP;
++}
++
++/* stall/unstall an endpoint, 0 clears the stall, 1 sets it */
++static int
++superh_ep_set_halt(struct usb_ep *_ep, int value)
++{
++ struct superh_ep *ep;
++ unsigned long flags;
++
++ ep = container_of(_ep, struct superh_ep, ep);
++ if (unlikely (!_ep
++ || (!ep->desc && ep->ep.name != ep0name))
++ || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
++ DMSG("%s, bad ep\n", __FUNCTION__);
++ return -EINVAL;
++ }
++
++ if (ep->halted == value)
++ return 0;
++
++ local_irq_save(flags);
++
++ if (value == 1 && (ep->bEndpointAddress & USB_DIR_IN) != 0
++ && ((ctrl_inb(USBDASTS) & ep->data_present_mask) != 0
++ || !list_empty(&ep->queue))) {
++ local_irq_restore(flags);
++ DBG(DBG_VERBOSE, "Can't %s on %s\n", value ? " halt" : "clear halt", _ep->name);
++ return -EAGAIN;
++ }
++
++ if (value) {
++ or_b(ep->stall_mask, USBEPSTL);
++ if (!ep->desc) {
++ ep->dev->ep0state = EP0_STALL;
++ }
++ /* disable ep interrupts and set a timer to clear the stall */
++ pio_irq_disable(ep);
++ mod_timer(&ep->dev->timer, jiffies + STALL_TIME);
++ }
++ else {
++ and_b(~ep->stall_mask, USBEPSTL);
++ }
++
++ ep->halted = value;
++
++ local_irq_restore(flags);
++
++ DBG(DBG_VERBOSE, "%s %s\n", _ep->name, value ? " halt" : "clear halt");
++
++ return 0;
++}
++
++static int superh_ep_fifo_status(struct usb_ep *_ep)
++{
++ struct superh_ep *ep;
++
++ DBG(DBG_NOISY, "superh_ep_fifo_status\n");
++
++ ep = container_of(_ep, struct superh_ep, ep);
++ if (!_ep) {
++ DMSG("%s, bad ep\n", __FUNCTION__);
++ return -ENODEV;
++ }
++ if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
++ return -EOPNOTSUPP;
++ if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
++ return 0;
++ else {
++ switch (ep->desc->bEndpointAddress & 0x0f) {
++ case 0:
++ return ctrl_inb(USBEPSZ0O);
++ case 1:
++ return ctrl_inb(USBEPSZ1);
++ }
++ }
++
++ return 0;
++}
++
++static void superh_ep_fifo_flush(struct usb_ep *_ep)
++{
++ struct superh_ep *ep;
++
++ DBG(DBG_NOISY, "superh_ep_fifo_flush\n");
++
++ ep = container_of(_ep, struct superh_ep, ep);
++ if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
++ DMSG("%s, bad ep\n", __FUNCTION__);
++ return;
++ }
++
++ or_b(ep->clear_mask, USBFCLR);
++}
++
++static struct usb_ep_ops superh_ep_ops = {
++ .enable = superh_ep_enable,
++ .disable = superh_ep_disable,
++
++ .alloc_request = superh_ep_alloc_request,
++ .free_request = superh_ep_free_request,
++
++ .alloc_buffer = superh_ep_alloc_buffer,
++ .free_buffer = superh_ep_free_buffer,
++
++ .queue = superh_ep_queue,
++ .dequeue = superh_ep_dequeue,
++
++ .set_halt = superh_ep_set_halt,
++ .fifo_status = superh_ep_fifo_status,
++ .fifo_flush = superh_ep_fifo_flush,
++};
++
++/* ---------------------------------------------------------------------------
++ * device-scoped parts of the api to the usb controller hardware
++ * ---------------------------------------------------------------------------
++ */
++
++static int superh_udc_get_frame(struct usb_gadget *_gadget)
++{
++ DBG(DBG_VERY_NOISY, "superh_udc_get_frame\n");
++
++ return -EOPNOTSUPP;
++}
++
++static const struct usb_gadget_ops superh_udc_ops = {
++ .get_frame = superh_udc_get_frame,
++ // no remote wakeup
++ // always selfpowered
++};
++
++
++/* if we're trying to save space, don't bother with this proc file */
++
++#if defined(CONFIG_PROC_FS) && !defined(CONFIG_EMBEDDED)
++# define UDC_PROC_FILE
++#endif
++
++#ifdef UDC_PROC_FILE
++
++static const char proc_node_name [] = "driver/udc";
++
++static int
++udc_proc_read(char *page, char **start, off_t off, int count,
++ int *eof, void *_dev)
++{
++ char *buf = page;
++ struct superh_udc *dev = _dev;
++ char *next = buf;
++ unsigned size = count;
++ unsigned long flags;
++ int t;
++ int i;
++
++ local_irq_save(flags);
++
++ /* basic device status */
++ t = snprintf(next, size,
++ "%s\n%s version: %s\nGadget driver: %s\nHost %s\n\n",
++ driver_desc,
++ driver_name, DRIVER_VERSION,
++ dev->driver ? dev->driver->driver.name : "(none)",
++ is_usb_connected ? "full speed" : "disconnected");
++ size -= t;
++ next += t;
++
++ /* device registers */
++ t = snprintf(next, size,
++ "ifr0 %02X, ifr1 %02X, isr0 %02X, isr1 %02X, ier0 %02X, ier1 %02X\n",
++ ctrl_inb(USBIFR0), ctrl_inb(USBIFR1),
++ ctrl_inb(USBISR0), ctrl_inb(USBISR1),
++ ctrl_inb(USBIER0), ctrl_inb(USBIER1));
++ size -= t;
++ next += t;
++
++ t = snprintf(next, size,
++ "epsz0o %02X, epsz1 %02X, dasts %02X, dmar %02X\n",
++ ctrl_inb(USBEPSZ0O), ctrl_inb(USBEPSZ1),
++ ctrl_inb(USBDASTS), ctrl_inb(USBDMA));
++ size -= t;
++ next += t;
++
++ t = snprintf(next, size,
++ "epstl %02X, xvercr %02X\n",
++ ctrl_inb(USBEPSTL), ctrl_inb(USBXVERCR));
++ size -= t;
++ next += t;
++
++ if (!is_usb_connected || !dev->driver)
++ goto done;
++
++ t = snprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu; irq0s %lu; irq1s %lu\n\n",
++ dev->stats.write.bytes, dev->stats.write.ops,
++ dev->stats.read.bytes, dev->stats.read.ops,
++ dev->stats.irq0s, dev->stats.irq1s);
++ size -= t;
++ next += t;
++
++ /* dump endpoint queues */
++ for (i = 0; i < 4; i++) {
++ struct superh_ep *ep = &dev->ep [i];
++ struct superh_request *req;
++ int t;
++
++ if (i != 0) {
++ const struct usb_endpoint_descriptor *d;
++
++ d = ep->desc;
++ if (!d)
++ continue;
++ t = snprintf(next, size,
++ "%s max %d %s\n",
++ ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
++ (ep->dma >= 0) ? "dma" : "pio");
++
++ } else /* ep0 should only have one transfer queued */
++ t = snprintf(next, size, "ep0 max 8 pio\n");
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++
++ if (list_empty(&ep->queue)) {
++ t = snprintf(next, size, "\t(nothing queued)\n");
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++ continue;
++ }
++ list_for_each_entry(req, &ep->queue, queue) {
++#ifdef USE_DMA
++ if (ep->dma >= 0 && req->queue.prev == &ep->queue)
++ t = snprintf(next, size,
++ "\treq %p len %d/%d "
++ "buf %p (dma%d dcmd %08x)\n",
++ &req->req, req->req.actual,
++ req->req.length, req->req.buf,
++ ep->dma, DCMD(ep->dma)
++ // low 13 bits == bytes-to-go
++ );
++ else
++#endif
++ t = snprintf(next, size,
++ "\treq %p len %d/%d buf %p\n",
++ &req->req, req->req.actual,
++ req->req.length, req->req.buf);
++ if (t <= 0 || t > size)
++ goto done;
++ size -= t;
++ next += t;
++ }
++ }
++
++done:
++ local_irq_restore(flags);
++ return count - size;
++}
++
++#endif /* UDC_PROC_FILE */
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * udc_disable - disable USB device controller
++ */
++static void udc_disable(struct superh_udc *dev)
++{
++ /* block all irqs */
++ ctrl_outb( 0, USBIER0);
++ ctrl_outb( 0, USBIER1);
++
++ /* Disable the USB module */
++ or_b(0x80, STBCR3);
++
++ /* Disable the USB clock */
++ ctrl_outw(0xA500, UCLKCR);
++
++ ep0_idle (dev);
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++}
++
++/*
++ * udc_reinit - initialize software state
++ */
++static void udc_reinit(struct superh_udc *dev)
++{
++ u32 i;
++
++ /* device/ep0 records init */
++ INIT_LIST_HEAD (&dev->gadget.ep_list);
++ dev->gadget.ep0 = &dev->ep[0].ep;
++ INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
++ dev->ep0state = EP0_IDLE;
++
++ /* basic endpoint records init */
++ for (i = 0; i < 4; i++) {
++ struct superh_ep *ep = &dev->ep[i];
++
++ ep->ep.name = ep_name[i];
++ ep->ep.ops = &superh_ep_ops;
++ if (i != 0)
++ list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
++
++ ep->dev = dev;
++ ep->desc = 0;
++ ep->stopped = 0;
++ ep->halted = 0;
++ ep->dma = -1;
++ INIT_LIST_HEAD (&ep->queue);
++
++ /* address may need USB_DIR_IN, attributes likely wrong */
++ ep->bEndpointAddress = i;
++ ep->bmAttributes = USB_ENDPOINT_XFER_BULK;
++ }
++
++ /* TODO at least from here on, static initialization
++ * would work just as well and would need less code space
++ */
++
++ /* ep0 == control */
++ dev->ep[ 0].ep.maxpacket = EP0_FIFO_SIZE;
++ dev->ep[ 0].data_present_mask = EP0i_DE;
++ dev->ep[ 0].stall_mask = EP0_STL;
++ dev->ep[ 0].interrupt_mask = EP0o_TS | EP0i_TR | EP0i_TS;
++ dev->ep[ 0].interrupt_reg = USBIER0;
++ dev->ep[ 0].clear_mask = EP0i_CLEAR | EP0o_CLEAR;
++ dev->ep[ 0].fifo_reg = 0;
++ dev->ep[ 0].packet_enable_mask = 0;
++
++ dev->ep[ 1].ep.maxpacket = BULK_FIFO_SIZE;
++ dev->ep[ 1].bEndpointAddress |= USB_DIR_OUT;
++ dev->ep[ 1].data_present_mask = 0x00;
++ dev->ep[ 1].stall_mask = EP1_STL;
++ dev->ep[ 1].interrupt_mask = EP1_FULL;
++ dev->ep[ 1].interrupt_reg = USBIER0;
++ dev->ep[ 1].clear_mask = EP1_CLEAR;
++ dev->ep[ 1].fifo_reg = 0;
++ dev->ep[ 1].packet_enable_mask = 0;
++
++ dev->ep[ 2].ep.maxpacket = BULK_FIFO_SIZE;
++ dev->ep[ 2].bEndpointAddress |= USB_DIR_IN;
++ dev->ep[ 2].data_present_mask = EP2_DE;
++ dev->ep[ 2].stall_mask = EP2_STL;
++ dev->ep[ 2].interrupt_mask = EP2_TR | EP2_EMPTY;
++ dev->ep[ 2].interrupt_reg = USBIER0;
++ dev->ep[ 2].clear_mask = EP2_CLEAR;
++ dev->ep[ 2].fifo_reg = USBEPDR2;
++ dev->ep[ 2].packet_enable_mask = EP2_PKTE;
++
++ dev->ep[ 3].ep.maxpacket = INT_FIFO_SIZE;
++ dev->ep[ 3].bEndpointAddress |= USB_DIR_IN;
++ dev->ep[ 3].data_present_mask = EP3_DE;
++ dev->ep[ 3].stall_mask = EP3_STL;
++ dev->ep[ 3].interrupt_mask = EP3_TR | EP3_TS;
++ dev->ep[ 3].interrupt_reg = USBIER1;
++ dev->ep[ 3].clear_mask = EP3_CLEAR;
++ dev->ep[ 3].fifo_reg = USBEPDR3;
++ dev->ep[ 3].packet_enable_mask = EP3_PKTE;
++}
++
++/* until it's enabled, this UDC should be completely invisible
++ * to any USB host.
++ */
++static void udc_enable (struct superh_udc *dev)
++{
++#if defined(CONFIG_CPU_SUBTYPE_SH7727)
++ // Reset and then Select Function USB1_pwr_en out (USB) c.f. Section 26, Table 26.1 PTE2
++ and_w(PN_PB2_MSK, PECR);
++ or_w(PN_PB2_OF, PECR);
++
++ // Reset and then Select Function UCLK c.f. Section 26, Table 26.1, PTD6
++ and_w(PN_PB6_MSK, PDCR);
++ or_w(PN_PB6_OF, PDCR);
++
++ // Stop USB module prior to setting clocks c.f. Section 9.2.3
++ and_b(~MSTP14, STBCR3);
++ or_b(MSTP14, STBCR3);
++
++ // Select external clock, 1/1 divisor c.f. Section 11.3.1
++ or_b(USBDIV_11|USBCKS_EC, EXCPGCR);
++
++ // Start USB c.f. Section 9.2.3
++ and_b(~MSTP14, STBCR3);
++
++ // Disable pullup c.f. Section 23.5.19
++ or_b(PULLUP_E, USBDMA);
++ //and_b(~PULLUP_E, USBDMA);
++
++ // Set port 1 to function, disabled c.f. Section 22.2.1
++ or_w(USB_TRANS_TRAN | USB_SEL_FUNC, EXPFC);
++
++ // Enable pullup c.f. Section 23.5.19a
++ and_b(~PULLUP_E, USBDMA);
++ //or_b(PULLUP_E, USBDMA);
++#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
++ /* Disable the USB module */
++ or_b(0x80, STBCR3);
++
++ /* Set the clock to external & enable */
++ ctrl_outw(0xA5E0, UCLKCR);
++
++ /* Enable the USB module */
++ and_b(0x7f, STBCR3);
++
++ /* Enable USB pins. */
++ ctrl_outw(0x01FD, PMCR); /* VBUS */
++ or_b(PULLUP_E, PMDR);
++#endif
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++ dev->stats.irqs = 0;
++ dev->stats.irq0s = 0;
++ dev->stats.irq1s = 0;
++
++ // reset fifo's and stall's
++ or_b( EP3_CLEAR | EP1_CLEAR | EP2_CLEAR | EP0o_CLEAR | EP0i_CLEAR, USBFCLR);
++ or_b(0, USBEPSTL);
++
++ /* Setup interrupt priority by using the interrupt select registers */
++ ctrl_outb(F0_LOW, USBISR0);
++ ctrl_outb(F1_LOW, USBISR1);
++
++ /* Enable some interrupts */
++ or_b( BRST | SETUP_TS | EP0o_TS | EP0i_TR | EP0i_TS, USBIER0);
++ or_b( VBUSF, USBIER1);
++}
++
++/* when a driver is successfully registered, it will receive
++ * control requests including set_configuration(), which enables
++ * non-control requests. then usb traffic follows until a
++ * disconnect is reported. then a host may connect again, or
++ * the driver might get unbound.
++ */
++int usb_gadget_register_driver(struct usb_gadget_driver *driver)
++{
++ struct superh_udc *dev = the_controller;
++ int retval;
++
++ if (!driver
++ /*|| driver->speed != USB_SPEED_FULL
++ || !driver->bind
++ || !driver->unbind
++ || !driver->disconnect
++ || !driver->setup*/)
++ return -EINVAL;
++ if (!dev)
++ return -ENODEV;
++ if (dev->driver)
++ return -EBUSY;
++
++ /* first hook up the driver ... */
++ dev->driver = driver;
++
++ retval = driver->bind(&dev->gadget);
++ if (retval) {
++ DMSG("bind to driver %s --> error %d\n",
++ driver->driver.name, retval);
++ dev->driver = 0;
++ return retval;
++ }
++
++ /* ... then enable host detection and ep0; and we're ready
++ * for set_configuration as well as eventual disconnect.
++ * NOTE: this shouldn't power up until later.
++ */
++ udc_enable(dev);
++
++ DMSG("registered gadget driver '%s'\n", driver->driver.name);
++ dump_state(dev);
++ return 0;
++}
++
++EXPORT_SYMBOL(usb_gadget_register_driver);
++
++static void
++stop_activity(struct superh_udc *dev, struct usb_gadget_driver *driver)
++{
++ int i;
++
++ /* don't disconnect drivers more than once */
++ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
++ driver = 0;
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++
++ /* prevent new request submissions, kill any outstanding requests */
++ for (i = 0; i < 4; i++) {
++ struct superh_ep *ep = &dev->ep[i];
++
++ ep->stopped = 1;
++ nuke(ep, -ESHUTDOWN);
++ }
++
++ del_timer_sync(&dev->timer);
++
++ /* report disconnect; the driver is already quiesced */
++ if (driver)
++ driver->disconnect(&dev->gadget);
++
++ /* re-init driver-visible data structures */
++ udc_reinit(dev);
++}
++
++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
++{
++ struct superh_udc *dev = the_controller;
++
++ if (!dev)
++ return -ENODEV;
++ if (!driver || driver != dev->driver)
++ return -EINVAL;
++
++ local_irq_disable();
++ udc_disable(dev);
++ stop_activity(dev, driver);
++ driver->unbind(&dev->gadget);
++ dev->driver = 0;
++ local_irq_enable();
++
++ DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
++ dump_state(dev);
++ return 0;
++}
++
++EXPORT_SYMBOL(usb_gadget_unregister_driver);
++
++
++/*-------------------------------------------------------------------------*/
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,40)
++MODULE_DESCRIPTION(driver_desc);
++#endif
++MODULE_AUTHOR("Julian Back");
++MODULE_LICENSE("GPL");
++
++/*
++ * cleanup - free resources allocated during init
++ */
++static void /*__exit and */ __init cleanup(void)
++{
++ struct superh_udc *dev = the_controller;
++
++ if (!dev)
++ return;
++
++ udc_disable(dev);
++#ifdef UDC_PROC_FILE
++ remove_proc_entry(proc_node_name, NULL);
++#endif
++ usb_gadget_unregister_driver(dev->driver);
++
++ if (dev->got_irq0) {
++ free_irq(USBF0_IRQ, dev);
++ dev->got_irq0 = 0;
++ }
++
++ if (dev->got_irq1) {
++ free_irq(USBF1_IRQ, dev);
++ dev->got_irq1 = 0;
++ }
++
++ the_controller = 0;
++}
++module_exit (cleanup);
++
++/*
++ * init - allocate resources
++ */
++static int __init init(void)
++{
++ static struct superh_udc memory;
++
++ struct superh_udc *dev;
++ int retval;
++
++ printk(KERN_DEBUG "%s: version %s\n", driver_name, DRIVER_VERSION);
++
++ /* initialize data */
++ dev = &memory;
++
++ memset(dev, 0, sizeof *dev);
++ dev->gadget.ops = &superh_udc_ops;
++ dev->gadget.name = driver_name;
++ dev->gadget.dev.bus_id = "udc";
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++
++ dev->vbusmn = 0;
++
++ atomic_set(&dev->in_interrupt, 0);
++
++ the_controller = dev;
++ udc_disable(dev);
++ udc_reinit(dev);
++
++ /* irq setup after old hardware state is cleaned up */
++ retval = request_irq(USBF0_IRQ, superh_udc_irq_f0,
++ 0/*SA_INTERRUPT | SA_SAMPLE_RANDOM*/,
++ driver_name, dev);
++ if (retval != 0) {
++ printk(KERN_ERR "%s: can't get irq %i, err %d\n",
++ driver_name, USBF0_IRQ, retval);
++ goto failed;
++ }
++ dev->got_irq0 = 1;
++
++ retval = request_irq(USBF1_IRQ, superh_udc_irq_f1,
++ 0/*SA_INTERRUPT | SA_SAMPLE_RANDOM*/,
++ driver_name, dev);
++ if (retval != 0) {
++ printk(KERN_ERR "%s: can't get irq %i, err %d\n",
++ driver_name, USBF1_IRQ, retval);
++ goto failed;
++ }
++ dev->got_irq1 = 1;
++
++ printk(KERN_INFO "%s, IRQs %d %d\n", driver_desc,
++ USBF0_IRQ, USBF1_IRQ);
++ dump_state(dev);
++
++ dev->setup_countdown = DEFAULT_SETUP_COUNT;
++
++#ifdef UDC_PROC_FILE
++ create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
++#endif
++
++ return 0;
++
++failed:
++ cleanup();
++ return retval;
++}
++module_init (init);
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/superh_udc.h kernel/drivers/usb/gadget/superh_udc.h
+--- /tmp/kernel/drivers/usb/gadget/superh_udc.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/superh_udc.h 2005-04-22 17:53:19.513526654 +0200
+@@ -0,0 +1,363 @@
++/*
++ * Renesas SuperH USB 1.1 device controller (found on SH7705, SH7727...)
++ *
++ * Copyright (C) 2003 Renesas Technology Europe Limited
++ * Copyright (C) 2003 Julian Back (jback@mpc-data.co.uk), MPC Data Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef __LINUX_USB_GADGET_SUPERH_UDC_H
++#define __LINUX_USB_GADGET_SUPERH_UDC_H
++
++#include <linux/types.h>
++
++struct superh_udc;
++
++struct superh_ep {
++ struct usb_ep ep;
++ struct superh_udc *dev;
++
++ const struct usb_endpoint_descriptor *desc;
++ struct list_head queue;
++ int dma;
++
++ u8 bEndpointAddress;
++ u8 bmAttributes;
++
++ unsigned stopped : 1;
++ unsigned halted : 1;
++
++ u8 data_present_mask;
++ u8 stall_mask;
++ u8 interrupt_mask;
++ u8 clear_mask;
++ u8 packet_enable_mask;
++ unsigned interrupt_reg;
++ unsigned fifo_reg;
++};
++
++struct superh_request {
++ struct usb_request req;
++ struct list_head queue;
++};
++
++enum ep0_state {
++ EP0_IDLE,
++ EP0_IN_DATA_PHASE,
++ EP0_OUT_DATA_PHASE,
++ EP0_END_XFER,
++ EP0_STALL,
++};
++
++#define EP0_FIFO_SIZE ((unsigned)8)
++#define BULK_FIFO_SIZE ((unsigned)64)
++#define ISO_FIFO_SIZE ((unsigned)0)
++#define INT_FIFO_SIZE ((unsigned)8)
++
++struct udc_stats {
++ struct ep0stats {
++ unsigned long ops;
++ unsigned long bytes;
++ } read, write;
++ unsigned long irqs;
++ unsigned long irq0s;
++ unsigned long irq1s;
++};
++
++struct superh_udc {
++ struct usb_gadget gadget;
++ struct usb_gadget_driver *driver;
++ atomic_t in_interrupt;
++
++ enum ep0_state ep0state;
++ struct udc_stats stats;
++ unsigned int vbusmn;
++ unsigned long vbusf_time;
++ unsigned got_irq0 : 1,
++ got_irq1 : 1,
++ fake_config: 1;
++ int setup_countdown;
++ unsigned long reset_time;
++ struct timer_list timer;
++ struct superh_ep ep [4];
++};
++
++/* 2.5 changes ... */
++
++#ifndef container_of
++#define container_of list_entry
++#endif
++
++#ifndef WARN_ON
++#define WARN_ON BUG_ON
++#endif
++
++/* one I/O pin should be used to detect disconnect */
++#define is_usb_connected ((ctrl_inb(USBIFR1) & VBUSF) != 0)
++
++/* Register addresses - should really be in include/asm-sh */
++
++#ifdef CONFIG_CPU_SUBTYPE_SH7705
++
++#define USBEPDR0I 0xA4480000
++#define USBEPDR0O 0xA4480004
++#define USBEPDR0S 0xA4480008
++#define USBEPDR1 0xA448000C
++#define USBEPDR2 0xA4480010
++#define USBEPDR3 0xA4480014
++#define USBIFR0 0xA4480018
++#define USBIFR1 0xA448001C
++#define USBTRG 0xA4480020
++#define USBFCLR 0xA4480024
++#define USBEPSZ0O 0xA4480028
++#define USBDASTS 0xA448002C
++#define USBEPSTL 0xA4480030
++#define USBIER0 0xA4480034
++#define USBIER1 0xA4480038
++#define USBEPSZ1 0xA448003C
++#define USBDMA 0xA4480040
++#define USBISR0 0xA4480044
++#define USBISR1 0xA4480048
++
++#define USBXVERCR 0xA4480060
++
++#define STBCR3 0xA40A0000
++#define UCLKCR 0xA40A0008
++
++#define PMCR 0xA4000118
++#define PNCR 0xA400011A
++#define PNCR2 0xA405015A
++
++#define PMDR 0xA4000138
++
++#endif
++
++/*
++ * Standby Control Register (STBCR3) c.f. 9.2.3
++ */
++
++#define MSTP14 0x10
++
++/*
++ * EXCPG Control Register (EXCPGCR) c.f. Section 11.3.1
++ */
++
++#define USBDIVS_EL0 0x00
++#define USBDIVS_EL1 0x01
++#define USBDIVS_EL2 0x02
++
++#define USBCKS_EL1 0x04
++#define USBCKS_EL2 0x10
++#define USBCKS_EL3 0x20
++
++#define USBDIV_11 0x00
++#define USBDIV_12 0x01
++#define USBDIV_13 0x02
++
++#define USBCKS_PC 0x00
++#define USBCKS_IC 0x20
++#define USBCKS_BC 0x24
++#define USBCKS_EC 0x30
++
++
++/*
++ * Extra Pin Function Controller (EXPFC) c.f. Section 22.2.1
++ */
++
++#define USB_TRANS_TRAN 0x00
++#define USB_TRANS_DIG 0x02
++
++#define USB_SEL_HOST 0x00
++#define USB_SEL_FUNC 0x01
++
++
++/*
++ * USBDMA Setting Register (USBDMAR) c.f. Section 23.5.19
++ */
++
++#define EP1_DMAE 0x01
++#define EP2_DMAE 0x02
++
++#if defined(CONFIG_CPU_SUBTYPE_SH7727)
++#define PULLUP_E 0x04
++#endif
++
++#if defined(CONFIG_SH_EDOSK7705)
++#define PULLUP_E 0x01
++#endif
++
++/*
++ * USB Interrupt Flag Register 0 (USBIFR0) c.f. Section 23.5.7
++ */
++
++#define BRST 0x80
++#define EP1_FULL 0x40
++#define EP2_TR 0x20
++#define EP2_EMPTY 0x10
++#define SETUP_TS 0x08
++#define EP0o_TS 0x04
++#define EP0i_TR 0x02
++#define EP0i_TS 0x01
++
++
++/*
++ * USB Interrupt Flag Register 1 (USBIFR1) c.f. Section 23.5.8
++ */
++
++#define VBUSMN 0x08
++#define EP3_TR 0x04
++#define EP3_TS 0x02
++#define VBUSF 0x01
++
++/*
++ * USB Trigger Register (USBTRG) c.f. Section 23.5.9
++ */
++
++#define EP0i_PKTE 0x01
++#define EP0o_PKTE 0x02
++#define EP0o_RDFN 0x02
++#define EP0s_PKTE 0x04
++#define EP0s_RDFN 0x04
++
++#define EP2_PKTE 0x10
++#define EP1_PKTE 0x20
++#define EP1_RDFN 0x20
++#define EP3_PKTE 0x40
++
++
++/*
++ * USBFIFO Clear Register (USBFCLR) c.f. Section 23.5.10
++ */
++
++#define EP3_CLEAR 0x40
++#define EP1_CLEAR 0x20
++#define EP2_CLEAR 0x10
++#define EP0o_CLEAR 0x02
++#define EP0i_CLEAR 0x01
++
++
++/*
++ * USBEPSTL Endpoint Stall Register
++ */
++#define EP3_STL 0x08
++#define EP2_STL 0x04
++#define EP1_STL 0x02
++#define EP0_STL 0x01
++
++/*
++ * USBDASTS Data Status Register
++ */
++#define EP3_DE 0x20
++#define EP2_DE 0x10
++#define EP0i_DE 0x01
++
++/*
++ * Port Control Registers (PNCR) c.f. Section 26.2
++ */
++#define PN_PB0_OF 0x0000
++#define PN_PB0_PO 0x0001
++#define PN_PB0_PI_ON 0x0002
++#define PN_PB0_PI_OFF 0x0003
++#define PN_PB0_MSK ~0x0003
++
++#define PN_PB1_OF 0x0000
++#define PN_PB1_PO 0x0004
++#define PN_PB1_PI_ON 0x0008
++#define PN_PB1_PI_OFF 0x000c
++#define PN_PB1_MSK ~0x000c
++
++#define PN_PB2_OF 0x0000
++#define PN_PB2_PO 0x0010
++#define PN_PB2_PI_ON 0x0020
++#define PN_PB2_PI_OFF 0x0030
++#define PN_PB2_MSK ~0x0030
++
++#define PN_PB3_OF 0x0000
++#define PN_PB3_PO 0x0040
++#define PN_PB3_PI_ON 0x0080
++#define PN_PB3_PI_OFF 0x00c0
++#define PN_PB3_MSK ~0x00c0
++
++#define PN_PB4_OF 0x0000
++#define PN_PB4_PO 0x0100
++#define PN_PB4_PI_ON 0x0200
++#define PN_PB4_PI_OFF 0x0300
++#define PN_PB4_MSK ~0x0300
++
++#define PN_PB5_OF 0x0000
++#define PN_PB5_PO 0x0400
++#define PN_PB5_PI_ON 0x0800
++#define PN_PB5_PI_OFF 0x0c00
++#define PN_PB5_MSK ~0x0c00
++
++#define PN_PB6_OF 0x0000
++#define PN_PB6_PO 0x1000
++#define PN_PB6_PI_ON 0x2000
++#define PN_PB6_PI_OFF 0x3000
++#define PN_PB6_MSK ~0x3000
++
++#define PN_PB7_OF 0x0000
++#define PN_PB7_PO 0x4000
++#define PN_PB7_PI_ON 0x8000
++#define PN_PB7_PI_OFF 0xc000
++#define PN_PB7_MSK ~0xc000
++
++/*
++ * Debugging support vanishes in non-debug builds. DBG_NORMAL should be
++ * mostly silent during normal use/testing, with no timing side-effects.
++ */
++#define DBG_NORMAL 1 /* error paths, device state transitions */
++#define DBG_VERBOSE 2 /* add some success path trace info */
++#define DBG_NOISY 3 /* ... even more: request level */
++#define DBG_VERY_NOISY 4 /* ... even more: packet level */
++
++#ifdef DEBUG
++
++#define DMSG(stuff...) printk(KERN_DEBUG "udc: " stuff)
++
++#if defined(VERY_NOISY)
++# define UDC_DEBUG DBG_VERY_NOISY
++#elif defined(NOISY)
++# define UDC_DEBUG DBG_NOISY
++#elif defined(VERBOSE)
++# define UDC_DEBUG DBG_VERBOSE
++#else
++# define UDC_DEBUG DBG_NORMAL
++#endif
++
++static void __attribute__ ((__unused__))
++dump_state(struct superh_udc *dev)
++{
++ if (!is_usb_connected)
++ return;
++}
++
++
++#else
++
++#define DMSG(stuff...) do{}while(0)
++
++#define UDC_DEBUG ((unsigned)0)
++
++#define dump_state(x) do{}while(0)
++
++#endif
++
++#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
++
++#define WARN(stuff...) printk(KERN_WARNING "udc: " stuff)
++
++#endif /* __LINUX_USB_GADGET_SUPERH_UDC_H */
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/usbstring.c kernel/drivers/usb/gadget/usbstring.c
+--- /tmp/kernel/drivers/usb/gadget/usbstring.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/usbstring.c 2005-04-22 17:53:19.516526166 +0200
+@@ -0,0 +1,136 @@
++/*
++ * Copyright (C) 2003 David Brownell
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License as published
++ * by the Free Software Foundation; either version 2.1 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/list.h>
++#include <linux/string.h>
++#include <linux/init.h>
++
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++#include <asm/byteorder.h>
++#include <asm/unaligned.h>
++
++
++static int utf8_to_utf16le(const char *s, u16 *cp, unsigned len)
++{
++ int count = 0;
++ u8 c;
++ u16 uchar;
++
++ /* this insists on correct encodings, though not minimal ones.
++ * BUT it currently rejects legit 4-byte UTF-8 code points,
++ * which need surrogate pairs. (Unicode 3.1 can use them.)
++ */
++ while (len != 0 && (c = (u8) *s++) != 0) {
++ if (unlikely(c & 0x80)) {
++ // 2-byte sequence:
++ // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
++ if ((c & 0xe0) == 0xc0) {
++ uchar = (c & 0x1f) << 6;
++
++ c = (u8) *s++;
++ if ((c & 0xc0) != 0xc0)
++ goto fail;
++ c &= 0x3f;
++ uchar |= c;
++
++ // 3-byte sequence (most CJKV characters):
++ // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
++ } else if ((c & 0xf0) == 0xe0) {
++ uchar = (c & 0x0f) << 12;
++
++ c = (u8) *s++;
++ if ((c & 0xc0) != 0xc0)
++ goto fail;
++ c &= 0x3f;
++ uchar |= c << 6;
++
++ c = (u8) *s++;
++ if ((c & 0xc0) != 0xc0)
++ goto fail;
++ c &= 0x3f;
++ uchar |= c;
++
++ /* no bogus surrogates */
++ if (0xd800 <= uchar && uchar <= 0xdfff)
++ goto fail;
++
++ // 4-byte sequence (surrogate pairs, currently rare):
++ // 11101110wwwwzzzzyy + 110111yyyyxxxxxx
++ // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
++ // (uuuuu = wwww + 1)
++ // FIXME accept the surrogate code points (only)
++
++ } else
++ goto fail;
++ } else
++ uchar = c;
++ put_unaligned (cpu_to_le16 (uchar), cp++);
++ count++;
++ len--;
++ }
++ return count;
++fail:
++ return -1;
++}
++
++
++/**
++ * usb_gadget_get_string - fill out a string descriptor
++ * @table: of c strings encoded using UTF-8
++ * @id: string id, from low byte of wValue in get string descriptor
++ * @buf: at least 256 bytes
++ *
++ * Finds the UTF-8 string matching the ID, and converts it into a
++ * string descriptor in utf16-le.
++ * Returns length of descriptor (always even) or negative errno
++ *
++ * If your driver needs stings in multiple languages, you'll probably
++ * "switch (wIndex) { ... }" in your ep0 string descriptor logic,
++ * using this routine after choosing which set of UTF-8 strings to use.
++ * Note that US-ASCII is a strict subset of UTF-8; any string bytes with
++ * the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1
++ * characters (which are also widely used in C strings).
++ */
++int
++usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
++{
++ struct usb_string *s;
++ int len;
++
++ /* descriptor 0 has the language id */
++ if (id == 0) {
++ buf [0] = 4;
++ buf [1] = USB_DT_STRING;
++ buf [2] = (u8) table->language;
++ buf [3] = (u8) (table->language >> 8);
++ return 4;
++ }
++ for (s = table->strings; s && s->s; s++)
++ if (s->id == id)
++ break;
++
++ /* unrecognized: stall. */
++ if (!s || !s->s)
++ return -EINVAL;
++
++ /* string descriptors have length, tag, then UTF16-LE text */
++ len = min ((size_t) 126, strlen (s->s));
++ memset (buf + 2, 0, 2 * len); /* zero all the bytes */
++ len = utf8_to_utf16le(s->s, (u16 *)&buf[2], len);
++ if (len < 0)
++ return -EINVAL;
++ buf [0] = (len + 1) * 2;
++ buf [1] = USB_DT_STRING;
++ return buf [0];
++}
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/drivers/usb/gadget/zero.c kernel/drivers/usb/gadget/zero.c
+--- /tmp/kernel/drivers/usb/gadget/zero.c 1970-01-01 01:00:00.000000000 +0100
++++ kernel/drivers/usb/gadget/zero.c 2005-04-22 17:53:19.521525352 +0200
+@@ -0,0 +1,1363 @@
++/*
++ * zero.c -- Gadget Zero, for USB development
++ *
++ * Copyright (C) 2003-2004 David Brownell
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions, and the following disclaimer,
++ * without modification.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * 3. The names of the above-listed copyright holders may not be used
++ * to endorse or promote products derived from this software without
++ * specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
++ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
++ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
++ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
++ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
++ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/*
++ * Gadget Zero only needs two bulk endpoints, and is an example of how you
++ * can write a hardware-agnostic gadget driver running inside a USB device.
++ *
++ * Hardware details are visible (see CONFIG_USB_ZERO_* below) but don't
++ * affect most of the driver.
++ *
++ * Use it with the Linux host/master side "usbtest" driver to get a basic
++ * functional test of your device-side usb stack, or with "usb-skeleton".
++ *
++ * It supports two similar configurations. One sinks whatever the usb host
++ * writes, and in return sources zeroes. The other loops whatever the host
++ * writes back, so the host can read it. Module options include:
++ *
++ * buflen=N default N=4096, buffer size used
++ * qlen=N default N=32, how many buffers in the loopback queue
++ * loopdefault default false, list loopback config first
++ *
++ * Many drivers will only have one configuration, letting them be much
++ * simpler if they also don't support high speed operation (like this
++ * driver does).
++ */
++
++#define DEBUG 1
++// #define VERBOSE
++
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/smp_lock.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/uts.h>
++#include <linux/version.h>
++
++#include <asm/byteorder.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++#include <asm/unaligned.h>
++
++#include <linux/usb_ch9.h>
++#include <linux/usb_gadget.h>
++
++#include "gadget_chips.h"
++
++
++/*-------------------------------------------------------------------------*/
++
++#define DRIVER_VERSION "St Patrick's Day 2004"
++
++static const char shortname [] = "zero";
++static const char longname [] = "Gadget Zero";
++
++static const char source_sink [] = "source and sink data";
++static const char loopback [] = "loop input to output";
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * driver assumes self-powered hardware, and
++ * has no way for users to trigger remote wakeup.
++ *
++ * this version autoconfigures as much as possible,
++ * which is reasonable for most "bulk-only" drivers.
++ */
++static const char *EP_IN_NAME; /* source */
++static const char *EP_OUT_NAME; /* sink */
++
++/*-------------------------------------------------------------------------*/
++
++/* big enough to hold our biggest descriptor */
++#define USB_BUFSIZ 256
++
++struct zero_dev {
++ spinlock_t lock;
++ struct usb_gadget *gadget;
++ struct usb_request *req; /* for control responses */
++
++ /* when configured, we have one of two configs:
++ * - source data (in to host) and sink it (out from host)
++ * - or loop it back (out from host back in to host)
++ */
++ u8 config;
++ struct usb_ep *in_ep, *out_ep;
++
++ /* autoresume timer */
++ struct timer_list resume;
++};
++
++#define xprintk(d,level,fmt,args...) \
++ printk(level "%s %s: " fmt , shortname , (d)->gadget->dev.bus_id , \
++ ## args)
++
++#ifdef DEBUG
++#define DBG(dev,fmt,args...) \
++ xprintk(dev , KERN_DEBUG , fmt , ## args)
++#else
++#define DBG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* DEBUG */
++
++#ifdef VERBOSE
++#define VDBG DBG
++#else
++#define VDBG(dev,fmt,args...) \
++ do { } while (0)
++#endif /* VERBOSE */
++
++#define ERROR(dev,fmt,args...) \
++ xprintk(dev , KERN_ERR , fmt , ## args)
++#define WARN(dev,fmt,args...) \
++ xprintk(dev , KERN_WARNING , fmt , ## args)
++#define INFO(dev,fmt,args...) \
++ xprintk(dev , KERN_INFO , fmt , ## args)
++
++/*-------------------------------------------------------------------------*/
++
++static unsigned buflen = 4096;
++static unsigned qlen = 32;
++static unsigned pattern = 0;
++
++/*
++ * Normally the "loopback" configuration is second (index 1) so
++ * it's not the default. Here's where to change that order, to
++ * work better with hosts where config changes are problematic.
++ * Or controllers (like superh) that only support one config.
++ */
++static int loopdefault = 0;
++
++
++MODULE_PARM (buflen, "i");
++MODULE_PARM_DESC (buflen, "size of i/o buffers");
++
++MODULE_PARM (qlen, "i");
++MODULE_PARM_DESC (qlen, "depth of loopback buffering");
++
++MODULE_PARM (pattern, "i");
++MODULE_PARM_DESC (pattern, "0 for default all-zeroes, 1 for mod63");
++
++MODULE_PARM (loopdefault, "b");
++MODULE_PARM_DESC (loopdefault, "true to have default config be loopback");
++
++/*
++ * if it's nonzero, autoresume says how many seconds to wait
++ * before trying to wake up the host after suspend.
++ */
++static unsigned autoresume = 0;
++MODULE_PARM (autoresume, "i");
++
++/*-------------------------------------------------------------------------*/
++
++/* Thanks to NetChip Technologies for donating this product ID.
++ *
++ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
++ * Instead: allocate your own, using normal USB-IF procedures.
++ */
++#ifndef CONFIG_USB_ZERO_HNPTEST
++#define DRIVER_VENDOR_NUM 0x0525 /* NetChip */
++#define DRIVER_PRODUCT_NUM 0xa4a0 /* Linux-USB "Gadget Zero" */
++#else
++#define DRIVER_VENDOR_NUM 0x1a0a /* OTG test device IDs */
++#define DRIVER_PRODUCT_NUM 0xbadd
++#endif
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * DESCRIPTORS ... most are static, but strings and (full)
++ * configuration descriptors are built on demand.
++ */
++
++#define STRING_MANUFACTURER 25
++#define STRING_PRODUCT 42
++#define STRING_SERIAL 101
++#define STRING_SOURCE_SINK 250
++#define STRING_LOOPBACK 251
++
++/*
++ * This device advertises two configurations; these numbers work
++ * on a pxa250 as well as more flexible hardware.
++ */
++#define CONFIG_SOURCE_SINK 3
++#define CONFIG_LOOPBACK 2
++
++static struct usb_device_descriptor
++device_desc = {
++ .bLength = sizeof device_desc,
++ .bDescriptorType = USB_DT_DEVICE,
++
++ .bcdUSB = __constant_cpu_to_le16 (0x0200),
++ .bDeviceClass = USB_CLASS_VENDOR_SPEC,
++
++ .idVendor = __constant_cpu_to_le16 (DRIVER_VENDOR_NUM),
++ .idProduct = __constant_cpu_to_le16 (DRIVER_PRODUCT_NUM),
++ .iManufacturer = STRING_MANUFACTURER,
++ .iProduct = STRING_PRODUCT,
++ .iSerialNumber = STRING_SERIAL,
++ .bNumConfigurations = 2,
++};
++
++static struct usb_config_descriptor
++source_sink_config = {
++ .bLength = sizeof source_sink_config,
++ .bDescriptorType = USB_DT_CONFIG,
++
++ /* compute wTotalLength on the fly */
++ .bNumInterfaces = 1,
++ .bConfigurationValue = CONFIG_SOURCE_SINK,
++ .iConfiguration = STRING_SOURCE_SINK,
++ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
++ .bMaxPower = 1, /* self-powered */
++};
++
++static struct usb_config_descriptor
++loopback_config = {
++ .bLength = sizeof loopback_config,
++ .bDescriptorType = USB_DT_CONFIG,
++
++ /* compute wTotalLength on the fly */
++ .bNumInterfaces = 1,
++ .bConfigurationValue = CONFIG_LOOPBACK,
++ .iConfiguration = STRING_LOOPBACK,
++ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
++ .bMaxPower = 1, /* self-powered */
++};
++
++static struct usb_otg_descriptor
++otg_descriptor = {
++ .bLength = sizeof otg_descriptor,
++ .bDescriptorType = USB_DT_OTG,
++
++ .bmAttributes = USB_OTG_SRP,
++};
++
++/* one interface in each configuration */
++
++static const struct usb_interface_descriptor
++source_sink_intf = {
++ .bLength = sizeof source_sink_intf,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bNumEndpoints = 2,
++ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
++ .iInterface = STRING_SOURCE_SINK,
++};
++
++static const struct usb_interface_descriptor
++loopback_intf = {
++ .bLength = sizeof loopback_intf,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bNumEndpoints = 2,
++ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
++ .iInterface = STRING_LOOPBACK,
++};
++
++/* two full speed bulk endpoints; their use is config-dependent */
++
++static struct usb_endpoint_descriptor
++fs_source_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_IN,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++};
++
++static struct usb_endpoint_descriptor
++fs_sink_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_OUT,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++};
++
++static const struct usb_descriptor_header *fs_source_sink_function [] = {
++ (struct usb_descriptor_header *) &otg_descriptor,
++ (struct usb_descriptor_header *) &source_sink_intf,
++ (struct usb_descriptor_header *) &fs_sink_desc,
++ (struct usb_descriptor_header *) &fs_source_desc,
++ NULL,
++};
++
++static const struct usb_descriptor_header *fs_loopback_function [] = {
++ (struct usb_descriptor_header *) &otg_descriptor,
++ (struct usb_descriptor_header *) &loopback_intf,
++ (struct usb_descriptor_header *) &fs_sink_desc,
++ (struct usb_descriptor_header *) &fs_source_desc,
++ NULL,
++};
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++
++/*
++ * usb 2.0 devices need to expose both high speed and full speed
++ * descriptors, unless they only run at full speed.
++ *
++ * that means alternate endpoint descriptors (bigger packets)
++ * and a "device qualifier" ... plus more construction options
++ * for the config descriptor.
++ */
++
++static struct usb_endpoint_descriptor
++hs_source_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = __constant_cpu_to_le16 (512),
++};
++
++static struct usb_endpoint_descriptor
++hs_sink_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = __constant_cpu_to_le16 (512),
++};
++
++static struct usb_qualifier_descriptor
++dev_qualifier = {
++ .bLength = sizeof dev_qualifier,
++ .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
++
++ .bcdUSB = __constant_cpu_to_le16 (0x0200),
++ .bDeviceClass = USB_CLASS_VENDOR_SPEC,
++
++ .bNumConfigurations = 2,
++};
++
++static const struct usb_descriptor_header *hs_source_sink_function [] = {
++ (struct usb_descriptor_header *) &otg_descriptor,
++ (struct usb_descriptor_header *) &source_sink_intf,
++ (struct usb_descriptor_header *) &hs_source_desc,
++ (struct usb_descriptor_header *) &hs_sink_desc,
++ NULL,
++};
++
++static const struct usb_descriptor_header *hs_loopback_function [] = {
++ (struct usb_descriptor_header *) &otg_descriptor,
++ (struct usb_descriptor_header *) &loopback_intf,
++ (struct usb_descriptor_header *) &hs_source_desc,
++ (struct usb_descriptor_header *) &hs_sink_desc,
++ NULL,
++};
++
++/* maxpacket and other transfer characteristics vary by speed. */
++#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs))
++
++#else
++
++/* if there's no high speed support, maxpacket doesn't change. */
++#define ep_desc(g,hs,fs) fs
++
++#endif /* !CONFIG_USB_GADGET_DUALSPEED */
++
++static char manufacturer [50];
++static char serial [40];
++
++/* static strings, in UTF-8 */
++static struct usb_string strings [] = {
++ { STRING_MANUFACTURER, manufacturer, },
++ { STRING_PRODUCT, longname, },
++ { STRING_SERIAL, serial, },
++ { STRING_LOOPBACK, loopback, },
++ { STRING_SOURCE_SINK, source_sink, },
++ { } /* end of list */
++};
++
++static struct usb_gadget_strings stringtab = {
++ .language = 0x0409, /* en-us */
++ .strings = strings,
++};
++
++/*
++ * config descriptors are also handcrafted. these must agree with code
++ * that sets configurations, and with code managing interfaces and their
++ * altsettings. other complexity may come from:
++ *
++ * - high speed support, including "other speed config" rules
++ * - multiple configurations
++ * - interfaces with alternate settings
++ * - embedded class or vendor-specific descriptors
++ *
++ * this handles high speed, and has a second config that could as easily
++ * have been an alternate interface setting (on most hardware).
++ *
++ * NOTE: to demonstrate (and test) more USB capabilities, this driver
++ * should include an altsetting to test interrupt transfers, including
++ * high bandwidth modes at high speed. (Maybe work like Intel's test
++ * device?)
++ */
++static int
++config_buf (struct usb_gadget *gadget,
++ u8 *buf, u8 type, unsigned index)
++{
++ int is_source_sink;
++ int len;
++ const struct usb_descriptor_header **function;
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ int hs = (gadget->speed == USB_SPEED_HIGH);
++#endif
++
++ /* two configurations will always be index 0 and index 1 */
++ if (index > 1)
++ return -EINVAL;
++ is_source_sink = loopdefault ? (index == 1) : (index == 0);
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ if (type == USB_DT_OTHER_SPEED_CONFIG)
++ hs = !hs;
++ if (hs)
++ function = is_source_sink
++ ? hs_source_sink_function
++ : hs_loopback_function;
++ else
++#endif
++ function = is_source_sink
++ ? fs_source_sink_function
++ : fs_loopback_function;
++
++ /* for now, don't advertise srp-only devices */
++ if (!gadget->is_otg)
++ function++;
++
++ len = usb_gadget_config_buf (is_source_sink
++ ? &source_sink_config
++ : &loopback_config,
++ buf, USB_BUFSIZ, function);
++ if (len < 0)
++ return len;
++ ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
++ return len;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_request *
++alloc_ep_req (struct usb_ep *ep, unsigned length)
++{
++ struct usb_request *req;
++
++ req = usb_ep_alloc_request (ep, GFP_ATOMIC);
++ if (req) {
++ req->length = length;
++ req->buf = usb_ep_alloc_buffer (ep, length,
++ &req->dma, GFP_ATOMIC);
++ if (!req->buf) {
++ usb_ep_free_request (ep, req);
++ req = NULL;
++ }
++ }
++ return req;
++}
++
++static void free_ep_req (struct usb_ep *ep, struct usb_request *req)
++{
++ if (req->buf)
++ usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
++ usb_ep_free_request (ep, req);
++}
++
++/*-------------------------------------------------------------------------*/
++
++/* optionally require specific source/sink data patterns */
++
++static int
++check_read_data (
++ struct zero_dev *dev,
++ struct usb_ep *ep,
++ struct usb_request *req
++)
++{
++ unsigned i;
++ u8 *buf = req->buf;
++
++ for (i = 0; i < req->actual; i++, buf++) {
++ switch (pattern) {
++ /* all-zeroes has no synchronization issues */
++ case 0:
++ if (*buf == 0)
++ continue;
++ break;
++ /* mod63 stays in sync with short-terminated transfers,
++ * or otherwise when host and gadget agree on how large
++ * each usb transfer request should be. resync is done
++ * with set_interface or set_config.
++ */
++ case 1:
++ if (*buf == (u8)(i % 63))
++ continue;
++ break;
++ }
++ ERROR (dev, "bad OUT byte, buf [%d] = %d\n", i, *buf);
++ usb_ep_set_halt (ep);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static void
++reinit_write_data (
++ struct zero_dev *dev,
++ struct usb_ep *ep,
++ struct usb_request *req
++)
++{
++ unsigned i;
++ u8 *buf = req->buf;
++
++ switch (pattern) {
++ case 0:
++ memset (req->buf, 0, req->length);
++ break;
++ case 1:
++ for (i = 0; i < req->length; i++)
++ *buf++ = (u8) (i % 63);
++ break;
++ }
++}
++
++/* if there is only one request in the queue, there'll always be an
++ * irq delay between end of one request and start of the next.
++ * that prevents using hardware dma queues.
++ */
++static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ struct zero_dev *dev = ep->driver_data;
++ int status = req->status;
++
++ switch (status) {
++
++ case 0: /* normal completion? */
++ if (ep == dev->out_ep)
++ check_read_data (dev, ep, req);
++ else
++ reinit_write_data (dev, ep, req);
++ break;
++
++ /* this endpoint is normally active while we're configured */
++ case -ECONNABORTED: /* hardware forced ep reset */
++ case -ECONNRESET: /* request dequeued */
++ case -ESHUTDOWN: /* disconnect from host */
++ VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status,
++ req->actual, req->length);
++ if (ep == dev->out_ep)
++ check_read_data (dev, ep, req);
++ free_ep_req (ep, req);
++ return;
++
++ case -EOVERFLOW: /* buffer overrun on read means that
++ * we didn't provide a big enough
++ * buffer.
++ */
++ default:
++#if 1
++ DBG (dev, "%s complete --> %d, %d/%d\n", ep->name,
++ status, req->actual, req->length);
++#endif
++ case -EREMOTEIO: /* short read */
++ break;
++ }
++
++ status = usb_ep_queue (ep, req, GFP_ATOMIC);
++ if (status) {
++ ERROR (dev, "kill %s: resubmit %d bytes --> %d\n",
++ ep->name, req->length, status);
++ usb_ep_set_halt (ep);
++ /* FIXME recover later ... somehow */
++ }
++}
++
++static struct usb_request *
++source_sink_start_ep (struct usb_ep *ep, int gfp_flags)
++{
++ struct usb_request *req;
++ int status;
++
++ req = alloc_ep_req (ep, buflen);
++ if (!req)
++ return NULL;
++
++ memset (req->buf, 0, req->length);
++ req->complete = source_sink_complete;
++
++ if (strcmp (ep->name, EP_IN_NAME) == 0)
++ reinit_write_data (ep->driver_data, ep, req);
++
++ status = usb_ep_queue (ep, req, gfp_flags);
++ if (status) {
++ struct zero_dev *dev = ep->driver_data;
++
++ ERROR (dev, "start %s --> %d\n", ep->name, status);
++ free_ep_req (ep, req);
++ req = NULL;
++ }
++
++ return req;
++}
++
++static int
++set_source_sink_config (struct zero_dev *dev, int gfp_flags)
++{
++ int result = 0;
++ struct usb_ep *ep;
++ struct usb_gadget *gadget = dev->gadget;
++
++ gadget_for_each_ep (ep, gadget) {
++ const struct usb_endpoint_descriptor *d;
++
++ /* one endpoint writes (sources) zeroes in (to the host) */
++ if (strcmp (ep->name, EP_IN_NAME) == 0) {
++ d = ep_desc (gadget, &hs_source_desc, &fs_source_desc);
++ result = usb_ep_enable (ep, d);
++ if (result == 0) {
++ ep->driver_data = dev;
++ if (source_sink_start_ep (ep, gfp_flags) != 0) {
++ dev->in_ep = ep;
++ continue;
++ }
++ usb_ep_disable (ep);
++ result = -EIO;
++ }
++
++ /* one endpoint reads (sinks) anything out (from the host) */
++ } else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
++ d = ep_desc (gadget, &hs_sink_desc, &fs_sink_desc);
++ result = usb_ep_enable (ep, d);
++ if (result == 0) {
++ ep->driver_data = dev;
++ if (source_sink_start_ep (ep, gfp_flags) != 0) {
++ dev->out_ep = ep;
++ continue;
++ }
++ usb_ep_disable (ep);
++ result = -EIO;
++ }
++
++ /* ignore any other endpoints */
++ } else
++ continue;
++
++ /* stop on error */
++ ERROR (dev, "can't start %s, result %d\n", ep->name, result);
++ break;
++ }
++ if (result == 0)
++ DBG (dev, "buflen %d\n", buflen);
++
++ /* caller is responsible for cleanup on error */
++ return result;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ struct zero_dev *dev = ep->driver_data;
++ int status = req->status;
++
++ switch (status) {
++
++ case 0: /* normal completion? */
++ if (ep == dev->out_ep) {
++ /* loop this OUT packet back IN to the host */
++ req->zero = (req->actual < req->length);
++ req->length = req->actual;
++ status = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC);
++ if (status == 0)
++ return;
++
++ /* "should never get here" */
++ ERROR (dev, "can't loop %s to %s: %d\n",
++ ep->name, dev->in_ep->name,
++ status);
++ }
++
++ /* queue the buffer for some later OUT packet */
++ req->length = buflen;
++ status = usb_ep_queue (dev->out_ep, req, GFP_ATOMIC);
++ if (status == 0)
++ return;
++
++ /* "should never get here" */
++ /* FALLTHROUGH */
++
++ default:
++ ERROR (dev, "%s loop complete --> %d, %d/%d\n", ep->name,
++ status, req->actual, req->length);
++ /* FALLTHROUGH */
++
++ /* NOTE: since this driver doesn't maintain an explicit record
++ * of requests it submitted (just maintains qlen count), we
++ * rely on the hardware driver to clean up on disconnect or
++ * endpoint disable.
++ */
++ case -ECONNABORTED: /* hardware forced ep reset */
++ case -ECONNRESET: /* request dequeued */
++ case -ESHUTDOWN: /* disconnect from host */
++ free_ep_req (ep, req);
++ return;
++ }
++}
++
++static int
++set_loopback_config (struct zero_dev *dev, int gfp_flags)
++{
++ int result = 0;
++ struct usb_ep *ep;
++ struct usb_gadget *gadget = dev->gadget;
++
++ gadget_for_each_ep (ep, gadget) {
++ const struct usb_endpoint_descriptor *d;
++
++ /* one endpoint writes data back IN to the host */
++ if (strcmp (ep->name, EP_IN_NAME) == 0) {
++ d = ep_desc (gadget, &hs_source_desc, &fs_source_desc);
++ result = usb_ep_enable (ep, d);
++ if (result == 0) {
++ ep->driver_data = dev;
++ dev->in_ep = ep;
++ continue;
++ }
++
++ /* one endpoint just reads OUT packets */
++ } else if (strcmp (ep->name, EP_OUT_NAME) == 0) {
++ d = ep_desc (gadget, &hs_sink_desc, &fs_sink_desc);
++ result = usb_ep_enable (ep, d);
++ if (result == 0) {
++ ep->driver_data = dev;
++ dev->out_ep = ep;
++ continue;
++ }
++
++ /* ignore any other endpoints */
++ } else
++ continue;
++
++ /* stop on error */
++ ERROR (dev, "can't enable %s, result %d\n", ep->name, result);
++ break;
++ }
++
++ /* allocate a bunch of read buffers and queue them all at once.
++ * we buffer at most 'qlen' transfers; fewer if any need more
++ * than 'buflen' bytes each.
++ */
++ if (result == 0) {
++ struct usb_request *req;
++ unsigned i;
++
++ ep = dev->out_ep;
++ for (i = 0; i < qlen && result == 0; i++) {
++ req = alloc_ep_req (ep, buflen);
++ if (req) {
++ req->complete = loopback_complete;
++ result = usb_ep_queue (ep, req, GFP_ATOMIC);
++ if (result)
++ DBG (dev, "%s queue req --> %d\n",
++ ep->name, result);
++ } else
++ result = -ENOMEM;
++ }
++ }
++ if (result == 0)
++ DBG (dev, "qlen %d, buflen %d\n", qlen, buflen);
++
++ /* caller is responsible for cleanup on error */
++ return result;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void zero_reset_config (struct zero_dev *dev)
++{
++ if (dev->config == 0)
++ return;
++
++ DBG (dev, "reset config\n");
++
++ /* just disable endpoints, forcing completion of pending i/o.
++ * all our completion handlers free their requests in this case.
++ */
++ if (dev->in_ep) {
++ usb_ep_disable (dev->in_ep);
++ dev->in_ep = NULL;
++ }
++ if (dev->out_ep) {
++ usb_ep_disable (dev->out_ep);
++ dev->out_ep = NULL;
++ }
++ dev->config = 0;
++ del_timer (&dev->resume);
++}
++
++/* change our operational config. this code must agree with the code
++ * that returns config descriptors, and altsetting code.
++ *
++ * it's also responsible for power management interactions. some
++ * configurations might not work with our current power sources.
++ *
++ * note that some device controller hardware will constrain what this
++ * code can do, perhaps by disallowing more than one configuration or
++ * by limiting configuration choices (like the pxa2xx).
++ */
++static int
++zero_set_config (struct zero_dev *dev, unsigned number, int gfp_flags)
++{
++ int result = 0;
++ struct usb_gadget *gadget = dev->gadget;
++
++ if (number == dev->config)
++ return 0;
++
++ if (gadget_is_sa1100 (gadget) && dev->config) {
++ /* tx fifo is full, but we can't clear it...*/
++ INFO (dev, "can't change configurations\n");
++ return -ESPIPE;
++ }
++ zero_reset_config (dev);
++
++ switch (number) {
++ case CONFIG_SOURCE_SINK:
++ result = set_source_sink_config (dev, gfp_flags);
++ break;
++ case CONFIG_LOOPBACK:
++ result = set_loopback_config (dev, gfp_flags);
++ break;
++ default:
++ result = -EINVAL;
++ /* FALL THROUGH */
++ case 0:
++ return result;
++ }
++
++ if (!result && (!dev->in_ep || !dev->out_ep))
++ result = -ENODEV;
++ if (result)
++ zero_reset_config (dev);
++ else {
++ char *speed;
++
++ switch (gadget->speed) {
++ case USB_SPEED_LOW: speed = "low"; break;
++ case USB_SPEED_FULL: speed = "full"; break;
++ case USB_SPEED_HIGH: speed = "high"; break;
++ default: speed = "?"; break;
++ }
++
++ dev->config = number;
++ INFO (dev, "%s speed config #%d: %s\n", speed, number,
++ (number == CONFIG_SOURCE_SINK)
++ ? source_sink : loopback);
++ }
++ return result;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void zero_setup_complete (struct usb_ep *ep, struct usb_request *req)
++{
++ if (req->status || req->actual != req->length)
++ DBG ((struct zero_dev *) ep->driver_data,
++ "setup complete --> %d, %d/%d\n",
++ req->status, req->actual, req->length);
++}
++
++/*
++ * The setup() callback implements all the ep0 functionality that's
++ * not handled lower down, in hardware or the hardware driver (like
++ * device and endpoint feature flags, and their status). It's all
++ * housekeeping for the gadget function we're implementing. Most of
++ * the work is in config-specific setup.
++ */
++static int
++zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
++{
++ struct zero_dev *dev = get_gadget_data (gadget);
++ struct usb_request *req = dev->req;
++ int value = -EOPNOTSUPP;
++
++ /* usually this stores reply data in the pre-allocated ep0 buffer,
++ * but config change events will reconfigure hardware.
++ */
++ req->zero = 0;
++ switch (ctrl->bRequest) {
++
++ case USB_REQ_GET_DESCRIPTOR:
++ if (ctrl->bRequestType != USB_DIR_IN)
++ goto unknown;
++ switch (ctrl->wValue >> 8) {
++
++ case USB_DT_DEVICE:
++ value = min (ctrl->wLength, (u16) sizeof device_desc);
++ memcpy (req->buf, &device_desc, value);
++ break;
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ case USB_DT_DEVICE_QUALIFIER:
++ if (!gadget->is_dualspeed)
++ break;
++ value = min (ctrl->wLength, (u16) sizeof dev_qualifier);
++ memcpy (req->buf, &dev_qualifier, value);
++ break;
++
++ case USB_DT_OTHER_SPEED_CONFIG:
++ if (!gadget->is_dualspeed)
++ break;
++ // FALLTHROUGH
++#endif /* CONFIG_USB_GADGET_DUALSPEED */
++ case USB_DT_CONFIG:
++ value = config_buf (gadget, req->buf,
++ ctrl->wValue >> 8,
++ ctrl->wValue & 0xff);
++ if (value >= 0)
++ value = min (ctrl->wLength, (u16) value);
++ break;
++
++ case USB_DT_STRING:
++ /* wIndex == language code.
++ * this driver only handles one language, you can
++ * add string tables for other languages, using
++ * any UTF-8 characters
++ */
++ value = usb_gadget_get_string (&stringtab,
++ ctrl->wValue & 0xff, req->buf);
++ if (value >= 0)
++ value = min (ctrl->wLength, (u16) value);
++ break;
++ }
++ break;
++
++ /* currently two configs, two speeds */
++ case USB_REQ_SET_CONFIGURATION:
++ if (ctrl->bRequestType != 0)
++ goto unknown;
++ if (gadget->a_hnp_support)
++ DBG (dev, "HNP available\n");
++ else if (gadget->a_alt_hnp_support)
++ DBG (dev, "HNP needs a different root port\n");
++ else
++ VDBG (dev, "HNP inactive\n");
++ spin_lock (&dev->lock);
++ value = zero_set_config (dev, ctrl->wValue, GFP_ATOMIC);
++ spin_unlock (&dev->lock);
++ break;
++ case USB_REQ_GET_CONFIGURATION:
++ if (ctrl->bRequestType != USB_DIR_IN)
++ goto unknown;
++ *(u8 *)req->buf = dev->config;
++ value = min (ctrl->wLength, (u16) 1);
++ break;
++
++ /* until we add altsetting support, or other interfaces,
++ * only 0/0 are possible. pxa2xx only supports 0/0 (poorly)
++ * and already killed pending endpoint I/O.
++ */
++ case USB_REQ_SET_INTERFACE:
++ if (ctrl->bRequestType != USB_RECIP_INTERFACE)
++ goto unknown;
++ spin_lock (&dev->lock);
++ if (dev->config && ctrl->wIndex == 0 && ctrl->wValue == 0) {
++ u8 config = dev->config;
++
++ /* resets interface configuration, forgets about
++ * previous transaction state (queued bufs, etc)
++ * and re-inits endpoint state (toggle etc)
++ * no response queued, just zero status == success.
++ * if we had more than one interface we couldn't
++ * use this "reset the config" shortcut.
++ */
++ zero_reset_config (dev);
++ zero_set_config (dev, config, GFP_ATOMIC);
++ value = 0;
++ }
++ spin_unlock (&dev->lock);
++ break;
++ case USB_REQ_GET_INTERFACE:
++ if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
++ goto unknown;
++ if (!dev->config)
++ break;
++ if (ctrl->wIndex != 0) {
++ value = -EDOM;
++ break;
++ }
++ *(u8 *)req->buf = 0;
++ value = min (ctrl->wLength, (u16) 1);
++ break;
++
++ /*
++ * These are the same vendor-specific requests supported by
++ * Intel's USB 2.0 compliance test devices. We exceed that
++ * device spec by allowing multiple-packet requests.
++ */
++ case 0x5b: /* control WRITE test -- fill the buffer */
++ if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR))
++ goto unknown;
++ if (ctrl->wValue || ctrl->wIndex)
++ break;
++ /* just read that many bytes into the buffer */
++ if (ctrl->wLength > USB_BUFSIZ)
++ break;
++ value = ctrl->wLength;
++ break;
++ case 0x5c: /* control READ test -- return the buffer */
++ if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR))
++ goto unknown;
++ if (ctrl->wValue || ctrl->wIndex)
++ break;
++ /* expect those bytes are still in the buffer; send back */
++ if (ctrl->wLength > USB_BUFSIZ
++ || ctrl->wLength != req->length)
++ break;
++ value = ctrl->wLength;
++ break;
++
++ default:
++unknown:
++ VDBG (dev,
++ "unknown control req%02x.%02x v%04x i%04x l%d\n",
++ ctrl->bRequestType, ctrl->bRequest,
++ ctrl->wValue, ctrl->wIndex, ctrl->wLength);
++ }
++
++ /* respond with data transfer before status phase? */
++ if (value >= 0) {
++ req->length = value;
++ req->zero = value < ctrl->wLength
++ && (value % gadget->ep0->maxpacket) == 0;
++ value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
++ if (value < 0) {
++ DBG (dev, "ep_queue --> %d\n", value);
++ req->status = 0;
++ zero_setup_complete (gadget->ep0, req);
++ }
++ }
++
++ /* device either stalls (value < 0) or reports success */
++ return value;
++}
++
++static void
++zero_disconnect (struct usb_gadget *gadget)
++{
++ struct zero_dev *dev = get_gadget_data (gadget);
++ unsigned long flags;
++
++ spin_lock_irqsave (&dev->lock, flags);
++ zero_reset_config (dev);
++
++ /* a more significant application might have some non-usb
++ * activities to quiesce here, saving resources like power
++ * or pushing the notification up a network stack.
++ */
++ spin_unlock_irqrestore (&dev->lock, flags);
++
++ /* next we may get setup() calls to enumerate new connections;
++ * or an unbind() during shutdown (including removing module).
++ */
++}
++
++static void
++zero_autoresume (unsigned long _dev)
++{
++ struct zero_dev *dev = (struct zero_dev *) _dev;
++ int status;
++
++ /* normally the host would be woken up for something
++ * more significant than just a timer firing...
++ */
++ if (dev->gadget->speed != USB_SPEED_UNKNOWN) {
++ status = usb_gadget_wakeup (dev->gadget);
++ DBG (dev, "wakeup --> %d\n", status);
++ }
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void
++zero_unbind (struct usb_gadget *gadget)
++{
++ struct zero_dev *dev = get_gadget_data (gadget);
++
++ DBG (dev, "unbind\n");
++
++ /* we've already been disconnected ... no i/o is active */
++ if (dev->req)
++ free_ep_req (gadget->ep0, dev->req);
++ del_timer_sync (&dev->resume);
++ kfree (dev);
++ set_gadget_data (gadget, NULL);
++}
++
++static int
++zero_bind (struct usb_gadget *gadget)
++{
++ struct zero_dev *dev;
++ struct usb_ep *ep;
++
++ /* Bulk-only drivers like this one SHOULD be able to
++ * autoconfigure on any sane usb controller driver,
++ * but there may also be important quirks to address.
++ */
++ usb_ep_autoconfig_reset (gadget);
++ ep = usb_ep_autoconfig (gadget, &fs_source_desc);
++ if (!ep) {
++autoconf_fail:
++ printk (KERN_ERR "%s: can't autoconfigure on %s\n",
++ shortname, gadget->name);
++ return -ENODEV;
++ }
++ EP_IN_NAME = ep->name;
++ ep->driver_data = ep; /* claim */
++
++ ep = usb_ep_autoconfig (gadget, &fs_sink_desc);
++ if (!ep)
++ goto autoconf_fail;
++ EP_OUT_NAME = ep->name;
++ ep->driver_data = ep; /* claim */
++
++
++ /*
++ * DRIVER POLICY CHOICE: you may want to do this differently.
++ * One thing to avoid is reusing a bcdDevice revision code
++ * with different host-visible configurations or behavior
++ * restrictions -- using ep1in/ep2out vs ep1out/ep3in, etc
++ */
++ if (gadget_is_net2280 (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201);
++ } else if (gadget_is_pxa (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0203);
++#if 0
++ } else if (gadget_is_sh(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0204);
++ /* SH has only one configuration; see "loopdefault" */
++ device_desc.bNumConfigurations = 1;
++ /* FIXME make 1 == default.bConfigurationValue */
++#endif
++ } else if (gadget_is_sa1100 (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0205);
++ } else if (gadget_is_goku (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0206);
++ } else if (gadget_is_mq11xx (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0207);
++ } else if (gadget_is_omap (gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0208);
++ } else if (gadget_is_lh7a40x(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0209);
++ } else if (gadget_is_n9604(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0210);
++ } else if (gadget_is_pxa27x(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0211);
++ } else if (gadget_is_s3c2410(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0212);
++ } else if (gadget_is_at91(gadget)) {
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x0213);
++ } else {
++ /* gadget zero is so simple (for now, no altsettings) that
++ * it SHOULD NOT have problems with bulk-capable hardware.
++ * so warn about unrecognized controllers, don't panic.
++ *
++ * things like configuration and altsetting numbering
++ * can need hardware-specific attention though.
++ */
++ printk (KERN_WARNING "%s: controller '%s' not recognized\n",
++ shortname, gadget->name);
++ device_desc.bcdDevice = __constant_cpu_to_le16 (0x9999);
++ }
++
++
++ /* ok, we made sense of the hardware ... */
++ dev = kmalloc (sizeof *dev, SLAB_KERNEL);
++ if (!dev)
++ return -ENOMEM;
++ memset (dev, 0, sizeof *dev);
++ spin_lock_init (&dev->lock);
++ dev->gadget = gadget;
++ set_gadget_data (gadget, dev);
++
++ /* preallocate control response and buffer */
++ dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
++ if (!dev->req)
++ goto enomem;
++ dev->req->buf = usb_ep_alloc_buffer (gadget->ep0, USB_BUFSIZ,
++ &dev->req->dma, GFP_KERNEL);
++ if (!dev->req->buf)
++ goto enomem;
++
++ dev->req->complete = zero_setup_complete;
++
++ device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
++
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ /* assume ep0 uses the same value for both speeds ... */
++ dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
++
++ /* and that all endpoints are dual-speed */
++ hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
++ hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
++#endif
++
++ if (gadget->is_otg) {
++ otg_descriptor.bmAttributes |= USB_OTG_HNP,
++ source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
++ loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
++ }
++
++ if (gadget->is_otg) {
++ otg_descriptor.bmAttributes |= USB_OTG_HNP,
++ source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
++ loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
++ }
++
++ usb_gadget_set_selfpowered (gadget);
++
++ init_timer (&dev->resume);
++ dev->resume.function = zero_autoresume;
++ dev->resume.data = (unsigned long) dev;
++ if (autoresume) {
++ source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
++ loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
++ }
++
++ gadget->ep0->driver_data = dev;
++
++ INFO (dev, "%s, version: " DRIVER_VERSION "\n", longname);
++ INFO (dev, "using %s, OUT %s IN %s\n", gadget->name,
++ EP_OUT_NAME, EP_IN_NAME);
++
++ snprintf (manufacturer, sizeof manufacturer,
++ UTS_SYSNAME " " UTS_RELEASE " with %s",
++ gadget->name);
++
++ return 0;
++
++enomem:
++ zero_unbind (gadget);
++ return -ENOMEM;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void
++zero_suspend (struct usb_gadget *gadget)
++{
++ struct zero_dev *dev = get_gadget_data (gadget);
++
++ if (gadget->speed == USB_SPEED_UNKNOWN)
++ return;
++
++ if (autoresume) {
++ mod_timer (&dev->resume, jiffies + (HZ * autoresume));
++ DBG (dev, "suspend, wakeup in %d seconds\n", autoresume);
++ } else
++ DBG (dev, "suspend\n");
++}
++
++static void
++zero_resume (struct usb_gadget *gadget)
++{
++ struct zero_dev *dev = get_gadget_data (gadget);
++
++ DBG (dev, "resume\n");
++ del_timer (&dev->resume);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_gadget_driver zero_driver = {
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ .speed = USB_SPEED_HIGH,
++#else
++ .speed = USB_SPEED_FULL,
++#endif
++ .function = (char *) longname,
++ .bind = zero_bind,
++ .unbind = zero_unbind,
++
++ .setup = zero_setup,
++ .disconnect = zero_disconnect,
++
++ .suspend = zero_suspend,
++ .resume = zero_resume,
++
++ .driver = {
++ .name = (char *) shortname,
++ // .shutdown = ...
++ // .suspend = ...
++ // .resume = ...
++ },
++};
++
++MODULE_AUTHOR ("David Brownell");
++MODULE_LICENSE ("Dual BSD/GPL");
++
++
++static int __init init (void)
++{
++ /* a real value would likely come through some id prom
++ * or module option. this one takes at least two packets.
++ */
++ strncpy (serial, "0123456789.0123456789.0123456789", sizeof serial);
++ serial [sizeof serial - 1] = 0;
++
++ return usb_gadget_register_driver (&zero_driver);
++}
++module_init (init);
++
++static void __exit cleanup (void)
++{
++ usb_gadget_unregister_driver (&zero_driver);
++}
++module_exit (cleanup);
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/include/linux/moduleparam.h kernel/include/linux/moduleparam.h
+--- /tmp/kernel/include/linux/moduleparam.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/include/linux/moduleparam.h 2005-04-22 17:53:19.357552052 +0200
+@@ -0,0 +1,25 @@
++#ifndef _LINUX_MODULE_PARAMS_H
++#define _LINUX_MODULE_PARAMS_H
++/* Macros for (very simple) module parameter compatibility with 2.6. */
++#include <linux/module.h>
++
++/* type is byte, short, ushort, int, uint, long, ulong, bool. (2.6
++ has more, but they are not supported). perm is permissions when
++ it appears in sysfs: 0 means doesn't appear, 0444 means read-only
++ by everyone, 0644 means changeable dynamically by root, etc. name
++ must be in scope (unlike MODULE_PARM).
++*/
++#define module_param(name, type, perm) \
++ static inline void *__check_existence_##name(void) { return &name; } \
++ MODULE_PARM(name, _MODULE_PARM_STRING_ ## type)
++
++#define _MODULE_PARM_STRING_byte "b"
++#define _MODULE_PARM_STRING_short "h"
++#define _MODULE_PARM_STRING_ushort "h"
++#define _MODULE_PARM_STRING_int "i"
++#define _MODULE_PARM_STRING_uint "i"
++#define _MODULE_PARM_STRING_long "l"
++#define _MODULE_PARM_STRING_ulong "l"
++#define _MODULE_PARM_STRING_bool "i"
++
++#endif /* _LINUX_MODULE_PARAMS_H */
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/include/linux/usb_cdc.h kernel/include/linux/usb_cdc.h
+--- /tmp/kernel/include/linux/usb_cdc.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/include/linux/usb_cdc.h 2005-04-22 17:53:19.360551563 +0200
+@@ -0,0 +1,204 @@
++/*
++ * USB Communications Device Class (CDC) definitions
++ *
++ * CDC says how to talk to lots of different types of network adapters,
++ * notably ethernet adapters and various modems. It's used mostly with
++ * firmware based USB peripherals.
++ *
++ * (C) Copyright 2005 by David Brownell
++ * All Rights Reserved.
++ *
++ * This software is licensed under the GNU GPL version 2.
++ */
++
++#define USB_CDC_SUBCLASS_ACM 0x02
++#define USB_CDC_SUBCLASS_ETHERNET 0x06
++#define USB_CDC_SUBCLASS_WHCM 0x08
++#define USB_CDC_SUBCLASS_DMM 0x09
++#define USB_CDC_SUBCLASS_MDLM 0x0a
++#define USB_CDC_SUBCLASS_OBEX 0x0b
++
++#define USB_CDC_PROTO_NONE 0
++
++#define USB_CDC_ACM_PROTO_AT_V25TER 1
++#define USB_CDC_ACM_PROTO_AT_PCCA101 2
++#define USB_CDC_ACM_PROTO_AT_PCCA101_WAKE 3
++#define USB_CDC_ACM_PROTO_AT_GSM 4
++#define USB_CDC_ACM_PROTO_AT_3G 5
++#define USB_CDC_ACM_PROTO_AT_CDMA 6
++#define USB_CDC_ACM_PROTO_VENDOR 0xff
++
++/*-------------------------------------------------------------------------*/
++
++/* 2.6 "sparse" support for checking beyond what GCC does */
++
++#define __le16 u16
++#define __le32 u32
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * Class-Specific descriptors ... there are a couple dozen of them
++ */
++
++#define USB_CDC_HEADER_TYPE 0x00 /* header_desc */
++#define USB_CDC_CALL_MANAGEMENT_TYPE 0x01 /* call_mgmt_descriptor */
++#define USB_CDC_ACM_TYPE 0x02 /* acm_descriptor */
++#define USB_CDC_UNION_TYPE 0x06 /* union_desc */
++#define USB_CDC_COUNTRY_TYPE 0x07
++#define USB_CDC_ETHERNET_TYPE 0x0f /* ether_desc */
++#define USB_CDC_WHCM_TYPE 0x11
++#define USB_CDC_MDLM_TYPE 0x12 /* mdlm_desc */
++#define USB_CDC_MDLM_DETAIL_TYPE 0x13 /* mdlm_detail_desc */
++#define USB_CDC_DMM_TYPE 0x14
++#define USB_CDC_OBEX_TYPE 0x15
++
++/* "Header Functional Descriptor" from CDC spec 5.2.3.1 */
++struct usb_cdc_header_desc {
++ __u8 bLength;
++ __u8 bDescriptorType;
++ __u8 bDescriptorSubType;
++
++ __le16 bcdCDC;
++} __attribute__ ((packed));
++
++/* "Call Management Descriptor" from CDC spec 5.2.3.2 */
++struct usb_cdc_call_mgmt_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++ __u8 bDescriptorSubType;
++
++ __u8 bmCapabilities;
++#define USB_CDC_CALL_MGMT_CAP_CALL_MGMT 0x01
++#define USB_CDC_CALL_MGMT_CAP_DATA_INTF 0x02
++
++ __u8 bDataInterface;
++} __attribute__ ((packed));
++
++/* "Abstract Control Management Descriptor" from CDC spec 5.2.3.3 */
++struct usb_cdc_acm_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++ __u8 bDescriptorSubType;
++
++ __u8 bmCapabilities;
++} __attribute__ ((packed));
++
++/* "Union Functional Descriptor" from CDC spec 5.2.3.8 */
++struct usb_cdc_union_desc {
++ __u8 bLength;
++ __u8 bDescriptorType;
++ __u8 bDescriptorSubType;
++
++ __u8 bMasterInterface0;
++ __u8 bSlaveInterface0;
++ /* ... and there could be other slave interfaces */
++} __attribute__ ((packed));
++
++/* "Ethernet Networking Functional Descriptor" from CDC spec 5.2.3.16 */
++struct usb_cdc_ether_desc {
++ __u8 bLength;
++ __u8 bDescriptorType;
++ __u8 bDescriptorSubType;
++
++ __u8 iMACAddress;
++ __le32 bmEthernetStatistics;
++ __le16 wMaxSegmentSize;
++ __le16 wNumberMCFilters;
++ __u8 bNumberPowerFilters;
++} __attribute__ ((packed));
++
++/* "MDLM Functional Descriptor" from CDC WMC spec 6.7.2.3 */
++struct usb_cdc_mdlm_desc {
++ __u8 bLength;
++ __u8 bDescriptorType;
++ __u8 bDescriptorSubType;
++
++ __le16 bcdVersion;
++ __u8 bGUID[16];
++} __attribute__ ((packed));
++
++/* "MDLM Detail Functional Descriptor" from CDC WMC spec 6.7.2.4 */
++struct usb_cdc_mdlm_detail_desc {
++ __u8 bLength;
++ __u8 bDescriptorType;
++ __u8 bDescriptorSubType;
++
++ /* type is associated with mdlm_desc.bGUID */
++ __u8 bGuidDescriptorType;
++ __u8 bDetailData[];
++} __attribute__ ((packed));
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * Class-Specific Control Requests (6.2)
++ *
++ * section 3.6.2.1 table 4 has the ACM profile, for modems.
++ * section 3.8.2 table 10 has the ethernet profile.
++ *
++ * Microsoft's RNDIS stack for Ethernet is a vendor-specific CDC ACM variant,
++ * heavily dependent on the encapsulated (proprietary) command mechanism.
++ */
++
++#define USB_CDC_SEND_ENCAPSULATED_COMMAND 0x00
++#define USB_CDC_GET_ENCAPSULATED_RESPONSE 0x01
++#define USB_CDC_REQ_SET_LINE_CODING 0x20
++#define USB_CDC_REQ_GET_LINE_CODING 0x21
++#define USB_CDC_REQ_SET_CONTROL_LINE_STATE 0x22
++#define USB_CDC_REQ_SEND_BREAK 0x23
++#define USB_CDC_SET_ETHERNET_MULTICAST_FILTERS 0x40
++#define USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER 0x41
++#define USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER 0x42
++#define USB_CDC_SET_ETHERNET_PACKET_FILTER 0x43
++#define USB_CDC_GET_ETHERNET_STATISTIC 0x44
++
++/* Line Coding Structure from CDC spec 6.2.13 */
++struct usb_cdc_line_coding {
++ __le32 dwDTERate;
++ __u8 bCharFormat;
++#define USB_CDC_1_STOP_BITS 0
++#define USB_CDC_1_5_STOP_BITS 1
++#define USB_CDC_2_STOP_BITS 2
++
++ __u8 bParityType;
++#define USB_CDC_NO_PARITY 0
++#define USB_CDC_ODD_PARITY 1
++#define USB_CDC_EVEN_PARITY 2
++#define USB_CDC_MARK_PARITY 3
++#define USB_CDC_SPACE_PARITY 4
++
++ __u8 bDataBits;
++} __attribute__ ((packed));
++
++/* table 62; bits in multicast filter */
++#define USB_CDC_PACKET_TYPE_PROMISCUOUS (1 << 0)
++#define USB_CDC_PACKET_TYPE_ALL_MULTICAST (1 << 1) /* no filter */
++#define USB_CDC_PACKET_TYPE_DIRECTED (1 << 2)
++#define USB_CDC_PACKET_TYPE_BROADCAST (1 << 3)
++#define USB_CDC_PACKET_TYPE_MULTICAST (1 << 4) /* filtered */
++
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * Class-Specific Notifications (6.3) sent by interrupt transfers
++ *
++ * section 3.8.2 table 11 of the CDC spec lists Ethernet notifications
++ * section 3.6.2.1 table 5 specifies ACM notifications, accepted by RNDIS
++ * RNDIS also defines its own bit-incompatible notifications
++ */
++
++#define USB_CDC_NOTIFY_NETWORK_CONNECTION 0x00
++#define USB_CDC_NOTIFY_RESPONSE_AVAILABLE 0x01
++#define USB_CDC_NOTIFY_SERIAL_STATE 0x20
++#define USB_CDC_NOTIFY_SPEED_CHANGE 0x2a
++
++struct usb_cdc_notification {
++ __u8 bmRequestType;
++ __u8 bNotificationType;
++ __le16 wValue;
++ __le16 wIndex;
++ __le16 wLength;
++} __attribute__ ((packed));
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/include/linux/usb_ch9.h kernel/include/linux/usb_ch9.h
+--- /tmp/kernel/include/linux/usb_ch9.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/include/linux/usb_ch9.h 2005-04-22 17:53:19.363551075 +0200
+@@ -0,0 +1,384 @@
++/*
++ * This file holds USB constants and structures that are needed for USB
++ * device APIs. These are used by the USB device model, which is defined
++ * in chapter 9 of the USB 2.0 specification. Linux has several APIs in C
++ * that need these:
++ *
++ * - the master/host side Linux-USB kernel driver API;
++ * - the "usbfs" user space API; and
++ * - (eventually) a Linux "gadget" slave/device side driver API.
++ *
++ * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems
++ * act either as a USB master/host or as a USB slave/device. That means
++ * the master and slave side APIs will benefit from working well together.
++ */
++
++#ifndef __LINUX_USB_CH9_H
++#define __LINUX_USB_CH9_H
++
++#include <asm/types.h> /* __u8 etc */
++
++/*-------------------------------------------------------------------------*/
++
++/* CONTROL REQUEST SUPPORT */
++
++/*
++ * USB directions
++ *
++ * This bit flag is used in endpoint descriptors' bEndpointAddress field.
++ * It's also one of three fields in control requests bRequestType.
++ */
++#define USB_DIR_OUT 0 /* to device */
++#define USB_DIR_IN 0x80 /* to host */
++
++/*
++ * USB types, the second of three bRequestType fields
++ */
++#define USB_TYPE_MASK (0x03 << 5)
++#define USB_TYPE_STANDARD (0x00 << 5)
++#define USB_TYPE_CLASS (0x01 << 5)
++#define USB_TYPE_VENDOR (0x02 << 5)
++#define USB_TYPE_RESERVED (0x03 << 5)
++
++/*
++ * USB recipients, the third of three bRequestType fields
++ */
++#define USB_RECIP_MASK 0x1f
++#define USB_RECIP_DEVICE 0x00
++#define USB_RECIP_INTERFACE 0x01
++#define USB_RECIP_ENDPOINT 0x02
++#define USB_RECIP_OTHER 0x03
++
++/*
++ * Standard requests, for the bRequest field of a SETUP packet.
++ *
++ * These are qualified by the bRequestType field, so that for example
++ * TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved
++ * by a GET_STATUS request.
++ */
++#define USB_REQ_GET_STATUS 0x00
++#define USB_REQ_CLEAR_FEATURE 0x01
++#define USB_REQ_SET_FEATURE 0x03
++#define USB_REQ_SET_ADDRESS 0x05
++#define USB_REQ_GET_DESCRIPTOR 0x06
++#define USB_REQ_SET_DESCRIPTOR 0x07
++#define USB_REQ_GET_CONFIGURATION 0x08
++#define USB_REQ_SET_CONFIGURATION 0x09
++#define USB_REQ_GET_INTERFACE 0x0A
++#define USB_REQ_SET_INTERFACE 0x0B
++#define USB_REQ_SYNCH_FRAME 0x0C
++
++/*
++ * USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and
++ * are read as a bit array returned by USB_REQ_GET_STATUS. (So there
++ * are at most sixteen features of each type.)
++ */
++#define USB_DEVICE_SELF_POWERED 0 /* (read only) */
++#define USB_DEVICE_REMOTE_WAKEUP 1 /* dev may initiate wakeup */
++#define USB_DEVICE_TEST_MODE 2 /* (high speed only) */
++#define USB_DEVICE_B_HNP_ENABLE 3 /* dev may initiate HNP */
++#define USB_DEVICE_A_HNP_SUPPORT 4 /* RH port supports HNP */
++#define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* other RH port does */
++#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */
++
++#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
++
++
++/**
++ * struct usb_ctrlrequest - SETUP data for a USB device control request
++ * @bRequestType: matches the USB bmRequestType field
++ * @bRequest: matches the USB bRequest field
++ * @wValue: matches the USB wValue field (le16 byte order)
++ * @wIndex: matches the USB wIndex field (le16 byte order)
++ * @wLength: matches the USB wLength field (le16 byte order)
++ *
++ * This structure is used to send control requests to a USB device. It matches
++ * the different fields of the USB 2.0 Spec section 9.3, table 9-2. See the
++ * USB spec for a fuller description of the different fields, and what they are
++ * used for.
++ *
++ * Note that the driver for any interface can issue control requests.
++ * For most devices, interfaces don't coordinate with each other, so
++ * such requests may be made at any time.
++ */
++struct usb_ctrlrequest {
++ __u8 bRequestType;
++ __u8 bRequest;
++ __u16 wValue;
++ __u16 wIndex;
++ __u16 wLength;
++} __attribute__ ((packed));
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * STANDARD DESCRIPTORS ... as returned by GET_DESCRIPTOR, or
++ * (rarely) accepted by SET_DESCRIPTOR.
++ *
++ * Note that all multi-byte values here are encoded in little endian
++ * byte order "on the wire". But when exposed through Linux-USB APIs,
++ * they've been converted to cpu byte order.
++ */
++
++/*
++ * Descriptor types ... USB 2.0 spec table 9.5
++ */
++#define USB_DT_DEVICE 0x01
++#define USB_DT_CONFIG 0x02
++#define USB_DT_STRING 0x03
++#define USB_DT_INTERFACE 0x04
++#define USB_DT_ENDPOINT 0x05
++#define USB_DT_DEVICE_QUALIFIER 0x06
++#define USB_DT_OTHER_SPEED_CONFIG 0x07
++#define USB_DT_INTERFACE_POWER 0x08
++/* these are from a minor usb 2.0 revision (ECN) */
++#define USB_DT_OTG 0x09
++#define USB_DT_DEBUG 0x0a
++#define USB_DT_INTERFACE_ASSOCIATION 0x0b
++
++/* conventional codes for class-specific descriptors */
++#define USB_DT_CS_DEVICE 0x21
++#define USB_DT_CS_CONFIG 0x22
++#define USB_DT_CS_STRING 0x23
++#define USB_DT_CS_INTERFACE 0x24
++#define USB_DT_CS_ENDPOINT 0x25
++
++/* All standard descriptors have these 2 fields at the beginning */
++struct usb_descriptor_header {
++ __u8 bLength;
++ __u8 bDescriptorType;
++} __attribute__ ((packed));
++
++
++/*-------------------------------------------------------------------------*/
++
++/* USB_DT_DEVICE: Device descriptor */
++struct usb_device_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++
++ __u16 bcdUSB;
++ __u8 bDeviceClass;
++ __u8 bDeviceSubClass;
++ __u8 bDeviceProtocol;
++ __u8 bMaxPacketSize0;
++ __u16 idVendor;
++ __u16 idProduct;
++ __u16 bcdDevice;
++ __u8 iManufacturer;
++ __u8 iProduct;
++ __u8 iSerialNumber;
++ __u8 bNumConfigurations;
++} __attribute__ ((packed));
++
++#define USB_DT_DEVICE_SIZE 18
++
++
++/*
++ * Device and/or Interface Class codes
++ * as found in bDeviceClass or bInterfaceClass
++ * and defined by www.usb.org documents
++ */
++#define USB_CLASS_PER_INTERFACE 0 /* for DeviceClass */
++#define USB_CLASS_AUDIO 1
++#define USB_CLASS_COMM 2
++#define USB_CLASS_HID 3
++#define USB_CLASS_PHYSICAL 5
++#define USB_CLASS_STILL_IMAGE 6
++#define USB_CLASS_PRINTER 7
++#define USB_CLASS_MASS_STORAGE 8
++#define USB_CLASS_HUB 9
++#define USB_CLASS_CDC_DATA 0x0a
++#define USB_CLASS_CSCID 0x0b /* chip+ smart card */
++#define USB_CLASS_CONTENT_SEC 0x0d /* content security */
++#define USB_CLASS_VIDEO 0x0e
++#define USB_CLASS_APP_SPEC 0xfe
++#define USB_CLASS_VENDOR_SPEC 0xff
++
++/*-------------------------------------------------------------------------*/
++
++/* USB_DT_CONFIG: Configuration descriptor information.
++ *
++ * USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the
++ * descriptor type is different. Highspeed-capable devices can look
++ * different depending on what speed they're currently running. Only
++ * devices with a USB_DT_DEVICE_QUALIFIER have any OTHER_SPEED_CONFIG
++ * descriptors.
++ */
++struct usb_config_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++
++ __u16 wTotalLength;
++ __u8 bNumInterfaces;
++ __u8 bConfigurationValue;
++ __u8 iConfiguration;
++ __u8 bmAttributes;
++ __u8 bMaxPower;
++} __attribute__ ((packed));
++
++#define USB_DT_CONFIG_SIZE 9
++
++/* from config descriptor bmAttributes */
++#define USB_CONFIG_ATT_ONE (1 << 7) /* must be set */
++#define USB_CONFIG_ATT_SELFPOWER (1 << 6) /* self powered */
++#define USB_CONFIG_ATT_WAKEUP (1 << 5) /* can wakeup */
++
++/*-------------------------------------------------------------------------*/
++
++/* USB_DT_STRING: String descriptor */
++struct usb_string_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++
++ __u16 wData[1]; /* UTF-16LE encoded */
++} __attribute__ ((packed));
++
++/* note that "string" zero is special, it holds language codes that
++ * the device supports, not Unicode characters.
++ */
++
++/*-------------------------------------------------------------------------*/
++
++/* USB_DT_INTERFACE: Interface descriptor */
++struct usb_interface_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++
++ __u8 bInterfaceNumber;
++ __u8 bAlternateSetting;
++ __u8 bNumEndpoints;
++ __u8 bInterfaceClass;
++ __u8 bInterfaceSubClass;
++ __u8 bInterfaceProtocol;
++ __u8 iInterface;
++} __attribute__ ((packed));
++
++#define USB_DT_INTERFACE_SIZE 9
++
++/*-------------------------------------------------------------------------*/
++
++/* USB_DT_ENDPOINT: Endpoint descriptor */
++struct usb_endpoint_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++
++ __u8 bEndpointAddress;
++ __u8 bmAttributes;
++ __u16 wMaxPacketSize;
++ __u8 bInterval;
++
++ // NOTE: these two are _only_ in audio endpoints.
++ // use USB_DT_ENDPOINT*_SIZE in bLength, not sizeof.
++ __u8 bRefresh;
++ __u8 bSynchAddress;
++} __attribute__ ((packed));
++
++#define USB_DT_ENDPOINT_SIZE 7
++#define USB_DT_ENDPOINT_AUDIO_SIZE 9 /* Audio extension */
++
++
++/*
++ * Endpoints
++ */
++#define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */
++#define USB_ENDPOINT_DIR_MASK 0x80
++
++#define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */
++#define USB_ENDPOINT_XFER_CONTROL 0
++#define USB_ENDPOINT_XFER_ISOC 1
++#define USB_ENDPOINT_XFER_BULK 2
++#define USB_ENDPOINT_XFER_INT 3
++
++
++/*-------------------------------------------------------------------------*/
++
++/* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */
++struct usb_qualifier_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++
++ __u16 bcdUSB;
++ __u8 bDeviceClass;
++ __u8 bDeviceSubClass;
++ __u8 bDeviceProtocol;
++ __u8 bMaxPacketSize0;
++ __u8 bNumConfigurations;
++ __u8 bRESERVED;
++} __attribute__ ((packed));
++
++
++/*-------------------------------------------------------------------------*/
++
++/* USB_DT_OTG (from OTG 1.0a supplement) */
++struct usb_otg_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++
++ __u8 bmAttributes; /* support for HNP, SRP, etc */
++} __attribute__ ((packed));
++
++/* from usb_otg_descriptor.bmAttributes */
++#define USB_OTG_SRP (1 << 0)
++#define USB_OTG_HNP (1 << 1) /* swap host/device roles */
++
++/*-------------------------------------------------------------------------*/
++
++/* USB_DT_DEBUG: for special highspeed devices, replacing serial console */
++struct usb_debug_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++
++ /* bulk endpoints with 8 byte maxpacket */
++ __u8 bDebugInEndpoint;
++ __u8 bDebugOutEndpoint;
++};
++
++/*-------------------------------------------------------------------------*/
++
++/* USB_DT_INTERFACE_ASSOCIATION: groups interfaces */
++struct usb_interface_assoc_descriptor {
++ __u8 bLength;
++ __u8 bDescriptorType;
++
++ __u8 bFirstInterface;
++ __u8 bInterfaceCount;
++ __u8 bFunctionClass;
++ __u8 bFunctionSubClass;
++ __u8 bFunctionProtocol;
++ __u8 iFunction;
++} __attribute__ ((packed));
++
++
++/*-------------------------------------------------------------------------*/
++
++/* USB 2.0 defines three speeds, here's how Linux identifies them */
++
++enum usb_device_speed {
++ USB_SPEED_UNKNOWN = 0, /* enumerating */
++ USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
++ USB_SPEED_HIGH /* usb 2.0 */
++};
++
++enum usb_device_state {
++ /* NOTATTACHED isn't in the USB spec, and this state acts
++ * the same as ATTACHED ... but it's clearer this way.
++ */
++ USB_STATE_NOTATTACHED = 0,
++
++ /* the chapter 9 device states */
++ USB_STATE_ATTACHED,
++ USB_STATE_POWERED,
++ USB_STATE_DEFAULT, /* limited function */
++ USB_STATE_ADDRESS,
++ USB_STATE_CONFIGURED, /* most functions */
++
++ USB_STATE_SUSPENDED
++
++ /* NOTE: there are actually four different SUSPENDED
++ * states, returning to POWERED, DEFAULT, ADDRESS, or
++ * CONFIGURED respectively when SOF tokens flow again.
++ */
++};
++
++#endif /* __LINUX_USB_CH9_H */
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/include/linux/usb_gadget.h kernel/include/linux/usb_gadget.h
+--- /tmp/kernel/include/linux/usb_gadget.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/include/linux/usb_gadget.h 2005-04-22 17:53:19.367550424 +0200
+@@ -0,0 +1,896 @@
++/*
++ * <linux/usb_gadget.h>
++ *
++ * We call the USB code inside a Linux-based peripheral device a "gadget"
++ * driver, except for the hardware-specific bus glue. One USB host can
++ * master many USB gadgets, but the gadgets are only slaved to one host.
++ *
++ *
++ * (C) Copyright 2002-2004 by David Brownell
++ * All Rights Reserved.
++ *
++ * This software is licensed under the GNU GPL version 2.
++ */
++
++#ifndef __LINUX_USB_GADGET_H
++#define __LINUX_USB_GADGET_H
++
++#ifdef __KERNEL__
++
++struct usb_ep;
++
++/**
++ * struct usb_request - describes one i/o request
++ * @buf: Buffer used for data. Always provide this; some controllers
++ * only use PIO, or don't use DMA for some endpoints.
++ * @dma: DMA address corresponding to 'buf'. If you don't set this
++ * field, and the usb controller needs one, it is responsible
++ * for mapping and unmapping the buffer.
++ * @length: Length of that data
++ * @no_interrupt: If true, hints that no completion irq is needed.
++ * Helpful sometimes with deep request queues that are handled
++ * directly by DMA controllers.
++ * @zero: If true, when writing data, makes the last packet be "short"
++ * by adding a zero length packet as needed;
++ * @short_not_ok: When reading data, makes short packets be
++ * treated as errors (queue stops advancing till cleanup).
++ * @complete: Function called when request completes, so this request and
++ * its buffer may be re-used.
++ * Reads terminate with a short packet, or when the buffer fills,
++ * whichever comes first. When writes terminate, some data bytes
++ * will usually still be in flight (often in a hardware fifo).
++ * Errors (for reads or writes) stop the queue from advancing
++ * until the completion function returns, so that any transfers
++ * invalidated by the error may first be dequeued.
++ * @context: For use by the completion callback
++ * @list: For use by the gadget driver.
++ * @status: Reports completion code, zero or a negative errno.
++ * Normally, faults block the transfer queue from advancing until
++ * the completion callback returns.
++ * Code "-ESHUTDOWN" indicates completion caused by device disconnect,
++ * or when the driver disabled the endpoint.
++ * @actual: Reports bytes transferred to/from the buffer. For reads (OUT
++ * transfers) this may be less than the requested length. If the
++ * short_not_ok flag is set, short reads are treated as errors
++ * even when status otherwise indicates successful completion.
++ * Note that for writes (IN transfers) some data bytes may still
++ * reside in a device-side FIFO when the request is reported as
++ * complete.
++ *
++ * These are allocated/freed through the endpoint they're used with. The
++ * hardware's driver can add extra per-request data to the memory it returns,
++ * which often avoids separate memory allocations (potential failures),
++ * later when the request is queued.
++ *
++ * Request flags affect request handling, such as whether a zero length
++ * packet is written (the "zero" flag), whether a short read should be
++ * treated as an error (blocking request queue advance, the "short_not_ok"
++ * flag), or hinting that an interrupt is not required (the "no_interrupt"
++ * flag, for use with deep request queues).
++ *
++ * Bulk endpoints can use any size buffers, and can also be used for interrupt
++ * transfers. interrupt-only endpoints can be much less functional.
++ */
++ // NOTE this is analogous to 'struct urb' on the host side,
++ // except that it's thinner and promotes more pre-allocation.
++
++struct usb_request {
++ void *buf;
++ unsigned length;
++ dma_addr_t dma;
++
++ unsigned no_interrupt:1;
++ unsigned zero:1;
++ unsigned short_not_ok:1;
++
++ void (*complete)(struct usb_ep *ep,
++ struct usb_request *req);
++ void *context;
++ struct list_head list;
++
++ int status;
++ unsigned actual;
++};
++
++/*-------------------------------------------------------------------------*/
++
++/* endpoint-specific parts of the api to the usb controller hardware.
++ * unlike the urb model, (de)multiplexing layers are not required.
++ * (so this api could slash overhead if used on the host side...)
++ *
++ * note that device side usb controllers commonly differ in how many
++ * endpoints they support, as well as their capabilities.
++ */
++struct usb_ep_ops {
++ int (*enable) (struct usb_ep *ep,
++ const struct usb_endpoint_descriptor *desc);
++ int (*disable) (struct usb_ep *ep);
++
++ struct usb_request *(*alloc_request) (struct usb_ep *ep,
++ int gfp_flags);
++ void (*free_request) (struct usb_ep *ep, struct usb_request *req);
++
++ void *(*alloc_buffer) (struct usb_ep *ep, unsigned bytes,
++ dma_addr_t *dma, int gfp_flags);
++ void (*free_buffer) (struct usb_ep *ep, void *buf, dma_addr_t dma,
++ unsigned bytes);
++ // NOTE: on 2.6, drivers may also use dma_map() and
++ // dma_sync_single_*() to directly manage dma overhead.
++
++ int (*queue) (struct usb_ep *ep, struct usb_request *req,
++ int gfp_flags);
++ int (*dequeue) (struct usb_ep *ep, struct usb_request *req);
++
++ int (*set_halt) (struct usb_ep *ep, int value);
++ int (*fifo_status) (struct usb_ep *ep);
++ void (*fifo_flush) (struct usb_ep *ep);
++};
++
++/**
++ * struct usb_ep - device side representation of USB endpoint
++ * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
++ * @ops: Function pointers used to access hardware-specific operations.
++ * @ep_list:the gadget's ep_list holds all of its endpoints
++ * @maxpacket:The maximum packet size used on this endpoint. The initial
++ * value can sometimes be reduced (hardware allowing), according to
++ * the endpoint descriptor used to configure the endpoint.
++ * @driver_data:for use by the gadget driver. all other fields are
++ * read-only to gadget drivers.
++ *
++ * the bus controller driver lists all the general purpose endpoints in
++ * gadget->ep_list. the control endpoint (gadget->ep0) is not in that list,
++ * and is accessed only in response to a driver setup() callback.
++ */
++struct usb_ep {
++ void *driver_data;
++
++ const char *name;
++ const struct usb_ep_ops *ops;
++ struct list_head ep_list;
++ unsigned maxpacket:16;
++};
++
++/*-------------------------------------------------------------------------*/
++
++/**
++ * usb_ep_enable - configure endpoint, making it usable
++ * @ep:the endpoint being configured. may not be the endpoint named "ep0".
++ * drivers discover endpoints through the ep_list of a usb_gadget.
++ * @desc:descriptor for desired behavior. caller guarantees this pointer
++ * remains valid until the endpoint is disabled; the data byte order
++ * is little-endian (usb-standard).
++ *
++ * when configurations are set, or when interface settings change, the driver
++ * will enable or disable the relevant endpoints. while it is enabled, an
++ * endpoint may be used for i/o until the driver receives a disconnect() from
++ * the host or until the endpoint is disabled.
++ *
++ * the ep0 implementation (which calls this routine) must ensure that the
++ * hardware capabilities of each endpoint match the descriptor provided
++ * for it. for example, an endpoint named "ep2in-bulk" would be usable
++ * for interrupt transfers as well as bulk, but it likely couldn't be used
++ * for iso transfers or for endpoint 14. some endpoints are fully
++ * configurable, with more generic names like "ep-a". (remember that for
++ * USB, "in" means "towards the USB master".)
++ *
++ * returns zero, or a negative error code.
++ */
++static inline int
++usb_ep_enable (struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
++{
++ return ep->ops->enable (ep, desc);
++}
++
++/**
++ * usb_ep_disable - endpoint is no longer usable
++ * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
++ *
++ * no other task may be using this endpoint when this is called.
++ * any pending and uncompleted requests will complete with status
++ * indicating disconnect (-ESHUTDOWN) before this call returns.
++ * gadget drivers must call usb_ep_enable() again before queueing
++ * requests to the endpoint.
++ *
++ * returns zero, or a negative error code.
++ */
++static inline int
++usb_ep_disable (struct usb_ep *ep)
++{
++ return ep->ops->disable (ep);
++}
++
++/**
++ * usb_ep_alloc_request - allocate a request object to use with this endpoint
++ * @ep:the endpoint to be used with the request
++ * @gfp_flags:GFP_* flags to use
++ *
++ * Request objects must be allocated with this call, since they normally
++ * need controller-specific setup and may even need endpoint-specific
++ * resources such as allocation of DMA descriptors.
++ * Requests may be submitted with usb_ep_queue(), and receive a single
++ * completion callback. Free requests with usb_ep_free_request(), when
++ * they are no longer needed.
++ *
++ * Returns the request, or null if one could not be allocated.
++ */
++static inline struct usb_request *
++usb_ep_alloc_request (struct usb_ep *ep, int gfp_flags)
++{
++ return ep->ops->alloc_request (ep, gfp_flags);
++}
++
++/**
++ * usb_ep_free_request - frees a request object
++ * @ep:the endpoint associated with the request
++ * @req:the request being freed
++ *
++ * Reverses the effect of usb_ep_alloc_request().
++ * Caller guarantees the request is not queued, and that it will
++ * no longer be requeued (or otherwise used).
++ */
++static inline void
++usb_ep_free_request (struct usb_ep *ep, struct usb_request *req)
++{
++ ep->ops->free_request (ep, req);
++}
++
++/**
++ * usb_ep_alloc_buffer - allocate an I/O buffer
++ * @ep:the endpoint associated with the buffer
++ * @len:length of the desired buffer
++ * @dma:pointer to the buffer's DMA address; must be valid
++ * @gfp_flags:GFP_* flags to use
++ *
++ * Returns a new buffer, or null if one could not be allocated.
++ * The buffer is suitably aligned for dma, if that endpoint uses DMA,
++ * and the caller won't have to care about dma-inconsistency
++ * or any hidden "bounce buffer" mechanism. No additional per-request
++ * DMA mapping will be required for such buffers.
++ * Free it later with usb_ep_free_buffer().
++ *
++ * You don't need to use this call to allocate I/O buffers unless you
++ * want to make sure drivers don't incur costs for such "bounce buffer"
++ * copies or per-request DMA mappings.
++ */
++static inline void *
++usb_ep_alloc_buffer (struct usb_ep *ep, unsigned len, dma_addr_t *dma,
++ int gfp_flags)
++{
++ return ep->ops->alloc_buffer (ep, len, dma, gfp_flags);
++}
++
++/**
++ * usb_ep_free_buffer - frees an i/o buffer
++ * @ep:the endpoint associated with the buffer
++ * @buf:CPU view address of the buffer
++ * @dma:the buffer's DMA address
++ * @len:length of the buffer
++ *
++ * reverses the effect of usb_ep_alloc_buffer().
++ * caller guarantees the buffer will no longer be accessed
++ */
++static inline void
++usb_ep_free_buffer (struct usb_ep *ep, void *buf, dma_addr_t dma, unsigned len)
++{
++ ep->ops->free_buffer (ep, buf, dma, len);
++}
++
++/**
++ * usb_ep_queue - queues (submits) an I/O request to an endpoint.
++ * @ep:the endpoint associated with the request
++ * @req:the request being submitted
++ * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
++ * pre-allocate all necessary memory with the request.
++ *
++ * This tells the device controller to perform the specified request through
++ * that endpoint (reading or writing a buffer). When the request completes,
++ * including being canceled by usb_ep_dequeue(), the request's completion
++ * routine is called to return the request to the driver. Any endpoint
++ * (except control endpoints like ep0) may have more than one transfer
++ * request queued; they complete in FIFO order. Once a gadget driver
++ * submits a request, that request may not be examined or modified until it
++ * is given back to that driver through the completion callback.
++ *
++ * Each request is turned into one or more packets. The controller driver
++ * never merges adjacent requests into the same packet. OUT transfers
++ * will sometimes use data that's already buffered in the hardware.
++ * Drivers can rely on the fact that the first byte of the request's buffer
++ * always corresponds to the first byte of some USB packet, for both
++ * IN and OUT transfers.
++ *
++ * Bulk endpoints can queue any amount of data; the transfer is packetized
++ * automatically. The last packet will be short if the request doesn't fill it
++ * out completely. Zero length packets (ZLPs) should be avoided in portable
++ * protocols since not all usb hardware can successfully handle zero length
++ * packets. (ZLPs may be explicitly written, and may be implicitly written if
++ * the request 'zero' flag is set.) Bulk endpoints may also be used
++ * for interrupt transfers; but the reverse is not true, and some endpoints
++ * won't support every interrupt transfer. (Such as 768 byte packets.)
++ *
++ * Interrupt-only endpoints are less functional than bulk endpoints, for
++ * example by not supporting queueing or not handling buffers that are
++ * larger than the endpoint's maxpacket size. They may also treat data
++ * toggle differently.
++ *
++ * Control endpoints ... after getting a setup() callback, the driver queues
++ * one response (even if it would be zero length). That enables the
++ * status ack, after transferring data as specified in the response. Setup
++ * functions may return negative error codes to generate protocol stalls.
++ * (Note that some USB device controllers disallow protocol stall responses
++ * in some cases.) When control responses are deferred (the response is
++ * written after the setup callback returns), then usb_ep_set_halt() may be
++ * used on ep0 to trigger protocol stalls.
++ *
++ * For periodic endpoints, like interrupt or isochronous ones, the usb host
++ * arranges to poll once per interval, and the gadget driver usually will
++ * have queued some data to transfer at that time.
++ *
++ * Returns zero, or a negative error code. Endpoints that are not enabled
++ * report errors; errors will also be
++ * reported when the usb peripheral is disconnected.
++ */
++static inline int
++usb_ep_queue (struct usb_ep *ep, struct usb_request *req, int gfp_flags)
++{
++ return ep->ops->queue (ep, req, gfp_flags);
++}
++
++/**
++ * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
++ * @ep:the endpoint associated with the request
++ * @req:the request being canceled
++ *
++ * if the request is still active on the endpoint, it is dequeued and its
++ * completion routine is called (with status -ECONNRESET); else a negative
++ * error code is returned.
++ *
++ * note that some hardware can't clear out write fifos (to unlink the request
++ * at the head of the queue) except as part of disconnecting from usb. such
++ * restrictions prevent drivers from supporting configuration changes,
++ * even to configuration zero (a "chapter 9" requirement).
++ */
++static inline int usb_ep_dequeue (struct usb_ep *ep, struct usb_request *req)
++{
++ return ep->ops->dequeue (ep, req);
++}
++
++/**
++ * usb_ep_set_halt - sets the endpoint halt feature.
++ * @ep: the non-isochronous endpoint being stalled
++ *
++ * Use this to stall an endpoint, perhaps as an error report.
++ * Except for control endpoints,
++ * the endpoint stays halted (will not stream any data) until the host
++ * clears this feature; drivers may need to empty the endpoint's request
++ * queue first, to make sure no inappropriate transfers happen.
++ *
++ * Note that while an endpoint CLEAR_FEATURE will be invisible to the
++ * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the
++ * current altsetting, see usb_ep_clear_halt(). When switching altsettings,
++ * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
++ *
++ * Returns zero, or a negative error code. On success, this call sets
++ * underlying hardware state that blocks data transfers.
++ * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
++ * transfer requests are still queued, or if the controller hardware
++ * (usually a FIFO) still holds bytes that the host hasn't collected.
++ */
++static inline int
++usb_ep_set_halt (struct usb_ep *ep)
++{
++ return ep->ops->set_halt (ep, 1);
++}
++
++/**
++ * usb_ep_clear_halt - clears endpoint halt, and resets toggle
++ * @ep:the bulk or interrupt endpoint being reset
++ *
++ * Use this when responding to the standard usb "set interface" request,
++ * for endpoints that aren't reconfigured, after clearing any other state
++ * in the endpoint's i/o queue.
++ *
++ * Returns zero, or a negative error code. On success, this call clears
++ * the underlying hardware state reflecting endpoint halt and data toggle.
++ * Note that some hardware can't support this request (like pxa2xx_udc),
++ * and accordingly can't correctly implement interface altsettings.
++ */
++static inline int
++usb_ep_clear_halt (struct usb_ep *ep)
++{
++ return ep->ops->set_halt (ep, 0);
++}
++
++/**
++ * usb_ep_fifo_status - returns number of bytes in fifo, or error
++ * @ep: the endpoint whose fifo status is being checked.
++ *
++ * FIFO endpoints may have "unclaimed data" in them in certain cases,
++ * such as after aborted transfers. Hosts may not have collected all
++ * the IN data written by the gadget driver (and reported by a request
++ * completion). The gadget driver may not have collected all the data
++ * written OUT to it by the host. Drivers that need precise handling for
++ * fault reporting or recovery may need to use this call.
++ *
++ * This returns the number of such bytes in the fifo, or a negative
++ * errno if the endpoint doesn't use a FIFO or doesn't support such
++ * precise handling.
++ */
++static inline int
++usb_ep_fifo_status (struct usb_ep *ep)
++{
++ if (ep->ops->fifo_status)
++ return ep->ops->fifo_status (ep);
++ else
++ return -EOPNOTSUPP;
++}
++
++/**
++ * usb_ep_fifo_flush - flushes contents of a fifo
++ * @ep: the endpoint whose fifo is being flushed.
++ *
++ * This call may be used to flush the "unclaimed data" that may exist in
++ * an endpoint fifo after abnormal transaction terminations. The call
++ * must never be used except when endpoint is not being used for any
++ * protocol translation.
++ */
++static inline void
++usb_ep_fifo_flush (struct usb_ep *ep)
++{
++ if (ep->ops->fifo_flush)
++ ep->ops->fifo_flush (ep);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++struct usb_gadget;
++
++/* the rest of the api to the controller hardware: device operations,
++ * which don't involve endpoints (or i/o).
++ */
++struct usb_gadget_ops {
++ int (*get_frame)(struct usb_gadget *);
++ int (*wakeup)(struct usb_gadget *);
++ int (*set_selfpowered) (struct usb_gadget *, int is_selfpowered);
++ int (*vbus_session) (struct usb_gadget *, int is_active);
++ int (*vbus_draw) (struct usb_gadget *, unsigned mA);
++ int (*pullup) (struct usb_gadget *, int is_on);
++ int (*ioctl)(struct usb_gadget *,
++ unsigned code, unsigned long param);
++};
++
++/**
++ * struct usb_gadget - represents a usb slave device
++ * @ops: Function pointers used to access hardware-specific operations.
++ * @ep0: Endpoint zero, used when reading or writing responses to
++ * driver setup() requests
++ * @ep_list: List of other endpoints supported by the device.
++ * @speed: Speed of current connection to USB host.
++ * @is_dualspeed: True if the controller supports both high and full speed
++ * operation. If it does, the gadget driver must also support both.
++ * @is_otg: True if the USB device port uses a Mini-AB jack, so that the
++ * gadget driver must provide a USB OTG descriptor.
++ * @is_a_peripheral: False unless is_otg, the "A" end of a USB cable
++ * is in the Mini-AB jack, and HNP has been used to switch roles
++ * so that the "A" device currently acts as A-Peripheral, not A-Host.
++ * @a_hnp_support: OTG device feature flag, indicating that the A-Host
++ * supports HNP at this port.
++ * @a_alt_hnp_support: OTG device feature flag, indicating that the A-Host
++ * only supports HNP on a different root port.
++ * @b_hnp_enable: OTG device feature flag, indicating that the A-Host
++ * enabled HNP support.
++ * @name: Identifies the controller hardware type. Used in diagnostics
++ * and sometimes configuration.
++ * @dev: Driver model state for this abstract device.
++ *
++ * Gadgets have a mostly-portable "gadget driver" implementing device
++ * functions, handling all usb configurations and interfaces. Gadget
++ * drivers talk to hardware-specific code indirectly, through ops vectors.
++ * That insulates the gadget driver from hardware details, and packages
++ * the hardware endpoints through generic i/o queues. The "usb_gadget"
++ * and "usb_ep" interfaces provide that insulation from the hardware.
++ *
++ * Except for the driver data, all fields in this structure are
++ * read-only to the gadget driver. That driver data is part of the
++ * "driver model" infrastructure in 2.6 (and later) kernels, and for
++ * earlier systems is grouped in a similar structure that's not known
++ * to the rest of the kernel.
++ *
++ * Values of the three OTG device feature flags are updated before the
++ * setup() call corresponding to USB_REQ_SET_CONFIGURATION, and before
++ * driver suspend() calls. They are valid only when is_otg, and when the
++ * device is acting as a B-Peripheral (so is_a_peripheral is false).
++ */
++struct usb_gadget {
++ /* readonly to gadget driver */
++ const struct usb_gadget_ops *ops;
++ struct usb_ep *ep0;
++ struct list_head ep_list; /* of usb_ep */
++ enum usb_device_speed speed;
++ unsigned is_dualspeed:1;
++ unsigned is_otg:1;
++ unsigned is_a_peripheral:1;
++ unsigned b_hnp_enable:1;
++ unsigned a_hnp_support:1;
++ unsigned a_alt_hnp_support:1;
++ const char *name;
++
++ struct __gadget_device {
++ const char *bus_id;
++ void *driver_data;
++ } dev;
++};
++
++static inline void set_gadget_data (struct usb_gadget *gadget, void *data)
++ { gadget->dev.driver_data = data; }
++static inline void *get_gadget_data (struct usb_gadget *gadget)
++ { return gadget->dev.driver_data; }
++
++
++/* iterates the non-control endpoints; 'tmp' is a struct usb_ep pointer */
++#define gadget_for_each_ep(tmp,gadget) \
++ list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
++
++#ifndef list_for_each_entry
++/* not available in 2.4.18 */
++#define list_for_each_entry(pos, head, member) \
++ for (pos = list_entry((head)->next, typeof(*pos), member), \
++ prefetch(pos->member.next); \
++ &pos->member != (head); \
++ pos = list_entry(pos->member.next, typeof(*pos), member), \
++ prefetch(pos->member.next))
++#endif
++
++
++/**
++ * usb_gadget_frame_number - returns the current frame number
++ * @gadget: controller that reports the frame number
++ *
++ * Returns the usb frame number, normally eleven bits from a SOF packet,
++ * or negative errno if this device doesn't support this capability.
++ */
++static inline int usb_gadget_frame_number (struct usb_gadget *gadget)
++{
++ return gadget->ops->get_frame (gadget);
++}
++
++/**
++ * usb_gadget_wakeup - tries to wake up the host connected to this gadget
++ * @gadget: controller used to wake up the host
++ *
++ * Returns zero on success, else negative error code if the hardware
++ * doesn't support such attempts, or its support has not been enabled
++ * by the usb host. Drivers must return device descriptors that report
++ * their ability to support this, or hosts won't enable it.
++ *
++ * This may also try to use SRP to wake the host and start enumeration,
++ * even if OTG isn't otherwise in use. OTG devices may also start
++ * remote wakeup even when hosts don't explicitly enable it.
++ */
++static inline int usb_gadget_wakeup (struct usb_gadget *gadget)
++{
++ if (!gadget->ops->wakeup)
++ return -EOPNOTSUPP;
++ return gadget->ops->wakeup (gadget);
++}
++
++/**
++ * usb_gadget_set_selfpowered - sets the device selfpowered feature.
++ * @gadget:the device being declared as self-powered
++ *
++ * this affects the device status reported by the hardware driver
++ * to reflect that it now has a local power supply.
++ *
++ * returns zero on success, else negative errno.
++ */
++static inline int
++usb_gadget_set_selfpowered (struct usb_gadget *gadget)
++{
++ if (!gadget->ops->set_selfpowered)
++ return -EOPNOTSUPP;
++ return gadget->ops->set_selfpowered (gadget, 1);
++}
++
++/**
++ * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
++ * @gadget:the device being declared as bus-powered
++ *
++ * this affects the device status reported by the hardware driver.
++ * some hardware may not support bus-powered operation, in which
++ * case this feature's value can never change.
++ *
++ * returns zero on success, else negative errno.
++ */
++static inline int
++usb_gadget_clear_selfpowered (struct usb_gadget *gadget)
++{
++ if (!gadget->ops->set_selfpowered)
++ return -EOPNOTSUPP;
++ return gadget->ops->set_selfpowered (gadget, 0);
++}
++
++/**
++ * usb_gadget_vbus_connect - Notify controller that VBUS is powered
++ * @gadget:The device which now has VBUS power.
++ *
++ * This call is used by a driver for an external transceiver (or GPIO)
++ * that detects a VBUS power session starting. Common responses include
++ * resuming the controller, activating the D+ (or D-) pullup to let the
++ * host detect that a USB device is attached, and starting to draw power
++ * (8mA or possibly more, especially after SET_CONFIGURATION).
++ *
++ * Returns zero on success, else negative errno.
++ */
++static inline int
++usb_gadget_vbus_connect(struct usb_gadget *gadget)
++{
++ if (!gadget->ops->vbus_session)
++ return -EOPNOTSUPP;
++ return gadget->ops->vbus_session (gadget, 1);
++}
++
++/**
++ * usb_gadget_vbus_draw - constrain controller's VBUS power usage
++ * @gadget:The device whose VBUS usage is being described
++ * @mA:How much current to draw, in milliAmperes. This should be twice
++ * the value listed in the configuration descriptor bMaxPower field.
++ *
++ * This call is used by gadget drivers during SET_CONFIGURATION calls,
++ * reporting how much power the device may consume. For example, this
++ * could affect how quickly batteries are recharged.
++ *
++ * Returns zero on success, else negative errno.
++ */
++static inline int
++usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
++{
++ if (!gadget->ops->vbus_draw)
++ return -EOPNOTSUPP;
++ return gadget->ops->vbus_draw (gadget, mA);
++}
++
++/**
++ * usb_gadget_vbus_disconnect - notify controller about VBUS session end
++ * @gadget:the device whose VBUS supply is being described
++ *
++ * This call is used by a driver for an external transceiver (or GPIO)
++ * that detects a VBUS power session ending. Common responses include
++ * reversing everything done in usb_gadget_vbus_connect().
++ *
++ * Returns zero on success, else negative errno.
++ */
++static inline int
++usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
++{
++ if (!gadget->ops->vbus_session)
++ return -EOPNOTSUPP;
++ return gadget->ops->vbus_session (gadget, 0);
++}
++
++/**
++ * usb_gadget_connect - software-controlled connect to USB host
++ * @gadget:the peripheral being connected
++ *
++ * Enables the D+ (or potentially D-) pullup. The host will start
++ * enumerating this gadget when the pullup is active and a VBUS session
++ * is active (the link is powered). This pullup is always enabled unless
++ * usb_gadget_disconnect() has been used to disable it.
++ *
++ * Returns zero on success, else negative errno.
++ */
++static inline int
++usb_gadget_connect (struct usb_gadget *gadget)
++{
++ if (!gadget->ops->pullup)
++ return -EOPNOTSUPP;
++ return gadget->ops->pullup (gadget, 1);
++}
++
++/**
++ * usb_gadget_disconnect - software-controlled disconnect from USB host
++ * @gadget:the peripheral being disconnected
++ *
++ * Disables the D+ (or potentially D-) pullup, which the host may see
++ * as a disconnect (when a VBUS session is active). Not all systems
++ * support software pullup controls.
++ *
++ * This routine may be used during the gadget driver bind() call to prevent
++ * the peripheral from ever being visible to the USB host, unless later
++ * usb_gadget_connect() is called. For example, user mode components may
++ * need to be activated before the system can talk to hosts.
++ *
++ * Returns zero on success, else negative errno.
++ */
++static inline int
++usb_gadget_disconnect (struct usb_gadget *gadget)
++{
++ if (!gadget->ops->pullup)
++ return -EOPNOTSUPP;
++ return gadget->ops->pullup (gadget, 0);
++}
++
++
++
++/*-------------------------------------------------------------------------*/
++
++/**
++ * struct usb_gadget_driver - driver for usb 'slave' devices
++ * @function: String describing the gadget's function
++ * @speed: Highest speed the driver handles.
++ * @bind: Invoked when the driver is bound to a gadget, usually
++ * after registering the driver.
++ * At that point, ep0 is fully initialized, and ep_list holds
++ * the currently-available endpoints.
++ * Called in a context that permits sleeping.
++ * @setup: Invoked for ep0 control requests that aren't handled by
++ * the hardware level driver. Most calls must be handled by
++ * the gadget driver, including descriptor and configuration
++ * management. The 16 bit members of the setup data are in
++ * cpu order. Called in_interrupt; this may not sleep. Driver
++ * queues a response to ep0, or returns negative to stall.
++ * @disconnect: Invoked after all transfers have been stopped,
++ * when the host is disconnected. May be called in_interrupt; this
++ * may not sleep. Some devices can't detect disconnect, so this might
++ * not be called except as part of controller shutdown.
++ * @unbind: Invoked when the driver is unbound from a gadget,
++ * usually from rmmod (after a disconnect is reported).
++ * Called in a context that permits sleeping.
++ * @suspend: Invoked on USB suspend. May be called in_interrupt.
++ * @resume: Invoked on USB resume. May be called in_interrupt.
++ * @driver: Driver model state for this driver.
++ *
++ * Devices are disabled till a gadget driver successfully bind()s, which
++ * means the driver will handle setup() requests needed to enumerate (and
++ * meet "chapter 9" requirements) then do some useful work.
++ *
++ * If gadget->is_otg is true, the gadget driver must provide an OTG
++ * descriptor during enumeration, or else fail the bind() call. In such
++ * cases, no USB traffic may flow until both bind() returns without
++ * having called usb_gadget_disconnect(), and the USB host stack has
++ * initialized.
++ *
++ * Drivers use hardware-specific knowledge to configure the usb hardware.
++ * endpoint addressing is only one of several hardware characteristics that
++ * are in descriptors the ep0 implementation returns from setup() calls.
++ *
++ * Except for ep0 implementation, most driver code shouldn't need change to
++ * run on top of different usb controllers. It'll use endpoints set up by
++ * that ep0 implementation.
++ *
++ * The usb controller driver handles a few standard usb requests. Those
++ * include set_address, and feature flags for devices, interfaces, and
++ * endpoints (the get_status, set_feature, and clear_feature requests).
++ *
++ * Accordingly, the driver's setup() callback must always implement all
++ * get_descriptor requests, returning at least a device descriptor and
++ * a configuration descriptor. Drivers must make sure the endpoint
++ * descriptors match any hardware constraints. Some hardware also constrains
++ * other descriptors. (The pxa250 allows only configurations 1, 2, or 3).
++ *
++ * The driver's setup() callback must also implement set_configuration,
++ * and should also implement set_interface, get_configuration, and
++ * get_interface. Setting a configuration (or interface) is where
++ * endpoints should be activated or (config 0) shut down.
++ *
++ * (Note that only the default control endpoint is supported. Neither
++ * hosts nor devices generally support control traffic except to ep0.)
++ *
++ * Most devices will ignore USB suspend/resume operations, and so will
++ * not provide those callbacks. However, some may need to change modes
++ * when the host is not longer directing those activities. For example,
++ * local controls (buttons, dials, etc) may need to be re-enabled since
++ * the (remote) host can't do that any longer; or an error state might
++ * be cleared, to make the device behave identically whether or not
++ * power is maintained.
++ */
++struct usb_gadget_driver {
++ char *function;
++ enum usb_device_speed speed;
++ int (*bind)(struct usb_gadget *);
++ void (*unbind)(struct usb_gadget *);
++ int (*setup)(struct usb_gadget *,
++ const struct usb_ctrlrequest *);
++ void (*disconnect)(struct usb_gadget *);
++ void (*suspend)(struct usb_gadget *);
++ void (*resume)(struct usb_gadget *);
++
++ // FIXME support safe rmmod
++ struct __gadget_driver {
++ const char *name;
++ void *driver_data;
++ } driver;
++};
++
++
++
++/*-------------------------------------------------------------------------*/
++
++/* driver modules register and unregister, as usual.
++ * these calls must be made in a context that can sleep.
++ *
++ * these will usually be implemented directly by the hardware-dependent
++ * usb bus interface driver, which will only support a single driver.
++ */
++
++/**
++ * usb_gadget_register_driver - register a gadget driver
++ * @driver:the driver being registered
++ *
++ * Call this in your gadget driver's module initialization function,
++ * to tell the underlying usb controller driver about your driver.
++ * The driver's bind() function will be called to bind it to a
++ * gadget. This function must be called in a context that can sleep.
++ */
++int usb_gadget_register_driver (struct usb_gadget_driver *driver);
++
++/**
++ * usb_gadget_unregister_driver - unregister a gadget driver
++ * @driver:the driver being unregistered
++ *
++ * Call this in your gadget driver's module cleanup function,
++ * to tell the underlying usb controller that your driver is
++ * going away. If the controller is connected to a USB host,
++ * it will first disconnect(). The driver is also requested
++ * to unbind() and clean up any device state, before this procedure
++ * finally returns.
++ * This function must be called in a context that can sleep.
++ */
++int usb_gadget_unregister_driver (struct usb_gadget_driver *driver);
++
++/*-------------------------------------------------------------------------*/
++
++/* utility to simplify dealing with string descriptors */
++
++/**
++ * struct usb_string - wraps a C string and its USB id
++ * @id:the (nonzero) ID for this string
++ * @s:the string, in UTF-8 encoding
++ *
++ * If you're using usb_gadget_get_string(), use this to wrap a string
++ * together with its ID.
++ */
++struct usb_string {
++ u8 id;
++ const char *s;
++};
++
++/**
++ * struct usb_gadget_strings - a set of USB strings in a given language
++ * @language:identifies the strings' language (0x0409 for en-us)
++ * @strings:array of strings with their ids
++ *
++ * If you're using usb_gadget_get_string(), use this to wrap all the
++ * strings for a given language.
++ */
++struct usb_gadget_strings {
++ u16 language; /* 0x0409 for en-us */
++ struct usb_string *strings;
++};
++
++/* put descriptor for string with that id into buf (buflen >= 256) */
++int usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf);
++
++/*-------------------------------------------------------------------------*/
++
++/* utility to simplify managing config descriptors */
++
++/* write vector of descriptors into buffer */
++int usb_descriptor_fillbuf(void *, unsigned,
++ const struct usb_descriptor_header **);
++
++/* build config descriptor from single descriptor vector */
++int usb_gadget_config_buf(const struct usb_config_descriptor *config,
++ void *buf, unsigned buflen, const struct usb_descriptor_header **desc);
++
++/*-------------------------------------------------------------------------*/
++
++/* utility wrapping a simple endpoint selection policy */
++
++extern struct usb_ep *usb_ep_autoconfig (struct usb_gadget *,
++ struct usb_endpoint_descriptor *) __init;
++
++extern void usb_ep_autoconfig_reset (struct usb_gadget *) __init;
++
++#endif /* __KERNEL__ */
++
++#endif /* __LINUX_USB_GADGET_H */
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/include/linux/usb_gadgetfs.h kernel/include/linux/usb_gadgetfs.h
+--- /tmp/kernel/include/linux/usb_gadgetfs.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/include/linux/usb_gadgetfs.h 2005-04-22 17:53:19.370549935 +0200
+@@ -0,0 +1,75 @@
++
++#include <asm/types.h>
++#include <asm/ioctl.h>
++
++#include <linux/usb_ch9.h>
++
++/*
++ * Filesystem based user-mode API to USB Gadget controller hardware
++ *
++ * Almost everything can be done with only read and write operations,
++ * on endpoint files found in one directory. They are configured by
++ * writing descriptors, and then may be used for normal stream style
++ * i/o requests. When ep0 is configured, the device can enumerate;
++ * when it's closed, the device disconnects from usb.
++ *
++ * Configuration and device descriptors get written to /dev/gadget/$CHIP,
++ * which may then be used to read usb_gadgetfs_event structs. The driver
++ * may activate endpoints as it handles SET_CONFIGURATION setup events,
++ * or earlier; writing endpoint descriptors to /dev/gadget/$ENDPOINT
++ * then performing data transfers by reading or writing.
++ */
++
++/*
++ * Events are delivered on the ep0 file descriptor, if the user mode driver
++ * reads from this file descriptor after writing the descriptors. Don't
++ * stop polling this descriptor, if you write that kind of driver.
++ */
++
++enum usb_gadgetfs_event_type {
++ GADGETFS_NOP = 0,
++
++ GADGETFS_CONNECT,
++ GADGETFS_DISCONNECT,
++ GADGETFS_SETUP,
++ GADGETFS_SUSPEND,
++ // and likely more !
++};
++
++struct usb_gadgetfs_event {
++ enum usb_gadgetfs_event_type type;
++ union {
++ // NOP, DISCONNECT, SUSPEND: nothing
++ // ... some hardware can't report disconnection
++
++ // CONNECT: just the speed
++ enum usb_device_speed speed;
++
++ // SETUP: packet; DATA phase i/o precedes next event
++ // (setup.bmRequestType & USB_DIR_IN) flags direction
++ // ... includes SET_CONFIGURATION, SET_INTERFACE
++ struct usb_ctrlrequest setup;
++ } u;
++};
++
++
++/* endpoint ioctls */
++
++/* IN transfers may be reported to the gadget driver as complete
++ * when the fifo is loaded, before the host reads the data;
++ * OUT transfers may be reported to the host's "client" driver as
++ * complete when they're sitting in the FIFO unread.
++ * THIS returns how many bytes are "unclaimed" in the endpoint fifo
++ * (needed for precise fault handling, when the hardware allows it)
++ */
++#define GADGETFS_FIFO_STATUS _IO('g',1)
++
++/* discards any unclaimed data in the fifo. */
++#define GADGETFS_FIFO_FLUSH _IO('g',2)
++
++/* resets endpoint halt+toggle; used to implement set_interface.
++ * some hardware (like pxa2xx) can't support this.
++ */
++#define GADGETFS_CLEAR_HALT _IO('g',3)
++
++
+diff -x '*~' -x '.*' -r -N -u /tmp/kernel/include/linux/usb_scanner_ioctl.h kernel/include/linux/usb_scanner_ioctl.h
+--- /tmp/kernel/include/linux/usb_scanner_ioctl.h 1970-01-01 01:00:00.000000000 +0100
++++ kernel/include/linux/usb_scanner_ioctl.h 2005-04-22 17:53:19.372549610 +0200
+@@ -0,0 +1,9 @@
++/* USB Scanner IOCTLS */
++
++/* read vendor and product IDs from the scanner */
++#define SCANNER_IOCTL_VENDOR _IOR('U', 0x20, int)
++#define SCANNER_IOCTL_PRODUCT _IOR('U', 0x21, int)
++/* send/recv a control message to the scanner */
++#define SCANNER_IOCTL_CTRLMSG _IOWR('U', 0x22, struct usb_ctrlrequest )
++
++
diff --git a/packages/linux/files/linux-2.4.18-list_move.patch b/packages/linux/files/linux-2.4.18-list_move.patch
index e69de29bb2..faec56330b 100644
--- a/packages/linux/files/linux-2.4.18-list_move.patch
+++ b/packages/linux/files/linux-2.4.18-list_move.patch
@@ -0,0 +1,32 @@
+--- linux/include/linux/list.h~ 2001-12-21 17:42:03.000000000 +0000
++++ linux/include/linux/list.h 2004-06-14 23:41:33.000000000 +0100
+@@ -105,6 +105,29 @@
+ }
+
+ /**
++ * list_move - delete from one list and add as another's head
++ * @list: the entry to move
++ * @head: the head that will precede our entry
++ */
++static inline void list_move(struct list_head *list, struct list_head *head)
++{
++ __list_del(list->prev, list->next);
++ list_add(list, head);
++}
++
++/**
++ * list_move_tail - delete from one list and add as another's tail
++ * @list: the entry to move
++ * @head: the head that will follow our entry
++ */
++static inline void list_move_tail(struct list_head *list,
++ struct list_head *head)
++{
++ __list_del(list->prev, list->next);
++ list_add_tail(list, head);
++}
++
++/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
diff --git a/packages/linux/files/mipv6-1.1-v2.4.25.patch b/packages/linux/files/mipv6-1.1-v2.4.25.patch
index e69de29bb2..b7f9b8fc8a 100644
--- a/packages/linux/files/mipv6-1.1-v2.4.25.patch
+++ b/packages/linux/files/mipv6-1.1-v2.4.25.patch
@@ -0,0 +1,19832 @@
+diff -uprN linux-2.4.25.old/Documentation/Configure.help linux-2.4.25/Documentation/Configure.help
+--- linux-2.4.25.old/Documentation/Configure.help 2004-06-26 11:22:00.000000000 +0100
++++ linux-2.4.25/Documentation/Configure.help 2004-06-26 11:29:29.000000000 +0100
+@@ -6204,6 +6204,57 @@ CONFIG_IPV6
+
+ It is safe to say N here for now.
+
++IPv6: IPv6 over IPv6 Tunneling (EXPERIMENTAL)
++CONFIG_IPV6_TUNNEL
++ Experimental IP6-IP6 tunneling. You must select this, if you want
++ to use CONFIG_IPV6_MOBILITY. More information in MIPL Mobile IPv6
++ instructions.
++
++ If you don't want IP6-IP6 tunnels and Mobile IPv6, say N.
++
++IPv6: Mobility Support (EXPERIMENTAL)
++CONFIG_IPV6_MOBILITY
++ This is experimental support for the upcoming specification of
++ Mobile IPv6. Mobile IPv6 allows nodes to seamlessly move between
++ networks without changing their IP addresses, thus allowing them to
++ maintain upper layer connections (e.g. TCP). Selecting this option
++ allows your computer to act as a Correspondent Node (CN). A MIPv6
++ Mobile Node will be able to communicate with the CN and use route
++ optimization.
++
++ For more information and configuration details, see
++ http://www.mipl.mediapoli.com/.
++
++ If unsure, say N.
++
++MIPv6: Mobile Node Support
++CONFIG_IPV6_MOBILITY_MN
++ If you want your computer to be a MIPv6 Mobile Node (MN), select
++ this option. You must configure MN using the userspace tools
++ available at http://www.mipl.mediapoli.com/download/mipv6-tools/.
++
++ If your computer is stationary, or you are unsure if you need this,
++ say N. Note that you will need a properly configured MIPv6 Home
++ Agent to use any Mobile Nodes.
++
++MIPv6: Home Agent Support
++CONFIG_IPV6_MOBILITY_HA
++ If you want your router to serve as a MIPv6 Home Agent (HA), select
++ this option. You must configure HA using the userspace tools
++ available at http://www.mipl.mediapoli.com/download/mipv6-tools/.
++
++ If your computer is not a router, or you are unsure if you need
++ this, say N.
++
++MIPv6: Debug messages
++CONFIG_IPV6_MOBILITY_DEBUG
++ MIPL Mobile IPv6 can produce a lot of debugging messages. There are
++ eight debug levels (0 through 7) and the level is controlled via
++ /proc/sys/net/ipv6/mobility/debuglevel. Since MIPL is still
++ experimental, you might want to say Y here.
++
++ Be sure to say Y and record debug messages when submitting a bug
++ report.
+ The SCTP Protocol (EXPERIMENTAL)
+ CONFIG_IP_SCTP
+ Stream Control Transmission Protocol
+diff -uprN linux-2.4.25.old/Documentation/DocBook/Makefile linux-2.4.25/Documentation/DocBook/Makefile
+--- linux-2.4.25.old/Documentation/DocBook/Makefile 2002-11-28 23:53:08.000000000 +0000
++++ linux-2.4.25/Documentation/DocBook/Makefile 2004-06-26 11:29:29.000000000 +0100
+@@ -2,7 +2,7 @@ BOOKS := wanbook.sgml z8530book.sgml mca
+ kernel-api.sgml parportbook.sgml kernel-hacking.sgml \
+ kernel-locking.sgml via-audio.sgml mousedrivers.sgml sis900.sgml \
+ deviceiobook.sgml procfs-guide.sgml tulip-user.sgml \
+- journal-api.sgml
++ journal-api.sgml mip6-func.sgml
+
+ PS := $(patsubst %.sgml, %.ps, $(BOOKS))
+ PDF := $(patsubst %.sgml, %.pdf, $(BOOKS))
+@@ -86,6 +86,9 @@ videobook.sgml: videobook.tmpl $(TOPDIR)
+ procfs-guide.sgml: procfs-guide.tmpl procfs_example.sgml
+ $(TOPDIR)/scripts/docgen < procfs-guide.tmpl >$@
+
++mip6-func.sgml: mip6-func.tmpl
++ $(TOPDIR)/scripts/docgen <$< >$@
++
+ APISOURCES := $(TOPDIR)/drivers/media/video/videodev.c \
+ $(TOPDIR)/arch/i386/kernel/irq.c \
+ $(TOPDIR)/arch/i386/kernel/mca.c \
+diff -uprN linux-2.4.25.old/Documentation/DocBook/mip6-func.tmpl linux-2.4.25/Documentation/DocBook/mip6-func.tmpl
+--- linux-2.4.25.old/Documentation/DocBook/mip6-func.tmpl 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/Documentation/DocBook/mip6-func.tmpl 2004-06-26 11:29:29.000000000 +0100
+@@ -0,0 +1,756 @@
++<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook V3.1//EN"[]>
++<book id="LinuxMobileIPv6">
++ <bookinfo>
++ <title>MIPL Mobile IPv6 Function Reference Guide</title>
++
++ <authorgroup>
++ <author>
++ <othername>MIPL Mobile IPv6 for Linux Team</othername>
++ <affiliation>
++ <orgname>Helsinki University of Technology</orgname>
++ <orgdiv>Telecommunications Software and Multimedia Lab</orgdiv>
++ <address>
++ <pob>PO BOX 9201</pob>
++ <postcode>FIN-02015 HUT</postcode>
++ <country>Finland</country>
++ <email>mipl@list.mipl.mediapoli.com</email>
++ </address>
++ </affiliation>
++ </author>
++ </authorgroup>
++
++ <copyright>
++ <year>2000-2001</year>
++ <holder>Helsinki University of Technology</holder>
++ </copyright>
++
++ <legalnotice>
++ <para>
++ Copyright (c) 2001, 2002 MIPL Mobile IPv6 for Linux Team.
++ </para>
++ <para>
++ Permission is granted to copy, distribute and/or modify this
++ document under the terms of the GNU Free Documentation License,
++ Version 1.1 published by the Free Software Foundation; with the
++ Invariant Sections being "Introduction", with the Front-Cover
++ Texts being "MIPL Mobile IPv6 Function Reference Guide", "MIPL
++ Mobile IPv6 for Linux Team" and "Helsinki University of
++ Technology". A copy of the license is included in <xref
++ linkend="gfdl">.
++ </para>
++
++ </legalnotice>
++ </bookinfo>
++
++<toc></toc>
++
++ <preface id="intro">
++ <title>Introduction</title>
++
++ <para>
++ MIPL Mobile IPv6 for Linux is an implementation of Mobility
++ Support in IPv6 IETF mobile-ip working groups Internet-Draft
++ (draft-ietf-mobileip-ipv6). This implementation has been
++ developed in the Telecommunications Software and Multimedia
++ Laboratory at Helsinki University of Technology.
++ </para>
++
++ <para>
++ MIPL is fully open source, licensed under the GNU General
++ Public License. Latest source for MIPL can be downloaded from
++ the MIPL website at:
++ </para>
++ <programlisting>
++ http://www.mipl.mediapoli.com/.
++ </programlisting>
++ <para>
++ Developers and users interested in MIPL can subscribe to the
++ MIPL mailing list by sending e-mail to
++ <email>majordomo@list.mipl.mediapoli.com</email> with
++ </para>
++ <programlisting>
++ subscribe mipl
++ </programlisting>
++ <para>
++ in the body of the message.
++ </para>
++
++ <para>
++ This document is a reference guide to MIPL functions. Intended
++ audience is developers wishing to contribute to the project.
++ Hopefully this document will make it easier and quicker to
++ understand and adopt the inner workings of MIPL Mobile IPv6.
++ </para>
++
++ <para>
++ MIPL Mobile IPv6 for Linux Team members (past and present):
++
++ <itemizedlist>
++ <listitem>
++ <address>
++ Sami Kivisaari <email>Sami.Kivisaari@hut.fi</email>
++ </address>
++ </listitem>
++ <listitem>
++ <address>
++ Niklas Kampe <email>Niklas.Kampe@hut.fi</email>
++ </address>
++ </listitem>
++ <listitem>
++ <address>
++ Juha Mynttinen <email>Juha.Mynttinen@hut.fi</email>
++ </address>
++ </listitem>
++ <listitem>
++ <address>
++ Toni Nykanen <email>Toni.Nykanen@iki.fi</email>
++ </address>
++ </listitem>
++ <listitem>
++ <address>
++ Henrik Petander <email>Henrik.Petander@hut.fi</email>
++ </address>
++ </listitem>
++ <listitem>
++ <address>
++ Antti Tuominen <email>ajtuomin@tml.hut.fi</email>
++ </address>
++ </listitem>
++ </itemizedlist>
++
++ <itemizedlist>
++ <listitem>
++ <address>
++ Marko Myllynen
++ </address>
++ </listitem>
++ <listitem>
++ <address>
++ Ville Nuorvala <email>vnuorval@tcs.hut.fi</email>
++ </address>
++ </listitem>
++ <listitem>
++ <address>
++ Jaakko Laine <email>Jaakko.Laine@hut.fi</email>
++ </address>
++ </listitem>
++ </itemizedlist>
++ </para>
++
++ </preface>
++
++ <chapter id="common">
++ <title>Common functions for all entities</title>
++
++ <sect1><title>Low-level functions</title>
++ <para>
++ These functions implement memory allocation used by others.
++ Hashlist functions implement a linked list with hash lookup,
++ which is used with Binding Update List, Binding Cache, Home
++ Agents List etc.
++ </para>
++!Inet/ipv6/mobile_ip6/mempool.h
++!Inet/ipv6/mobile_ip6/hashlist.h
++ </sect1>
++
++ <sect1><title>Debug functions</title>
++ <para>
++ Debug and utility functions. These functions are available if
++ <constant>CONFIG_IPV6_MOBILITY_DEBUG</constant> is set.
++ Otherwise macros expand to no operation.
++ </para>
++!Inet/ipv6/mobile_ip6/debug.h
++!Inet/ipv6/mobile_ip6/mipv6.c
++ </sect1>
++
++ <sect1><title>Extension Header functions</title>
++ <para>
++ These functions create and handle extension headers that are
++ specific to MIPv6.
++ </para>
++!Inet/ipv6/mobile_ip6/exthdrs.c
++ </sect1>
++
++ <sect1><title>Mobility Header functions</title>
++ <para>
++ MIPv6 specifies a new protocol called Mobility Header.
++ Mobility Header has several message types. Messages may also
++ carry Mobility Options. These functions are used to create and
++ handle Mobility Headers and Mobility Options.
++ </para>
++!Inet/ipv6/mobile_ip6/sendopts.c
++!Inet/ipv6/mobile_ip6/mh_recv.c
++!Inet/ipv6/mobile_ip6/auth_subopt.c
++ </sect1>
++
++ <sect1><title>Binding Cache</title>
++ <para>
++ All Mobile IPv6 entities have a binding cache. These functions
++ provide easy manipulation of the binding cache.
++ </para>
++!Inet/ipv6/mobile_ip6/bcache.c
++ </sect1>
++
++ <sect1><title>Security</title>
++
++ <para>
++ These functions are common authentication functions and
++ implement Draft 13 style IPSec AH support for Binding Updates.
++ </para>
++!Inet/ipv6/mobile_ip6/ah_algo.c
++!Inet/ipv6/mobile_ip6/sadb.c
++!Inet/ipv6/mobile_ip6/ah.c
++ </sect1>
++
++ <sect1><title>Utility functions</title>
++
++ <para>
++ These functions are general utility functions commonly used by
++ all entities.
++ </para>
++!Inet/ipv6/mobile_ip6/util.c
++ </sect1>
++
++ </chapter>
++
++ <chapter id="mn">
++ <title>Mobile Node functions</title>
++ <sect1><title>General functions</title>
++ <para>
++ </para>
++!Inet/ipv6/mobile_ip6/mn.c
++ </sect1>
++
++ <sect1><title>Binding Update List</title>
++ <para>
++ Mobile Node keeps track of sent binding updates in Binding
++ Update List.
++ </para>
++!Inet/ipv6/mobile_ip6/bul.c
++ </sect1>
++
++ <sect1><title>Movement detection</title>
++
++ <para>
++ These functions are used by the mobile node for movement
++ detection.
++ </para>
++!Inet/ipv6/mobile_ip6/mdetect.c
++ </sect1>
++ </chapter>
++
++ <chapter id="ha">
++ <title>Home Agent functions</title>
++ <sect1><title>General functions</title>
++ <para>
++ </para>
++!Inet/ipv6/mobile_ip6/ha.c
++ </sect1>
++
++ <sect1><title>Duplicate Address Detection functions</title>
++ <para>
++ Home Agent does Duplicate Address Detection for Mobile Nodes'
++ addresses. These functions implement MIPv6 specific DAD
++ functionality.
++ </para>
++!Inet/ipv6/mobile_ip6/dad.c
++ </sect1>
++
++ </chapter>
++ <appendix id="gfdl">
++ <title>GNU Free Documentation License</title>
++
++ <para>
++ Version 1.1, March 2000
++ </para>
++
++ <programlisting>
++ Copyright (C) 2000 Free Software Foundation, Inc.
++ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++ </programlisting>
++
++ <sect1><title>0. PREAMBLE</title>
++
++ <para>
++ The purpose of this License is to make a manual, textbook, or
++ other written document "free" in the sense of freedom: to
++ assure everyone the effective freedom to copy and redistribute
++ it, with or without modifying it, either commercially or
++ noncommercially. Secondarily, this License preserves for the
++ author and publisher a way to get credit for their work, while
++ not being considered responsible for modifications made by
++ others.
++ </para>
++
++ <para>
++ This License is a kind of "copyleft", which means that
++ derivative works of the document must themselves be free in the
++ same sense. It complements the GNU General Public License,
++ which is a copyleft license designed for free software.
++ </para>
++
++ <para>
++ We have designed this License in order to use it for manuals
++ for free software, because free software needs free
++ documentation: a free program should come with manuals
++ providing the same freedoms that the software does. But this
++ License is not limited to software manuals; it can be used for
++ any textual work, regardless of subject matter or whether it is
++ published as a printed book. We recommend this License
++ principally for works whose purpose is instruction or
++ reference.
++ </para>
++
++ </sect1>
++ <sect1><title>1. APPLICABILITY AND DEFINITIONS</title>
++
++ <para>
++ This License applies to any manual or other work that contains
++ a notice placed by the copyright holder saying it can be
++ distributed under the terms of this License. The "Document",
++ below, refers to any such manual or work. Any member of the
++ public is a licensee, and is addressed as "you".
++ </para>
++
++ <para>
++ A "Modified Version" of the Document means any work containing
++ the Document or a portion of it, either copied verbatim, or
++ with modifications and/or translated into another language.
++ </para>
++
++ <para>
++ A "Secondary Section" is a named appendix or a front-matter
++ section of the Document that deals exclusively with the
++ relationship of the publishers or authors of the Document to
++ the Document's overall subject (or to related matters) and
++ contains nothing that could fall directly within that overall
++ subject. (For example, if the Document is in part a textbook of
++ mathematics, a Secondary Section may not explain any
++ mathematics.) The relationship could be a matter of historical
++ connection with the subject or with related matters, or of
++ legal, commercial, philosophical, ethical or political position
++ regarding them.
++ </para>
++
++ <para>
++ The "Invariant Sections" are certain Secondary Sections whose
++ titles are designated, as being those of Invariant Sections, in
++ the notice that says that the Document is released under this
++ License.
++ </para>
++
++ <para>
++ The "Cover Texts" are certain short passages of text that are
++ listed, as Front-Cover Texts or Back-Cover Texts, in the notice
++ that says that the Document is released under this License.
++ </para>
++
++ <para>
++ A "Transparent" copy of the Document means a machine-readable
++ copy, represented in a format whose specification is available
++ to the general public, whose contents can be viewed and edited
++ directly and straightforwardly with generic text editors or
++ (for images composed of pixels) generic paint programs or (for
++ drawings) some widely available drawing editor, and that is
++ suitable for input to text formatters or for automatic
++ translation to a variety of formats suitable for input to text
++ formatters. A copy made in an otherwise Transparent file format
++ whose markup has been designed to thwart or discourage
++ subsequent modification by readers is not Transparent. A copy
++ that is not "Transparent" is called "Opaque".
++ </para>
++
++ <para>
++ Examples of suitable formats for Transparent copies include
++ plain ASCII without markup, Texinfo input format, LaTeX input
++ format, SGML or XML using a publicly available DTD, and
++ standard-conforming simple HTML designed for human
++ modification. Opaque formats include PostScript, PDF,
++ proprietary formats that can be read and edited only by
++ proprietary word processors, SGML or XML for which the DTD
++ and/or processing tools are not generally available, and the
++ machine-generated HTML produced by some word processors for
++ output purposes only.
++ </para>
++
++ <para>
++ The "Title Page" means, for a printed book, the title page
++ itself, plus such following pages as are needed to hold,
++ legibly, the material this License requires to appear in the
++ title page. For works in formats which do not have any title
++ page as such, "Title Page" means the text near the most
++ prominent appearance of the work's title, preceding the
++ beginning of the body of the text.
++ </para>
++
++ </sect1>
++ <sect1><title>2. VERBATIM COPYING</title>
++
++ <para>
++ You may copy and distribute the Document in any medium, either
++ commercially or noncommercially, provided that this License,
++ the copyright notices, and the license notice saying this
++ License applies to the Document are reproduced in all copies,
++ and that you add no other conditions whatsoever to those of
++ this License. You may not use technical measures to obstruct or
++ control the reading or further copying of the copies you make
++ or distribute. However, you may accept compensation in exchange
++ for copies. If you distribute a large enough number of copies
++ you must also follow the conditions in section 3.
++ </para>
++
++ <para>
++ You may also lend copies, under the same conditions stated
++ above, and you may publicly display copies.
++ </para>
++
++ </sect1>
++ <sect1><title>3. COPYING IN QUANTITY</title>
++
++ <para>
++ If you publish printed copies of the Document numbering more
++ than 100, and the Document's license notice requires Cover
++ Texts, you must enclose the copies in covers that carry,
++ clearly and legibly, all these Cover Texts: Front-Cover Texts
++ on the front cover, and Back-Cover Texts on the back
++ cover. Both covers must also clearly and legibly identify you
++ as the publisher of these copies. The front cover must present
++ the full title with all words of the title equally prominent
++ and visible. You may add other material on the covers in
++ addition. Copying with changes limited to the covers, as long
++ as they preserve the title of the Document and satisfy these
++ conditions, can be treated as verbatim copying in other
++ respects.
++ </para>
++
++ <para>
++ If the required texts for either cover are too voluminous to
++ fit legibly, you should put the first ones listed (as many as
++ fit reasonably) on the actual cover, and continue the rest onto
++ adjacent pages.
++ </para>
++
++ <para>
++ If you publish or distribute Opaque copies of the Document
++ numbering more than 100, you must either include a
++ machine-readable Transparent copy along with each Opaque copy,
++ or state in or with each Opaque copy a publicly-accessible
++ computer-network location containing a complete Transparent
++ copy of the Document, free of added material, which the general
++ network-using public has access to download anonymously at no
++ charge using public-standard network protocols. If you use the
++ latter option, you must take reasonably prudent steps, when you
++ begin distribution of Opaque copies in quantity, to ensure that
++ this Transparent copy will remain thus accessible at the stated
++ location until at least one year after the last time you
++ distribute an Opaque copy (directly or through your agents or
++ retailers) of that edition to the public.
++ </para>
++
++ <para>
++ It is requested, but not required, that you contact the authors
++ of the Document well before redistributing any large number of
++ copies, to give them a chance to provide you with an updated
++ version of the Document.
++ </para>
++
++ </sect1>
++ <sect1><title>4. MODIFICATIONS</title>
++
++ <para>
++ You may copy and distribute a Modified Version of the Document
++ under the conditions of sections 2 and 3 above, provided that
++ you release the Modified Version under precisely this License,
++ with the Modified Version filling the role of the Document,
++ thus licensing distribution and modification of the Modified
++ Version to whoever possesses a copy of it. In addition, you
++ must do these things in the Modified Version:
++ </para>
++
++ <para>
++ <itemizedlist spacing=compact>
++ <listitem>
++ <para>
++ A. Use in the Title Page (and on the covers, if any) a title
++ distinct from that of the Document, and from those of previous
++ versions (which should, if there were any, be listed in the
++ History section of the Document). You may use the same title
++ as a previous version if the original publisher of that
++ version gives permission.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ B. List on the Title Page, as authors, one or more persons
++ or entities responsible for authorship of the modifications in
++ the Modified Version, together with at least five of the
++ principal authors of the Document (all of its principal
++ authors, if it has less than five).
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ C. State on the Title page the name of the publisher of the
++ Modified Version, as the publisher.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ D. Preserve all the copyright notices of the Document.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ E. Add an appropriate copyright notice for your
++ modifications adjacent to the other copyright notices.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ F. Include, immediately after the copyright notices, a
++ license notice giving the public permission to use the
++ Modified Version under the terms of this License, in the form
++ shown in the Addendum below.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ G. Preserve in that license notice the full lists of
++ Invariant Sections and required Cover Texts given in the
++ Document's license notice.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ H. Include an unaltered copy of this License.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ I. Preserve the section entitled "History", and its title,
++ and add to it an item stating at least the title, year, new
++ authors, and publisher of the Modified Version as given on the
++ Title Page. If there is no section entitled "History" in the
++ Document, create one stating the title, year, authors, and
++ publisher of the Document as given on its Title Page, then add
++ an item describing the Modified Version as stated in the
++ previous sentence.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ J. Preserve the network location, if any, given in the
++ Document for public access to a Transparent copy of the
++ Document, and likewise the network locations given in the
++ Document for previous versions it was based on. These may be
++ placed in the "History" section. You may omit a network
++ location for a work that was published at least four years
++ before the Document itself, or if the original publisher of
++ the version it refers to gives permission.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ K. In any section entitled "Acknowledgements" or
++ "Dedications", preserve the section's title, and preserve in
++ the section all the substance and tone of each of the
++ contributor acknowledgements and/or dedications given therein.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ L. Preserve all the Invariant Sections of the Document,
++ unaltered in their text and in their titles. Section numbers
++ or the equivalent are not considered part of the section
++ titles.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ M. Delete any section entitled "Endorsements". Such a
++ section may not be included in the Modified Version.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ N. Do not retitle any existing section as "Endorsements" or
++ to conflict in title with any Invariant Section.
++ </para>
++ </listitem>
++ </itemizedlist>
++ </para>
++
++ <para>
++ If the Modified Version includes new front-matter sections or
++ appendices that qualify as Secondary Sections and contain no
++ material copied from the Document, you may at your option
++ designate some or all of these sections as invariant. To do
++ this, add their titles to the list of Invariant Sections in the
++ Modified Version's license notice. These titles must be
++ distinct from any other section titles.
++ </para>
++
++ <para>
++ You may add a section entitled "Endorsements", provided it
++ contains nothing but endorsements of your Modified Version by
++ various parties--for example, statements of peer review or that
++ the text has been approved by an organization as the
++ authoritative definition of a standard.
++ </para>
++
++ <para>
++ You may add a passage of up to five words as a Front-Cover
++ Text, and a passage of up to 25 words as a Back-Cover Text, to
++ the end of the list of Cover Texts in the Modified
++ Version. Only one passage of Front-Cover Text and one of
++ Back-Cover Text may be added by (or through arrangements made
++ by) any one entity. If the Document already includes a cover
++ text for the same cover, previously added by you or by
++ arrangement made by the same entity you are acting on behalf
++ of, you may not add another; but you may replace the old one,
++ on explicit permission from the previous publisher that added
++ the old one.
++ </para>
++
++ <para>
++ The author(s) and publisher(s) of the Document do not by this
++ License give permission to use their names for publicity for or
++ to assert or imply endorsement of any Modified Version.
++ </para>
++
++ </sect1>
++ <sect1><title>5. COMBINING DOCUMENTS</title>
++
++ <para>
++ You may combine the Document with other documents released
++ under this License, under the terms defined in section 4 above
++ for modified versions, provided that you include in the
++ combination all of the Invariant Sections of all of the
++ original documents, unmodified, and list them all as Invariant
++ Sections of your combined work in its license notice.
++ </para>
++
++ <para>
++ The combined work need only contain one copy of this License,
++ and multiple identical Invariant Sections may be replaced with
++ a single copy. If there are multiple Invariant Sections with
++ the same name but different contents, make the title of each
++ such section unique by adding at the end of it, in parentheses,
++ the name of the original author or publisher of that section if
++ known, or else a unique number. Make the same adjustment to the
++ section titles in the list of Invariant Sections in the license
++ notice of the combined work.
++ </para>
++
++ <para>
++ In the combination, you must combine any sections entitled
++ "History" in the various original documents, forming one
++ section entitled "History"; likewise combine any sections
++ entitled "Acknowledgements", and any sections entitled
++ "Dedications". You must delete all sections entitled
++ "Endorsements."
++ </para>
++
++ </sect1>
++ <sect1><title>6. COLLECTIONS OF DOCUMENTS</title>
++
++ <para>
++ You may make a collection consisting of the Document and other
++ documents released under this License, and replace the
++ individual copies of this License in the various documents with
++ a single copy that is included in the collection, provided that
++ you follow the rules of this License for verbatim copying of
++ each of the documents in all other respects.
++ </para>
++
++ <para>
++ You may extract a single document from such a collection, and
++ distribute it individually under this License, provided you
++ insert a copy of this License into the extracted document, and
++ follow this License in all other respects regarding verbatim
++ copying of that document.
++ </para>
++
++ </sect1>
++ <sect1><title>7. AGGREGATION WITH INDEPENDENT WORKS</title>
++
++ <para>
++ A compilation of the Document or its derivatives with other
++ separate and independent documents or works, in or on a volume
++ of a storage or distribution medium, does not as a whole count
++ as a Modified Version of the Document, provided no compilation
++ copyright is claimed for the compilation. Such a compilation is
++ called an "aggregate", and this License does not apply to the
++ other self-contained works thus compiled with the Document, on
++ account of their being thus compiled, if they are not
++ themselves derivative works of the Document.
++ </para>
++
++ <para>
++ If the Cover Text requirement of section 3 is applicable to
++ these copies of the Document, then if the Document is less than
++ one quarter of the entire aggregate, the Document's Cover Texts
++ may be placed on covers that surround only the Document within
++ the aggregate. Otherwise they must appear on covers around the
++ whole aggregate.
++ </para>
++
++ </sect1>
++ <sect1><title>8. TRANSLATION</title>
++
++ <para>
++ Translation is considered a kind of modification, so you may
++ distribute translations of the Document under the terms of
++ section 4. Replacing Invariant Sections with translations
++ requires special permission from their copyright holders, but
++ you may include translations of some or all Invariant Sections
++ in addition to the original versions of these Invariant
++ Sections. You may include a translation of this License
++ provided that you also include the original English version of
++ this License. In case of a disagreement between the translation
++ and the original English version of this License, the original
++ English version will prevail.
++ </para>
++
++ </sect1>
++ <sect1><title>9. TERMINATION</title>
++
++ <para>
++ You may not copy, modify, sublicense, or distribute the
++ Document except as expressly provided for under this
++ License. Any other attempt to copy, modify, sublicense or
++ distribute the Document is void, and will automatically
++ terminate your rights under this License. However, parties who
++ have received copies, or rights, from you under this License
++ will not have their licenses terminated so long as such parties
++ remain in full compliance.
++ </para>
++
++ </sect1>
++ <sect1><title>10. FUTURE REVISIONS OF THIS LICENSE</title>
++
++ <para>
++ The Free Software Foundation may publish new, revised versions
++ of the GNU Free Documentation License from time to time. Such
++ new versions will be similar in spirit to the present version,
++ but may differ in detail to address new problems or
++ concerns. See http://www.gnu.org/copyleft/.
++ </para>
++
++ <para>
++ Each version of the License is given a distinguishing version
++ number. If the Document specifies that a particular numbered
++ version of this License "or any later version" applies to it,
++ you have the option of following the terms and conditions
++ either of that specified version or of any later version that
++ has been published (not as a draft) by the Free Software
++ Foundation. If the Document does not specify a version number
++ of this License, you may choose any version ever published (not
++ as a draft) by the Free Software Foundation.
++ </para>
++
++ </sect1>
++ </appendix>
++</book>
+diff -uprN linux-2.4.25.old/include/linux/icmpv6.h linux-2.4.25/include/linux/icmpv6.h
+--- linux-2.4.25.old/include/linux/icmpv6.h 2003-08-25 12:44:44.000000000 +0100
++++ linux-2.4.25/include/linux/icmpv6.h 2004-06-26 11:29:29.000000000 +0100
+@@ -40,14 +40,16 @@ struct icmp6hdr {
+ struct icmpv6_nd_ra {
+ __u8 hop_limit;
+ #if defined(__LITTLE_ENDIAN_BITFIELD)
+- __u8 reserved:6,
++ __u8 reserved:5,
++ home_agent:1,
+ other:1,
+ managed:1;
+
+ #elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 managed:1,
+ other:1,
+- reserved:6;
++ home_agent:1,
++ reserved:5;
+ #else
+ #error "Please fix <asm/byteorder.h>"
+ #endif
+@@ -70,6 +72,7 @@ struct icmp6hdr {
+ #define icmp6_addrconf_managed icmp6_dataun.u_nd_ra.managed
+ #define icmp6_addrconf_other icmp6_dataun.u_nd_ra.other
+ #define icmp6_rt_lifetime icmp6_dataun.u_nd_ra.rt_lifetime
++#define icmp6_home_agent icmp6_dataun.u_nd_ra.home_agent
+ };
+
+
+diff -uprN linux-2.4.25.old/include/linux/if_arp.h linux-2.4.25/include/linux/if_arp.h
+--- linux-2.4.25.old/include/linux/if_arp.h 2002-02-25 19:38:13.000000000 +0000
++++ linux-2.4.25/include/linux/if_arp.h 2004-06-26 11:29:29.000000000 +0100
+@@ -59,7 +59,7 @@
+ #define ARPHRD_RAWHDLC 518 /* Raw HDLC */
+
+ #define ARPHRD_TUNNEL 768 /* IPIP tunnel */
+-#define ARPHRD_TUNNEL6 769 /* IPIP6 tunnel */
++#define ARPHRD_TUNNEL6 769 /* IP6IP6 tunnel */
+ #define ARPHRD_FRAD 770 /* Frame Relay Access Device */
+ #define ARPHRD_SKIP 771 /* SKIP vif */
+ #define ARPHRD_LOOPBACK 772 /* Loopback device */
+diff -uprN linux-2.4.25.old/include/linux/in6.h linux-2.4.25/include/linux/in6.h
+--- linux-2.4.25.old/include/linux/in6.h 2003-06-13 15:51:38.000000000 +0100
++++ linux-2.4.25/include/linux/in6.h 2004-06-26 11:29:29.000000000 +0100
+@@ -142,6 +142,11 @@ struct in6_flowlabel_req
+ #define IPV6_TLV_JUMBO 194
+
+ /*
++ * Mobile IPv6 TLV options.
++ */
++#define MIPV6_TLV_HOMEADDR 201
++
++/*
+ * IPV6 socket options
+ */
+
+diff -uprN linux-2.4.25.old/include/linux/ipv6.h linux-2.4.25/include/linux/ipv6.h
+--- linux-2.4.25.old/include/linux/ipv6.h 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/include/linux/ipv6.h 2004-06-26 11:29:29.000000000 +0100
+@@ -29,6 +29,7 @@ struct in6_ifreq {
+
+ #define IPV6_SRCRT_STRICT 0x01 /* this hop must be a neighbor */
+ #define IPV6_SRCRT_TYPE_0 0 /* IPv6 type 0 Routing Header */
++#define IPV6_SRCRT_TYPE_2 2 /* type 2 for Mobile IPv6 */
+
+ /*
+ * routing header
+@@ -71,6 +72,19 @@ struct rt0_hdr {
+ struct in6_addr addr[0];
+
+ #define rt0_type rt_hdr.type
++
++};
++
++/*
++ * routing header type 2
++ */
++
++struct rt2_hdr {
++ struct ipv6_rt_hdr rt_hdr;
++ __u32 reserved;
++ struct in6_addr addr;
++
++#define rt2_type rt_hdr.type;
+ };
+
+ /*
+@@ -156,12 +170,16 @@ enum {
+ struct inet6_skb_parm
+ {
+ int iif;
++ __u8 mipv6_flags;
+ __u16 ra;
+ __u16 hop;
+ __u16 auth;
+ __u16 dst0;
+ __u16 srcrt;
++ __u16 srcrt2;
++ __u16 hao;
+ __u16 dst1;
++ struct in6_addr hoa;
+ };
+
+ #endif
+diff -uprN linux-2.4.25.old/include/linux/ipv6_route.h linux-2.4.25/include/linux/ipv6_route.h
+--- linux-2.4.25.old/include/linux/ipv6_route.h 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/include/linux/ipv6_route.h 2004-06-26 11:29:29.000000000 +0100
+@@ -33,6 +33,7 @@ enum
+ #define RTF_CACHE 0x01000000 /* cache entry */
+ #define RTF_FLOW 0x02000000 /* flow significant route */
+ #define RTF_POLICY 0x04000000 /* policy route */
++#define RTF_MOBILENODE 0x10000000 /* for routing to Mobile Node */
+
+ #define RTF_LOCAL 0x80000000
+
+diff -uprN linux-2.4.25.old/include/linux/ipv6_tunnel.h linux-2.4.25/include/linux/ipv6_tunnel.h
+--- linux-2.4.25.old/include/linux/ipv6_tunnel.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/include/linux/ipv6_tunnel.h 2004-06-26 11:29:29.000000000 +0100
+@@ -0,0 +1,34 @@
++/*
++ * $Id$
++ */
++
++#ifndef _IPV6_TUNNEL_H
++#define _IPV6_TUNNEL_H
++
++#define IPV6_TLV_TNL_ENCAP_LIMIT 4
++#define IPV6_DEFAULT_TNL_ENCAP_LIMIT 4
++
++/* don't add encapsulation limit if one isn't present in inner packet */
++#define IP6_TNL_F_IGN_ENCAP_LIMIT 0x1
++/* copy the traffic class field from the inner packet */
++#define IP6_TNL_F_USE_ORIG_TCLASS 0x2
++/* copy the flowlabel from the inner packet */
++#define IP6_TNL_F_USE_ORIG_FLOWLABEL 0x4
++/* created and maintained from within the kernel */
++#define IP6_TNL_F_KERNEL_DEV 0x8
++/* being used for Mobile IPv6 */
++#define IP6_TNL_F_MIP6_DEV 0x10
++
++struct ip6_tnl_parm {
++ char name[IFNAMSIZ]; /* name of tunnel device */
++ int link; /* ifindex of underlying L2 interface */
++ __u8 proto; /* tunnel protocol */
++ __u8 encap_limit; /* encapsulation limit for tunnel */
++ __u8 hop_limit; /* hop limit for tunnel */
++ __u32 flowinfo; /* traffic class and flowlabel for tunnel */
++ __u32 flags; /* tunnel flags */
++ struct in6_addr laddr; /* local tunnel end-point address */
++ struct in6_addr raddr; /* remote tunnel end-point address */
++};
++
++#endif
+diff -uprN linux-2.4.25.old/include/linux/rtnetlink.h linux-2.4.25/include/linux/rtnetlink.h
+--- linux-2.4.25.old/include/linux/rtnetlink.h 2004-02-18 13:36:32.000000000 +0000
++++ linux-2.4.25/include/linux/rtnetlink.h 2004-06-26 11:29:29.000000000 +0100
+@@ -309,15 +309,17 @@ enum
+ IFA_LABEL,
+ IFA_BROADCAST,
+ IFA_ANYCAST,
+- IFA_CACHEINFO
++ IFA_CACHEINFO,
++ IFA_HOMEAGENT
+ };
+
+-#define IFA_MAX IFA_CACHEINFO
++#define IFA_MAX IFA_HOMEAGENT
+
+ /* ifa_flags */
+
+ #define IFA_F_SECONDARY 0x01
+
++#define IFA_F_HOMEADDR 0x10
+ #define IFA_F_DEPRECATED 0x20
+ #define IFA_F_TENTATIVE 0x40
+ #define IFA_F_PERMANENT 0x80
+diff -uprN linux-2.4.25.old/include/linux/skbuff.h linux-2.4.25/include/linux/skbuff.h
+--- linux-2.4.25.old/include/linux/skbuff.h 2003-08-25 12:44:44.000000000 +0100
++++ linux-2.4.25/include/linux/skbuff.h 2004-06-26 11:29:29.000000000 +0100
+@@ -177,7 +177,7 @@ struct sk_buff {
+ * want to keep them across layers you have to do a skb_clone()
+ * first. This is owned by whoever has the skb queued ATM.
+ */
+- char cb[48];
++ char cb[64];
+
+ unsigned int len; /* Length of actual data */
+ unsigned int data_len;
+diff -uprN linux-2.4.25.old/include/linux/sysctl.h linux-2.4.25/include/linux/sysctl.h
+--- linux-2.4.25.old/include/linux/sysctl.h 2004-02-18 13:36:32.000000000 +0000
++++ linux-2.4.25/include/linux/sysctl.h 2004-06-26 11:29:29.000000000 +0100
+@@ -387,7 +387,24 @@ enum {
+ NET_IPV6_NEIGH=17,
+ NET_IPV6_ROUTE=18,
+ NET_IPV6_ICMP=19,
+- NET_IPV6_BINDV6ONLY=20
++ NET_IPV6_BINDV6ONLY=20,
++ NET_IPV6_MOBILITY=26
++};
++
++/* /proc/sys/net/ipv6/mobility */
++enum {
++ NET_IPV6_MOBILITY_DEBUG=1,
++ NET_IPV6_MOBILITY_TUNNEL_SITELOCAL=2,
++ NET_IPV6_MOBILITY_ROUTER_SOLICITATION_MAX_SENDTIME=3,
++ NET_IPV6_MOBILITY_ROUTER_REACH=4,
++ NET_IPV6_MOBILITY_MDETECT_MECHANISM=5,
++ NET_IPV6_MOBILITY_RETROUT=6,
++ NET_IPV6_MOBILITY_MAX_TNLS=7,
++ NET_IPV6_MOBILITY_MIN_TNLS=8,
++ NET_IPV6_MOBILITY_BINDING_REFRESH=9,
++ NET_IPV6_MOBILITY_BU_F_LLADDR=10,
++ NET_IPV6_MOBILITY_BU_F_KEYMGM=11,
++ NET_IPV6_MOBILITY_BU_F_CN_ACK=12
+ };
+
+ enum {
+diff -uprN linux-2.4.25.old/include/net/addrconf.h linux-2.4.25/include/net/addrconf.h
+--- linux-2.4.25.old/include/net/addrconf.h 2003-08-25 12:44:44.000000000 +0100
++++ linux-2.4.25/include/net/addrconf.h 2004-06-26 11:29:29.000000000 +0100
+@@ -16,9 +16,11 @@ struct prefix_info {
+ #if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 onlink : 1,
+ autoconf : 1,
+- reserved : 6;
++ router_address : 1,
++ reserved : 5;
+ #elif defined(__LITTLE_ENDIAN_BITFIELD)
+- __u8 reserved : 6,
++ __u8 reserved : 5,
++ router_address : 1,
+ autoconf : 1,
+ onlink : 1;
+ #else
+@@ -55,6 +57,7 @@ extern int ipv6_chk_addr(struct in6_ad
+ struct net_device *dev);
+ extern struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr,
+ struct net_device *dev);
++extern void ipv6_del_addr(struct inet6_ifaddr *ifp);
+ extern int ipv6_get_saddr(struct dst_entry *dst,
+ struct in6_addr *daddr,
+ struct in6_addr *saddr);
+@@ -85,7 +88,9 @@ extern void ipv6_mc_up(struct inet6_dev
+ extern void ipv6_mc_down(struct inet6_dev *idev);
+ extern void ipv6_mc_init_dev(struct inet6_dev *idev);
+ extern void ipv6_mc_destroy_dev(struct inet6_dev *idev);
++extern void addrconf_dad_start(struct inet6_ifaddr *ifp, int flags);
+ extern void addrconf_dad_failure(struct inet6_ifaddr *ifp);
++extern void addrconf_dad_completed(struct inet6_ifaddr *ifp);
+
+ extern int ipv6_chk_mcast_addr(struct net_device *dev, struct in6_addr *group,
+ struct in6_addr *src_addr);
+@@ -116,6 +121,9 @@ extern int ipv6_chk_acast_addr(struct
+ extern int register_inet6addr_notifier(struct notifier_block *nb);
+ extern int unregister_inet6addr_notifier(struct notifier_block *nb);
+
++extern int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
++extern int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev);
++
+ static inline struct inet6_dev *
+ __in6_dev_get(struct net_device *dev)
+ {
+diff -uprN linux-2.4.25.old/include/net/ip6_route.h linux-2.4.25/include/net/ip6_route.h
+--- linux-2.4.25.old/include/net/ip6_route.h 2003-06-13 15:51:39.000000000 +0100
++++ linux-2.4.25/include/net/ip6_route.h 2004-06-26 11:29:29.000000000 +0100
+@@ -2,6 +2,7 @@
+ #define _NET_IP6_ROUTE_H
+
+ #define IP6_RT_PRIO_FW 16
++#define IP6_RT_PRIO_MIPV6 64
+ #define IP6_RT_PRIO_USER 1024
+ #define IP6_RT_PRIO_ADDRCONF 256
+ #define IP6_RT_PRIO_KERN 512
+@@ -40,6 +41,9 @@ extern int ipv6_route_ioctl(unsigned i
+
+ extern int ip6_route_add(struct in6_rtmsg *rtmsg,
+ struct nlmsghdr *);
++
++extern int ip6_route_del(struct in6_rtmsg *rtmsg,
++ struct nlmsghdr *);
+ extern int ip6_del_rt(struct rt6_info *,
+ struct nlmsghdr *);
+
+@@ -99,7 +103,8 @@ extern rwlock_t rt6_lock;
+ */
+
+ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+- struct in6_addr *daddr)
++ struct in6_addr *daddr,
++ struct in6_addr *saddr)
+ {
+ struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
+ struct rt6_info *rt = (struct rt6_info *) dst;
+@@ -107,6 +112,9 @@ static inline void ip6_dst_store(struct
+ write_lock(&sk->dst_lock);
+ __sk_dst_set(sk, dst);
+ np->daddr_cache = daddr;
++#ifdef CONFIG_IPV6_SUBTREES
++ np->saddr_cache = saddr;
++#endif
+ np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ write_unlock(&sk->dst_lock);
+ }
+diff -uprN linux-2.4.25.old/include/net/ipv6.h linux-2.4.25/include/net/ipv6.h
+--- linux-2.4.25.old/include/net/ipv6.h 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/include/net/ipv6.h 2004-06-26 11:29:29.000000000 +0100
+@@ -37,6 +37,7 @@
+ #define NEXTHDR_ICMP 58 /* ICMP for IPv6. */
+ #define NEXTHDR_NONE 59 /* No next header */
+ #define NEXTHDR_DEST 60 /* Destination options header. */
++#define NEXTHDR_MH 135 /* Mobility header, RFC 3775 */
+
+ #define NEXTHDR_MAX 255
+
+@@ -145,9 +146,12 @@ struct ipv6_txoptions
+ __u16 opt_flen; /* after fragment hdr */
+ __u16 opt_nflen; /* before fragment hdr */
+
++ __u8 mipv6_flags; /* flags set by MIPv6 */
++
+ struct ipv6_opt_hdr *hopopt;
+ struct ipv6_opt_hdr *dst0opt;
+- struct ipv6_rt_hdr *srcrt; /* Routing Header */
++ struct ipv6_rt_hdr *srcrt; /* Routing Header Type 0 */
++ struct ipv6_rt_hdr *srcrt2; /* Routing Header Type 2 */
+ struct ipv6_opt_hdr *auth;
+ struct ipv6_opt_hdr *dst1opt;
+
+@@ -256,6 +260,38 @@ static inline int ipv6_addr_any(const st
+ a->s6_addr32[2] | a->s6_addr32[3] ) == 0);
+ }
+
++static inline void ipv6_addr_prefix(struct in6_addr *pfx,
++ const struct in6_addr *addr, int plen)
++{
++ /* caller must guarantee 0 <= plen <= 128 */
++ int o = plen >> 3,
++ b = plen & 0x7;
++
++ memcpy(pfx->s6_addr, addr, o);
++ if (b != 0) {
++ pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
++ o++;
++ }
++ if (o < 16)
++ memset(pfx->s6_addr + o, 0, 16 - o);
++}
++
++static inline int ipv6_prefix_cmp(const struct in6_addr *p1,
++ const struct in6_addr *p2, int plen)
++{
++ int b = plen&0x7;
++ int o = plen>>3;
++ int res = 0;
++
++ if (o > 0)
++ res = memcmp(&p1->s6_addr[0], &p2->s6_addr[0], o);
++ if (res == 0 && b > 0) {
++ __u8 m = (0xff00 >> b) & 0xff;
++ res = (p1->s6_addr[o] & m) - (p2->s6_addr[o] & m);
++ }
++ return res;
++}
++
+ /*
+ * Prototypes exported by ipv6
+ */
+diff -uprN linux-2.4.25.old/include/net/ipv6_tunnel.h linux-2.4.25/include/net/ipv6_tunnel.h
+--- linux-2.4.25.old/include/net/ipv6_tunnel.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/include/net/ipv6_tunnel.h 2004-06-26 11:29:29.000000000 +0100
+@@ -0,0 +1,92 @@
++/*
++ * $Id$
++ */
++
++#ifndef _NET_IPV6_TUNNEL_H
++#define _NET_IPV6_TUNNEL_H
++
++#include <linux/ipv6.h>
++#include <linux/netdevice.h>
++#include <linux/ipv6_tunnel.h>
++#include <linux/skbuff.h>
++#include <asm/atomic.h>
++
++/* capable of sending packets */
++#define IP6_TNL_F_CAP_XMIT 0x10000
++/* capable of receiving packets */
++#define IP6_TNL_F_CAP_RCV 0x20000
++
++#define IP6_TNL_MAX 128
++
++/* IPv6 tunnel */
++
++struct ip6_tnl {
++ struct ip6_tnl *next; /* next tunnel in list */
++ struct net_device *dev; /* virtual device associated with tunnel */
++ struct net_device_stats stat; /* statistics for tunnel device */
++ int recursion; /* depth of hard_start_xmit recursion */
++ struct ip6_tnl_parm parms; /* tunnel configuration parameters */
++ struct flowi fl; /* flowi template for xmit */
++ atomic_t refcnt; /* nr of identical tunnels used by kernel */
++ struct socket *sock;
++};
++
++#define IP6_TNL_PRE_ENCAP 0
++#define IP6_TNL_PRE_DECAP 1
++#define IP6_TNL_MAXHOOKS 2
++
++#define IP6_TNL_DROP 0
++#define IP6_TNL_ACCEPT 1
++
++typedef int ip6_tnl_hookfn(struct ip6_tnl *t, struct sk_buff *skb);
++
++struct ip6_tnl_hook_ops {
++ struct list_head list;
++ unsigned int hooknum;
++ int priority;
++ ip6_tnl_hookfn *hook;
++};
++
++enum ip6_tnl_hook_priorities {
++ IP6_TNL_PRI_FIRST = INT_MIN,
++ IP6_TNL_PRI_LAST = INT_MAX
++};
++
++/* Tunnel encapsulation limit destination sub-option */
++
++struct ipv6_tlv_tnl_enc_lim {
++ __u8 type; /* type-code for option */
++ __u8 length; /* option length */
++ __u8 encap_limit; /* tunnel encapsulation limit */
++} __attribute__ ((packed));
++
++#ifdef __KERNEL__
++extern int ip6ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt);
++
++extern struct ip6_tnl *ip6ip6_tnl_lookup(struct in6_addr *remote,
++ struct in6_addr *local);
++
++void ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p);
++
++extern int ip6ip6_kernel_tnl_add(struct ip6_tnl_parm *p);
++
++extern int ip6ip6_kernel_tnl_del(struct ip6_tnl *t);
++
++extern unsigned int ip6ip6_tnl_inc_max_kdev_count(unsigned int n);
++
++extern unsigned int ip6ip6_tnl_dec_max_kdev_count(unsigned int n);
++
++extern unsigned int ip6ip6_tnl_inc_min_kdev_count(unsigned int n);
++
++extern unsigned int ip6ip6_tnl_dec_min_kdev_count(unsigned int n);
++
++extern void ip6ip6_tnl_register_hook(struct ip6_tnl_hook_ops *reg);
++
++extern void ip6ip6_tnl_unregister_hook(struct ip6_tnl_hook_ops *reg);
++
++#ifdef CONFIG_IPV6_TUNNEL
++extern int __init ip6_tunnel_init(void);
++extern void ip6_tunnel_cleanup(void);
++#endif
++#endif
++#endif
+diff -uprN linux-2.4.25.old/include/net/mipglue.h linux-2.4.25/include/net/mipglue.h
+--- linux-2.4.25.old/include/net/mipglue.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/include/net/mipglue.h 2004-06-26 11:29:29.000000000 +0100
+@@ -0,0 +1,266 @@
++/*
++ * Glue for Mobility support integration to IPv6
++ *
++ * Authors:
++ * Antti Tuominen <ajtuomin@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#ifndef _NET_MIPGLUE_H
++#define _NET_MIPGLUE_H
++
++#ifndef USE_IPV6_MOBILITY
++#if defined(CONFIG_IPV6_MOBILITY) || defined(CONFIG_IPV6_MOBILITY_MODULE)
++#define USE_IPV6_MOBILITY
++#endif
++#endif
++
++/* symbols to indicate whether destination options received should take
++ * effect or not (see exthdrs.c, procrcv.c)
++ */
++#define MIPV6_DSTOPTS_ACCEPT 1
++#define MIPV6_DSTOPTS_DISCARD 0
++
++#define MIPV6_IGN_RTR 0
++#define MIPV6_ADD_RTR 1
++#define MIPV6_CHG_RTR 2
++
++/* MIPV6: Approximate maximum for mobile IPv6 options and headers */
++#define MIPV6_HEADERS 48
++
++#ifdef __KERNEL__
++#include <net/mipv6.h>
++#include <linux/slab.h>
++#include <net/ipv6.h>
++
++struct sk_buff;
++struct ndisc_options;
++struct sock;
++struct ipv6_txoptions;
++struct flowi;
++struct dst_entry;
++struct in6_addr;
++struct inet6_ifaddr;
++
++#ifdef USE_IPV6_MOBILITY
++
++/* calls a procedure from mipv6-module */
++#define MIPV6_CALLPROC(X) if(mipv6_functions.X) mipv6_functions.X
++
++/* calls a function from mipv6-module, default-value if function not defined
++ */
++#define MIPV6_CALLFUNC(X,Y) (!mipv6_functions.X)?(Y):mipv6_functions.X
++
++/* sets a handler-function to process a call */
++#define MIPV6_SETCALL(X,Y) if(mipv6_functions.X) printk("mipv6: Warning, function assigned twice!\n"); \
++ mipv6_functions.X = Y
++#define MIPV6_RESETCALL(X) mipv6_functions.X = NULL
++
++/* pointers to mipv6 callable functions */
++struct mipv6_callable_functions {
++ void (*mipv6_initialize_dstopt_rcv) (struct sk_buff *skb);
++ int (*mipv6_finalize_dstopt_rcv) (int process);
++ int (*mipv6_handle_homeaddr) (struct sk_buff *skb, int optoff);
++ int (*mipv6_ra_rcv) (struct sk_buff *skb,
++ struct ndisc_options *ndopts);
++ void (*mipv6_icmp_rcv) (struct sk_buff *skb);
++ struct ipv6_txoptions * (*mipv6_modify_txoptions) (
++ struct sock *sk,
++ struct sk_buff *skb,
++ struct ipv6_txoptions *opt,
++ struct flowi *fl,
++ struct dst_entry **dst);
++ void (*mipv6_set_home) (int ifindex, struct in6_addr *homeaddr,
++ int plen, struct in6_addr *homeagent,
++ int plen2);
++ void (*mipv6_get_home_address) (struct in6_addr *home_addr);
++ void (*mipv6_get_care_of_address)(struct in6_addr *homeaddr,
++ struct in6_addr *coa);
++ int (*mipv6_is_home_addr)(struct in6_addr *addr);
++ void (*mipv6_change_router)(void);
++ void (*mipv6_check_dad)(struct in6_addr *home_addr);
++ void (*mipv6_icmp_swap_addrs)(struct sk_buff *skb);
++ int (*mipv6_forward)(struct sk_buff *skb);
++ int (*mipv6_mn_ha_probe)(struct inet6_ifaddr *ifp, u8 *lladdr);
++};
++
++extern struct mipv6_callable_functions mipv6_functions;
++
++extern void mipv6_invalidate_calls(void);
++
++extern int mipv6_handle_dstopt(struct sk_buff *skb, int optoff);
++
++static inline int
++ndisc_mip_mn_ha_probe(struct inet6_ifaddr *ifp, u8 *lladdr)
++{
++ return MIPV6_CALLFUNC(mipv6_mn_ha_probe, 0)(ifp, lladdr);
++}
++
++/* Must only be called for HA, no checks here */
++static inline int ip6_mipv6_forward(struct sk_buff *skb)
++{
++ return MIPV6_CALLFUNC(mipv6_forward, 0)(skb);
++}
++
++/*
++ * Avoid adding new default routers if the old one is still in use
++ */
++
++static inline int ndisc_mipv6_ra_rcv(struct sk_buff *skb,
++ struct ndisc_options *ndopts)
++{
++ return MIPV6_CALLFUNC(mipv6_ra_rcv, MIPV6_ADD_RTR)(skb, ndopts);
++}
++
++static inline int ipv6_chk_mip_home_addr(struct in6_addr *addr)
++{
++ return MIPV6_CALLFUNC(mipv6_is_home_addr, 0)(addr);
++}
++
++static inline void ndisc_mipv6_change_router(int change_rtr)
++{
++ if (change_rtr == MIPV6_CHG_RTR)
++ MIPV6_CALLPROC(mipv6_change_router)();
++}
++
++static inline void ndisc_check_mipv6_dad(struct in6_addr *target)
++{
++ MIPV6_CALLPROC(mipv6_check_dad)(target);
++}
++
++static inline void icmpv6_swap_mipv6_addrs(struct sk_buff *skb)
++{
++ MIPV6_CALLPROC(mipv6_icmp_swap_addrs)(skb);
++}
++
++static inline void mipv6_icmp_rcv(struct sk_buff *skb)
++{
++ MIPV6_CALLPROC(mipv6_icmp_rcv)(skb);
++}
++
++static inline int tcp_v6_get_mipv6_header_len(void)
++{
++ return MIPV6_HEADERS;
++}
++
++static inline struct in6_addr *
++mipv6_get_fake_hdr_daddr(struct in6_addr *hdaddr, struct in6_addr *daddr)
++{
++ return daddr;
++}
++
++static inline void
++addrconf_set_mipv6_mn_home(int ifindex, struct in6_addr *homeaddr, int plen,
++ struct in6_addr *homeagent, int plen2)
++{
++ MIPV6_CALLPROC(mipv6_set_home)(ifindex, homeaddr, plen, homeagent, plen2);
++}
++
++static inline void addrconf_get_mipv6_home_address(struct in6_addr *saddr)
++{
++ MIPV6_CALLPROC(mipv6_get_home_address)(saddr);
++}
++
++static inline struct ipv6_txoptions *
++ip6_add_mipv6_txoptions(struct sock *sk, struct sk_buff *skb,
++ struct ipv6_txoptions *opt, struct flowi *fl,
++ struct dst_entry **dst)
++{
++ return MIPV6_CALLFUNC(mipv6_modify_txoptions, opt)(sk, skb, opt, fl, dst);
++
++}
++
++static inline void
++ip6_mark_mipv6_packet(struct ipv6_txoptions *txopt, struct sk_buff *skb)
++{
++ struct inet6_skb_parm *opt;
++ if (txopt) {
++ opt = (struct inet6_skb_parm *)skb->cb;
++ opt->mipv6_flags = txopt->mipv6_flags;
++ }
++}
++
++static inline void
++ip6_free_mipv6_txoptions(struct ipv6_txoptions *opt,
++ struct ipv6_txoptions *orig_opt)
++{
++ if (opt && opt != orig_opt)
++ kfree(opt);
++}
++
++#else /* USE_IPV6_MOBILITY */
++
++#define mipv6_handle_dstopt ip6_tlvopt_unknown
++
++static inline int
++ndisc_mip_mn_ha_probe(struct inet6_ifaddr *ifp, u8 *lladdr)
++{
++ return 0;
++}
++
++static inline int ip6_mipv6_forward(struct sk_buff *skb)
++{
++ return 0;
++}
++
++static inline int ndisc_mipv6_ra_rcv(struct sk_buff *skb,
++ struct ndisc_options *ndopts)
++{
++ return MIPV6_ADD_RTR;
++}
++
++static inline int ipv6_chk_mip_home_addr(struct in6_addr *addr)
++{
++ return 0;
++}
++
++static inline void ndisc_mipv6_change_router(int change_rtr) {}
++
++static inline void ndisc_check_mipv6_dad(struct in6_addr *target) {}
++
++static inline void icmpv6_swap_mipv6_addrs(struct sk_buff *skb) {}
++
++static inline void mipv6_icmp_rcv(struct sk_buff *skb) {}
++
++static inline int tcp_v6_get_mipv6_header_len(void)
++{
++ return 0;
++}
++
++static inline struct in6_addr *
++mipv6_get_fake_hdr_daddr(struct in6_addr *hdaddr, struct in6_addr *daddr)
++{
++ return hdaddr;
++}
++
++static inline void
++addrconf_set_mipv6_mn_home(int ifindex, struct in6_addr *homeaddr, int plen,
++ struct in6_addr *homeagent, int plen2) {}
++
++static inline void addrconf_get_mipv6_home_address(struct in6_addr *saddr) {}
++
++static inline struct ipv6_txoptions *
++ip6_add_mipv6_txoptions(struct sock *sk, struct sk_buff *skb,
++ struct ipv6_txoptions *opt, struct flowi *fl,
++ struct dst_entry **dst)
++{
++ return opt;
++}
++
++static inline void
++ip6_mark_mipv6_packet(struct ipv6_txoptions *txopt, struct sk_buff *skb) {}
++
++static inline void
++ip6_free_mipv6_txoptions(struct ipv6_txoptions *opt,
++ struct ipv6_txoptions *orig_opt) {}
++
++#endif /* USE_IPV6_MOBILITY */
++#endif /* __KERNEL__ */
++#endif /* _NET_MIPGLUE_H */
+diff -uprN linux-2.4.25.old/include/net/mipv6.h linux-2.4.25/include/net/mipv6.h
+--- linux-2.4.25.old/include/net/mipv6.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/include/net/mipv6.h 2004-06-26 11:29:29.000000000 +0100
+@@ -0,0 +1,258 @@
++/*
++ * Mobile IPv6 header-file
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#ifndef _NET_MIPV6_H
++#define _NET_MIPV6_H
++
++#include <linux/types.h>
++#include <asm/byteorder.h>
++#include <linux/in6.h>
++
++/*
++ *
++ * Mobile IPv6 Protocol constants
++ *
++ */
++#define DHAAD_RETRIES 4 /* transmissions */
++#define INITIAL_BINDACK_TIMEOUT 1 /* seconds */
++#define INITIAL_DHAAD_TIMEOUT 3 /* seconds */
++#define INITIAL_SOLICIT_TIMER 3 /* seconds */
++#define MAX_BINDACK_TIMEOUT 32 /* seconds */
++#define MAX_NONCE_LIFE 240 /* seconds */
++#define MAX_TOKEN_LIFE 210 /* seconds */
++#define MAX_RR_BINDING_LIFE 420 /* seconds */
++#define MAX_UPDATE_RATE 3 /* 1/s (min delay=1s) */
++#define PREFIX_ADV_RETRIES 3 /* transmissions */
++#define PREFIX_ADV_TIMEOUT 3 /* seconds */
++
++#define MAX_FAST_UPDATES 5 /* transmissions */
++#define MAX_PFX_ADV_DELAY 1000 /* seconds */
++#define SLOW_UPDATE_RATE 10 /* 1/10s (max delay=10s)*/
++#define INITIAL_BINDACK_DAD_TIMEOUT 2 /* seconds */
++
++/*
++ *
++ * Mobile IPv6 (RFC 3775) Protocol configuration variable defaults
++ *
++ */
++#define DefHomeRtrAdvInterval 1000 /* seconds */
++#define DefMaxMobPfxAdvInterval 86400 /* seconds */
++#define DefMinDelayBetweenRAs 3 /* seconds (min 0.03) */
++#define DefMinMobPfxAdvInterval 600 /* seconds */
++#define DefInitialBindackTimeoutFirstReg 1.5 /* seconds */
++
++/* This is not actually specified in the draft, but is needed to avoid
++ * prefix solicitation storm when valid lifetime of a prefix is smaller
++ * than MAX_PFX_ADV_DELAY
++ */
++#define MIN_PFX_SOL_DELAY 5 /* seconds */
++
++/* Mobile IPv6 ICMP types */
++/*
++ * Official numbers from RFC 3775
++ */
++#define MIPV6_DHAAD_REQUEST 144
++#define MIPV6_DHAAD_REPLY 145
++#define MIPV6_PREFIX_SOLICIT 146
++#define MIPV6_PREFIX_ADV 147
++
++/* Binding update flag codes */
++#define MIPV6_BU_F_ACK 0x80
++#define MIPV6_BU_F_HOME 0x40
++#define MIPV6_BU_F_LLADDR 0x20
++#define MIPV6_BU_F_KEYMGM 0x10
++
++/* Binding acknowledgment flag codes */
++#define MIPV6_BA_F_KEYMGM 0x80
++
++/* Binding error status */
++#define MIPV6_BE_HAO_WO_BINDING 1
++#define MIPV6_BE_UNKNOWN_MH_TYPE 2
++
++/* Mobility Header */
++struct mipv6_mh
++{
++ __u8 payload; /* Payload Protocol */
++ __u8 length; /* MH Length */
++ __u8 type; /* MH Type */
++ __u8 reserved; /* Reserved */
++ __u16 checksum; /* Checksum */
++ __u8 data[0]; /* Message specific data */
++} __attribute__ ((packed));
++
++/* Mobility Header type */
++#define IPPROTO_MOBILITY 135 /* RFC 3775*/
++/* Mobility Header Message Types */
++
++#define MIPV6_MH_BRR 0
++#define MIPV6_MH_HOTI 1
++#define MIPV6_MH_COTI 2
++#define MIPV6_MH_HOT 3
++#define MIPV6_MH_COT 4
++#define MIPV6_MH_BU 5
++#define MIPV6_MH_BA 6
++#define MIPV6_MH_BE 7
++
++/*
++ * Status codes for Binding Acknowledgements
++ */
++#define SUCCESS 0
++#define REASON_UNSPECIFIED 128
++#define ADMINISTRATIVELY_PROHIBITED 129
++#define INSUFFICIENT_RESOURCES 130
++#define HOME_REGISTRATION_NOT_SUPPORTED 131
++#define NOT_HOME_SUBNET 132
++#define NOT_HA_FOR_MN 133
++#define DUPLICATE_ADDR_DETECT_FAIL 134
++#define SEQUENCE_NUMBER_OUT_OF_WINDOW 135
++#define EXPIRED_HOME_NONCE_INDEX 136
++#define EXPIRED_CAREOF_NONCE_INDEX 137
++#define EXPIRED_NONCES 138
++#define REG_TYPE_CHANGE_FORBIDDEN 139
++/*
++ * Values for mipv6_flags in struct inet6_skb_parm
++ */
++
++#define MIPV6_RCV_TUNNEL 0x1
++#define MIPV6_SND_HAO 0x2
++#define MIPV6_SND_BU 0x4
++
++/*
++ * Mobility Header Message structures
++ */
++
++struct mipv6_mh_brr
++{
++ __u16 reserved;
++ /* Mobility options */
++} __attribute__ ((packed));
++
++struct mipv6_mh_bu
++{
++ __u16 sequence; /* sequence number of BU */
++ __u8 flags; /* flags */
++ __u8 reserved; /* reserved bits */
++ __u16 lifetime; /* lifetime of BU */
++ /* Mobility options */
++} __attribute__ ((packed));
++
++struct mipv6_mh_ba
++{
++ __u8 status; /* statuscode */
++ __u8 reserved; /* reserved bits */
++ __u16 sequence; /* sequence number of BA */
++ __u16 lifetime; /* lifetime in CN's bcache */
++ /* Mobility options */
++} __attribute__ ((packed));
++
++struct mipv6_mh_be
++{
++ __u8 status;
++ __u8 reserved;
++ struct in6_addr home_addr;
++ /* Mobility options */
++} __attribute__ ((packed));
++
++struct mipv6_mh_addr_ti
++{
++ __u16 reserved; /* Reserved */
++ u_int8_t init_cookie[8]; /* HoT/CoT Init Cookie */
++ /* Mobility options */
++} __attribute__ ((packed));
++
++struct mipv6_mh_addr_test
++{
++ __u16 nonce_index; /* Home/Care-of Nonce Index */
++ u_int8_t init_cookie[8]; /* HoT/CoT Init Cookie */
++ u_int8_t kgen_token[8]; /* Home/Care-of key generation token */
++ /* Mobility options */
++} __attribute__ ((packed));
++
++/*
++ * Mobility Options for various MH types.
++ */
++#define MIPV6_OPT_PAD1 0x00
++#define MIPV6_OPT_PADN 0x01
++#define MIPV6_OPT_BIND_REFRESH_ADVICE 0x02
++#define MIPV6_OPT_ALTERNATE_COA 0x03
++#define MIPV6_OPT_NONCE_INDICES 0x04
++#define MIPV6_OPT_AUTH_DATA 0x05
++
++#define MIPV6_SEQ_GT(x,y) \
++ ((short int)(((__u16)(x)) - ((__u16)(y))) > 0)
++
++/*
++ * Mobility Option structures
++ */
++
++struct mipv6_mo
++{
++ __u8 type;
++ __u8 length;
++ __u8 value[0]; /* type specific data */
++} __attribute__ ((packed));
++
++struct mipv6_mo_pad1
++{
++ __u8 type;
++} __attribute__ ((packed));
++
++struct mipv6_mo_padn
++{
++ __u8 type;
++ __u8 length;
++ __u8 data[0];
++} __attribute__ ((packed));
++
++struct mipv6_mo_alt_coa
++{
++ __u8 type;
++ __u8 length;
++ struct in6_addr addr; /* alternate care-of-address */
++} __attribute__ ((packed));
++
++struct mipv6_mo_nonce_indices
++{
++ __u8 type;
++ __u8 length;
++ __u16 home_nonce_i; /* Home Nonce Index */
++ __u16 careof_nonce_i; /* Careof Nonce Index */
++} __attribute__ ((packed));
++
++struct mipv6_mo_bauth_data
++{
++ __u8 type;
++ __u8 length;
++ __u8 data[0];
++} __attribute__ ((packed));
++
++struct mipv6_mo_br_advice
++{
++ __u8 type;
++ __u8 length;
++ __u16 refresh_interval; /* Refresh Interval */
++} __attribute__ ((packed));
++
++/*
++ * Home Address Destination Option structure
++ */
++struct mipv6_dstopt_homeaddr
++{
++ __u8 type; /* type-code for option */
++ __u8 length; /* option length */
++ struct in6_addr addr; /* home address */
++} __attribute__ ((packed));
++
++#endif /* _NET_MIPV6_H */
+diff -uprN linux-2.4.25.old/include/net/ndisc.h linux-2.4.25/include/net/ndisc.h
+--- linux-2.4.25.old/include/net/ndisc.h 2002-11-28 23:53:15.000000000 +0000
++++ linux-2.4.25/include/net/ndisc.h 2004-06-26 11:29:29.000000000 +0100
+@@ -21,6 +21,10 @@
+ #define ND_OPT_REDIRECT_HDR 4
+ #define ND_OPT_MTU 5
+
++/* Mobile IPv6 specific ndisc options */
++#define ND_OPT_RTR_ADV_INTERVAL 7
++#define ND_OPT_HOME_AGENT_INFO 8
++
+ #define MAX_RTR_SOLICITATION_DELAY HZ
+
+ #define ND_REACHABLE_TIME (30*HZ)
+@@ -57,7 +61,7 @@ struct nd_opt_hdr {
+ } __attribute__((__packed__));
+
+ struct ndisc_options {
+- struct nd_opt_hdr *nd_opt_array[7];
++ struct nd_opt_hdr *nd_opt_array[10];
+ struct nd_opt_hdr *nd_opt_piend;
+ };
+
+@@ -67,6 +71,8 @@ struct ndisc_options {
+ #define nd_opts_pi_end nd_opt_piend
+ #define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
+ #define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
++#define nd_opts_rai nd_opt_array[ND_OPT_RTR_ADV_INTERVAL]
++#define nd_opts_hai nd_opt_array[ND_OPT_HOME_AGENT_INFO]
+
+ extern struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, struct nd_opt_hdr *end);
+ extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, struct ndisc_options *ndopts);
+@@ -83,6 +89,15 @@ extern void ndisc_send_ns(struct net_d
+ struct in6_addr *daddr,
+ struct in6_addr *saddr);
+
++extern void ndisc_send_na(struct net_device *dev,
++ struct neighbour *neigh,
++ struct in6_addr *daddr,
++ struct in6_addr *solicited_addr,
++ int router,
++ int solicited,
++ int override,
++ int inc_opt);
++
+ extern void ndisc_send_rs(struct net_device *dev,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr);
+diff -uprN linux-2.4.25.old/include/net/sock.h linux-2.4.25/include/net/sock.h
+--- linux-2.4.25.old/include/net/sock.h 2004-02-18 13:36:32.000000000 +0000
++++ linux-2.4.25/include/net/sock.h 2004-06-26 11:29:30.000000000 +0100
+@@ -149,7 +149,9 @@ struct ipv6_pinfo {
+ struct in6_addr rcv_saddr;
+ struct in6_addr daddr;
+ struct in6_addr *daddr_cache;
+-
++#if defined(CONFIG_IPV6_SUBTREES)
++ struct in6_addr *saddr_cache;
++#endif
+ __u32 flow_label;
+ __u32 frag_size;
+ int hop_limit;
+diff -uprN linux-2.4.25.old/net/Makefile linux-2.4.25/net/Makefile
+--- linux-2.4.25.old/net/Makefile 2004-06-26 11:22:00.000000000 +0100
++++ linux-2.4.25/net/Makefile 2004-06-26 11:29:30.000000000 +0100
+@@ -7,7 +7,7 @@
+
+ O_TARGET := network.o
+
+-mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched core sctp
++mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched core sctp ipv6
+ export-objs := netsyms.o
+
+ subdir-y := core ethernet
+@@ -25,6 +25,7 @@ subdir-$(CONFIG_IP_SCTP) += sctp
+ ifneq ($(CONFIG_IPV6),n)
+ ifneq ($(CONFIG_IPV6),)
+ subdir-$(CONFIG_NETFILTER) += ipv6/netfilter
++subdir-$(CONFIG_IPV6_MOBILITY) += ipv6/mobile_ip6
+ endif
+ endif
+
+diff -uprN linux-2.4.25.old/net/core/neighbour.c linux-2.4.25/net/core/neighbour.c
+--- linux-2.4.25.old/net/core/neighbour.c 2004-02-18 13:36:32.000000000 +0000
++++ linux-2.4.25/net/core/neighbour.c 2004-06-26 11:29:30.000000000 +0100
+@@ -386,7 +386,7 @@ struct pneigh_entry * pneigh_lookup(stru
+ if (!creat)
+ return NULL;
+
+- n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
++ n = kmalloc(sizeof(*n) + key_len, GFP_ATOMIC);
+ if (n == NULL)
+ return NULL;
+
+diff -uprN linux-2.4.25.old/net/ipv6/Config.in linux-2.4.25/net/ipv6/Config.in
+--- linux-2.4.25.old/net/ipv6/Config.in 2001-12-21 17:42:05.000000000 +0000
++++ linux-2.4.25/net/ipv6/Config.in 2004-06-26 11:29:30.000000000 +0100
+@@ -1,10 +1,16 @@
+ #
+ # IPv6 configuration
+ #
+-
++bool ' IPv6: routing by source address (EXPERIMENTAL)' CONFIG_IPV6_SUBTREES
+ #bool ' IPv6: flow policy support' CONFIG_RT6_POLICY
+ #bool ' IPv6: firewall support' CONFIG_IPV6_FIREWALL
+
++if [ "$CONFIG_IPV6" != "n" ]; then
++ dep_tristate ' IPv6: IPv6 over IPv6 Tunneling (EXPERIMENTAL)' CONFIG_IPV6_TUNNEL $CONFIG_IPV6
++fi
++
++source net/ipv6/mobile_ip6/Config.in
++
+ if [ "$CONFIG_NETFILTER" != "n" ]; then
+ source net/ipv6/netfilter/Config.in
+ fi
+diff -uprN linux-2.4.25.old/net/ipv6/Makefile linux-2.4.25/net/ipv6/Makefile
+--- linux-2.4.25.old/net/ipv6/Makefile 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/net/ipv6/Makefile 2004-06-26 11:29:30.000000000 +0100
+@@ -6,18 +6,28 @@
+ # unless it's something special (ie not a .c file).
+ #
+
++export-objs := ipv6_syms.o ipv6_tunnel.o
+
+-O_TARGET := ipv6.o
++#list-multi := ipv6.o ipv6_tunnel.o
+
+-obj-y := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \
+- route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \
+- protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
+- exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
+- ip6_flowlabel.o ipv6_syms.o
++ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
++ sit.o route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o \
++ raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
++ exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
++ ip6_flowlabel.o ipv6_syms.o
++
++ifneq ($(CONFIG_IPV6_MOBILITY),n)
++ifneq ($(CONFIG_IPV6_MOBILITY),)
++ipv6-objs += mipglue.o
++endif
++endif
+
+-export-objs := ipv6_syms.o
++obj-$(CONFIG_IPV6) += ipv6.o
++obj-$(CONFIG_IPV6_TUNNEL) += ipv6_tunnel.o
++
++ipv6.o: $(ipv6-objs)
++ $(LD) -r -o $@ $(ipv6-objs)
+
+-obj-m := $(O_TARGET)
+
+ #obj-$(CONFIG_IPV6_FIREWALL) += ip6_fw.o
+
+diff -uprN linux-2.4.25.old/net/ipv6/addrconf.c linux-2.4.25/net/ipv6/addrconf.c
+--- linux-2.4.25.old/net/ipv6/addrconf.c 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/net/ipv6/addrconf.c 2004-06-26 11:29:30.000000000 +0100
+@@ -68,6 +68,8 @@
+
+ #include <asm/uaccess.h>
+
++#include <net/mipglue.h>
++
+ #define IPV6_MAX_ADDRESSES 16
+
+ /* Set to 3 to get tracing... */
+@@ -103,9 +105,9 @@ static spinlock_t addrconf_verify_lock =
+
+ static int addrconf_ifdown(struct net_device *dev, int how);
+
+-static void addrconf_dad_start(struct inet6_ifaddr *ifp, int flags);
++void addrconf_dad_start(struct inet6_ifaddr *ifp, int flags);
+ static void addrconf_dad_timer(unsigned long data);
+-static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
++void addrconf_dad_completed(struct inet6_ifaddr *ifp);
+ static void addrconf_rs_timer(unsigned long data);
+ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
+
+@@ -330,38 +332,6 @@ static struct inet6_dev * ipv6_find_idev
+ return idev;
+ }
+
+-void ipv6_addr_prefix(struct in6_addr *prefix,
+- struct in6_addr *addr, int prefix_len)
+-{
+- unsigned long mask;
+- int ncopy, nbits;
+-
+- memset(prefix, 0, sizeof(*prefix));
+-
+- if (prefix_len <= 0)
+- return;
+- if (prefix_len > 128)
+- prefix_len = 128;
+-
+- ncopy = prefix_len / 32;
+- switch (ncopy) {
+- case 4: prefix->s6_addr32[3] = addr->s6_addr32[3];
+- case 3: prefix->s6_addr32[2] = addr->s6_addr32[2];
+- case 2: prefix->s6_addr32[1] = addr->s6_addr32[1];
+- case 1: prefix->s6_addr32[0] = addr->s6_addr32[0];
+- case 0: break;
+- }
+- nbits = prefix_len % 32;
+- if (nbits == 0)
+- return;
+-
+- mask = ~((1 << (32 - nbits)) - 1);
+- mask = htonl(mask);
+-
+- prefix->s6_addr32[ncopy] = addr->s6_addr32[ncopy] & mask;
+-}
+-
+-
+ static void dev_forward_change(struct inet6_dev *idev)
+ {
+ struct net_device *dev;
+@@ -513,7 +483,7 @@ ipv6_add_addr(struct inet6_dev *idev, co
+
+ /* This function wants to get referenced ifp and releases it before return */
+
+-static void ipv6_del_addr(struct inet6_ifaddr *ifp)
++void ipv6_del_addr(struct inet6_ifaddr *ifp)
+ {
+ struct inet6_ifaddr *ifa, **ifap;
+ struct inet6_dev *idev = ifp->idev;
+@@ -662,6 +632,12 @@ out:
+ if (match)
+ in6_ifa_put(match);
+
++ /* The home address is always used as source address in
++ * MIPL mobile IPv6
++ */
++ if (scope != IFA_HOST && scope != IFA_LINK)
++ addrconf_get_mipv6_home_address(saddr);
++
+ return err;
+ }
+
+@@ -815,7 +791,7 @@ void addrconf_leave_solict(struct net_de
+ }
+
+
+-static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
++int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
+ {
+ switch (dev->type) {
+ case ARPHRD_ETHER:
+@@ -840,7 +816,7 @@ static int ipv6_generate_eui64(u8 *eui,
+ return -1;
+ }
+
+-static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
++int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
+ {
+ int err = -1;
+ struct inet6_ifaddr *ifp;
+@@ -1407,6 +1383,24 @@ static void addrconf_sit_config(struct n
+ sit_route_add(dev);
+ }
+
++/**
++ * addrconf_ipv6_tunnel_config - configure IPv6 tunnel device
++ * @dev: tunnel device
++ **/
++
++static void addrconf_ipv6_tunnel_config(struct net_device *dev)
++{
++ struct inet6_dev *idev;
++
++ ASSERT_RTNL();
++
++ /* Assign inet6_dev structure to tunnel device */
++ if ((idev = ipv6_find_idev(dev)) == NULL) {
++ printk(KERN_DEBUG "init ipv6 tunnel: add_dev failed\n");
++ return;
++ }
++}
++
+
+ int addrconf_notify(struct notifier_block *this, unsigned long event,
+ void * data)
+@@ -1421,6 +1415,10 @@ int addrconf_notify(struct notifier_bloc
+ addrconf_sit_config(dev);
+ break;
+
++ case ARPHRD_TUNNEL6:
++ addrconf_ipv6_tunnel_config(dev);
++ break;
++
+ case ARPHRD_LOOPBACK:
+ init_loopback(dev);
+ break;
+@@ -1602,7 +1600,7 @@ out:
+ /*
+ * Duplicate Address Detection
+ */
+-static void addrconf_dad_start(struct inet6_ifaddr *ifp, int flags)
++void addrconf_dad_start(struct inet6_ifaddr *ifp, int flags)
+ {
+ struct net_device *dev;
+ unsigned long rand_num;
+@@ -1667,7 +1665,7 @@ static void addrconf_dad_timer(unsigned
+ in6_ifa_put(ifp);
+ }
+
+-static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
++void addrconf_dad_completed(struct inet6_ifaddr *ifp)
+ {
+ struct net_device * dev = ifp->idev->dev;
+
+@@ -1676,7 +1674,7 @@ static void addrconf_dad_completed(struc
+ */
+
+ ipv6_ifa_notify(RTM_NEWADDR, ifp);
+-
++ notifier_call_chain(&inet6addr_chain,NETDEV_UP,ifp);
+ /* If added prefix is link local and forwarding is off,
+ start sending router solicitations.
+ */
+@@ -1877,8 +1875,20 @@ inet6_rtm_newaddr(struct sk_buff *skb, s
+ if (rta[IFA_LOCAL-1]) {
+ if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx)))
+ return -EINVAL;
++ if (ifm->ifa_flags & IFA_F_HOMEADDR && !rta[IFA_HOMEAGENT-1])
++ return -EINVAL;
+ pfx = RTA_DATA(rta[IFA_LOCAL-1]);
+ }
++ if (rta[IFA_HOMEAGENT-1]) {
++ struct in6_addr *ha;
++ if (pfx == NULL || !(ifm->ifa_flags & IFA_F_HOMEADDR))
++ return -EINVAL;
++ if (RTA_PAYLOAD(rta[IFA_HOMEAGENT-1]) < sizeof(*ha))
++ return -EINVAL;
++ ha = RTA_DATA(rta[IFA_HOMEAGENT-1]);
++ addrconf_set_mipv6_mn_home(ifm->ifa_index, pfx, ifm->ifa_prefixlen,
++ ha, ifm->ifa_prefixlen);
++ }
+ if (pfx == NULL)
+ return -EINVAL;
+
+diff -uprN linux-2.4.25.old/net/ipv6/af_inet6.c linux-2.4.25/net/ipv6/af_inet6.c
+--- linux-2.4.25.old/net/ipv6/af_inet6.c 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/net/ipv6/af_inet6.c 2004-06-26 11:29:30.000000000 +0100
+@@ -58,6 +58,9 @@
+ #include <net/transp_v6.h>
+ #include <net/ip6_route.h>
+ #include <net/addrconf.h>
++#ifdef CONFIG_IPV6_TUNNEL
++#include <net/ipv6_tunnel.h>
++#endif
+
+ #include <asm/uaccess.h>
+ #include <asm/system.h>
+@@ -646,6 +649,11 @@ static int __init inet6_init(void)
+ err = ndisc_init(&inet6_family_ops);
+ if (err)
+ goto ndisc_fail;
++#ifdef CONFIG_IPV6_TUNNEL
++ err = ip6_tunnel_init();
++ if (err)
++ goto ip6_tunnel_fail;
++#endif
+ err = igmp6_init(&inet6_family_ops);
+ if (err)
+ goto igmp_fail;
+@@ -698,6 +706,10 @@ proc_raw6_fail:
+ #endif
+ igmp_fail:
+ ndisc_cleanup();
++#ifdef CONFIG_IPV6_TUNNEL
++ ip6_tunnel_cleanup();
++ip6_tunnel_fail:
++#endif
+ ndisc_fail:
+ icmpv6_cleanup();
+ icmp_fail:
+@@ -730,6 +742,9 @@ static void inet6_exit(void)
+ ip6_route_cleanup();
+ ipv6_packet_cleanup();
+ igmp6_cleanup();
++#ifdef CONFIG_IPV6_TUNNEL
++ ip6_tunnel_cleanup();
++#endif
+ ndisc_cleanup();
+ icmpv6_cleanup();
+ #ifdef CONFIG_SYSCTL
+diff -uprN linux-2.4.25.old/net/ipv6/exthdrs.c linux-2.4.25/net/ipv6/exthdrs.c
+--- linux-2.4.25.old/net/ipv6/exthdrs.c 2003-08-25 12:44:44.000000000 +0100
++++ linux-2.4.25/net/ipv6/exthdrs.c 2004-06-26 11:29:30.000000000 +0100
+@@ -41,6 +41,9 @@
+ #include <net/ip6_route.h>
+ #include <net/addrconf.h>
+
++#include <net/mipglue.h>
++#include <net/mipv6.h>
++
+ #include <asm/uaccess.h>
+
+ /*
+@@ -160,7 +163,8 @@ bad:
+ *****************************/
+
+ struct tlvtype_proc tlvprocdestopt_lst[] = {
+- /* No destination options are defined now */
++ /* Mobility Support destination options */
++ {MIPV6_TLV_HOMEADDR, mipv6_handle_dstopt},
+ {-1, NULL}
+ };
+
+@@ -210,6 +214,7 @@ static int ipv6_routing_header(struct sk
+
+ struct ipv6_rt_hdr *hdr;
+ struct rt0_hdr *rthdr;
++ struct rt2_hdr *rt2hdr;
+
+ if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
+ !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
+@@ -225,17 +230,25 @@ static int ipv6_routing_header(struct sk
+ kfree_skb(skb);
+ return -1;
+ }
+-
++ /* Silently discard invalid packets containing RTH type 2 */
++ if (hdr->type == IPV6_SRCRT_TYPE_2 &&
++ (hdr->hdrlen != 2 || hdr->segments_left != 1)) {
++ kfree_skb(skb);
++ return -1;
++ }
+ looped_back:
+ if (hdr->segments_left == 0) {
+- opt->srcrt = skb->h.raw - skb->nh.raw;
++ if (hdr->type == IPV6_SRCRT_TYPE_0)
++ opt->srcrt = skb->h.raw - skb->nh.raw;
++ else if (hdr->type == IPV6_SRCRT_TYPE_2)
++ opt->srcrt2 = skb->h.raw - skb->nh.raw;
+ skb->h.raw += (hdr->hdrlen + 1) << 3;
+ opt->dst0 = opt->dst1;
+ opt->dst1 = 0;
+ return (&hdr->nexthdr) - skb->nh.raw;
+ }
+
+- if (hdr->type != IPV6_SRCRT_TYPE_0) {
++ if (hdr->type != IPV6_SRCRT_TYPE_0 && hdr->type != IPV6_SRCRT_TYPE_2) {
+ icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
+ return -1;
+ }
+@@ -275,9 +288,20 @@ looped_back:
+
+ i = n - --hdr->segments_left;
+
+- rthdr = (struct rt0_hdr *) hdr;
+- addr = rthdr->addr;
+- addr += i - 1;
++ if (hdr->type == IPV6_SRCRT_TYPE_0) {
++ rthdr = (struct rt0_hdr *) hdr;
++ addr = rthdr->addr;
++ addr += i - 1;
++ } else {
++ /* check that address is this node's home address */
++ rt2hdr = (struct rt2_hdr *) hdr;
++ addr = &rt2hdr->addr;
++ if (!ipv6_chk_addr(addr, NULL) ||
++ !ipv6_chk_mip_home_addr(addr)) {
++ kfree_skb(skb);
++ return -1;
++ }
++ }
+
+ addr_type = ipv6_addr_type(addr);
+
+@@ -330,6 +354,10 @@ looped_back:
+ temporary (or permanent) backdoor.
+ If listening socket set IPV6_RTHDR to 2, then we invert header.
+ --ANK (980729)
++
++ By the Mobile IPv6 specification Type 2 routing header MUST NOT be
++ inverted.
++ --AJT (20020917)
+ */
+
+ struct ipv6_txoptions *
+@@ -352,6 +380,18 @@ ipv6_invert_rthdr(struct sock *sk, struc
+ struct ipv6_txoptions *opt;
+ int hdrlen = ipv6_optlen(hdr);
+
++ if (hdr->type == IPV6_SRCRT_TYPE_2) {
++ opt = sock_kmalloc(sk, sizeof(*opt) + hdrlen, GFP_ATOMIC);
++ if (opt == NULL)
++ return NULL;
++ memset(opt, 0, sizeof(*opt));
++ opt->tot_len = sizeof(*opt) + hdrlen;
++ opt->srcrt = (void*)(opt+1);
++ opt->opt_nflen = hdrlen;
++ memcpy(opt->srcrt, hdr, sizeof(struct rt2_hdr));
++ return opt;
++ }
++
+ if (hdr->segments_left ||
+ hdr->type != IPV6_SRCRT_TYPE_0 ||
+ hdr->hdrlen & 0x01)
+@@ -622,8 +662,18 @@ u8 *ipv6_build_nfrag_opts(struct sk_buff
+ if (opt) {
+ if (opt->dst0opt)
+ prev_hdr = ipv6_build_exthdr(skb, prev_hdr, NEXTHDR_DEST, opt->dst0opt);
+- if (opt->srcrt)
+- prev_hdr = ipv6_build_rthdr(skb, prev_hdr, opt->srcrt, daddr);
++ if (opt->srcrt) {
++ if (opt->srcrt2) {
++ struct in6_addr *rt2_hop = &((struct rt2_hdr *)opt->srcrt2)->addr;
++ prev_hdr = ipv6_build_rthdr(skb, prev_hdr, opt->srcrt, rt2_hop);
++ } else
++ prev_hdr = ipv6_build_rthdr(skb, prev_hdr, opt->srcrt, daddr);
++ }
++ if (opt->srcrt2) {
++ struct inet6_skb_parm *parm = (struct inet6_skb_parm *)skb->cb;
++ ipv6_addr_copy(&parm->hoa, daddr);
++ prev_hdr = ipv6_build_rthdr(skb, prev_hdr, opt->srcrt2, daddr);
++ }
+ }
+ return prev_hdr;
+ }
+@@ -684,6 +734,11 @@ void ipv6_push_nfrag_opts(struct sk_buff
+ u8 *proto,
+ struct in6_addr **daddr)
+ {
++ if (opt->srcrt2) {
++ struct inet6_skb_parm *parm = (struct inet6_skb_parm *)skb->cb;
++ ipv6_addr_copy(&parm->hoa, *daddr);
++ ipv6_push_rthdr(skb, proto, opt->srcrt2, daddr);
++ }
+ if (opt->srcrt)
+ ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
+ if (opt->dst0opt)
+@@ -719,6 +774,8 @@ ipv6_dup_options(struct sock *sk, struct
+ *((char**)&opt2->auth) += dif;
+ if (opt2->srcrt)
+ *((char**)&opt2->srcrt) += dif;
++ if (opt2->srcrt2)
++ *((char**)&opt2->srcrt2) += dif;
+ }
+ return opt2;
+ }
+diff -uprN linux-2.4.25.old/net/ipv6/icmp.c linux-2.4.25/net/ipv6/icmp.c
+--- linux-2.4.25.old/net/ipv6/icmp.c 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/net/ipv6/icmp.c 2004-06-26 11:29:30.000000000 +0100
+@@ -61,6 +61,8 @@
+ #include <net/addrconf.h>
+ #include <net/icmp.h>
+
++#include <net/mipglue.h>
++
+ #include <asm/uaccess.h>
+ #include <asm/system.h>
+
+@@ -364,6 +366,8 @@ void icmpv6_send(struct sk_buff *skb, in
+
+ msg.len = len;
+
++ icmpv6_swap_mipv6_addrs(skb);
++
+ ip6_build_xmit(sk, icmpv6_getfrag, &msg, &fl, len, NULL, -1,
+ MSG_DONTWAIT);
+ if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
+@@ -562,13 +566,13 @@ int icmpv6_rcv(struct sk_buff *skb)
+ rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
+ ntohl(hdr->icmp6_mtu));
+
+- /*
+- * Drop through to notify
+- */
++ icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
++ break;
+
+ case ICMPV6_DEST_UNREACH:
+- case ICMPV6_TIME_EXCEED:
+ case ICMPV6_PARAMPROB:
++ mipv6_icmp_rcv(skb);
++ case ICMPV6_TIME_EXCEED:
+ icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
+ break;
+
+@@ -597,6 +601,13 @@ int icmpv6_rcv(struct sk_buff *skb)
+ case ICMPV6_MGM_REDUCTION:
+ break;
+
++ case MIPV6_DHAAD_REQUEST:
++ case MIPV6_DHAAD_REPLY:
++ case MIPV6_PREFIX_SOLICIT:
++ case MIPV6_PREFIX_ADV:
++ mipv6_icmp_rcv(skb);
++ break;
++
+ default:
+ if (net_ratelimit())
+ printk(KERN_DEBUG "icmpv6: msg of unkown type\n");
+diff -uprN linux-2.4.25.old/net/ipv6/ip6_fib.c linux-2.4.25/net/ipv6/ip6_fib.c
+--- linux-2.4.25.old/net/ipv6/ip6_fib.c 2003-08-25 12:44:44.000000000 +0100
++++ linux-2.4.25/net/ipv6/ip6_fib.c 2004-06-26 11:29:30.000000000 +0100
+@@ -18,6 +18,7 @@
+ * Yuji SEKIYA @USAGI: Support default route on router node;
+ * remove ip6_null_entry from the top of
+ * routing table.
++ * Ville Nuorvala: Fixes to source address based routing
+ */
+ #include <linux/config.h>
+ #include <linux/errno.h>
+@@ -40,7 +41,6 @@
+ #include <net/ip6_route.h>
+
+ #define RT6_DEBUG 2
+-#undef CONFIG_IPV6_SUBTREES
+
+ #if RT6_DEBUG >= 3
+ #define RT6_TRACE(x...) printk(KERN_DEBUG x)
+@@ -500,6 +500,8 @@ static __inline__ void fib6_start_gc(str
+ mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval);
+ }
+
++static struct rt6_info * fib6_find_prefix(struct fib6_node *fn);
++
+ /*
+ * Add routing information to the routing tree.
+ * <destination addr>/<source addr>
+@@ -508,17 +510,19 @@ static __inline__ void fib6_start_gc(str
+
+ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nlmsghdr *nlh)
+ {
+- struct fib6_node *fn;
++ struct fib6_node *fn = root;
+ int err = -ENOMEM;
+
+- fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
+- rt->rt6i_dst.plen, (u8*) &rt->rt6i_dst - (u8*) rt);
++#ifdef CONFIG_IPV6_SUBTREES
++ struct fib6_node *pn = NULL;
+
++ fn = fib6_add_1(root, &rt->rt6i_src.addr, sizeof(struct in6_addr),
++ rt->rt6i_src.plen, (u8*) &rt->rt6i_src - (u8*) rt);
++
+ if (fn == NULL)
+ goto out;
+
+-#ifdef CONFIG_IPV6_SUBTREES
+- if (rt->rt6i_src.plen) {
++ if (rt->rt6i_dst.plen) {
+ struct fib6_node *sn;
+
+ if (fn->subtree == NULL) {
+@@ -546,9 +550,9 @@ int fib6_add(struct fib6_node *root, str
+
+ /* Now add the first leaf node to new subtree */
+
+- sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
+- sizeof(struct in6_addr), rt->rt6i_src.plen,
+- (u8*) &rt->rt6i_src - (u8*) rt);
++ sn = fib6_add_1(sfn, &rt->rt6i_dst.addr,
++ sizeof(struct in6_addr), rt->rt6i_dst.plen,
++ (u8*) &rt->rt6i_dst - (u8*) rt);
+
+ if (sn == NULL) {
+ /* If it is failed, discard just allocated
+@@ -562,21 +566,30 @@ int fib6_add(struct fib6_node *root, str
+ /* Now link new subtree to main tree */
+ sfn->parent = fn;
+ fn->subtree = sfn;
+- if (fn->leaf == NULL) {
+- fn->leaf = rt;
+- atomic_inc(&rt->rt6i_ref);
+- }
+ } else {
+- sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
+- sizeof(struct in6_addr), rt->rt6i_src.plen,
+- (u8*) &rt->rt6i_src - (u8*) rt);
++ sn = fib6_add_1(fn->subtree, &rt->rt6i_dst.addr,
++ sizeof(struct in6_addr), rt->rt6i_dst.plen,
++ (u8*) &rt->rt6i_dst - (u8*) rt);
+
+ if (sn == NULL)
+ goto st_failure;
+ }
+
++ /* fib6_add_1 might have cleared the old leaf pointer */
++ if (fn->leaf == NULL) {
++ fn->leaf = rt;
++ atomic_inc(&rt->rt6i_ref);
++ }
++
++ pn = fn;
+ fn = sn;
+ }
++#else
++ fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
++ rt->rt6i_dst.plen, (u8*) &rt->rt6i_dst - (u8*) rt);
++
++ if (fn == NULL)
++ goto out;
+ #endif
+
+ err = fib6_add_rt2node(fn, rt, nlh);
+@@ -588,8 +601,30 @@ int fib6_add(struct fib6_node *root, str
+ }
+
+ out:
+- if (err)
++ if (err) {
++#ifdef CONFIG_IPV6_SUBTREES
++
++ /* If fib6_add_1 has cleared the old leaf pointer in the
++ super-tree leaf node, we have to find a new one for it.
++
++ This situation will never arise in the sub-tree since
++ the node will at least have the route that caused
++ fib6_add_rt2node to fail.
++ */
++
++ if (pn && !(pn->fn_flags & RTN_RTINFO)) {
++ pn->leaf = fib6_find_prefix(pn);
++#if RT6_DEBUG >= 2
++ if (!pn->leaf) {
++ BUG_TRAP(pn->leaf);
++ pn->leaf = &ip6_null_entry;
++ }
++#endif
++ atomic_inc(&pn->leaf->rt6i_ref);
++ }
++#endif
+ dst_free(&rt->u.dst);
++ }
+ return err;
+
+ #ifdef CONFIG_IPV6_SUBTREES
+@@ -597,8 +632,8 @@ out:
+ is orphan. If it is, shoot it.
+ */
+ st_failure:
+- if (fn && !(fn->fn_flags&RTN_RTINFO|RTN_ROOT))
+- fib_repair_tree(fn);
++ if (fn && !(fn->fn_flags & (RTN_RTINFO | RTN_ROOT)))
++ fib6_repair_tree(fn);
+ dst_free(&rt->u.dst);
+ return err;
+ #endif
+@@ -641,22 +676,28 @@ static struct fib6_node * fib6_lookup_1(
+ break;
+ }
+
+- while ((fn->fn_flags & RTN_ROOT) == 0) {
++ for (;;) {
+ #ifdef CONFIG_IPV6_SUBTREES
+ if (fn->subtree) {
+- struct fib6_node *st;
+- struct lookup_args *narg;
+-
+- narg = args + 1;
+-
+- if (narg->addr) {
+- st = fib6_lookup_1(fn->subtree, narg);
++ struct rt6key *key;
+
+- if (st && !(st->fn_flags & RTN_ROOT))
+- return st;
++ key = (struct rt6key *) ((u8 *) fn->leaf +
++ args->offset);
++
++ if (addr_match(&key->addr, args->addr, key->plen)) {
++ struct fib6_node *st;
++ struct lookup_args *narg = args + 1;
++ if (!ipv6_addr_any(narg->addr)) {
++ st = fib6_lookup_1(fn->subtree, narg);
++
++ if (st && !(st->fn_flags & RTN_ROOT))
++ return st;
++ }
+ }
+ }
+ #endif
++ if (fn->fn_flags & RTN_ROOT)
++ break;
+
+ if (fn->fn_flags & RTN_RTINFO) {
+ struct rt6key *key;
+@@ -680,13 +721,22 @@ struct fib6_node * fib6_lookup(struct fi
+ struct lookup_args args[2];
+ struct rt6_info *rt = NULL;
+ struct fib6_node *fn;
++#ifdef CONFIG_IPV6_SUBTREES
++ struct in6_addr saddr_buf;
++#endif
+
++#ifdef CONFIG_IPV6_SUBTREES
++ if (saddr == NULL) {
++ memset(&saddr_buf, 0, sizeof(struct in6_addr));
++ saddr = &saddr_buf;
++ }
++ args[0].offset = (u8*) &rt->rt6i_src - (u8*) rt;
++ args[0].addr = saddr;
++ args[1].offset = (u8*) &rt->rt6i_dst - (u8*) rt;
++ args[1].addr = daddr;
++#else
+ args[0].offset = (u8*) &rt->rt6i_dst - (u8*) rt;
+ args[0].addr = daddr;
+-
+-#ifdef CONFIG_IPV6_SUBTREES
+- args[1].offset = (u8*) &rt->rt6i_src - (u8*) rt;
+- args[1].addr = saddr;
+ #endif
+
+ fn = fib6_lookup_1(root, args);
+@@ -739,19 +789,25 @@ struct fib6_node * fib6_locate(struct fi
+ {
+ struct rt6_info *rt = NULL;
+ struct fib6_node *fn;
+-
+- fn = fib6_locate_1(root, daddr, dst_len,
+- (u8*) &rt->rt6i_dst - (u8*) rt);
+-
+ #ifdef CONFIG_IPV6_SUBTREES
+- if (src_len) {
+- BUG_TRAP(saddr!=NULL);
+- if (fn == NULL)
+- fn = fn->subtree;
++ struct in6_addr saddr_buf;
++
++ if (saddr == NULL) {
++ memset(&saddr_buf, 0, sizeof(struct in6_addr));
++ saddr = &saddr_buf;
++ }
++ fn = fib6_locate_1(root, saddr, src_len,
++ (u8*) &rt->rt6i_src - (u8*) rt);
++ if (dst_len) {
+ if (fn)
+- fn = fib6_locate_1(fn, saddr, src_len,
+- (u8*) &rt->rt6i_src - (u8*) rt);
++ fn = fib6_locate_1(fn->subtree, daddr, dst_len,
++ (u8*) &rt->rt6i_dst - (u8*) rt);
++ else
++ return NULL;
+ }
++#else
++ fn = fib6_locate_1(root, daddr, dst_len,
++ (u8*) &rt->rt6i_dst - (u8*) rt);
+ #endif
+
+ if (fn && fn->fn_flags&RTN_RTINFO)
+@@ -939,7 +995,7 @@ static void fib6_del_route(struct fib6_n
+ }
+ fn = fn->parent;
+ }
+- /* No more references are possiible at this point. */
++ /* No more references are possible at this point. */
+ if (atomic_read(&rt->rt6i_ref) != 1) BUG();
+ }
+
+diff -uprN linux-2.4.25.old/net/ipv6/ip6_input.c linux-2.4.25/net/ipv6/ip6_input.c
+--- linux-2.4.25.old/net/ipv6/ip6_input.c 2003-08-25 12:44:44.000000000 +0100
++++ linux-2.4.25/net/ipv6/ip6_input.c 2004-06-26 11:29:30.000000000 +0100
+@@ -40,13 +40,42 @@
+ #include <net/ip6_route.h>
+ #include <net/addrconf.h>
+
++static inline int ip6_proxy_chk(struct sk_buff *skb)
++{
++ struct ipv6hdr *hdr = skb->nh.ipv6h;
+
+-
++ if (ipv6_addr_type(&hdr->daddr)&IPV6_ADDR_UNICAST &&
++ pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
++ u8 nexthdr = hdr->nexthdr;
++ int offset;
++ struct icmp6hdr msg;
++
++ if (ipv6_ext_hdr(nexthdr)) {
++ offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr,
++ skb->len - sizeof(*hdr));
++ if (offset < 0)
++ return 0;
++ } else
++ offset = sizeof(*hdr);
++
++ /* capture unicast NUD probes on behalf of the proxied node */
++
++ if (nexthdr == IPPROTO_ICMPV6 &&
++ !skb_copy_bits(skb, offset, &msg, sizeof(msg)) &&
++ msg.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
++ return 1;
++ }
++ }
++ return 0;
++}
++
+ static inline int ip6_rcv_finish( struct sk_buff *skb)
+ {
+- if (skb->dst == NULL)
+- ip6_route_input(skb);
+-
++ if (skb->dst == NULL) {
++ if (ip6_proxy_chk(skb))
++ return ip6_input(skb);
++ ip6_route_input(skb);
++ }
+ return skb->dst->input(skb);
+ }
+
+diff -uprN linux-2.4.25.old/net/ipv6/ip6_output.c linux-2.4.25/net/ipv6/ip6_output.c
+--- linux-2.4.25.old/net/ipv6/ip6_output.c 2003-08-25 12:44:44.000000000 +0100
++++ linux-2.4.25/net/ipv6/ip6_output.c 2004-06-26 11:29:30.000000000 +0100
+@@ -50,6 +50,8 @@
+ #include <net/rawv6.h>
+ #include <net/icmp.h>
+
++#include <net/mipglue.h>
++
+ static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
+ {
+ static u32 ipv6_fragmentation_id = 1;
+@@ -194,7 +196,14 @@ int ip6_xmit(struct sock *sk, struct sk_
+ u8 proto = fl->proto;
+ int seg_len = skb->len;
+ int hlimit;
++ int retval;
++ struct ipv6_txoptions *orig_opt = opt;
++
++ opt = ip6_add_mipv6_txoptions(sk, skb, orig_opt, fl, &dst);
+
++ if(orig_opt && !opt)
++ return -ENOMEM;
++
+ if (opt) {
+ int head_room;
+
+@@ -209,8 +218,11 @@ int ip6_xmit(struct sock *sk, struct sk_
+ struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
+ kfree_skb(skb);
+ skb = skb2;
+- if (skb == NULL)
++ if (skb == NULL) {
++ ip6_free_mipv6_txoptions(opt, orig_opt);
++
+ return -ENOBUFS;
++ }
+ if (sk)
+ skb_set_owner_w(skb, sk);
+ }
+@@ -242,7 +254,10 @@ int ip6_xmit(struct sock *sk, struct sk_
+
+ if (skb->len <= dst->pmtu) {
+ IP6_INC_STATS(Ip6OutRequests);
+- return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
++ ip6_mark_mipv6_packet(opt, skb);
++ retval = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
++ ip6_free_mipv6_txoptions(opt, orig_opt);
++ return retval;
+ }
+
+ if (net_ratelimit())
+@@ -250,6 +265,9 @@ int ip6_xmit(struct sock *sk, struct sk_
+ skb->dev = dst->dev;
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst->pmtu, skb->dev);
+ kfree_skb(skb);
++
++ ip6_free_mipv6_txoptions(opt, orig_opt);
++
+ return -EMSGSIZE;
+ }
+
+@@ -473,6 +491,7 @@ static int ip6_frag_xmit(struct sock *sk
+
+ IP6_INC_STATS(Ip6FragCreates);
+ IP6_INC_STATS(Ip6OutRequests);
++ ip6_mark_mipv6_packet(opt, skb);
+ err = NF_HOOK(PF_INET6,NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
+ if (err) {
+ kfree_skb(last_skb);
+@@ -499,6 +518,7 @@ static int ip6_frag_xmit(struct sock *sk
+ IP6_INC_STATS(Ip6FragCreates);
+ IP6_INC_STATS(Ip6FragOKs);
+ IP6_INC_STATS(Ip6OutRequests);
++ ip6_mark_mipv6_packet(opt, last_skb);
+ return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, last_skb, NULL,dst->dev, ip6_maybe_reroute);
+ }
+
+@@ -509,26 +529,43 @@ int ip6_build_xmit(struct sock *sk, inet
+ struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
+ struct in6_addr *final_dst = NULL;
+ struct dst_entry *dst;
++ struct rt6_info *rt;
+ int err = 0;
+ unsigned int pktlength, jumbolen, mtu;
+ struct in6_addr saddr;
++ struct ipv6_txoptions *orig_opt = opt;
++#ifdef CONFIG_IPV6_SUBTREES
++ struct dst_entry *org_dst;
++#endif
++
++ opt = ip6_add_mipv6_txoptions(sk, NULL, orig_opt, fl, NULL);
++
++ if(orig_opt && !opt)
++ return -ENOMEM;
+
+ if (opt && opt->srcrt) {
+ struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
+ final_dst = fl->fl6_dst;
+ fl->fl6_dst = rt0->addr;
+- }
++ } else if (opt && opt->srcrt2) {
++ struct rt2_hdr *rt2 = (struct rt2_hdr *) opt->srcrt2;
++ final_dst = fl->fl6_dst;
++ fl->fl6_dst = &rt2->addr;
++ }
+
+ if (!fl->oif && ipv6_addr_is_multicast(fl->nl_u.ip6_u.daddr))
+ fl->oif = np->mcast_oif;
+
+ dst = __sk_dst_check(sk, np->dst_cookie);
++#ifdef CONFIG_IPV6_SUBTREES
++ org_dst = dst;
++#endif
+ if (dst) {
+- struct rt6_info *rt = (struct rt6_info*)dst;
++ rt = (struct rt6_info*)dst;
+
+ /* Yes, checking route validity in not connected
+ case is not very simple. Take into account,
+- that we do not support routing by source, TOS,
++ that we do not support routing by TOS,
+ and MSG_DONTROUTE --ANK (980726)
+
+ 1. If route was host route, check that
+@@ -548,6 +585,13 @@ int ip6_build_xmit(struct sock *sk, inet
+ ipv6_addr_cmp(fl->fl6_dst, &rt->rt6i_dst.addr))
+ && (np->daddr_cache == NULL ||
+ ipv6_addr_cmp(fl->fl6_dst, np->daddr_cache)))
++#ifdef CONFIG_IPV6_SUBTREES
++ || (fl->fl6_src != NULL
++ && (rt->rt6i_src.plen != 128 ||
++ ipv6_addr_cmp(fl->fl6_src, &rt->rt6i_src.addr))
++ && (np->saddr_cache == NULL ||
++ ipv6_addr_cmp(fl->fl6_src, np->saddr_cache)))
++#endif
+ || (fl->oif && fl->oif != dst->dev->ifindex)) {
+ dst = NULL;
+ } else
+@@ -560,21 +604,42 @@ int ip6_build_xmit(struct sock *sk, inet
+ if (dst->error) {
+ IP6_INC_STATS(Ip6OutNoRoutes);
+ dst_release(dst);
++ ip6_free_mipv6_txoptions(opt, orig_opt);
+ return -ENETUNREACH;
+ }
+
+ if (fl->fl6_src == NULL) {
+ err = ipv6_get_saddr(dst, fl->fl6_dst, &saddr);
+-
+ if (err) {
+ #if IP6_DEBUG >= 2
+ printk(KERN_DEBUG "ip6_build_xmit: "
+ "no available source address\n");
+ #endif
++
++#ifdef CONFIG_IPV6_SUBTREES
++ if (dst != org_dst) {
++ dst_release(dst);
++ dst = org_dst;
++ }
++#endif
+ goto out;
+ }
+ fl->fl6_src = &saddr;
+ }
++#ifdef CONFIG_IPV6_SUBTREES
++ rt = (struct rt6_info*)dst;
++ if (dst != org_dst || rt->rt6i_src.plen != 128 ||
++ ipv6_addr_cmp(fl->fl6_src, &rt->rt6i_src.addr)) {
++ dst_release(dst);
++ dst = ip6_route_output(sk, fl);
++ if (dst->error) {
++ IP6_INC_STATS(Ip6OutNoRoutes);
++ dst_release(dst);
++ ip6_free_mipv6_txoptions(opt, orig_opt);
++ return -ENETUNREACH;
++ }
++ }
++#endif
+ pktlength = length;
+
+ if (hlimit < 0) {
+@@ -667,6 +732,7 @@ int ip6_build_xmit(struct sock *sk, inet
+
+ if (!err) {
+ IP6_INC_STATS(Ip6OutRequests);
++ ip6_mark_mipv6_packet(opt, skb);
+ err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
+ } else {
+ err = -EFAULT;
+@@ -688,9 +754,14 @@ int ip6_build_xmit(struct sock *sk, inet
+ * cleanup
+ */
+ out:
+- ip6_dst_store(sk, dst, fl->nl_u.ip6_u.daddr == &np->daddr ? &np->daddr : NULL);
++ ip6_dst_store(sk, dst,
++ fl->nl_u.ip6_u.daddr == &np->daddr ? &np->daddr : NULL,
++ fl->nl_u.ip6_u.saddr == &np->saddr ? &np->saddr : NULL);
+ if (err > 0)
+ err = np->recverr ? net_xmit_errno(err) : 0;
++
++ ip6_free_mipv6_txoptions(opt, orig_opt);
++
+ return err;
+ }
+
+@@ -769,6 +840,15 @@ int ip6_forward(struct sk_buff *skb)
+ return -ETIMEDOUT;
+ }
+
++ /* The proxying router can't forward traffic sent to a link-local
++ address, so signal the sender and discard the packet. This
++ behavior is required by the MIPv6 specification. */
++
++ if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL &&
++ skb->dev && pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
++ dst_link_failure(skb);
++ goto drop;
++ }
+ /* IPv6 specs say nothing about it, but it is clear that we cannot
+ send redirects to source routed frames.
+ */
+diff -uprN linux-2.4.25.old/net/ipv6/ipv6_syms.c linux-2.4.25/net/ipv6/ipv6_syms.c
+--- linux-2.4.25.old/net/ipv6/ipv6_syms.c 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/net/ipv6/ipv6_syms.c 2004-06-26 11:29:30.000000000 +0100
+@@ -6,6 +6,8 @@
+ #include <net/ipv6.h>
+ #include <net/addrconf.h>
+ #include <net/ip6_route.h>
++#include <net/ndisc.h>
++#include <net/mipglue.h>
+
+ EXPORT_SYMBOL(ipv6_addr_type);
+ EXPORT_SYMBOL(icmpv6_send);
+@@ -33,3 +35,48 @@ EXPORT_SYMBOL(inet6_ioctl);
+ EXPORT_SYMBOL(ipv6_get_saddr);
+ EXPORT_SYMBOL(ipv6_chk_addr);
+ EXPORT_SYMBOL(in6_dev_finish_destroy);
++
++#if defined(CONFIG_IPV6_TUNNEL_MODULE) || defined(CONFIG_IPV6_MOBILITY_MODULE)
++EXPORT_SYMBOL(ip6_build_xmit);
++EXPORT_SYMBOL(rt6_lookup);
++EXPORT_SYMBOL(ipv6_ext_hdr);
++#endif
++#ifdef CONFIG_IPV6_MOBILITY_MODULE
++EXPORT_SYMBOL(mipv6_functions);
++EXPORT_SYMBOL(mipv6_invalidate_calls);
++#if defined(CONFIG_IPV6_MOBILITY_HA_MODULE) || defined(CONFIG_IPV6_MOBILITY_MN_MODULE)
++EXPORT_SYMBOL(ip6_route_add);
++EXPORT_SYMBOL(ip6_route_del);
++EXPORT_SYMBOL(ipv6_get_lladdr);
++EXPORT_SYMBOL(ipv6_get_ifaddr);
++EXPORT_SYMBOL(nd_tbl);
++EXPORT_SYMBOL(ndisc_send_ns);
++EXPORT_SYMBOL(ndisc_send_na);
++EXPORT_SYMBOL(ndisc_next_option);
++EXPORT_SYMBOL(inet6_ifa_finish_destroy);
++#endif
++#ifdef CONFIG_IPV6_MOBILITY_HA_MODULE
++EXPORT_SYMBOL(ipv6_dev_ac_dec);
++EXPORT_SYMBOL(ipv6_dev_ac_inc);
++EXPORT_SYMBOL(ipv6_dev_mc_dec);
++EXPORT_SYMBOL(ipv6_dev_mc_inc);
++EXPORT_SYMBOL(ip6_forward);
++EXPORT_SYMBOL(ip6_input);
++EXPORT_SYMBOL(ipv6_chk_acast_addr);
++#endif
++#ifdef CONFIG_IPV6_MOBILITY_MN_MODULE
++#endif
++EXPORT_SYMBOL(addrconf_add_ifaddr);
++EXPORT_SYMBOL(addrconf_del_ifaddr);
++EXPORT_SYMBOL(addrconf_dad_start);
++EXPORT_SYMBOL(ip6_del_rt);
++EXPORT_SYMBOL(ip6_routing_table);
++EXPORT_SYMBOL(rt6_get_dflt_router);
++EXPORT_SYMBOL(rt6_purge_dflt_routers);
++EXPORT_SYMBOL(rt6_lock);
++EXPORT_SYMBOL(ndisc_send_rs);
++EXPORT_SYMBOL(fib6_clean_tree);
++EXPORT_SYMBOL(ipv6_del_addr);
++EXPORT_SYMBOL(ipv6_generate_eui64);
++EXPORT_SYMBOL(ipv6_inherit_eui64);
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/ipv6_tunnel.c linux-2.4.25/net/ipv6/ipv6_tunnel.c
+--- linux-2.4.25.old/net/ipv6/ipv6_tunnel.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/ipv6_tunnel.c 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,1604 @@
++/*
++ * IPv6 over IPv6 tunnel device
++ * Linux INET6 implementation
++ *
++ * Authors:
++ * Ville Nuorvala <vnuorval@tcs.hut.fi>
++ *
++ * $Id$
++ *
++ * Based on:
++ * linux/net/ipv6/sit.c
++ *
++ * RFC 2473
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/socket.h>
++#include <linux/sockios.h>
++#include <linux/if.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/if_tunnel.h>
++#include <linux/net.h>
++#include <linux/in6.h>
++#include <linux/netdevice.h>
++#include <linux/if_arp.h>
++#include <linux/icmpv6.h>
++#include <linux/init.h>
++#include <linux/route.h>
++#include <linux/rtnetlink.h>
++#include <linux/tqueue.h>
++
++#include <asm/uaccess.h>
++#include <asm/atomic.h>
++
++#include <net/sock.h>
++#include <net/ipv6.h>
++#include <net/protocol.h>
++#include <net/ip6_route.h>
++#include <net/addrconf.h>
++#include <net/ipv6_tunnel.h>
++
++MODULE_AUTHOR("Ville Nuorvala");
++MODULE_DESCRIPTION("IPv6-in-IPv6 tunnel");
++MODULE_LICENSE("GPL");
++
++#define IPV6_TLV_TEL_DST_SIZE 8
++
++#ifdef IP6_TNL_DEBUG
++#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __FUNCTION__)
++#else
++#define IP6_TNL_TRACE(x...) do {;} while(0)
++#endif
++
++#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
++
++#define HASH_SIZE 32
++
++#define HASH(addr) (((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
++ (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
++ (HASH_SIZE - 1))
++
++static int ip6ip6_fb_tnl_dev_init(struct net_device *dev);
++static int ip6ip6_tnl_dev_init(struct net_device *dev);
++
++/* the IPv6 IPv6 tunnel fallback device */
++static struct net_device ip6ip6_fb_tnl_dev = {
++ name: "ip6tnl0",
++ init: ip6ip6_fb_tnl_dev_init
++};
++
++/* the IPv6 IPv6 fallback tunnel */
++static struct ip6_tnl ip6ip6_fb_tnl = {
++ dev: &ip6ip6_fb_tnl_dev,
++ parms:{name: "ip6tnl0", proto: IPPROTO_IPV6}
++};
++
++/* lists for storing tunnels in use */
++static struct ip6_tnl *tnls_r_l[HASH_SIZE];
++static struct ip6_tnl *tnls_wc[1];
++static struct ip6_tnl **tnls[2] = { tnls_wc, tnls_r_l };
++
++/* list for unused cached kernel tunnels */
++static struct ip6_tnl *tnls_kernel[1];
++/* maximum number of cached kernel tunnels */
++static unsigned int max_kdev_count = 0;
++/* minimum number of cached kernel tunnels */
++static unsigned int min_kdev_count = 0;
++/* current number of cached kernel tunnels */
++static unsigned int kdev_count = 0;
++
++/* lists for tunnel hook functions */
++static struct list_head hooks[IP6_TNL_MAXHOOKS];
++
++/* locks for the different lists */
++static rwlock_t ip6ip6_lock = RW_LOCK_UNLOCKED;
++static rwlock_t ip6ip6_kernel_lock = RW_LOCK_UNLOCKED;
++static rwlock_t ip6ip6_hook_lock = RW_LOCK_UNLOCKED;
++
++/* flag indicating if the module is being removed */
++static int shutdown = 0;
++
++/**
++ * ip6ip6_tnl_lookup - fetch tunnel matching the end-point addresses
++ * @remote: the address of the tunnel exit-point
++ * @local: the address of the tunnel entry-point
++ *
++ * Return:
++ * tunnel matching given end-points if found,
++ * else fallback tunnel if its device is up,
++ * else %NULL
++ **/
++
++struct ip6_tnl *
++ip6ip6_tnl_lookup(struct in6_addr *remote, struct in6_addr *local)
++{
++ unsigned h0 = HASH(remote);
++ unsigned h1 = HASH(local);
++ struct ip6_tnl *t;
++
++ for (t = tnls_r_l[h0 ^ h1]; t; t = t->next) {
++ if (!ipv6_addr_cmp(local, &t->parms.laddr) &&
++ !ipv6_addr_cmp(remote, &t->parms.raddr) &&
++ (t->dev->flags & IFF_UP))
++ return t;
++ }
++ if ((t = tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP))
++ return t;
++
++ return NULL;
++}
++
++/**
++ * ip6ip6_bucket - get head of list matching given tunnel parameters
++ * @p: parameters containing tunnel end-points
++ *
++ * Description:
++ * ip6ip6_bucket() returns the head of the list matching the
++ * &struct in6_addr entries laddr and raddr in @p.
++ *
++ * Return: head of IPv6 tunnel list
++ **/
++
++static struct ip6_tnl **
++ip6ip6_bucket(struct ip6_tnl_parm *p)
++{
++ struct in6_addr *remote = &p->raddr;
++ struct in6_addr *local = &p->laddr;
++ unsigned h = 0;
++ int prio = 0;
++
++ if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
++ prio = 1;
++ h = HASH(remote) ^ HASH(local);
++ }
++ return &tnls[prio][h];
++}
++
++/**
++ * ip6ip6_kernel_tnl_link - add new kernel tunnel to cache
++ * @t: kernel tunnel
++ *
++ * Note:
++ * %IP6_TNL_F_KERNEL_DEV is assumed to be raised in t->parms.flags.
++ * See the comments on ip6ip6_kernel_tnl_add() for more information.
++ **/
++
++static inline void
++ip6ip6_kernel_tnl_link(struct ip6_tnl *t)
++{
++ write_lock_bh(&ip6ip6_kernel_lock);
++ t->next = tnls_kernel[0];
++ tnls_kernel[0] = t;
++ kdev_count++;
++ write_unlock_bh(&ip6ip6_kernel_lock);
++}
++
++/**
++ * ip6ip6_kernel_tnl_unlink - remove first kernel tunnel from cache
++ *
++ * Return: first free kernel tunnel
++ *
++ * Note:
++ * See the comments on ip6ip6_kernel_tnl_add() for more information.
++ **/
++
++static inline struct ip6_tnl *
++ip6ip6_kernel_tnl_unlink(void)
++{
++ struct ip6_tnl *t;
++
++ write_lock_bh(&ip6ip6_kernel_lock);
++ if ((t = tnls_kernel[0]) != NULL) {
++ tnls_kernel[0] = t->next;
++ kdev_count--;
++ }
++ write_unlock_bh(&ip6ip6_kernel_lock);
++ return t;
++}
++
++/**
++ * ip6ip6_tnl_link - add tunnel to hash table
++ * @t: tunnel to be added
++ **/
++
++static void
++ip6ip6_tnl_link(struct ip6_tnl *t)
++{
++ struct ip6_tnl **tp = ip6ip6_bucket(&t->parms);
++
++ write_lock_bh(&ip6ip6_lock);
++ t->next = *tp;
++ *tp = t;
++ write_unlock_bh(&ip6ip6_lock);
++}
++
++/**
++ * ip6ip6_tnl_unlink - remove tunnel from hash table
++ * @t: tunnel to be removed
++ **/
++
++static void
++ip6ip6_tnl_unlink(struct ip6_tnl *t)
++{
++ struct ip6_tnl **tp;
++
++ write_lock_bh(&ip6ip6_lock);
++ for (tp = ip6ip6_bucket(&t->parms); *tp; tp = &(*tp)->next) {
++ if (t == *tp) {
++ *tp = t->next;
++ break;
++ }
++ }
++ write_unlock_bh(&ip6ip6_lock);
++}
++
++/**
++ * ip6ip6_tnl_create() - create a new tunnel
++ * @p: tunnel parameters
++ * @pt: pointer to new tunnel
++ *
++ * Description:
++ * Create tunnel matching given parameters. New kernel managed devices are
++ * not put in the normal hash structure, but are instead cached for later
++ * use.
++ *
++ * Return:
++ * 0 on success
++ **/
++
++
++static int __ip6ip6_tnl_create(struct ip6_tnl_parm *p,
++ struct ip6_tnl **pt,
++ int kernel_list)
++{
++ struct net_device *dev;
++ int err = -ENOBUFS;
++ struct ip6_tnl *t;
++
++ MOD_INC_USE_COUNT;
++ dev = kmalloc(sizeof (*dev) + sizeof (*t), GFP_KERNEL);
++ if (!dev) {
++ MOD_DEC_USE_COUNT;
++ return err;
++ }
++ memset(dev, 0, sizeof (*dev) + sizeof (*t));
++ dev->priv = (void *) (dev + 1);
++ t = (struct ip6_tnl *) dev->priv;
++ t->dev = dev;
++ dev->init = ip6ip6_tnl_dev_init;
++ dev->features |= NETIF_F_DYNALLOC;
++ if (kernel_list) {
++ memcpy(t->parms.name, p->name, IFNAMSIZ - 1);
++ t->parms.proto = IPPROTO_IPV6;
++ t->parms.flags = IP6_TNL_F_KERNEL_DEV;
++ } else {
++ memcpy(&t->parms, p, sizeof (*p));
++ }
++ t->parms.name[IFNAMSIZ - 1] = '\0';
++ strcpy(dev->name, t->parms.name);
++ if (!dev->name[0]) {
++ int i;
++ for (i = 0; i < IP6_TNL_MAX; i++) {
++ sprintf(dev->name, "ip6tnl%d", i);
++ if (__dev_get_by_name(dev->name) == NULL)
++ break;
++ }
++
++ if (i == IP6_TNL_MAX) {
++ goto failed;
++ }
++ memcpy(t->parms.name, dev->name, IFNAMSIZ);
++ }
++ if ((err = register_netdevice(dev)) < 0) {
++ goto failed;
++ }
++ dev_hold(dev);
++ if (kernel_list) {
++ ip6ip6_kernel_tnl_link(t);
++ } else {
++ ip6ip6_tnl_link(t);
++ }
++ *pt = t;
++ return 0;
++failed:
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ return err;
++}
++
++
++int ip6ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt)
++{
++ return __ip6ip6_tnl_create(p, pt, 0);
++}
++
++
++static void manage_kernel_tnls(void *foo);
++
++static struct tq_struct manager_task = {
++ routine:manage_kernel_tnls,
++ data:NULL
++};
++
++/**
++ * manage_kernel_tnls() - create and destroy kernel tunnels
++ *
++ * Description:
++ * manage_kernel_tnls() creates new kernel devices if there
++ * are less than $min_kdev_count of them and deletes old ones if
++ * there are more than $max_kdev_count of them in the cache
++ *
++ * Note:
++ * Schedules itself to be run later in process context if called from
++ * interrupt. Therefore only works synchronously when called from process
++ * context.
++ **/
++
++static void
++manage_kernel_tnls(void *foo)
++{
++ struct ip6_tnl *t = NULL;
++ struct ip6_tnl_parm parm;
++
++ /* We can't do this processing in interrupt
++ context so schedule it for later */
++ if (in_interrupt()) {
++ read_lock(&ip6ip6_kernel_lock);
++ if (!shutdown &&
++ (kdev_count < min_kdev_count ||
++ kdev_count > max_kdev_count)) {
++ schedule_task(&manager_task);
++ }
++ read_unlock(&ip6ip6_kernel_lock);
++ return;
++ }
++
++ rtnl_lock();
++ read_lock_bh(&ip6ip6_kernel_lock);
++ memset(&parm, 0, sizeof (parm));
++ parm.flags = IP6_TNL_F_KERNEL_DEV;
++ /* Create tunnels until there are at least min_kdev_count */
++ while (kdev_count < min_kdev_count) {
++ read_unlock_bh(&ip6ip6_kernel_lock);
++ if (!__ip6ip6_tnl_create(&parm, &t, 1)) {
++ dev_open(t->dev);
++ } else {
++ goto err;
++ }
++ read_lock_bh(&ip6ip6_kernel_lock);
++ }
++
++ /* Destroy tunnels until there are at most max_kdev_count */
++ while (kdev_count > max_kdev_count) {
++ read_unlock_bh(&ip6ip6_kernel_lock);
++ if ((t = ip6ip6_kernel_tnl_unlink()) != NULL) {
++ unregister_netdevice(t->dev);
++ } else {
++ goto err;
++ }
++ read_lock_bh(&ip6ip6_kernel_lock);
++ }
++ read_unlock_bh(&ip6ip6_kernel_lock);
++err:
++ rtnl_unlock();
++}
++
++/**
++ * ip6ip6_tnl_inc_max_kdev_count() - increase max kernel dev cache size
++ * @n: size increase
++ * Description:
++ * Increase the upper limit for the number of kernel devices allowed in the
++ * cache at any one time.
++ **/
++
++unsigned int
++ip6ip6_tnl_inc_max_kdev_count(unsigned int n)
++{
++ write_lock_bh(&ip6ip6_kernel_lock);
++ max_kdev_count += n;
++ write_unlock_bh(&ip6ip6_kernel_lock);
++ manage_kernel_tnls(NULL);
++ return max_kdev_count;
++}
++
++/**
++ * ip6ip6_tnl_dec_max_kdev_count() - decrease max kernel dev cache size
++ * @n: size decrement
++ * Description:
++ * Decrease the upper limit for the number of kernel devices allowed in the
++ * cache at any one time.
++ **/
++
++unsigned int
++ip6ip6_tnl_dec_max_kdev_count(unsigned int n)
++{
++ write_lock_bh(&ip6ip6_kernel_lock);
++ max_kdev_count -= min(max_kdev_count, n);
++ if (max_kdev_count < min_kdev_count)
++ min_kdev_count = max_kdev_count;
++ write_unlock_bh(&ip6ip6_kernel_lock);
++ manage_kernel_tnls(NULL);
++ return max_kdev_count;
++}
++
++/**
++ * ip6ip6_tnl_inc_min_kdev_count() - increase min kernel dev cache size
++ * @n: size increase
++ * Description:
++ * Increase the lower limit for the number of kernel devices allowed in the
++ * cache at any one time.
++ **/
++
++unsigned int
++ip6ip6_tnl_inc_min_kdev_count(unsigned int n)
++{
++ write_lock_bh(&ip6ip6_kernel_lock);
++ min_kdev_count += n;
++ if (min_kdev_count > max_kdev_count)
++ max_kdev_count = min_kdev_count;
++ write_unlock_bh(&ip6ip6_kernel_lock);
++ manage_kernel_tnls(NULL);
++ return min_kdev_count;
++}
++
++/**
++ * ip6ip6_tnl_dec_min_kdev_count() - decrease min kernel dev cache size
++ * @n: size decrement
++ * Description:
++ * Decrease the lower limit for the number of kernel devices allowed in the
++ * cache at any one time.
++ **/
++
++unsigned int
++ip6ip6_tnl_dec_min_kdev_count(unsigned int n)
++{
++ write_lock_bh(&ip6ip6_kernel_lock);
++ min_kdev_count -= min(min_kdev_count, n);
++ write_unlock_bh(&ip6ip6_kernel_lock);
++ manage_kernel_tnls(NULL);
++ return min_kdev_count;
++}
++
++/**
++ * ip6ip6_tnl_locate - find or create tunnel matching given parameters
++ * @p: tunnel parameters
++ * @create: != 0 if allowed to create new tunnel if no match found
++ *
++ * Description:
++ * ip6ip6_tnl_locate() first tries to locate an existing tunnel
++ * based on @parms. If this is unsuccessful, but @create is set a new
++ * tunnel device is created and registered for use.
++ *
++ * Return:
++ * 0 if tunnel located or created,
++ * -EINVAL if parameters incorrect,
++ * -ENODEV if no matching tunnel available
++ **/
++
++int ip6ip6_tnl_locate(struct ip6_tnl_parm *p, struct ip6_tnl **pt, int create)
++{
++ struct in6_addr *remote = &p->raddr;
++ struct in6_addr *local = &p->laddr;
++ struct ip6_tnl *t;
++
++ if (p->proto != IPPROTO_IPV6)
++ return -EINVAL;
++
++ for (t = *ip6ip6_bucket(p); t; t = t->next) {
++ if (!ipv6_addr_cmp(local, &t->parms.laddr) &&
++ !ipv6_addr_cmp(remote, &t->parms.raddr)) {
++ *pt = t;
++ return (create ? -EEXIST : 0);
++ }
++ }
++ return ip6ip6_tnl_create(p, pt);
++}
++
++/**
++ * ip6ip6_tnl_dev_destructor - tunnel device destructor
++ * @dev: the device to be destroyed
++ **/
++
++static void
++ip6ip6_tnl_dev_destructor(struct net_device *dev)
++{
++ if (dev != &ip6ip6_fb_tnl_dev) {
++ MOD_DEC_USE_COUNT;
++ }
++}
++
++/**
++ * ip6ip6_tnl_dev_uninit - tunnel device uninitializer
++ * @dev: the device to be destroyed
++ *
++ * Description:
++ * ip6ip6_tnl_dev_uninit() removes tunnel from its list
++ **/
++
++static void
++ip6ip6_tnl_dev_uninit(struct net_device *dev)
++{
++ struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
++
++ if (dev == &ip6ip6_fb_tnl_dev) {
++ write_lock_bh(&ip6ip6_lock);
++ tnls_wc[0] = NULL;
++ write_unlock_bh(&ip6ip6_lock);
++ } else {
++ ip6ip6_tnl_unlink(t);
++ }
++ sock_release(t->sock);
++ dev_put(dev);
++}
++
++/**
++ * parse_tlv_tnl_enc_lim - handle encapsulation limit option
++ * @skb: received socket buffer
++ *
++ * Return:
++ * 0 if none was found,
++ * else index to encapsulation limit
++ **/
++
++static __u16
++parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
++{
++ struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw;
++ __u8 nexthdr = ipv6h->nexthdr;
++ __u16 off = sizeof (*ipv6h);
++
++ while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
++ __u16 optlen = 0;
++ struct ipv6_opt_hdr *hdr;
++ if (raw + off + sizeof (*hdr) > skb->data &&
++ !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
++ break;
++
++ hdr = (struct ipv6_opt_hdr *) (raw + off);
++ if (nexthdr == NEXTHDR_FRAGMENT) {
++ struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
++ if (frag_hdr->frag_off)
++ break;
++ optlen = 8;
++ } else if (nexthdr == NEXTHDR_AUTH) {
++ optlen = (hdr->hdrlen + 2) << 2;
++ } else {
++ optlen = ipv6_optlen(hdr);
++ }
++ if (nexthdr == NEXTHDR_DEST) {
++ __u16 i = off + 2;
++ while (1) {
++ struct ipv6_tlv_tnl_enc_lim *tel;
++
++ /* No more room for encapsulation limit */
++ if (i + sizeof (*tel) > off + optlen)
++ break;
++
++ tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
++ /* return index of option if found and valid */
++ if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
++ tel->length == 1)
++ return i;
++ /* else jump to next option */
++ if (tel->type)
++ i += tel->length + 2;
++ else
++ i++;
++ }
++ }
++ nexthdr = hdr->nexthdr;
++ off += optlen;
++ }
++ return 0;
++}
++
++/**
++ * ip6ip6_err - tunnel error handler
++ *
++ * Description:
++ * ip6ip6_err() should handle errors in the tunnel according
++ * to the specifications in RFC 2473.
++ **/
++
++void ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
++ int type, int code, int offset, __u32 info)
++{
++ struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
++ struct ip6_tnl *t;
++ int rel_msg = 0;
++ int rel_type = ICMPV6_DEST_UNREACH;
++ int rel_code = ICMPV6_ADDR_UNREACH;
++ __u32 rel_info = 0;
++ __u16 len;
++
++ /* If the packet doesn't contain the original IPv6 header we are
++   in trouble since we might need the source address for further
++ processing of the error. */
++
++ read_lock(&ip6ip6_lock);
++ if ((t = ip6ip6_tnl_lookup(&ipv6h->daddr, &ipv6h->saddr)) == NULL)
++ goto out;
++
++ switch (type) {
++ __u32 teli;
++ struct ipv6_tlv_tnl_enc_lim *tel;
++ __u32 mtu;
++ case ICMPV6_DEST_UNREACH:
++ if (net_ratelimit())
++ printk(KERN_WARNING
++ "%s: Path to destination invalid "
++ "or inactive!\n", t->parms.name);
++ rel_msg = 1;
++ break;
++ case ICMPV6_TIME_EXCEED:
++ if (code == ICMPV6_EXC_HOPLIMIT) {
++ if (net_ratelimit())
++ printk(KERN_WARNING
++ "%s: Too small hop limit or "
++ "routing loop in tunnel!\n",
++ t->parms.name);
++ rel_msg = 1;
++ }
++ break;
++ case ICMPV6_PARAMPROB:
++ /* ignore if parameter problem not caused by a tunnel
++ encapsulation limit sub-option */
++ if (code != ICMPV6_HDR_FIELD) {
++ break;
++ }
++ teli = parse_tlv_tnl_enc_lim(skb, skb->data);
++
++ if (teli && teli == ntohl(info) - 2) {
++ tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
++ if (tel->encap_limit == 0) {
++ if (net_ratelimit())
++ printk(KERN_WARNING
++ "%s: Too small encapsulation "
++ "limit or routing loop in "
++ "tunnel!\n", t->parms.name);
++ rel_msg = 1;
++ }
++ }
++ break;
++ case ICMPV6_PKT_TOOBIG:
++ mtu = ntohl(info) - offset;
++ if (mtu < IPV6_MIN_MTU)
++ mtu = IPV6_MIN_MTU;
++ t->dev->mtu = mtu;
++
++ if ((len = sizeof (*ipv6h) + ipv6h->payload_len) > mtu) {
++ rel_type = ICMPV6_PKT_TOOBIG;
++ rel_code = 0;
++ rel_info = mtu;
++ rel_msg = 1;
++ }
++ break;
++ }
++ if (rel_msg && pskb_may_pull(skb, offset + sizeof (*ipv6h))) {
++ struct rt6_info *rt;
++ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
++ if (!skb2)
++ goto out;
++
++ dst_release(skb2->dst);
++ skb2->dst = NULL;
++ skb_pull(skb2, offset);
++ skb2->nh.raw = skb2->data;
++
++ /* Try to guess incoming interface */
++ rt = rt6_lookup(&skb2->nh.ipv6h->saddr, NULL, 0, 0);
++
++ if (rt && rt->rt6i_dev)
++ skb2->dev = rt->rt6i_dev;
++
++ icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);
++
++ if (rt)
++ dst_release(&rt->u.dst);
++
++ kfree_skb(skb2);
++ }
++out:
++ read_unlock(&ip6ip6_lock);
++}
++
++/**
++ * call_hooks - call ipv6 tunnel hooks
++ * @hooknum: hook number, either %IP6_TNL_PRE_ENCAP, or
++ * %IP6_TNL_PRE_DECAP
++ * @t: the current tunnel
++ * @skb: the tunneled packet
++ *
++ * Description:
++ * Pass packet to all the hook functions until %IP6_TNL_DROP
++ *
++ * Return:
++ * %IP6_TNL_ACCEPT or %IP6_TNL_DROP
++ **/
++
++static inline int
++call_hooks(unsigned int hooknum, struct ip6_tnl *t, struct sk_buff *skb)
++{
++ struct ip6_tnl_hook_ops *h;
++ int accept = IP6_TNL_ACCEPT;
++
++ if (hooknum < IP6_TNL_MAXHOOKS) {
++ struct list_head *i;
++ read_lock(&ip6ip6_hook_lock);
++ for (i = hooks[hooknum].next; i != &hooks[hooknum]; i = i->next) {
++ h = (struct ip6_tnl_hook_ops *) i;
++
++ if (h->hook) {
++ accept = h->hook(t, skb);
++
++ if (accept != IP6_TNL_ACCEPT)
++ break;
++ }
++ }
++ read_unlock(&ip6ip6_hook_lock);
++ }
++ return accept;
++}
++
++/**
++ * ip6ip6_rcv - decapsulate IPv6 packet and retransmit it locally
++ * @skb: received socket buffer
++ *
++ * Return: 0
++ **/
++
++int ip6ip6_rcv(struct sk_buff *skb)
++{
++ struct ipv6hdr *ipv6h;
++ struct ip6_tnl *t;
++
++ if (!pskb_may_pull(skb, sizeof (*ipv6h)))
++ goto discard;
++
++ ipv6h = skb->nh.ipv6h;
++
++ read_lock(&ip6ip6_lock);
++
++ if ((t = ip6ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
++ if (!(t->parms.flags & IP6_TNL_F_CAP_RCV) ||
++ call_hooks(IP6_TNL_PRE_DECAP, t, skb) != IP6_TNL_ACCEPT) {
++ t->stat.rx_dropped++;
++ read_unlock(&ip6ip6_lock);
++ goto discard;
++ }
++ skb->mac.raw = skb->nh.raw;
++ skb->nh.raw = skb->data;
++ skb->protocol = htons(ETH_P_IPV6);
++ skb->pkt_type = PACKET_HOST;
++ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
++ skb->dev = t->dev;
++ dst_release(skb->dst);
++ skb->dst = NULL;
++ t->stat.rx_packets++;
++ t->stat.rx_bytes += skb->len;
++ netif_rx(skb);
++ read_unlock(&ip6ip6_lock);
++ return 0;
++ }
++ read_unlock(&ip6ip6_lock);
++ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
++discard:
++ kfree_skb(skb);
++ return 0;
++}
++
++static inline struct ipv6_txoptions *create_tel(__u8 encap_limit)
++{
++ struct ipv6_tlv_tnl_enc_lim *tel;
++ struct ipv6_txoptions *opt;
++ __u8 *raw;
++
++ int opt_len = sizeof(*opt) + IPV6_TLV_TEL_DST_SIZE;
++
++ if (!(opt = kmalloc(opt_len, GFP_ATOMIC))) {
++ return NULL;
++ }
++ memset(opt, 0, opt_len);
++ opt->tot_len = opt_len;
++ opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1);
++ opt->opt_nflen = 8;
++
++ tel = (struct ipv6_tlv_tnl_enc_lim *) (opt->dst0opt + 1);
++ tel->type = IPV6_TLV_TNL_ENCAP_LIMIT;
++ tel->length = 1;
++ tel->encap_limit = encap_limit;
++
++ raw = (__u8 *) opt->dst0opt;
++ raw[5] = IPV6_TLV_PADN;
++ raw[6] = 1;
++
++ return opt;
++}
++
++static int
++ip6ip6_getfrag(const void *data, struct in6_addr *addr,
++ char *buff, unsigned int offset, unsigned int len)
++{
++ memcpy(buff, data + offset, len);
++ return 0;
++}
++
++/**
++ * ip6ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
++ * @t: the outgoing tunnel device
++ * @hdr: IPv6 header from the incoming packet
++ *
++ * Description:
++ * Avoid trivial tunneling loop by checking that tunnel exit-point
++ * doesn't match source of incoming packet.
++ *
++ * Return:
++ * 1 if conflict,
++ * 0 else
++ **/
++
++static inline int
++ip6ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr)
++{
++ return !ipv6_addr_cmp(&t->parms.raddr, &hdr->saddr);
++}
++
++/**
++ * ip6ip6_tnl_xmit - encapsulate packet and send
++ * @skb: the outgoing socket buffer
++ * @dev: the outgoing tunnel device
++ *
++ * Description:
++ * Build new header and do some sanity checks on the packet before sending
++ * it to ip6_build_xmit().
++ *
++ * Return:
++ * 0
++ **/
++
++int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
++ struct net_device_stats *stats = &t->stat;
++ struct ipv6hdr *ipv6h = skb->nh.ipv6h;
++ struct ipv6_txoptions *opt = NULL;
++ int encap_limit = -1;
++ __u16 offset;
++ struct flowi fl;
++ int err = 0;
++ struct dst_entry *dst;
++ struct sock *sk = t->sock->sk;
++ struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
++ int mtu;
++
++ if (t->recursion++) {
++ stats->collisions++;
++ goto tx_err;
++ }
++ if (skb->protocol != htons(ETH_P_IPV6) ||
++ !(t->parms.flags & IP6_TNL_F_CAP_XMIT) ||
++ ip6ip6_tnl_addr_conflict(t, ipv6h)) {
++ goto tx_err;
++ }
++ if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
++ struct ipv6_tlv_tnl_enc_lim *tel;
++ tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
++ if (tel->encap_limit == 0) {
++ icmpv6_send(skb, ICMPV6_PARAMPROB,
++ ICMPV6_HDR_FIELD, offset + 2, skb->dev);
++ goto tx_err;
++ }
++ encap_limit = tel->encap_limit - 1;
++ } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
++ encap_limit = t->parms.encap_limit;
++ }
++ if (call_hooks(IP6_TNL_PRE_ENCAP, t, skb) != IP6_TNL_ACCEPT)
++ goto discard;
++ memcpy(&fl, &t->fl, sizeof (fl));
++
++ if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
++ fl.fl6_flowlabel |= (*(__u32 *) ipv6h & IPV6_TCLASS_MASK);
++ if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
++ fl.fl6_flowlabel |= (*(__u32 *) ipv6h & IPV6_FLOWLABEL_MASK);
++
++ if (encap_limit >= 0 && (opt = create_tel(encap_limit)) == NULL)
++ goto tx_err;
++
++ dst = __sk_dst_check(sk, np->dst_cookie);
++
++ if (dst) {
++ if (np->daddr_cache == NULL ||
++ ipv6_addr_cmp(fl.fl6_dst, np->daddr_cache) ||
++#ifdef CONFIG_IPV6_SUBTREES
++ np->saddr_cache == NULL ||
++ ipv6_addr_cmp(fl.fl6_src, np->saddr_cache) ||
++#endif
++ (fl.oif && fl.oif != dst->dev->ifindex)) {
++ dst = NULL;
++ } else {
++ dst_hold(dst);
++ }
++ }
++ if (dst == NULL) {
++ dst = ip6_route_output(sk, &fl);
++ if (dst->error) {
++ stats->tx_carrier_errors++;
++ dst_link_failure(skb);
++ goto tx_err_dst_release;
++ }
++ /* local routing loop */
++ if (dst->dev == dev) {
++ stats->collisions++;
++ if (net_ratelimit())
++ printk(KERN_WARNING
++ "%s: Local routing loop detected!\n",
++ t->parms.name);
++ goto tx_err_dst_release;
++ }
++ }
++ mtu = dst->pmtu - sizeof (*ipv6h);
++ if (opt) {
++ mtu -= (opt->opt_nflen + opt->opt_flen);
++ }
++ if (mtu < IPV6_MIN_MTU)
++ mtu = IPV6_MIN_MTU;
++ if (skb->dst && mtu < skb->dst->pmtu) {
++ struct rt6_info *rt = (struct rt6_info *) skb->dst;
++ rt->rt6i_flags |= RTF_MODIFIED;
++ rt->u.dst.pmtu = mtu;
++ }
++ if (skb->len > mtu) {
++ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
++ goto tx_err_dst_release;
++ }
++ ip6_dst_store(sk, dst, &np->daddr, &np->saddr);
++ err = ip6_build_xmit(sk, ip6ip6_getfrag, (void *) skb->nh.raw,
++ &fl, skb->len, opt, t->parms.hop_limit,
++ MSG_DONTWAIT);
++
++ if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) {
++ stats->tx_bytes += skb->len;
++ stats->tx_packets++;
++ } else {
++ stats->tx_errors++;
++ stats->tx_aborted_errors++;
++ }
++ if (opt)
++ kfree(opt);
++ kfree_skb(skb);
++ t->recursion--;
++ return 0;
++tx_err_dst_release:
++ dst_release(dst);
++ if (opt)
++ kfree(opt);
++tx_err:
++ stats->tx_errors++;
++discard:
++ stats->tx_dropped++;
++ kfree_skb(skb);
++ t->recursion--;
++ return 0;
++}
++
++static void ip6_tnl_set_cap(struct ip6_tnl *t)
++{
++ struct ip6_tnl_parm *p = &t->parms;
++ struct in6_addr *laddr = &p->laddr;
++ struct in6_addr *raddr = &p->raddr;
++ int ltype = ipv6_addr_type(laddr);
++ int rtype = ipv6_addr_type(raddr);
++
++ p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);
++
++ if (ltype != IPV6_ADDR_ANY && rtype != IPV6_ADDR_ANY &&
++ ((ltype|rtype) &
++ (IPV6_ADDR_UNICAST|
++ IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL|
++ IPV6_ADDR_MAPPED|IPV6_ADDR_RESERVED)) == IPV6_ADDR_UNICAST) {
++ struct net_device *ldev = NULL;
++ int l_ok = 1;
++ int r_ok = 1;
++
++ if (p->link)
++ ldev = dev_get_by_index(p->link);
++
++ if ((ltype&IPV6_ADDR_UNICAST) && !ipv6_chk_addr(laddr, ldev))
++ l_ok = 0;
++
++ if ((rtype&IPV6_ADDR_UNICAST) && ipv6_chk_addr(raddr, NULL))
++ r_ok = 0;
++
++ if (l_ok && r_ok) {
++ if (ltype&IPV6_ADDR_UNICAST)
++ p->flags |= IP6_TNL_F_CAP_XMIT;
++ if (rtype&IPV6_ADDR_UNICAST)
++ p->flags |= IP6_TNL_F_CAP_RCV;
++ }
++ if (ldev)
++ dev_put(ldev);
++ }
++}
++
++static void ip6ip6_tnl_link_config(struct ip6_tnl *t)
++{
++ struct net_device *dev = t->dev;
++ struct ip6_tnl_parm *p = &t->parms;
++ struct flowi *fl = &t->fl;
++
++ /* Set up flowi template */
++ fl->fl6_src = &p->laddr;
++ fl->fl6_dst = &p->raddr;
++ fl->oif = p->link;
++ fl->fl6_flowlabel = 0;
++
++ if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
++ fl->fl6_flowlabel |= IPV6_TCLASS_MASK & htonl(p->flowinfo);
++ if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
++ fl->fl6_flowlabel |= IPV6_FLOWLABEL_MASK & htonl(p->flowinfo);
++
++ ip6_tnl_set_cap(t);
++
++ if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
++ dev->flags |= IFF_POINTOPOINT;
++ else
++ dev->flags &= ~IFF_POINTOPOINT;
++
++ if (p->flags & IP6_TNL_F_CAP_XMIT) {
++ struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr,
++ p->link, 0);
++
++ if (rt == NULL)
++ return;
++
++ if (rt->rt6i_dev) {
++ dev->iflink = rt->rt6i_dev->ifindex;
++
++ dev->hard_header_len = rt->rt6i_dev->hard_header_len +
++ sizeof (struct ipv6hdr);
++
++ dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
++
++ if (dev->mtu < IPV6_MIN_MTU)
++ dev->mtu = IPV6_MIN_MTU;
++ }
++ dst_release(&rt->u.dst);
++ }
++}
++
++/**
++ * __ip6ip6_tnl_change - update the tunnel parameters
++ * @t: tunnel to be changed
++ * @p: tunnel configuration parameters
++ *
++ * Description:
++ * __ip6ip6_tnl_change() updates the tunnel parameters
++ **/
++
++static void
++__ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
++{
++ ipv6_addr_copy(&t->parms.laddr, &p->laddr);
++ ipv6_addr_copy(&t->parms.raddr, &p->raddr);
++ t->parms.flags = p->flags;
++ t->parms.hop_limit = p->hop_limit;
++ t->parms.encap_limit = p->encap_limit;
++ t->parms.flowinfo = p->flowinfo;
++ ip6ip6_tnl_link_config(t);
++}
++
++void ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
++{
++ ip6ip6_tnl_unlink(t);
++ __ip6ip6_tnl_change(t, p);
++ ip6ip6_tnl_link(t);
++}
++
++/**
++ * ip6ip6_kernel_tnl_add - configure and add kernel tunnel to hash
++ * @p: kernel tunnel configuration parameters
++ *
++ * Description:
++ * ip6ip6_kernel_tnl_add() fetches an unused kernel tunnel and configures
++ * it according to @p and places it among the active tunnels.
++ *
++ * Return:
++ * number of references to tunnel on success,
++ * %-EEXIST if there is already a device matching description
++ * %-EINVAL if p->flags doesn't have %IP6_TNL_F_KERNEL_DEV raised,
++ * %-ENODEV if there are no unused kernel tunnels available
++ *
++ * Note:
++ * The code for creating, opening, closing and destroying network devices
++ * must be called from process context, while the Mobile IP code, which
++ * needs the tunnel devices, unfortunately runs in interrupt context.
++ *
++ * The devices must be created and opened in advance, then placed in a
++ * list where the kernel can fetch and ready them for use at a later time.
++ *
++ **/
++
++int
++ip6ip6_kernel_tnl_add(struct ip6_tnl_parm *p)
++{
++ struct ip6_tnl *t;
++
++ if (!(p->flags & IP6_TNL_F_KERNEL_DEV))
++ return -EINVAL;
++ if ((t = ip6ip6_tnl_lookup(&p->raddr, &p->laddr)) != NULL &&
++ t != &ip6ip6_fb_tnl) {
++ /* Handle duplicate tunnels by incrementing
++ reference count */
++ atomic_inc(&t->refcnt);
++ goto out;
++ }
++ if ((t = ip6ip6_kernel_tnl_unlink()) == NULL)
++ return -ENODEV;
++ __ip6ip6_tnl_change(t, p);
++
++ atomic_inc(&t->refcnt);
++
++ ip6ip6_tnl_link(t);
++
++ manage_kernel_tnls(NULL);
++out:
++ return atomic_read(&t->refcnt);
++}
++
++/**
++ * ip6ip6_kernel_tnl_del - delete no longer needed kernel tunnel
++ * @t: kernel tunnel to be removed from hash
++ *
++ * Description:
++ * ip6ip6_kernel_tnl_del() removes and deconfigures the tunnel @t
++ * and places it among the unused kernel devices.
++ *
++ * Return:
++ * number of references on success,
++ * %-EINVAL if p->flags doesn't have %IP6_TNL_F_KERNEL_DEV raised,
++ *
++ * Note:
++ * See the comments on ip6ip6_kernel_tnl_add() for more information.
++ **/
++
++int
++ip6ip6_kernel_tnl_del(struct ip6_tnl *t)
++{
++ if (!t)
++ return -ENODEV;
++
++ if (!(t->parms.flags & IP6_TNL_F_KERNEL_DEV))
++ return -EINVAL;
++
++ if (atomic_dec_and_test(&t->refcnt)) {
++ struct ip6_tnl_parm p;
++ ip6ip6_tnl_unlink(t);
++ memset(&p, 0, sizeof (p));
++ p.flags = IP6_TNL_F_KERNEL_DEV;
++
++ __ip6ip6_tnl_change(t, &p);
++
++ ip6ip6_kernel_tnl_link(t);
++
++ manage_kernel_tnls(NULL);
++ }
++ return atomic_read(&t->refcnt);
++}
++
++/**
++ * ip6ip6_tnl_ioctl - configure ipv6 tunnels from userspace
++ * @dev: virtual device associated with tunnel
++ * @ifr: parameters passed from userspace
++ * @cmd: command to be performed
++ *
++ * Description:
++ * ip6ip6_tnl_ioctl() is used for managing IPv6 tunnels
++ * from userspace.
++ *
++ * The possible commands are the following:
++ * %SIOCGETTUNNEL: get tunnel parameters for device
++ * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
++ * %SIOCCHGTUNNEL: change tunnel parameters to those given
++ * %SIOCDELTUNNEL: delete tunnel
++ *
++ * The fallback device "ip6tnl0", created during module
++ * initialization, can be used for creating other tunnel devices.
++ *
++ * Return:
++ * 0 on success,
++ * %-EFAULT if unable to copy data to or from userspace,
++ * %-EPERM if current process hasn't %CAP_NET_ADMIN set or attempting
++ * to configure kernel devices from userspace,
++ * %-EINVAL if passed tunnel parameters are invalid,
++ * %-EEXIST if changing a tunnel's parameters would cause a conflict
++ * %-ENODEV if attempting to change or delete a nonexisting device
++ *
++ * Note:
++ * See the comments on ip6ip6_kernel_tnl_add() for more information
++ * about kernel tunnels.
++ * **/
++
++static int
++ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
++{
++ int err = 0;
++ int create;
++ struct ip6_tnl_parm p;
++ struct ip6_tnl *t = NULL;
++
++ MOD_INC_USE_COUNT;
++
++ switch (cmd) {
++ case SIOCGETTUNNEL:
++ if (dev == &ip6ip6_fb_tnl_dev) {
++ if (copy_from_user(&p,
++ ifr->ifr_ifru.ifru_data,
++ sizeof (p))) {
++ err = -EFAULT;
++ break;
++ }
++ if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV)
++ t = (struct ip6_tnl *) dev->priv;
++ else if (err)
++ break;
++ } else
++ t = (struct ip6_tnl *) dev->priv;
++
++ memcpy(&p, &t->parms, sizeof (p));
++ if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
++ err = -EFAULT;
++ }
++ break;
++ case SIOCADDTUNNEL:
++ case SIOCCHGTUNNEL:
++ err = -EPERM;
++ create = (cmd == SIOCADDTUNNEL);
++ if (!capable(CAP_NET_ADMIN))
++ break;
++ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
++ err = -EFAULT;
++ break;
++ }
++ if (p.flags & IP6_TNL_F_KERNEL_DEV) {
++ break;
++ }
++ if (!create && dev != &ip6ip6_fb_tnl_dev) {
++ t = (struct ip6_tnl *) dev->priv;
++ }
++ if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) {
++ break;
++ }
++ if (cmd == SIOCCHGTUNNEL) {
++ if (t->dev != dev) {
++ err = -EEXIST;
++ break;
++ }
++ if (t->parms.flags & IP6_TNL_F_KERNEL_DEV) {
++ err = -EPERM;
++ break;
++ }
++ ip6ip6_tnl_change(t, &p);
++ netdev_state_change(dev);
++ }
++ if (copy_to_user(ifr->ifr_ifru.ifru_data,
++ &t->parms, sizeof (p))) {
++ err = -EFAULT;
++ } else {
++ err = 0;
++ }
++ break;
++ case SIOCDELTUNNEL:
++ err = -EPERM;
++ if (!capable(CAP_NET_ADMIN))
++ break;
++
++ if (dev == &ip6ip6_fb_tnl_dev) {
++ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
++ sizeof (p))) {
++ err = -EFAULT;
++ break;
++ }
++ err = ip6ip6_tnl_locate(&p, &t, 0);
++ if (err)
++ break;
++ if (t == &ip6ip6_fb_tnl) {
++ err = -EPERM;
++ break;
++ }
++ } else {
++ t = (struct ip6_tnl *) dev->priv;
++ }
++ if (t->parms.flags & IP6_TNL_F_KERNEL_DEV)
++ err = -EPERM;
++ else
++ err = unregister_netdevice(t->dev);
++ break;
++ default:
++ err = -EINVAL;
++ }
++ MOD_DEC_USE_COUNT;
++ return err;
++}
++
++/**
++ * ip6ip6_tnl_get_stats - return the stats for tunnel device
++ * @dev: virtual device associated with tunnel
++ *
++ * Return: stats for device
++ **/
++
++static struct net_device_stats *
++ip6ip6_tnl_get_stats(struct net_device *dev)
++{
++ return &(((struct ip6_tnl *) dev->priv)->stat);
++}
++
++/**
++ * ip6ip6_tnl_change_mtu - change mtu manually for tunnel device
++ * @dev: virtual device associated with tunnel
++ * @new_mtu: the new mtu
++ *
++ * Return:
++ * 0 on success,
++ * %-EINVAL if mtu too small
++ **/
++
++static int
++ip6ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
++{
++ if (new_mtu < IPV6_MIN_MTU) {
++ return -EINVAL;
++ }
++ dev->mtu = new_mtu;
++ return 0;
++}
++
++/**
++ * ip6ip6_tnl_dev_init_gen - general initializer for all tunnel devices
++ * @dev: virtual device associated with tunnel
++ *
++ * Description:
++ * Set function pointers and initialize the &struct flowi template used
++ * by the tunnel.
++ **/
++
++static int
++ip6ip6_tnl_dev_init_gen(struct net_device *dev)
++{
++ struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
++ struct flowi *fl = &t->fl;
++ int err;
++ struct sock *sk;
++
++ if ((err = sock_create(PF_INET6, SOCK_RAW, IPPROTO_IPV6, &t->sock))) {
++ printk(KERN_ERR
++ "Failed to create IPv6 tunnel socket (err %d).\n", err);
++ return err;
++ }
++ t->sock->inode->i_uid = 0;
++ t->sock->inode->i_gid = 0;
++
++ sk = t->sock->sk;
++ sk->allocation = GFP_ATOMIC;
++ sk->net_pinfo.af_inet6.hop_limit = 254;
++ sk->net_pinfo.af_inet6.mc_loop = 0;
++ sk->prot->unhash(sk);
++
++ memset(fl, 0, sizeof (*fl));
++ fl->proto = IPPROTO_IPV6;
++
++ dev->destructor = ip6ip6_tnl_dev_destructor;
++ dev->uninit = ip6ip6_tnl_dev_uninit;
++ dev->hard_start_xmit = ip6ip6_tnl_xmit;
++ dev->get_stats = ip6ip6_tnl_get_stats;
++ dev->do_ioctl = ip6ip6_tnl_ioctl;
++ dev->change_mtu = ip6ip6_tnl_change_mtu;
++
++ dev->type = ARPHRD_TUNNEL6;
++ dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
++ dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
++ dev->flags |= IFF_NOARP;
++ dev->iflink = 0;
++ /* Hmm... MAX_ADDR_LEN is 8, so the ipv6 addresses can't be
++ copied to dev->dev_addr and dev->broadcast, like the ipv4
++ addresses were in ipip.c, ip_gre.c and sit.c. */
++ dev->addr_len = 0;
++ return 0;
++}
++
++/**
++ * ip6ip6_tnl_dev_init - initializer for all non fallback tunnel devices
++ * @dev: virtual device associated with tunnel
++ **/
++
++static int
++ip6ip6_tnl_dev_init(struct net_device *dev)
++{
++ struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
++ ip6ip6_tnl_dev_init_gen(dev);
++ ip6ip6_tnl_link_config(t);
++ return 0;
++}
++
++#ifdef MODULE
++
++/**
++ * ip6ip6_fb_tnl_open - function called when fallback device opened
++ * @dev: fallback device
++ *
++ * Return: 0
++ **/
++
++static int
++ip6ip6_fb_tnl_open(struct net_device *dev)
++{
++ MOD_INC_USE_COUNT;
++ return 0;
++}
++
++/**
++ * ip6ip6_fb_tnl_close - function called when fallback device closed
++ * @dev: fallback device
++ *
++ * Return: 0
++ **/
++
++static int
++ip6ip6_fb_tnl_close(struct net_device *dev)
++{
++ MOD_DEC_USE_COUNT;
++ return 0;
++}
++#endif
++
++/**
++ * ip6ip6_fb_tnl_dev_init - initializer for fallback tunnel device
++ * @dev: fallback device
++ *
++ * Return: 0
++ **/
++
++int __init
++ip6ip6_fb_tnl_dev_init(struct net_device *dev)
++{
++ ip6ip6_tnl_dev_init_gen(dev);
++#ifdef MODULE
++ dev->open = ip6ip6_fb_tnl_open;
++ dev->stop = ip6ip6_fb_tnl_close;
++#endif
++ dev_hold(dev);
++ tnls_wc[0] = &ip6ip6_fb_tnl;
++ return 0;
++}
++
++/**
++ * ip6ip6_tnl_register_hook - add hook for processing of tunneled packets
++ * @reg: hook function and its parameters
++ *
++ * Description:
++ * Add a netfilter like hook function for special handling of tunneled
++ * packets. The hook functions are called before encapsulation
++ * (%IP6_TNL_PRE_ENCAP) and before decapsulation
++ * (%IP6_TNL_PRE_DECAP). The possible return values by the hook
++ * functions are %IP6_TNL_DROP, %IP6_TNL_ACCEPT and
++ * %IP6_TNL_STOLEN (in case the hook function took care of the packet
++ * and it doesn't have to be processed any further).
++ **/
++
++void
++ip6ip6_tnl_register_hook(struct ip6_tnl_hook_ops *reg)
++{
++ if (reg->hooknum < IP6_TNL_MAXHOOKS) {
++ struct list_head *i;
++
++ write_lock_bh(&ip6ip6_hook_lock);
++ for (i = hooks[reg->hooknum].next;
++ i != &hooks[reg->hooknum]; i = i->next) {
++ if (reg->priority <
++ ((struct ip6_tnl_hook_ops *) i)->priority) {
++ break;
++ }
++ }
++ list_add(&reg->list, i->prev);
++ write_unlock_bh(&ip6ip6_hook_lock);
++ }
++}
++
++/**
++ * ip6ip6_tnl_unregister_hook - remove tunnel hook
++ * @reg: hook function and its parameters
++ **/
++
++void
++ip6ip6_tnl_unregister_hook(struct ip6_tnl_hook_ops *reg)
++{
++ if (reg->hooknum < IP6_TNL_MAXHOOKS) {
++ write_lock_bh(&ip6ip6_hook_lock);
++ list_del(&reg->list);
++ write_unlock_bh(&ip6ip6_hook_lock);
++ }
++}
++
++
++/* the IPv6 over IPv6 protocol structure */
++static struct inet6_protocol ip6ip6_protocol = {
++ ip6ip6_rcv, /* IPv6 handler */
++ ip6ip6_err, /* IPv6 error control */
++ NULL, /* next */
++ IPPROTO_IPV6, /* protocol ID */
++ 0, /* copy */
++ NULL, /* data */
++ "IPv6 over IPv6" /* name */
++};
++
++/**
++ * ip6_tunnel_init - register protocol and reserve needed resources
++ *
++ * Return: 0 on success
++ **/
++
++int __init ip6_tunnel_init(void)
++{
++ int i, err;
++
++ ip6ip6_fb_tnl_dev.priv = (void *) &ip6ip6_fb_tnl;
++
++ for (i = 0; i < IP6_TNL_MAXHOOKS; i++) {
++ INIT_LIST_HEAD(&hooks[i]);
++ }
++ if ((err = register_netdev(&ip6ip6_fb_tnl_dev)))
++ return err;
++
++ inet6_add_protocol(&ip6ip6_protocol);
++ return 0;
++}
++
++/**
++ * ip6_tunnel_cleanup - free resources and unregister protocol
++ **/
++
++void ip6_tunnel_cleanup(void)
++{
++ write_lock_bh(&ip6ip6_kernel_lock);
++ shutdown = 1;
++ write_unlock_bh(&ip6ip6_kernel_lock);
++ flush_scheduled_tasks();
++ manage_kernel_tnls(NULL);
++ inet6_del_protocol(&ip6ip6_protocol);
++ unregister_netdev(&ip6ip6_fb_tnl_dev);
++}
++
++#ifdef MODULE
++module_init(ip6_tunnel_init);
++module_exit(ip6_tunnel_cleanup);
++#endif
++
++#if defined(CONFIG_IPV6_MOBILITY_HA_MODULE) || defined(CONFIG_IPV6_MOBILITY_MN_MODULE)
++EXPORT_SYMBOL(ip6ip6_tnl_register_hook);
++EXPORT_SYMBOL(ip6ip6_tnl_unregister_hook);
++#endif
++#ifdef CONFIG_IPV6_MOBILITY_HA_MODULE
++EXPORT_SYMBOL(ip6ip6_tnl_dec_max_kdev_count);
++EXPORT_SYMBOL(ip6ip6_tnl_inc_max_kdev_count);
++EXPORT_SYMBOL(ip6ip6_tnl_dec_min_kdev_count);
++EXPORT_SYMBOL(ip6ip6_tnl_inc_min_kdev_count);
++EXPORT_SYMBOL(ip6ip6_kernel_tnl_add);
++EXPORT_SYMBOL(ip6ip6_kernel_tnl_del);
++EXPORT_SYMBOL(ip6ip6_tnl_lookup);
++#endif
++#ifdef CONFIG_IPV6_MOBILITY_MN_MODULE
++EXPORT_SYMBOL(ip6ip6_tnl_create);
++EXPORT_SYMBOL(ip6ip6_tnl_change);
++#endif
++
+diff -uprN linux-2.4.25.old/net/ipv6/mipglue.c linux-2.4.25/net/ipv6/mipglue.c
+--- linux-2.4.25.old/net/ipv6/mipglue.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mipglue.c 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,63 @@
++/*
++ * Glue for Mobility support integration to IPv6
++ *
++ * Authors:
++ * Antti Tuominen <ajtuomin@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#include <linux/sched.h>
++
++#include <net/ipv6.h>
++#include <net/addrconf.h>
++#include <net/neighbour.h>
++#include <net/mipglue.h>
++
++extern int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff);
++
++/* Initialize all zero */
++struct mipv6_callable_functions mipv6_functions = { NULL };
++
++/* Sets mipv6_functions struct to zero to invalidate all successive
++ * calls to mipv6 functions. Used on module unload. */
++
++void mipv6_invalidate_calls(void)
++{
++ memset(&mipv6_functions, 0, sizeof(mipv6_functions));
++}
++
++
++/* Selects correct handler for tlv encoded destination option. Called
++ * by ip6_parse_tlv. Checks if mipv6 calls are valid before calling. */
++
++int mipv6_handle_dstopt(struct sk_buff *skb, int optoff)
++{
++ int ret;
++
++ switch (skb->nh.raw[optoff]) {
++ case MIPV6_TLV_HOMEADDR:
++ ret = MIPV6_CALLFUNC(mipv6_handle_homeaddr, 0)(skb, optoff);
++ break;
++ default:
++ /* Should never happen */
++ printk(KERN_ERR __FILE__ ": Invalid destination option code (%d)\n",
++ skb->nh.raw[optoff]);
++ ret = 1;
++ break;
++ }
++
++ /* If mipv6 handlers are not valid, pass the packet to
++ * ip6_tlvopt_unknown() for correct handling. */
++ if (!ret)
++ return ip6_tlvopt_unknown(skb, optoff);
++
++ return ret;
++}
++
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/Config.in linux-2.4.25/net/ipv6/mobile_ip6/Config.in
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/Config.in 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/Config.in 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,12 @@
++#
++# Mobile IPv6 Configuration
++#
++dep_tristate ' IPv6: Mobility Support (Correspondent Node)' CONFIG_IPV6_MOBILITY $CONFIG_IPV6
++if [ "$CONFIG_IPV6_IPV6_TUNNEL" != "n" ]; then
++ dep_tristate ' MIPv6: Mobile Node Support' CONFIG_IPV6_MOBILITY_MN $CONFIG_IPV6_MOBILITY
++
++ dep_tristate ' MIPv6: Home Agent Support' CONFIG_IPV6_MOBILITY_HA $CONFIG_IPV6_MOBILITY
++fi
++if [ "$CONFIG_IPV6_MOBILITY" != "n" ]; then
++ bool ' MIPv6: Debug messages' CONFIG_IPV6_MOBILITY_DEBUG
++fi
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/Makefile linux-2.4.25/net/ipv6/mobile_ip6/Makefile
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/Makefile 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,35 @@
++#
++# Makefile for the MIPL Mobile IPv6 for Linux.
++#
++# Note! Dependencies are done automagically by 'make dep', which also
++# removes any old dependencies. DON'T put your own dependencies here
++# unless it's something special (ie not a .c file).
++#
++
++
++O_TARGET := mip6_base.o
++
++list-multi := mip6_ha.o mip6_mn.o
++
++obj-y := hashlist.o bcache.o mobhdr_common.o stats.o exthdrs.o \
++ rr_crypto.o hmac.o auth_opt.o mipv6_icmp.o module_cn.o
++
++obj-m := $(O_TARGET)
++
++mip6_ha-objs := halist.o mipv6_icmp_ha.o tunnel_ha.o \
++ ndisc_ha.o ha.o module_ha.o
++
++mip6_mn-objs := mipv6_icmp_mn.o ioctl_mn.o tunnel_mn.o \
++ mdetect.o bul.o multiaccess_ctl.o mobhdr_mn.o mn.o \
++ module_mn.o
++
++obj-$(CONFIG_IPV6_MOBILITY_HA) += mip6_ha.o
++obj-$(CONFIG_IPV6_MOBILITY_MN) += mip6_mn.o
++
++include $(TOPDIR)/Rules.make
++
++mip6_ha.o: $(mip6_ha-objs)
++ $(LD) -r -o $@ $(mip6_ha-objs)
++
++mip6_mn.o: $(mip6_mn-objs)
++ $(LD) -r -o $@ $(mip6_mn-objs)
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/README linux-2.4.25/net/ipv6/mobile_ip6/README
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/README 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/README 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,15 @@
++MIPL Mobile IPv6 for Linux
++
++More information at http://www.mipl.mediapoli.com/.
++
++To join MIPL Mobile IPv6 for Linux mailing lists go to:
++
++ http://www.mipl.mediapoli.com/cgi-bin/mailman/listinfo
++
++Or send mail with subject "subscribe" for the general list to:
++
++ mipl-request@list.mipl.mediapoli.com
++
++or for the developer list to:
++
++ mipl-devel-request@list.mail.mediapoli.com
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/auth_opt.c linux-2.4.25/net/ipv6/mobile_ip6/auth_opt.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/auth_opt.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/auth_opt.c 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,121 @@
++/*
++ * MIPv6 Binding Authentication Data Option functions
++ *
++ * Authors:
++ * Henrik Petander <lpetande@tml.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/autoconf.h>
++#include <linux/icmpv6.h>
++#include <net/mipv6.h>
++
++#include "debug.h"
++#include "hmac.h"
++#include "mobhdr.h"
++
++#define DBG_KEY 5
++
++int mipv6_auth_build(struct in6_addr *cn_addr, struct in6_addr *coa,
++ __u8 *mh, __u8 *aud_data, __u8 *k_bu)
++{
++ /* First look up the peer from sadb based on his address */
++ struct ah_processing ahp;
++
++ /* Don't add any other options or this system is screwed */
++
++ __u8 buf[MAX_HASH_LENGTH];
++
++
++ if (!k_bu) {
++ DEBUG(DBG_ERROR, "k_bu missing, aborting");
++ return -1;
++ }
++ DEBUG(DBG_KEY, "Key for building authenticator:");
++ debug_print_buffer(DBG_KEY, k_bu, HMAC_SHA1_KEY_SIZE);
++
++ if (ah_hmac_sha1_init(&ahp, k_bu, HMAC_SHA1_KEY_SIZE) < 0) {
++ DEBUG(DBG_ERROR, "Failed to initialize hmac sha1");
++ return -1;
++ }
++
++ DEBUG(DBG_KEY, "coa: ");
++ debug_print_buffer(DBG_KEY, coa, 16);
++ DEBUG(DBG_KEY, "cn_addr: ");
++ debug_print_buffer(DBG_KEY, cn_addr, 16);
++ DEBUG(DBG_KEY, "MH contents: ");
++ debug_print_buffer(DBG_KEY, mh, aud_data - mh);
++
++ /* First the common part */
++ ah_hmac_sha1_loop(&ahp, coa, sizeof(struct in6_addr));
++ ah_hmac_sha1_loop(&ahp, cn_addr, sizeof(struct in6_addr));
++ ah_hmac_sha1_loop(&ahp, mh, aud_data - mh);
++ ah_hmac_sha1_result(&ahp, buf);
++
++ memcpy(aud_data, buf, MIPV6_RR_MAC_LENGTH);
++
++ return 0;
++}
++
++int mipv6_auth_check(struct in6_addr *cn_addr, struct in6_addr *coa,
++ __u8 *opt, __u8 optlen,
++ struct mipv6_mo_bauth_data *aud, __u8 *k_bu)
++{
++ int ret = -1;
++ struct ah_processing ahp;
++ __u8 htarget[MAX_HASH_LENGTH];
++
++ /* Look up peer by home address */
++ if (!k_bu) {
++ DEBUG(DBG_ERROR, "k_bu missing, aborting");
++ return -1;
++ }
++
++ DEBUG(DBG_KEY, "Key for checking authenticator:");
++ debug_print_buffer(DBG_KEY, k_bu, HMAC_SHA1_KEY_SIZE);
++
++ if (!aud || !coa) {
++ DEBUG(DBG_INFO, "%s is NULL", aud ? "coa" : "aud");
++ goto out;
++ }
++
++ if (aud->length != MIPV6_RR_MAC_LENGTH) {
++ DEBUG(DBG_ERROR,
++ ": Incorrect authentication option length %d", aud->length);
++ goto out;
++ }
++
++ if (ah_hmac_sha1_init(&ahp, k_bu, HMAC_SHA1_KEY_SIZE) < 0) {
++ DEBUG(DBG_ERROR,
++ "internal error in initialization of authentication algorithm");
++ goto out;
++ }
++ DEBUG(DBG_KEY, "coa: ");
++ debug_print_buffer(DBG_KEY, coa, 16);
++ DEBUG(DBG_KEY, "cn_addr: ");
++ debug_print_buffer(DBG_KEY, cn_addr, 16);
++ DEBUG(DBG_KEY, "MH contents: ");
++ debug_print_buffer(DBG_KEY, opt, (u8*) aud->data - opt);
++
++ ah_hmac_sha1_loop(&ahp, coa, sizeof(struct in6_addr));
++ ah_hmac_sha1_loop(&ahp, cn_addr, sizeof(struct in6_addr));
++
++ /*
++ * Process MH + options till the start of the authenticator in
++ * Auth. data option
++ */
++ ah_hmac_sha1_loop(&ahp, opt, (u8 *)aud->data - opt);
++ ah_hmac_sha1_result(&ahp, htarget);
++ if (memcmp(htarget, aud->data, MIPV6_RR_MAC_LENGTH) == 0)
++ ret = 0;
++
++ DEBUG(DBG_ERROR, "returning %d", ret);
++out:
++ return ret;
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/bcache.c linux-2.4.25/net/ipv6/mobile_ip6/bcache.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/bcache.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/bcache.c 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,746 @@
++/*
++ * Binding Cache
++ *
++ * Authors:
++ * Juha Mynttinen <jmynttin@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/*
++ * Changes:
++ *
++ * Nanno Langstraat : Timer code cleaned up, active socket
++ * test rewritten
++ */
++
++#include <linux/autoconf.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/in6.h>
++#include <linux/init.h>
++#include <linux/spinlock.h>
++#include <linux/proc_fs.h>
++#include <linux/ipv6_route.h>
++#include <net/ipv6.h>
++#include <net/addrconf.h>
++#include <net/tcp.h>
++#include <net/udp.h>
++#include <net/ip6_route.h>
++#include <net/mipv6.h>
++
++#include "bcache.h"
++#include "hashlist.h"
++#include "debug.h"
++#include "mobhdr.h"
++#include "tunnel.h"
++#include "config.h"
++
++#define TIMERDELAY HZ/10
++
++struct mipv6_bcache {
++ struct hashlist *entries;
++ __u32 size;
++ struct timer_list callback_timer;
++};
++
++struct in6_addr_pair {
++ struct in6_addr *a1;
++ struct in6_addr *a2;
++};
++
++static rwlock_t bcache_lock = RW_LOCK_UNLOCKED;
++
++static struct mipv6_bcache bcache;
++
++static int bcache_proc_info(char *buffer, char **start, off_t offset,
++ int length);
++
++#define MIPV6_BCACHE_HASHSIZE 32
++
++/* Moment of transmission of a BR, in seconds before bcache entry expiry */
++#define BCACHE_BR_SEND_LEAD 3
++
++#define MIPV6_MAX_BRR 3 /* Send 3 BRRs before deleting BC entry */
++#define MIPV6_BRR_RATE HZ /* Send BRRs once per second */
++
++/*
++ * Internal functions.
++ */
++
++struct cache_entry_iterator_args {
++ struct mipv6_bce **entry;
++};
++
++static int find_first_cache_entry_iterator(void *data, void *args,
++ unsigned long *lifetime)
++{
++ struct mipv6_bce *entry =
++ (struct mipv6_bce *) data;
++ struct cache_entry_iterator_args *state =
++ (struct cache_entry_iterator_args *) args;
++
++ ASSERT(entry != NULL);
++
++ if (entry->type == CACHE_ENTRY) {
++ *(state->entry) = entry;
++ return ITERATOR_STOP; /* stop iteration */
++ } else {
++ return ITERATOR_CONT; /* continue iteration */
++ }
++}
++
++
++/*
++ * Get memory for a new bcache entry. If bcache is full, a cache
++ * entry may be deleted to get space for a home registration, but not
++ * vice versa.
++ */
++static struct mipv6_bce *mipv6_bce_alloc(__u8 type)
++{
++ struct mipv6_bce *entry;
++ struct cache_entry_iterator_args args;
++
++ DEBUG_FUNC();
++
++ entry = (struct mipv6_bce *)
++ hashlist_alloc(bcache.entries, SLAB_ATOMIC);
++
++ /* Cache replacement policy: always replace the CACHE_ENTRY
++ closest to expiration. Type HOME_REGISTRATION entry may
++ never be deleted before expiration. */
++ if (entry == NULL) {
++ /* cache full, try to delete a CACHE_ENTRY */
++ args.entry = &entry;
++ hashlist_iterate(bcache.entries, &args,
++ find_first_cache_entry_iterator);
++ if (entry == NULL)
++ return NULL;
++ hashlist_delete(bcache.entries,
++ (struct hashlist_entry *)entry);
++ entry = (struct mipv6_bce *)
++ hashlist_alloc(bcache.entries, SLAB_ATOMIC);
++ }
++ return entry;
++}
++
++/*
++ * Frees entry's memory allocated with mipv6_bce_alloc
++ */
++static void mipv6_bce_free(struct mipv6_bce *entry)
++{
++ hashlist_free(bcache.entries, (void *) entry);
++}
++
++/*
++ * Removes all expired entries
++ */
++static void expire(void)
++{
++ struct mipv6_bce *entry;
++ struct br_addrs {
++ struct in6_addr daddr;
++ struct in6_addr saddr;
++ struct br_addrs *next;
++ };
++ struct br_addrs *br_info = NULL;
++
++ DEBUG_FUNC();
++
++ write_lock(&bcache_lock);
++
++ while ((entry = (struct mipv6_bce *)
++ hashlist_get_first(bcache.entries)) != NULL) {
++ struct rt6_info *rt;
++ if (time_after_eq(jiffies, entry->callback_time)) {
++
++ DEBUG(DBG_INFO, "an entry expired");
++
++ if (entry->type & HOME_REGISTRATION) {
++ mip6_fn.proxy_del(&entry->home_addr, entry);
++ }
++ hashlist_delete(bcache.entries, (void *)entry);
++ mipv6_bce_free(entry);
++ entry = NULL;
++ } else if (entry->br_callback_time != 0 &&
++ time_after_eq(jiffies, entry->br_callback_time) &&
++ entry->br_count < MIPV6_MAX_BRR &&
++ (rt = rt6_lookup(&entry->home_addr, &entry->our_addr, 0, 0)) != NULL){
++ /* Do we have a destination cache entry for the home address */
++ if (rt->rt6i_flags & RTF_CACHE) {
++ struct br_addrs *tmp;
++ tmp = br_info;
++ DEBUG(DBG_INFO,
++ "bcache entry recently used. Sending BR.");
++ /* queue for sending */
++ br_info = kmalloc(sizeof(struct br_addrs),
++ GFP_ATOMIC);
++ if (br_info) {
++ ipv6_addr_copy(&br_info->saddr,
++ &entry->our_addr);
++ ipv6_addr_copy(&br_info->daddr,
++ &entry->home_addr);
++ br_info->next = tmp;
++ entry->last_br = jiffies;
++ entry->br_callback_time = jiffies + MIPV6_BRR_RATE;
++ entry->br_count++;
++ } else {
++ br_info = tmp;
++ DEBUG(DBG_ERROR, "Out of memory");
++ }
++
++ } else
++ entry->br_callback_time = 0;
++ dst_release(&rt->u.dst);
++ } else {
++ entry->br_callback_time = 0;
++ break;
++ }
++ }
++ write_unlock(&bcache_lock);
++
++ while (br_info) {
++ struct br_addrs *tmp = br_info->next;
++ if (mipv6_send_brr(&br_info->saddr, &br_info->daddr, NULL) < 0)
++ DEBUG(DBG_WARNING,
++ "BR send for %x:%x:%x:%x:%x:%x:%x:%x failed",
++ NIPV6ADDR(&br_info->daddr));
++ kfree(br_info);
++ br_info = tmp;
++ }
++}
++
++static void set_timer(void)
++{
++ struct mipv6_bce *entry;
++ unsigned long callback_time;
++
++ DEBUG_FUNC();
++
++ entry = (struct mipv6_bce *)
++ hashlist_get_first(bcache.entries);
++ if (entry != NULL) {
++ if (entry->br_callback_time > 0 &&
++ time_after(entry->br_callback_time, jiffies))
++ callback_time = entry->br_callback_time;
++ else if (time_after(entry->callback_time, jiffies))
++ callback_time = entry->callback_time;
++ else {
++ DEBUG(DBG_WARNING,
++ "bcache timer attempted to schedule"
++ " for a historical jiffies count!");
++ callback_time = jiffies + TIMERDELAY;
++ }
++
++ DEBUG(DBG_INFO, "setting timer to now");
++ mod_timer(&bcache.callback_timer, callback_time);
++ } else {
++ del_timer(&bcache.callback_timer);
++ DEBUG(DBG_INFO, "BC empty, not setting a new timer");
++ }
++}
++
++/*
++ * The function that is scheduled to do the callback functions. May be
++ * modified e.g to allow Binding Requests, now only calls expire() and
++ * schedules a new timer.
++ */
++static void timer_handler(unsigned long dummy)
++{
++ expire();
++ write_lock(&bcache_lock);
++ set_timer();
++ write_unlock(&bcache_lock);
++}
++
++/*
++ * Interface functions visible to other modules
++ */
++
++/**
++ * mipv6_bcache_add - add Binding Cache entry
++ * @ifindex: interface index
++ * @our_addr: own address
++ * @home_addr_org: MN's home address
++ * @coa: MN's care-of address
++ * @lifetime: lifetime for this binding
++ * @prefix: prefix length
++ * @seq: sequence number
++ * @flags: flags received in BU
++ * @type: type of entry
++ *
++ * Adds an entry for this @home_addr_org in the Binding Cache. If entry
++ * already exists, old entry is updated. @type may be %CACHE_ENTRY or
++ * %HOME_REGISTRATION.
++ **/
++int mipv6_bcache_add(int ifindex,
++ struct in6_addr *our_addr,
++ struct in6_addr *home_addr,
++ struct in6_addr *coa,
++ __u32 lifetime, __u16 seq, __u8 flags, __u8 type)
++{
++ struct mipv6_bce *entry;
++ int update = 0;
++ int create_tunnel = 0;
++ unsigned long now = jiffies;
++ struct in6_addr_pair hashkey;
++ int ret = -1;
++
++ DEBUG_FUNC();
++
++ hashkey.a1 = home_addr;
++ hashkey.a2 = our_addr;
++
++ write_lock(&bcache_lock);
++
++ if (type == HOME_REGISTRATION && !(mip6node_cnf.capabilities&CAP_HA))
++ return 0;
++
++ if (unlikely(bcache.entries == NULL)) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ if ((entry = (struct mipv6_bce *)
++ hashlist_get(bcache.entries, &hashkey)) != NULL) {
++ /* if an entry for this home_addr exists (with smaller
++ * seq than the new seq), update it by removing it
++ * first
++ */
++ if (!MIPV6_SEQ_GT(seq, entry->seq)) {
++ DEBUG(DBG_INFO, "smaller seq than existing, not updating");
++ goto out;
++ }
++ DEBUG(DBG_INFO, "updating an existing entry");
++ update = 1;
++
++ /* H-flag is already checked in BU handler. */
++ /* XXX: Should we care about the other flags?*/
++ if (flags != entry->flags) {
++ DEBUG(DBG_INFO, "entry/BU flag mismatch");
++ }
++
++ if (type == HOME_REGISTRATION) {
++ create_tunnel = (ipv6_addr_cmp(&entry->coa, coa) ||
++ entry->ifindex != ifindex);
++ }
++ } else {
++ /* no entry for this home_addr, try to create a new entry */
++ DEBUG(DBG_INFO, "creating a new entry");
++ update = 0;
++
++ entry = mipv6_bce_alloc(type);
++ if (entry == NULL) {
++ DEBUG(DBG_INFO, "cache full, entry not added");
++ goto err;
++ }
++
++ create_tunnel = (type == HOME_REGISTRATION);
++ }
++
++ if (create_tunnel) {
++ if (update)
++ mip6_fn.proxy_del(&entry->home_addr, entry);
++ if (mip6_fn.proxy_create(flags, ifindex, coa, our_addr, home_addr) < 0) {
++ goto err_proxy;
++ }
++ }
++
++ ipv6_addr_copy(&(entry->our_addr), our_addr);
++ ipv6_addr_copy(&(entry->home_addr), home_addr);
++ ipv6_addr_copy(&(entry->coa), coa);
++ entry->ifindex = ifindex;
++ entry->seq = seq;
++ entry->type = type;
++ entry->flags = flags;
++
++ entry->last_br = 0;
++ entry->destunr_count = 0;
++ entry->callback_time = now + lifetime * HZ;
++ if (entry->type & HOME_REGISTRATION)
++ entry->br_callback_time = 0;
++ else
++ entry->br_callback_time = now +
++ (lifetime - BCACHE_BR_SEND_LEAD) * HZ;
++
++ if (update) {
++ DEBUG(DBG_INFO, "updating entry : %x", entry);
++ hashlist_reposition(bcache.entries, (void *)entry,
++ entry->callback_time);
++ } else {
++ DEBUG(DBG_INFO, "adding entry: %x", entry);
++ if ((hashlist_add(bcache.entries,
++ &hashkey,
++ entry->callback_time, entry)) < 0) {
++
++ DEBUG(DBG_ERROR, "Hash add failed");
++ goto err_hashlist;
++ }
++ }
++
++ set_timer();
++
++out:
++ write_unlock(&bcache_lock);
++ return 0;
++
++err_hashlist:
++ if (create_tunnel) {
++ mip6_fn.proxy_del(home_addr, entry);
++ }
++err_proxy:
++ if (update) {
++ hashlist_delete(bcache.entries, (void *)entry);
++ }
++ mipv6_bce_free(entry);
++err:
++ write_unlock(&bcache_lock);
++ return ret;
++}
++
++int mipv6_bcache_icmp_err(struct in6_addr *home_addr,
++ struct in6_addr *our_addr,
++ int destunr_count)
++{
++ struct mipv6_bce *entry;
++ struct in6_addr_pair hashkey;
++
++ int ret = -ENOENT;
++
++ DEBUG_FUNC();
++
++ hashkey.a1 = home_addr;
++ hashkey.a2 = our_addr;
++
++ write_lock(&bcache_lock);
++ if (unlikely(bcache.entries == NULL)) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ if ((entry = (struct mipv6_bce *)
++ hashlist_get(bcache.entries, &hashkey)) != NULL) {
++ entry->last_destunr = jiffies;
++ entry->destunr_count = destunr_count;
++ ret = 0;
++ }
++err:
++ write_unlock(&bcache_lock);
++ return ret;
++}
++
++
++/**
++ * mipv6_bcache_delete - delete Binding Cache entry
++ * @home_addr: MN's home address
++ * @our_addr: our address
++ * @type: type of entry
++ *
++ * Deletes an entry associated with @home_addr from Binding Cache.
++ * Valid values for @type are %CACHE_ENTRY, %HOME_REGISTRATION and
++ * %ANY_ENTRY. %ANY_ENTRY deletes any type of entry.
++ **/
++int mipv6_bcache_delete(struct in6_addr *home_addr,
++ struct in6_addr *our_addr, __u8 type)
++{
++ struct mipv6_bce *entry;
++ struct in6_addr_pair hashkey;
++ int err = 0;
++
++ DEBUG_FUNC();
++
++ if (home_addr == NULL || our_addr == NULL) {
++ DEBUG(DBG_INFO, "error in arguments");
++ return -EINVAL;
++ }
++
++ hashkey.a1 = home_addr;
++ hashkey.a2 = our_addr;
++
++ write_lock(&bcache_lock);
++
++ if (unlikely(bcache.entries == NULL) ||
++ (entry = (struct mipv6_bce *)
++ hashlist_get(bcache.entries, &hashkey)) == NULL ||
++ !(entry->type & type)) {
++ DEBUG(DBG_INFO, "No matching entry found");
++ err = -ENOENT;
++ goto out;
++ }
++
++ hashlist_delete(bcache.entries, (void *) entry);
++ mipv6_bce_free(entry);
++
++ set_timer();
++out:
++ write_unlock(&bcache_lock);
++ return err;
++}
++
++/**
++ * mipv6_bcache_exists - check if entry exists
++ * @home_addr: home address to check
++ * @our_addr: our address
++ *
++ * Determines if a binding exists for @home_addr. Returns type of the
++ * entry or negative if entry does not exist.
++ **/
++int mipv6_bcache_exists(struct in6_addr *home_addr,
++ struct in6_addr *our_addr)
++{
++ struct mipv6_bce *entry;
++ struct in6_addr_pair hashkey;
++ int type = -ENOENT;
++
++ DEBUG_FUNC();
++
++ if (home_addr == NULL || our_addr == NULL)
++ return -EINVAL;
++
++ hashkey.a1 = home_addr;
++ hashkey.a2 = our_addr;
++
++ read_lock(&bcache_lock);
++ if (likely(bcache.entries != NULL) &&
++ (entry = (struct mipv6_bce *)
++ hashlist_get(bcache.entries, &hashkey)) != NULL) {
++ type = entry->type;
++ }
++ read_unlock(&bcache_lock);
++
++ return type;
++}
++
++/**
++ * mipv6_bcache_get - get entry from Binding Cache
++ * @home_addr: home address to search
++ * @our_addr: our address
++ * @entry: pointer to buffer
++ *
++ * Gets a copy of Binding Cache entry for @home_addr. If entry
++ * exists entry is copied to @entry and zero is returned.
++ * Otherwise returns negative.
++ **/
++int mipv6_bcache_get(struct in6_addr *home_addr,
++ struct in6_addr *our_addr,
++ struct mipv6_bce *entry)
++{
++ struct mipv6_bce *entry2;
++ struct in6_addr_pair hashkey;
++ int ret = -ENOENT;
++
++ DEBUG_FUNC();
++
++ if (home_addr == NULL || our_addr == NULL || entry == NULL)
++ return -EINVAL;
++
++ hashkey.a1 = home_addr;
++ hashkey.a2 = our_addr;
++
++ read_lock_bh(&bcache_lock);
++
++ entry2 = (struct mipv6_bce *)
++ hashlist_get(bcache.entries, &hashkey);
++ if (entry2 != NULL) {
++ memcpy(entry, entry2, sizeof(struct mipv6_bce));
++ ret = 0;
++ }
++ read_unlock_bh(&bcache_lock);
++ return ret;
++}
++
++int mipv6_bcache_iterate(hashlist_iterator_t func, void *args)
++{
++ int ret;
++
++ read_lock_bh(&bcache_lock);
++ ret = hashlist_iterate(bcache.entries, args, func);
++ read_unlock_bh(&bcache_lock);
++
++ return ret;
++}
++
++/*
++ * Proc-filesystem functions
++ */
++
++#define BC_INFO_LEN 80
++
++struct procinfo_iterator_args {
++ char *buffer;
++ int offset;
++ int length;
++ int skip;
++ int len;
++};
++
++static int procinfo_iterator(void *data, void *args, unsigned long *pref)
++{
++ struct procinfo_iterator_args *arg =
++ (struct procinfo_iterator_args *) args;
++ struct mipv6_bce *entry =
++ (struct mipv6_bce *) data;
++
++ ASSERT(entry != NULL);
++
++ if (arg->skip < arg->offset / BC_INFO_LEN) {
++ arg->skip++;
++ return ITERATOR_CONT;
++ }
++
++ if (arg->len >= arg->length)
++ return ITERATOR_CONT;
++
++ /* HoA CoA CallbackInSecs Type */
++ arg->len += sprintf(arg->buffer + arg->len,
++ "%08x%08x%08x%08x %08x%08x%08x%08x %010lu %02d\n",
++ ntohl(entry->home_addr.s6_addr32[0]),
++ ntohl(entry->home_addr.s6_addr32[1]),
++ ntohl(entry->home_addr.s6_addr32[2]),
++ ntohl(entry->home_addr.s6_addr32[3]),
++ ntohl(entry->coa.s6_addr32[0]),
++ ntohl(entry->coa.s6_addr32[1]),
++ ntohl(entry->coa.s6_addr32[2]),
++ ntohl(entry->coa.s6_addr32[3]),
++ ((entry->callback_time) - jiffies) / HZ,
++ (int) entry->type);
++
++ return ITERATOR_CONT;
++}
++
++ /*
++ * Callback function for proc filesystem.
++ */
++static int bcache_proc_info(char *buffer, char **start, off_t offset,
++ int length)
++{
++ struct procinfo_iterator_args args;
++
++ DEBUG_FUNC();
++
++ args.buffer = buffer;
++ args.offset = offset;
++ args.length = length;
++ args.skip = 0;
++ args.len = 0;
++
++ read_lock_bh(&bcache_lock);
++ hashlist_iterate(bcache.entries, &args, procinfo_iterator);
++ read_unlock_bh(&bcache_lock);
++
++ *start = buffer;
++ if (offset)
++ *start += offset % BC_INFO_LEN;
++
++ args.len -= offset % BC_INFO_LEN;
++
++ if (args.len > length)
++ args.len = length;
++ if (args.len < 0)
++ args.len = 0;
++
++ return args.len;
++}
++
++static int bcache_compare(void *data, void *hashkey)
++{
++ struct in6_addr_pair *p = (struct in6_addr_pair *) hashkey;
++ struct mipv6_bce *e = (struct mipv6_bce *) data;
++
++ if (ipv6_addr_cmp(&e->home_addr, p->a1) == 0
++ && ipv6_addr_cmp(&e->our_addr, p->a2) == 0)
++ return 0;
++ else
++ return -1;
++}
++
++static __u32 bcache_hash(void *hashkey)
++{
++ struct in6_addr_pair *p = (struct in6_addr_pair *) hashkey;
++
++ return p->a1->s6_addr32[0] ^ p->a1->s6_addr32[1] ^
++ p->a2->s6_addr32[2] ^ p->a2->s6_addr32[3];
++}
++
++/*
++ * Initialization and shutdown functions
++ */
++
++int __init mipv6_bcache_init(__u32 size)
++{
++ if (size < 1) {
++ DEBUG(DBG_ERROR, "Binding cache size must be at least 1");
++ return -EINVAL;
++ }
++ bcache.entries = hashlist_create(MIPV6_BCACHE_HASHSIZE, size,
++ sizeof(struct mipv6_bce),
++ "mip6_bcache", NULL, NULL,
++ bcache_compare, bcache_hash);
++
++ if (bcache.entries == NULL) {
++ DEBUG(DBG_ERROR, "Failed to initialize hashlist");
++ return -ENOMEM;
++ }
++
++ init_timer(&bcache.callback_timer);
++ bcache.callback_timer.data = 0;
++ bcache.callback_timer.function = timer_handler;
++ bcache.size = size;
++
++ proc_net_create("mip6_bcache", 0, bcache_proc_info);
++
++ DEBUG(DBG_INFO, "Binding cache initialized");
++ return 0;
++}
++
++static int
++bce_cleanup_iterator(void *rawentry, void *args, unsigned long *sortkey)
++{
++ int type = (int) args;
++ struct mipv6_bce *entry = (struct mipv6_bce *) rawentry;
++ if (entry->type == type) {
++ if (entry->type & HOME_REGISTRATION) {
++ if (unlikely(mip6_fn.proxy_del == NULL))
++ DEBUG(DBG_ERROR, "proxy_del unitialized");
++ else
++ mip6_fn.proxy_del(&entry->home_addr, entry);
++ }
++ return ITERATOR_DELETE_ENTRY;
++ }
++ return ITERATOR_CONT;
++
++}
++
++void mipv6_bcache_cleanup(int type)
++{
++ write_lock_bh(&bcache_lock);
++ hashlist_iterate(bcache.entries,(void *) type, bce_cleanup_iterator);
++ write_unlock_bh(&bcache_lock);
++}
++
++int __exit mipv6_bcache_exit(void)
++{
++ struct hashlist *entries;
++
++ DEBUG_FUNC();
++
++ proc_net_remove("mip6_bcache");
++
++ write_lock_bh(&bcache_lock);
++ DEBUG(DBG_INFO, "Stopping the bcache timer");
++ del_timer(&bcache.callback_timer);
++ hashlist_iterate(bcache.entries,(void *)CACHE_ENTRY,
++ bce_cleanup_iterator);
++
++ entries = bcache.entries;
++ bcache.entries = NULL;
++ write_unlock_bh(&bcache_lock);
++
++ hashlist_destroy(entries);
++ return 0;
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/bcache.h linux-2.4.25/net/ipv6/mobile_ip6/bcache.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/bcache.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/bcache.h 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,72 @@
++/*
++ * MIPL Mobile IPv6 Binding Cache header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _BCACHE_H
++#define _BCACHE_H
++
++#include <linux/in6.h>
++#include <linux/timer.h>
++#include "hashlist.h"
++
++#define CACHE_ENTRY 1 /* this and HOME_REGISTRATION are the entry types */
++#define HOME_REGISTRATION 2
++#define ANY_ENTRY 3
++
++#define MIPV6_MAX_DESTUNREACH 5 /* Delete CN BCEs after 5 destination unreachables */
++#define MIPV6_DEST_UNR_IVAL 10 /* What is the max interval of destination
++ unreacahable error messages for them to be persistent*/
++
++struct mipv6_bce {
++ struct hashlist_entry e;
++ int ifindex; /* Interface identifier */
++ struct in6_addr our_addr; /* our address (as seen by the MN) */
++ struct in6_addr home_addr; /* MN home address */
++ struct in6_addr coa; /* MN care-of address */
++ unsigned long callback_time; /* time of expiration (in jiffies) */
++ unsigned long br_callback_time; /* time for sending a BR (in jiffies) */
++ int (*callback_function)(struct mipv6_bce *entry);
++ __u8 type; /* home registration */
++ __u8 router; /* mn is router */
++ __u8 flags; /* flags received in BU */
++ __u16 seq; /* sequence number */
++ unsigned long last_br; /* time when last BR sent */
++ unsigned long last_destunr; /* time when last ICMP destination unreachable received */
++ int br_count; /* How many BRRs have sent */
++ int destunr_count; /* Number of destination unreachables received */
++};
++
++int mipv6_bcache_add(int ifindex, struct in6_addr *our_addr,
++ struct in6_addr *home_addr, struct in6_addr *coa,
++ __u32 lifetime, __u16 seq, __u8 flags, __u8 type);
++
++int mipv6_bcache_icmp_err(struct in6_addr *home_addr,
++ struct in6_addr *our_addr,
++ int destunr_count);
++
++int mipv6_bcache_delete(struct in6_addr *home_addr, struct in6_addr *our_addr,
++ __u8 type);
++
++int mipv6_bcache_exists(struct in6_addr *home_addr,
++ struct in6_addr *our_addr);
++
++int mipv6_bcache_get(struct in6_addr *home_addr,
++ struct in6_addr *our_addr,
++ struct mipv6_bce *entry);
++
++int mipv6_bcache_iterate(int (*func)(void *, void *, unsigned long *), void *args);
++
++void mipv6_bcache_cleanup(int type);
++
++int mipv6_bcache_init(__u32 size);
++
++int mipv6_bcache_exit(void);
++
++#endif /* _BCACHE_H */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/bul.c linux-2.4.25/net/ipv6/mobile_ip6/bul.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/bul.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/bul.c 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,634 @@
++/*
++ * Binding update list
++ *
++ * Authors:
++ * Juha Mynttinen <jmynttin@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/*
++ * Changes:
++ *
++ * Nanno Langstraat : Timer code cleaned up
++ */
++
++#include <linux/autoconf.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/in6.h>
++#include <linux/init.h>
++#include <linux/spinlock.h>
++#include <net/ipv6.h>
++#include <net/mipv6.h>
++#include <linux/proc_fs.h>
++
++#include "bul.h"
++#include "debug.h"
++#include "hashlist.h"
++#include "tunnel_mn.h"
++#include "mobhdr.h"
++
++#define MIPV6_BUL_HASHSIZE 32
++
++rwlock_t bul_lock = RW_LOCK_UNLOCKED;
++
++struct mipv6_bul {
++ struct hashlist *entries;
++ struct timer_list callback_timer;
++};
++
++static struct mipv6_bul bul;
++
++struct in6_addr_pair {
++ struct in6_addr *a1;
++ struct in6_addr *a2;
++};
++
++/**********************************************************************
++ *
++ * Private functions
++ *
++ **********************************************************************/
++
++static int bul_compare(void *data, void *hashkey)
++{
++ struct in6_addr_pair *p = (struct in6_addr_pair *)hashkey;
++ struct mipv6_bul_entry *e = (struct mipv6_bul_entry *)data;
++
++ if (ipv6_addr_cmp(&e->cn_addr, p->a1) == 0
++ && ipv6_addr_cmp(&e->home_addr, p->a2) == 0)
++ return 0;
++ else
++ return -1;
++}
++
++struct test_keys {
++ struct in6_addr *addr;
++ u8 *cookie;
++};
++
++static int bul_compare_cookie(void *data, void *keys)
++{
++ struct test_keys *p = (struct test_keys *)keys;
++ struct mipv6_bul_entry *e = (struct mipv6_bul_entry *)data;
++
++ if (ipv6_addr_cmp(&e->cn_addr, p->addr) == 0 && e->rr
++ && memcmp(&e->rr->cot_cookie, p->cookie, 8) == 0)
++ return 0;
++ else
++ return -1;
++}
++
++static u32 bul_hash(void *hashkey)
++{
++ struct in6_addr_pair *p = (struct in6_addr_pair *)hashkey;
++
++ return p->a1->s6_addr32[0] ^
++ p->a1->s6_addr32[1] ^
++ p->a1->s6_addr32[2] ^
++ p->a1->s6_addr32[3];
++}
++
++static int bul_proc_info(char *buffer, char **start, off_t offset,
++ int length);
++
++static struct mipv6_bul_entry *mipv6_bul_get_entry(void)
++{
++ DEBUG_FUNC();
++ return ((struct mipv6_bul_entry *)
++ hashlist_alloc(bul.entries, SLAB_ATOMIC));
++}
++
++static void mipv6_bul_entry_free(struct mipv6_bul_entry *entry)
++{
++ DEBUG_FUNC();
++
++ if (entry->rr) {
++ if (entry->rr->kbu)
++ kfree(entry->rr->kbu);
++ kfree(entry->rr);
++ }
++ if (entry->ops)
++ kfree(entry->ops);
++ hashlist_free(bul.entries, (void *)entry);
++}
++
++static __inline__ int del_bul_entry_tnl(struct mipv6_bul_entry *entry)
++{
++ if (entry->flags & MIPV6_BU_F_HOME) {
++ return mipv6_mv_tnl_to_ha(&entry->cn_addr,
++ &entry->coa,
++ &entry->home_addr);
++ }
++ return 0;
++}
++
++static void timer_update(void)
++{
++ struct mipv6_bul_entry *entry;
++
++ DEBUG_FUNC();
++
++ entry = hashlist_get_first(bul.entries);
++
++ while (entry && time_after_eq(jiffies, entry->callback_time)) {
++ if (time_after_eq(jiffies, entry->expire) ||
++ entry->callback(entry) != 0) {
++ /*
++ * Either the entry has expired, or the callback
++ * indicated that it should be deleted.
++ */
++ hashlist_delete(bul.entries, (void *)entry);
++
++ del_bul_entry_tnl(entry);
++ mipv6_bul_entry_free(entry);
++ DEBUG(DBG_INFO, "Entry deleted (was expired) from "
++ "binding update list");
++ } else {
++ /* move entry to its right place in the hashlist */
++ DEBUG(DBG_INFO, "Rescheduling");
++ hashlist_reposition(bul.entries, (void *)entry,
++ entry->callback_time);
++ }
++ entry = (struct mipv6_bul_entry *)
++ hashlist_get_first(bul.entries);
++ }
++
++ if (entry == NULL) {
++ DEBUG(DBG_INFO, "bul empty, not setting a new timer");
++ del_timer(&bul.callback_timer);
++ } else {
++ mod_timer(&bul.callback_timer, entry->callback_time);
++ }
++}
++
++static void timer_handler(unsigned long dummy)
++{
++ DEBUG_FUNC();
++
++ write_lock(&bul_lock);
++ timer_update();
++ write_unlock(&bul_lock);
++}
++
++/**********************************************************************
++ *
++ * Public interface functions
++ *
++ **********************************************************************/
++
++/**
++ * mipv6_bul_iterate - apply interator function to all entries
++ * @func: function to apply
++ * @args: extra arguments for iterator
++ *
++ * Applies @func for each entry in Binding Update List. Extra
++ * arguments given in @args are also passed to the iterator function.
++ * Caller must hold @bul_lock.
++ **/
++int mipv6_bul_iterate(hashlist_iterator_t func, void *args)
++{
++ DEBUG_FUNC();
++
++ return hashlist_iterate(bul.entries, args, func);
++}
++
++/**
++ * mipv6_bul_exists - check if Binding Update List entry exists
++ * @cn: address to check
++ *
++ * Checks if Binding Update List has an entry for @cn. Returns true
++ * if entry exists, false otherwise. Caller may not hold @bul_lock.
++ **/
++int mipv6_bul_exists(struct in6_addr *cn, struct in6_addr *haddr)
++{
++ int exists;
++ struct in6_addr_pair hashkey;
++
++ DEBUG_FUNC();
++
++ hashkey.a1 = cn;
++ hashkey.a2 = haddr;
++
++ read_lock_bh(&bul_lock);
++
++ if (unlikely(bul.entries == NULL))
++ exists = 0;
++ else
++ exists = (hashlist_get(bul.entries, &hashkey) != NULL);
++
++ read_unlock_bh(&bul_lock);
++ return exists;
++}
++
++/**
++ * mipv6_bul_get - get Binding Update List entry
++ * @cn_addr: CN address to search
++ * @home_addr: home address to search
++ *
++ * Returns Binding Update List entry for @cn_addr if it exists.
++ * Otherwise returns %NULL. Caller must hold @bul_lock.
++ **/
++struct mipv6_bul_entry *mipv6_bul_get(struct in6_addr *cn_addr,
++ struct in6_addr *home_addr)
++{
++ struct mipv6_bul_entry *entry;
++ struct in6_addr_pair hashkey;
++
++ DEBUG_FUNC();
++
++ if (unlikely(bul.entries == NULL)) {
++ return NULL;
++ }
++ hashkey.a1 = cn_addr;
++ hashkey.a2 = home_addr;
++
++ entry = (struct mipv6_bul_entry *)
++ hashlist_get(bul.entries, &hashkey);
++
++ return entry;
++}
++
++struct mipv6_bul_entry *mipv6_bul_get_by_ccookie(
++ struct in6_addr *cn_addr, u8 *cookie)
++{
++ struct test_keys key;
++
++ DEBUG_FUNC();
++
++ if (unlikely(bul.entries == NULL))
++ return NULL;
++ key.addr = cn_addr;
++ key.cookie = cookie;
++
++ return (struct mipv6_bul_entry *)
++ hashlist_get_ex(bul.entries, &key,
++ bul_compare_cookie);
++}
++
++/**
++ * mipv6_bul_reschedule - reschedule Binding Update List entry
++ * @entry: entry to reschedule
++ *
++ * Reschedules a Binding Update List entry. Must be called after
++ * modifying entry lifetime. Caller must hold @bul_lock (write).
++ **/
++void mipv6_bul_reschedule(struct mipv6_bul_entry *entry)
++{
++ DEBUG_FUNC();
++
++ hashlist_reposition(bul.entries,
++ (void *)entry,
++ entry->callback_time);
++ timer_update();
++}
++
++/**
++ * mipv6_bul_add - add binding update to Binding Update List
++ * @cn_addr: IPv6 address where BU was sent
++ * @home_addr: Home address for this binding
++ * @coa: Care-of address for this binding
++ * @lifetime: expiration time of the binding in seconds
++ * @seq: sequence number of the BU
++ * @flags: %MIPV6_BU_F_* flags
++ * @callback: callback function called on expiration
++ * @callback_time: expiration time for callback
++ * @state: binding send state
++ * @delay: retransmission delay
++ * @maxdelay: retransmission maximum delay
++ * @ops: Mobility header options for BU
++ * @rr: Return routability information
++ *
++ * Adds a binding update sent to @cn_addr for @home_addr to the
++ * Binding Update List. If entry already exists, it is updated.
++ * Entry is set to expire in @lifetime seconds. Entry has a callback
++ * function @callback that is called at @callback_time. Entry @state
++ * controls resending of this binding update and it can be set to
++ * %ACK_OK, %RESEND_EXP or %ACK_ERROR. Returns a pointer to the newly
++ * created or updated entry. Caller must hold @bul_lock (write).
++ **/
++struct mipv6_bul_entry *mipv6_bul_add(
++ struct in6_addr *cn_addr, struct in6_addr *home_addr,
++ struct in6_addr *coa,
++ __u32 lifetime, __u16 seq, __u8 flags,
++ int (*callback)(struct mipv6_bul_entry *entry),
++ __u32 callback_time,
++ __u8 state, __u32 delay, __u32 maxdelay,
++ struct mipv6_mh_opt *ops,
++ struct mipv6_rr_info *rr)
++{
++ struct mipv6_bul_entry *entry;
++ int update = 0;
++ struct in6_addr_pair hashkey;
++
++ DEBUG_FUNC();
++
++ if (unlikely(bul.entries == NULL))
++ return NULL;
++
++ if (cn_addr == NULL || home_addr == NULL || coa == NULL ||
++ callback == NULL ||
++ (state != ACK_OK && state != RESEND_EXP && state != ACK_ERROR)) {
++ /* lifetime/callback_time/delay/maxdelay are __u32, never < 0 */
++ DEBUG(DBG_ERROR, "invalid arguments");
++ return NULL;
++ }
++ DEBUG(DBG_INFO, "cn_addr: %x:%x:%x:%x:%x:%x:%x:%x, "
++ "home_addr: %x:%x:%x:%x:%x:%x:%x:%x, "
++ "coaddr: %x:%x:%x:%x:%x:%x:%x:%x", NIPV6ADDR(cn_addr),
++ NIPV6ADDR(home_addr), NIPV6ADDR(coa));
++ hashkey.a1 = cn_addr;
++ hashkey.a2 = home_addr;
++
++ /*
++ * decide whether to add a new entry or update existing, also
++ * check if there's room for a new entry when adding a new
++ * entry (latter is handled by mipv6_bul_get_entry()
++ */
++ if ((entry = (struct mipv6_bul_entry *)
++ hashlist_get(bul.entries, &hashkey)) != NULL) {
++ /* if an entry for this cn_addr exists (with smaller
++ * seq than the new entry's seq), update it */
++
++ if (MIPV6_SEQ_GT(seq, entry->seq)) {
++ DEBUG(DBG_INFO, "updating an existing entry");
++ update = 1;
++ } else {
++ DEBUG(DBG_INFO, "smaller seq than existing, not updating");
++ return NULL;
++ }
++ } else {
++ entry = mipv6_bul_get_entry();
++ if (entry == NULL) {
++ DEBUG(DBG_WARNING, "binding update list full, can't add!!!");
++ return NULL;
++ }
++ memset(entry, 0, sizeof(*entry));
++ /* First BU send happens here, save count in the entry */
++ entry->consecutive_sends = 1;
++ }
++
++ if (!update) {
++ ipv6_addr_copy(&(entry->cn_addr), cn_addr);
++ ipv6_addr_copy(&(entry->home_addr), home_addr);
++ entry->ops = ops;
++ }
++ /* Add Return Routability info to bul entry */
++ if (rr) {
++ if(entry->rr)
++ kfree(entry->rr);
++ entry->rr = rr;
++ }
++
++ ipv6_addr_copy(&(entry->coa), coa);
++ entry->lifetime = lifetime;
++ if (lifetime)
++ entry->expire = jiffies + lifetime * HZ;
++ else if (flags & MIPV6_BU_F_ACK)
++ entry->expire = jiffies + HOME_RESEND_EXPIRE * HZ;
++ entry->seq = seq;
++ entry->flags = flags;
++ entry->lastsend = jiffies; /* current time = last use of the entry */
++ entry->state = state;
++ entry->delay = delay;
++ entry->maxdelay = maxdelay;
++ entry->callback_time = jiffies + callback_time * HZ;
++ entry->callback = callback;
++
++ if (flags & MIPV6_BU_F_HOME &&
++ mipv6_mv_tnl_to_ha(cn_addr, coa, home_addr)) {
++ DEBUG(DBG_ERROR, "reconfiguration of the tunnel failed");
++ }
++ if (update) {
++ DEBUG(DBG_INFO, "updating entry: %p", entry);
++ hashlist_reposition(bul.entries, (void *)entry,
++ entry->callback_time);
++ } else {
++ DEBUG(DBG_INFO, "adding entry: %p", entry);
++
++ hashkey.a1 = &entry->cn_addr;
++ hashkey.a2 = &entry->home_addr;
++
++ if ((hashlist_add(bul.entries, &hashkey,
++ entry->callback_time,
++ entry)) < 0) {
++ DEBUG(DBG_ERROR, "Hash add failed");
++ mipv6_bul_entry_free(entry);
++ return NULL;
++ }
++ }
++ timer_update();
++
++ return entry;
++}
++
++/**
++ * mipv6_bul_delete - delete Binding Update List entry
++ * @cn_addr: address for entry to delete
++ *
++ * Deletes the entry for @cn_addr from the Binding Update List.
++ * Returns zero if entry was deleted succesfully, otherwise returns
++ * negative. Caller may not hold @bul_lock.
++ **/
++int mipv6_bul_delete(struct in6_addr *cn_addr, struct in6_addr *home_addr)
++{
++ struct mipv6_bul_entry *entry;
++ struct in6_addr_pair hashkey;
++
++ DEBUG_FUNC();
++
++ hashkey.a1 = cn_addr;
++ hashkey.a2 = home_addr;
++
++ write_lock_bh(&bul_lock); /* _bh: bul_lock is also taken from the timer softirq */
++
++ if (unlikely(bul.entries == NULL) ||
++ (entry = (struct mipv6_bul_entry *)
++ hashlist_get(bul.entries, &hashkey)) == NULL) {
++ write_unlock_bh(&bul_lock);
++ DEBUG(DBG_INFO, "No such entry");
++ return -ENOENT;
++ }
++
++ hashlist_delete(bul.entries, (void *)entry);
++
++ del_bul_entry_tnl(entry);
++
++ mipv6_bul_entry_free(entry);
++ timer_update();
++ write_unlock_bh(&bul_lock);
++
++ DEBUG(DBG_INFO, "Binding update list entry deleted");
++
++ return 0;
++}
++
++/**********************************************************************
++ *
++ * Proc interface functions
++ *
++ **********************************************************************/
++
++#define BUL_INFO_LEN 152
++
++struct procinfo_iterator_args {
++ char *buffer;
++ int offset;
++ int length;
++ int skip;
++ int len;
++};
++
++static int procinfo_iterator(void *data, void *args,
++ unsigned long *sortkey)
++{
++ struct procinfo_iterator_args *arg =
++ (struct procinfo_iterator_args *)args;
++ struct mipv6_bul_entry *entry =
++ (struct mipv6_bul_entry *)data;
++ unsigned long callback_seconds;
++
++ DEBUG_FUNC();
++
++ if (entry == NULL) return ITERATOR_ERR;
++
++ if (time_after(jiffies, entry->callback_time))
++ callback_seconds = 0;
++ else
++ callback_seconds = (entry->callback_time - jiffies) / HZ;
++
++ if (arg->skip < arg->offset / BUL_INFO_LEN) {
++ arg->skip++;
++ return ITERATOR_CONT;
++ }
++
++ if (arg->len >= arg->length)
++ return ITERATOR_CONT;
++
++ /* CN HoA CoA ExpInSecs SeqNum State Delay MaxDelay CallbackInSecs */
++ arg->len += sprintf(arg->buffer + arg->len,
++ "%08x%08x%08x%08x %08x%08x%08x%08x %08x%08x%08x%08x\n"
++ "%010lu %05d %02d %010d %010d %010lu\n",
++ ntohl(entry->cn_addr.s6_addr32[0]),
++ ntohl(entry->cn_addr.s6_addr32[1]),
++ ntohl(entry->cn_addr.s6_addr32[2]),
++ ntohl(entry->cn_addr.s6_addr32[3]),
++ ntohl(entry->home_addr.s6_addr32[0]),
++ ntohl(entry->home_addr.s6_addr32[1]),
++ ntohl(entry->home_addr.s6_addr32[2]),
++ ntohl(entry->home_addr.s6_addr32[3]),
++ ntohl(entry->coa.s6_addr32[0]),
++ ntohl(entry->coa.s6_addr32[1]),
++ ntohl(entry->coa.s6_addr32[2]),
++ ntohl(entry->coa.s6_addr32[3]),
++ (entry->expire - jiffies) / HZ,
++ entry->seq, entry->state, entry->delay,
++ entry->maxdelay, callback_seconds);
++
++ return ITERATOR_CONT;
++}
++
++
++/*
++ * Callback function for proc filesystem.
++ */
++static int bul_proc_info(char *buffer, char **start, off_t offset,
++ int length)
++{
++ struct procinfo_iterator_args args;
++
++ DEBUG_FUNC();
++
++ args.buffer = buffer;
++ args.offset = offset;
++ args.length = length;
++ args.skip = 0;
++ args.len = 0;
++
++ read_lock_bh(&bul_lock);
++ hashlist_iterate(bul.entries, &args, procinfo_iterator);
++ read_unlock_bh(&bul_lock);
++
++ *start = buffer;
++ if (offset)
++ *start += offset % BUL_INFO_LEN;
++
++ args.len -= offset % BUL_INFO_LEN;
++
++ if (args.len > length)
++ args.len = length;
++ if (args.len < 0)
++ args.len = 0;
++
++ return args.len;
++}
++
++/**********************************************************************
++ *
++ * Code module init/fini functions
++ *
++ **********************************************************************/
++
++int __init mipv6_bul_init(__u32 size)
++{
++ DEBUG_FUNC();
++
++ if (size < 1) {
++ DEBUG(DBG_CRITICAL,
++ "Binding update list size must be at least 1");
++ return -EINVAL;
++ }
++ bul.entries = hashlist_create(MIPV6_BUL_HASHSIZE, size,
++ sizeof(struct mipv6_bul_entry),
++ "mip6_bul", NULL, NULL,
++ bul_compare, bul_hash);
++
++ if (bul.entries == NULL) {
++ DEBUG(DBG_CRITICAL, "Couldn't allocate memory for "
++ "hashlist when creating a binding update list");
++ return -ENOMEM;
++ }
++ init_timer(&bul.callback_timer);
++ bul.callback_timer.data = 0;
++ bul.callback_timer.function = timer_handler;
++ proc_net_create("mip6_bul", 0, bul_proc_info);
++ DEBUG(DBG_INFO, "Binding update list initialized");
++ return 0;
++}
++
++void __exit mipv6_bul_exit(void) /* (void): match the prototype in bul.h */
++{
++ struct mipv6_bul_entry *entry;
++ struct hashlist *entries;
++
++ DEBUG_FUNC();
++
++ proc_net_remove("mip6_bul");
++
++ write_lock_bh(&bul_lock);
++
++ DEBUG(DBG_INFO, "Stopping the bul timer");
++ del_timer(&bul.callback_timer);
++
++ while ((entry = (struct mipv6_bul_entry *)
++ hashlist_get_first(bul.entries)) != NULL) {
++ hashlist_delete(bul.entries, (void *)entry);
++
++ del_bul_entry_tnl(entry);
++
++ mipv6_bul_entry_free(entry);
++ }
++ entries = bul.entries;
++ bul.entries = NULL;
++ write_unlock_bh(&bul_lock);
++
++ hashlist_destroy(entries);
++
++ DEBUG(DBG_INFO, "binding update list destroyed");
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/bul.h linux-2.4.25/net/ipv6/mobile_ip6/bul.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/bul.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/bul.h 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,91 @@
++/*
++ * MIPL Mobile IPv6 Binding Update List header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _BUL_H
++#define _BUL_H
++
++#include "hashlist.h"
++
++#define ACK_OK 0x01
++#define RESEND_EXP 0x02
++#define ACK_ERROR 0x04
++
++#define HOME_RESEND_EXPIRE 3600
++#define MIPV6_COOKIE_LEN 8
++struct mipv6_rr_info {
++ /* RR information */
++ u16 rr_state; /* State of the RR */
++ u16 rr_flags; /* Flags for the RR */
++ u8 hot_cookie[MIPV6_COOKIE_LEN]; /* HoT Cookie */
++ u8 cot_cookie[MIPV6_COOKIE_LEN]; /* CoT Cookie */
++ u8 home_cookie[MIPV6_COOKIE_LEN]; /* Home Cookie */
++ u8 careof_cookie[MIPV6_COOKIE_LEN]; /* Careof Cookie */
++ u32 lastsend_hoti; /* When HoTI was last sent (jiffies) */
++ u32 lastsend_coti; /* When CoTI was last sent (jiffies) */
++ u32 home_time; /* when Care-of cookie was received */
++ u32 careof_time; /* when Home cookie was received */
++ int home_nonce_index; /* Home cookie nonce index */
++ int careof_nonce_index; /* Care-of cookie nonce index */
++ u8 *kbu; /* Binding authentication key */
++};
++struct mipv6_bul_entry {
++ struct hashlist_entry e;
++ struct in6_addr cn_addr; /* CN to which BU was sent */
++ struct in6_addr home_addr; /* home address of this binding */
++ struct in6_addr coa; /* care-of address of the sent BU */
++
++ unsigned long expire; /* entry's expiration time (jiffies) */
++ __u32 lifetime; /* lifetime sent in this BU */
++ __u32 lastsend; /* last time when BU sent (jiffies) */
++ __u32 consecutive_sends; /* Number of consecutive BU's sent */
++ __u16 seq; /* sequence number of the latest BU */
++ __u8 flags; /* BU send flags */
++ __u8 state; /* resend state */
++ __u32 initdelay; /* initial ack wait */
++ __u32 delay; /* current ack wait */
++ __u32 maxdelay; /* maximum ack wait */
++
++ struct mipv6_rr_info *rr;
++ struct mipv6_mh_opt *ops; /* saved option values */
++
++ unsigned long callback_time;
++ int (*callback)(struct mipv6_bul_entry *entry);
++};
++
++extern rwlock_t bul_lock;
++
++int mipv6_bul_init(__u32 size);
++
++void mipv6_bul_exit(void);
++
++struct mipv6_bul_entry *mipv6_bul_add(
++ struct in6_addr *cn_addr, struct in6_addr *home_addr,
++ struct in6_addr *coa, __u32 lifetime, __u16 seq, __u8 flags,
++ int (*callback)(struct mipv6_bul_entry *entry), __u32 callback_time,
++ __u8 state, __u32 delay, __u32 maxdelay, struct mipv6_mh_opt *ops,
++ struct mipv6_rr_info *rr);
++
++int mipv6_bul_delete(struct in6_addr *cn_addr, struct in6_addr *home_addr);
++
++int mipv6_bul_exists(struct in6_addr *cnaddr, struct in6_addr *home_addr);
++
++struct mipv6_bul_entry *mipv6_bul_get(struct in6_addr *cnaddr,
++ struct in6_addr *home_addr);
++struct mipv6_bul_entry *mipv6_bul_get_by_ccookie(struct in6_addr *cn_addr,
++ u8 *cookie);
++
++int bul_entry_expired(struct mipv6_bul_entry *bulentry);
++
++void mipv6_bul_reschedule(struct mipv6_bul_entry *entry);
++
++int mipv6_bul_iterate(int (*func)(void *, void *, unsigned long *), void *args);
++
++#endif /* BUL_H */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/config.h linux-2.4.25/net/ipv6/mobile_ip6/config.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/config.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/config.h 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,72 @@
++/*
++ * Configuration parameters
++ *
++ * $Id$
++ */
++
++#define MIPV6VERSION "D24"
++#define MIPLVERSION "v1.0"
++
++#define CAP_CN 0x01
++#define CAP_HA 0x02
++#define CAP_MN 0x04
++
++struct mip6_conf {
++ int capabilities;
++ int debug_level;
++ int accept_ret_rout;
++ int max_rtr_reachable_time;
++ int eager_cell_switching;
++ int max_num_tunnels;
++ int min_num_tunnels;
++ int binding_refresh_advice;
++ int bu_lladdr;
++ int bu_keymgm;
++ int bu_cn_ack;
++};
++
++extern struct mip6_conf mip6node_cnf;
++
++struct mipv6_bce;
++
++struct mip6_func {
++ void (*bce_home_add) (int ifindex, struct in6_addr *daddr,
++ struct in6_addr *haddr, struct in6_addr *coa,
++ struct in6_addr *rep_coa, __u32 lifetime,
++ __u16 sequence, __u8 flags, __u8 *k_bu);
++ void (*bce_cache_add) (int ifindex, struct in6_addr *daddr,
++ struct in6_addr *haddr, struct in6_addr *coa,
++ struct in6_addr *rep_coa, __u32 lifetime,
++ __u16 sequence, __u8 flags, __u8 *k_bu);
++ void (*bce_home_del) (struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u16 sequence, __u8 flags,
++ __u8 *k_bu);
++ void (*bce_cache_del) (struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u16 sequence, __u8 flags,
++ __u8 *k_bu);
++
++ int (*bce_tnl_rt_add) (struct in6_addr *coa,
++ struct in6_addr *ha_addr,
++ struct in6_addr *home_addr);
++
++ void (*bce_tnl_rt_del) (struct in6_addr *coa,
++ struct in6_addr *ha_addr,
++ struct in6_addr *home_addr);
++
++ void (*proxy_del) (struct in6_addr *home_addr, struct mipv6_bce *entry);
++ int (*proxy_create) (int flags, int ifindex, struct in6_addr *coa,
++ struct in6_addr *our_addr, struct in6_addr *home_addr);
++
++ int (*icmpv6_dhaad_rep_rcv) (struct sk_buff *skb);
++ int (*icmpv6_dhaad_req_rcv) (struct sk_buff *skb);
++ int (*icmpv6_pfxadv_rcv) (struct sk_buff *skb);
++ int (*icmpv6_pfxsol_rcv) (struct sk_buff *skb);
++ int (*icmpv6_paramprob_rcv) (struct sk_buff *skb);
++
++ int (*mn_use_hao) (struct in6_addr *daddr, struct in6_addr *saddr);
++ void (*mn_check_tunneled_packet) (struct sk_buff *skb);
++};
++
++extern struct mip6_func mip6_fn;
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/debug.h linux-2.4.25/net/ipv6/mobile_ip6/debug.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/debug.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/debug.h 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,112 @@
++/*
++ * MIPL Mobile IPv6 Debugging macros and functions
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _DEBUG_H
++#define _DEBUG_H
++
++#include <linux/autoconf.h>
++
++/* priorities for different debug conditions */
++
++#define DBG_CRITICAL 0 /* unrecoverable error */
++#define DBG_ERROR 1 /* error (recoverable) */
++#define DBG_WARNING 2 /* unusual situation but not a real error */
++#define DBG_INFO 3 /* generally useful information */
++#define DBG_EXTRA 4 /* extra information */
++#define DBG_FUNC_ENTRY 6 /* use to indicate function entry and exit */
++#define DBG_DATADUMP 7 /* packet dumps, etc. lots of flood */
++
++/**
++ * NIPV6ADDR - macro for IPv6 addresses
++ * @addr: Network byte order IPv6 address
++ *
++ * Macro for printing IPv6 addresses. Used in conjunction with
++ * printk() or derivatives (such as DEBUG macro).
++ **/
++#define NIPV6ADDR(addr) \
++ ntohs(((u16 *)addr)[0]), \
++ ntohs(((u16 *)addr)[1]), \
++ ntohs(((u16 *)addr)[2]), \
++ ntohs(((u16 *)addr)[3]), \
++ ntohs(((u16 *)addr)[4]), \
++ ntohs(((u16 *)addr)[5]), \
++ ntohs(((u16 *)addr)[6]), \
++ ntohs(((u16 *)addr)[7])
++
++#ifdef CONFIG_IPV6_MOBILITY_DEBUG
++extern int mipv6_debug;
++
++/**
++ * debug_print - print debug message
++ * @debug_level: message priority
++ * @fname: calling function's name
++ * @fmt: printf-style formatting string
++ *
++ * Prints a debug message to system log if @debug_level is less or
++ * equal to @mipv6_debug. Should always be called using DEBUG()
++ * macro, not directly.
++ **/
++static inline void debug_print(int debug_level, const char *fname, const char* fmt, ...)
++{
++ char s[1024];
++ va_list args;
++
++ if (mipv6_debug < debug_level)
++ return;
++
++ va_start(args, fmt);
++ vsnprintf(s, sizeof(s), fmt, args); /* bounded: vsprintf could overflow s */
++ printk("mip6[%s]: %s\n", fname, s);
++ va_end(args);
++}
++
++/**
++ * debug_print_buffer - print arbitrary buffer to system log
++ * @debug_level: message priority
++ * @data: pointer to buffer
++ * @len: number of bytes to print
++ *
++ * Prints @len bytes from buffer @data to system log. @debug_level
++ * tells on which debug level message gets printed. For
++ * debug_print_buffer() priority %DBG_DATADUMP should be used.
++ **/
++#define debug_print_buffer(debug_level,data,len) { \
++ if (mipv6_debug >= debug_level) { \
++ int i; \
++ for (i=0; i<len; i++) { \
++ if (i%16 == 0) printk("\n%04x: ", i); \
++ printk("%02x ", ((unsigned char *)data)[i]); \
++ } \
++ printk("\n\n"); \
++ } \
++}
++
++#define DEBUG(x,y,z...) debug_print(x,__FUNCTION__,y,##z)
++#define DEBUG_FUNC() \
++DEBUG(DBG_FUNC_ENTRY, "%s(%d)/%s: ", __FILE__,__LINE__,__FUNCTION__)
++
++#else
++#define DEBUG(x,y,z...)
++#define DEBUG_FUNC()
++#define debug_print_buffer(x,y,z)
++#endif
++
++#undef ASSERT
++#define ASSERT(expression) { \
++ if (!(expression)) { \
++ (void)printk(KERN_ERR \
++ "Assertion \"%s\" failed: file \"%s\", function \"%s\", line %d\n", \
++ #expression, __FILE__, __FUNCTION__, __LINE__); \
++ BUG(); \
++ } \
++}
++
++#endif /* _DEBUG_H */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/exthdrs.c linux-2.4.25/net/ipv6/mobile_ip6/exthdrs.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/exthdrs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/exthdrs.c 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,394 @@
++/*
++ * Extension Header handling and adding code
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/types.h>
++#include <linux/slab.h>
++
++#include <net/ipv6.h>
++#include <net/ip6_route.h>
++#include <net/addrconf.h>
++#include <net/mipv6.h>
++
++#include "debug.h"
++#include "stats.h"
++#include "mobhdr.h"
++#include "bcache.h"
++#include "config.h"
++
++/**
++ * mipv6_append_home_addr - Add Home Address Option
++ * @opt: buffer for Home Address Option
++ * @offset: offset from beginning of @opt
++ * @addr: address for HAO
++ *
++ * Adds a Home Address Option to a packet. Option is stored in
++ * @offset from beginning of @opt. The option is created but the
++ * original source address in IPv6 header is left intact. The source
++ * address will be changed from home address to CoA after the checksum
++ * has been calculated in getfrag. Padding is done automatically, and
++ * @opt must have allocated space for both actual option and pad.
++ * Returns offset from @opt to end of options.
++ **/
++int mipv6_append_home_addr(__u8 *opt, int offset, struct in6_addr *addr)
++{
++ int pad;
++ struct mipv6_dstopt_homeaddr *ho;
++
++ DEBUG(DBG_DATADUMP, "HAO: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(addr));
++
++ pad = (6 - offset) & 7;
++ mipv6_add_pad(opt + offset, pad);
++
++ ho = (struct mipv6_dstopt_homeaddr *)(opt + offset + pad);
++ ho->type = MIPV6_TLV_HOMEADDR;
++ ho->length = sizeof(*ho) - 2;
++ ipv6_addr_copy(&ho->addr, addr);
++
++ return offset + pad + sizeof(*ho);
++}
++static inline int check_hao_validity(struct mipv6_dstopt_homeaddr *haopt,
++ u8 *dst1,
++ struct in6_addr *saddr,
++ struct in6_addr *daddr)
++{
++ int addr_type = ipv6_addr_type(&haopt->addr);
++ struct mipv6_bce bc_entry;
++
++ if (addr_type & IPV6_ADDR_LINKLOCAL ||
++ !(addr_type & IPV6_ADDR_UNICAST)) {
++ DEBUG(DBG_INFO, "HAO with link local or non-unicast HoA, "
++ "not sending BE to "
++ "home address "
++ "%x:%x:%x:%x:%x:%x:%x:%x "
++ "care-of address %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&haopt->addr),
++ NIPV6ADDR(saddr));
++ return -EINVAL;
++ } else if (dst1[0] != IPPROTO_MOBILITY &&
++ (mipv6_bcache_get(&haopt->addr,
++ daddr, &bc_entry) != 0 ||
++ ipv6_addr_cmp(saddr, &bc_entry.coa))) {
++ DEBUG(DBG_INFO, "HAO without binding or incorrect CoA, "
++ "sending BE code 1: "
++ "home address %x:%x:%x:%x:%x:%x:%x:%x "
++ "to care-of address %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&haopt->addr),
++ NIPV6ADDR(saddr));
++ return -ENOENT;
++ }
++ return 0;
++}
++/**
++ * mipv6_handle_homeaddr - Home Address Destination Option handler
++ * @skb: packet buffer
++ * @optoff: offset to where option begins
++ *
++ * Handles Home Address Option in IPv6 Destination Option header.
++ * Packet and offset to option are passed. If HAO is used without
++ * binding, sends a Binding Error code 1. When sending BE, notify bit
++ * is cleared to prevent IPv6 error handling from sending ICMP
++ * Parameter Problem. Returns 1 on success, otherwise zero.
++ **/
++int mipv6_handle_homeaddr(struct sk_buff *skb, int optoff)
++{
++ struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
++ struct in6_addr coaddr;
++ struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
++ struct mipv6_dstopt_homeaddr *haopt =
++ (struct mipv6_dstopt_homeaddr *) &skb->nh.raw[optoff];
++ u8 *dst1;
++ int err;
++
++ DEBUG_FUNC();
++
++ if (haopt->length != sizeof(*haopt) - 2) {
++ DEBUG(DBG_WARNING, "HAO has invalid length");
++ MIPV6_INC_STATS(n_ha_drop.invalid);
++ return 0;
++ }
++ dst1 = (u8 *)skb->h.raw;
++ err = check_hao_validity(haopt, dst1, saddr, &skb->nh.ipv6h->daddr);
++
++ if (err) {
++ haopt->type &= ~(0x80); /* clear notify bit */
++ if (err == -ENOENT)
++ mipv6_send_be(&skb->nh.ipv6h->daddr, saddr,
++ &haopt->addr, MIPV6_BE_HAO_WO_BINDING);
++ MIPV6_INC_STATS(n_ha_drop.misc);
++ return 0;
++ }
++ ipv6_addr_copy(&coaddr, saddr);
++ ipv6_addr_copy(saddr, &haopt->addr);
++ ipv6_addr_copy(&haopt->addr, &coaddr);
++ opt->hao = optoff;
++ if (mip6_fn.mn_check_tunneled_packet != NULL)
++ mip6_fn.mn_check_tunneled_packet(skb);
++
++ MIPV6_INC_STATS(n_ha_rcvd);
++ return 1;
++}
++
++/**
++ * mipv6_icmp_swap_addrs - Switch HAO and src and RT2 and dest for ICMP errors
++ * @skb: packet buffer
++ *
++ * Reset the source address and the Home Address option in skb before
++ * appending it to an ICMP error message, so original packet appears
++ * in the error message rather than mangled.
++ **/
++void mipv6_icmp_swap_addrs(struct sk_buff *skb)
++{
++ struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
++ struct in6_addr tmp;
++ struct in6_addr *hoa;
++ DEBUG_FUNC();
++ if (opt->srcrt2) {
++ struct rt2_hdr *rt2;
++ rt2 = (struct rt2_hdr *)(skb->nh.raw + opt->srcrt2);
++ hoa = &rt2->addr;
++
++ ipv6_addr_copy(&tmp, hoa);
++ ipv6_addr_copy(hoa, &skb->nh.ipv6h->daddr);
++ ipv6_addr_copy(&skb->nh.ipv6h->daddr, &tmp);
++ rt2->rt_hdr.segments_left++;
++ skb->nh.ipv6h->hop_limit++;
++ }
++ if (opt->hao) {
++ struct mipv6_dstopt_homeaddr *hao;
++ hao = (struct mipv6_dstopt_homeaddr *)(skb->nh.raw + opt->hao);
++ hoa = &hao->addr;
++
++ ipv6_addr_copy(&tmp, hoa);
++ ipv6_addr_copy(hoa, &skb->nh.ipv6h->saddr);
++ ipv6_addr_copy(&skb->nh.ipv6h->saddr, &tmp);
++ }
++}
++
++/**
++ * mipv6_append_rt2hdr - Add Type 2 Routing Header
++ * @rt: buffer for new routing header
++ * @addr: intermediate hop address
++ *
++ * Adds a Routing Header Type 2 in a packet. Stores newly created
++ * routing header in buffer @rt. Type 2 RT only carries one address,
++ * so there is no need to process old routing header. @rt must have
++ * allocated space for 24 bytes.
++ **/
++void mipv6_append_rt2hdr(struct ipv6_rt_hdr *rt, struct in6_addr *addr)
++{
++ struct rt2_hdr *rt2 = (struct rt2_hdr *)rt;
++
++ DEBUG(DBG_DATADUMP, "RT2: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(addr));
++
++ if (ipv6_addr_type(addr) == IPV6_ADDR_MULTICAST) {
++ DEBUG(DBG_ERROR, "destination address not unicast");
++ return;
++ }
++
++ memset(rt2, 0, sizeof(*rt2));
++ rt2->rt_hdr.type = 2;
++ rt2->rt_hdr.hdrlen = 2;
++ rt2->rt_hdr.segments_left = 1;
++ ipv6_addr_copy(&rt2->addr, addr);
++}
++
++/**
++ * mipv6_append_dst1opts - Add Destination Option (1) Headers
++ * @dst1opt: buffer for new destination options
++ * @saddr: address for Home Address Option
++ * @old_dst1opt: old destination options
++ * @len: length of options
++ *
++ * Adds Destination Option (1) Header to a packet. New options are
++ * stored in @dst1opt. If old destination options exist, they are
++ * copied from @old_dst1opt. Only Home Address Option is destination
++ * option. @dstopt must have allocated space for @len bytes. @len
++ * includes Destination Option Header (2 bytes), Home Address Option
++ * (18 bytes) and possible HAO pad (8n+6).
++ **/
++/*
++ * ISSUE: Home Address Destination Option should really be added to a
++ * new destination option header specified in Mobile IPv6 spec which
++ * should be placed after routing header(s), but before fragmentation
++ * header. Putting HAO in DO1 works for now, but support for the new
++ * placement should be added to the IPv6 stack.
++ */
++void
++mipv6_append_dst1opts(struct ipv6_opt_hdr *dst1opt, struct in6_addr *saddr,
++ struct ipv6_opt_hdr *old_dst1opt, int len)
++{
++ int offset;
++
++ if (old_dst1opt) {
++ memcpy(dst1opt, old_dst1opt, ipv6_optlen(old_dst1opt));
++ offset = ipv6_optlen(old_dst1opt);
++ } else {
++ offset = sizeof (*dst1opt);
++ }
++ dst1opt->hdrlen = (len >> 3) - 1;
++ mipv6_append_home_addr((__u8 *) dst1opt, offset, saddr);
++}
++
++/**
++ * mipv6_modify_txoptions - Modify outgoing packets
++ * @sk: socket
++ * @skb: packet buffer for outgoing packet
++ * @old_opt: transmit options
++ * @fl: packet flow structure
++ * @dst: pointer to destination cache entry
++ *
++ * Adds Home Address Option (for MN packets, when not at home) and
++ * Routing Header Type 2 (for CN packets when sending to an MN) to
++ * data packets. Old extension headers are copied from @old_opt (if
++ * any). Extension headers are _explicitly_ added for packets with
++ * Mobility Header. Returns the new header structure, or old if no
++ * changes.
++ **/
++struct ipv6_txoptions *
++mipv6_modify_txoptions(struct sock *sk, struct sk_buff *skb,
++ struct ipv6_txoptions *old_opt, struct flowi *fl,
++ struct dst_entry **dst)
++{
++ struct ipv6_opt_hdr *old_hopopt = NULL;
++ struct ipv6_opt_hdr *old_dst1opt = NULL;
++ struct ipv6_rt_hdr *old_srcrt = NULL;
++
++ int srcrtlen = 0, dst1len = 0;
++ int tot_len, use_hao = 0;
++ struct ipv6_txoptions *opt;
++ struct mipv6_bce bc_entry;
++ struct in6_addr tmpaddr, *saddr, *daddr, coaddr;
++ __u8 *opt_ptr;
++
++ DEBUG_FUNC();
++
++ if (fl->proto == IPPROTO_MOBILITY) return old_opt;
++ /*
++ * we have to be prepared to the fact that saddr might not be present,
++ * if that is the case, we acquire saddr just as kernel does.
++ */
++ saddr = fl ? fl->fl6_src : NULL;
++ daddr = fl ? fl->fl6_dst : NULL;
++
++ if (daddr == NULL)
++ return old_opt;
++ if (saddr == NULL) {
++ int err = ipv6_get_saddr(NULL, daddr, &tmpaddr);
++ if (err)
++ return old_opt;
++ else
++ saddr = &tmpaddr;
++ }
++
++ DEBUG(DBG_DATADUMP,
++ "dest. address of packet: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(daddr));
++ DEBUG(DBG_DATADUMP, " and src. address: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(saddr));
++
++ if (old_opt) {
++ old_hopopt = old_opt->hopopt;
++ old_dst1opt = old_opt->dst1opt;
++ old_srcrt = old_opt->srcrt;
++ }
++
++ if (mip6_fn.mn_use_hao != NULL)
++ use_hao = mip6_fn.mn_use_hao(daddr, saddr);
++
++ if (use_hao) {
++ if (old_dst1opt)
++ dst1len = ipv6_optlen(old_dst1opt);
++ dst1len += sizeof(struct mipv6_dstopt_homeaddr) +
++ ((6 - dst1len) & 7); /* padding */
++ }
++
++ if (mipv6_bcache_get(daddr, saddr, &bc_entry) == 0)
++ srcrtlen = sizeof(struct rt2_hdr);
++
++ if ((tot_len = srcrtlen + dst1len) == 0) {
++ return old_opt;
++ }
++
++ tot_len += sizeof(*opt);
++
++ if (!(opt = kmalloc(tot_len, GFP_ATOMIC))) {
++ return NULL;
++ }
++ memset(opt, 0, tot_len);
++ opt->tot_len = tot_len;
++ opt_ptr = (__u8 *) (opt + 1);
++
++ if (old_srcrt) {
++ opt->srcrt = old_srcrt;
++ opt->opt_nflen += ipv6_optlen(old_srcrt);
++ }
++
++ if (srcrtlen) {
++ DEBUG(DBG_DATADUMP, "Binding exists. Adding routing header");
++
++ opt->srcrt2 = (struct ipv6_rt_hdr *) opt_ptr;
++ opt->opt_nflen += srcrtlen;
++ opt_ptr += srcrtlen;
++
++ /*
++ * Append care-of-address to routing header (original
++ * destination address is home address, the first
++ * source route segment gets put to the destination
++ * address and the home address gets to the last
++ * segment of source route (just as it should))
++ */
++
++ ipv6_addr_copy(&coaddr, &bc_entry.coa);
++
++ mipv6_append_rt2hdr(opt->srcrt2, &coaddr);
++
++ /*
++ * reroute output (we have to do this in case of TCP
++ * segment) unless a routing header of type 0 is also added
++ */
++ if (dst && !opt->srcrt) {
++ struct in6_addr *tmp = fl->fl6_dst;
++ fl->fl6_dst = &coaddr;
++
++ dst_release(*dst);
++ *dst = ip6_route_output(sk, fl);
++ if (skb)
++ skb->dst = *dst;
++ fl->fl6_dst = tmp;
++
++ DEBUG(DBG_DATADUMP, "Rerouted outgoing packet");
++ }
++ }
++
++ /* Only home address option is inserted to first dst opt header */
++ if (dst1len) {
++ opt->dst1opt = (struct ipv6_opt_hdr *) opt_ptr;
++ opt->opt_flen += dst1len;
++ opt_ptr += dst1len;
++ mipv6_append_dst1opts(opt->dst1opt, saddr,
++ old_dst1opt, dst1len);
++ opt->mipv6_flags = MIPV6_SND_HAO;
++ } else if (old_dst1opt) {
++ opt->dst1opt = old_dst1opt;
++ opt->opt_flen += ipv6_optlen(old_dst1opt);
++ }
++ if (old_hopopt) {
++ opt->hopopt = old_hopopt;
++ opt->opt_nflen += ipv6_optlen(old_hopopt);
++ }
++
++ return opt;
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/exthdrs.h linux-2.4.25/net/ipv6/mobile_ip6/exthdrs.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/exthdrs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/exthdrs.h 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,47 @@
++/*
++ * MIPL Mobile IPv6 Extension Headers header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _MIPV6_EXTHDRS_H
++#define _MIPV6_EXTHDRS_H
++
++struct in6_addr;
++struct sk_buff;
++struct ipv6_rt_hdr;
++struct ipv6_opt_hdr;
++struct ipv6_txoptions;
++struct flowi;
++struct dst_entry;
++/*
++ * Home Address Destination Option function prototypes
++ */
++int mipv6_append_home_addr(__u8 *opt, int offset, struct in6_addr *addr);
++
++int mipv6_handle_homeaddr(struct sk_buff *skb, int optoff);
++
++void mipv6_icmp_swap_addrs(struct sk_buff *skb);
++
++/*
++ * Creates a routing header of type 2.
++ */
++void mipv6_append_rt2hdr(struct ipv6_rt_hdr *srcrt, struct in6_addr *addr);
++
++/* Function to add the first destination option header, which may
++ * include a home address option.
++ */
++void mipv6_append_dst1opts(struct ipv6_opt_hdr *dst1opt, struct in6_addr *saddr,
++ struct ipv6_opt_hdr *old_dst1opt, int len);
++
++struct ipv6_txoptions *mipv6_modify_txoptions(
++ struct sock *sk, struct sk_buff *skb,
++ struct ipv6_txoptions *old_opt, struct flowi *fl,
++ struct dst_entry **dst);
++
++#endif /* _MIPV6_EXTHDRS_H */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/ha.c linux-2.4.25/net/ipv6/mobile_ip6/ha.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/ha.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/ha.c 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,553 @@
++/*
++ * Home-agent functionality
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ * Henrik Petander <lpetande@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Changes: Venkata Jagana,
++ * Krishna Kumar : Statistics fix
++ * Masahide Nakamura : Use of mipv6_forward
++ *
++ */
++
++#include <linux/autoconf.h>
++#include <linux/net.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/netdevice.h>
++#include <linux/in6.h>
++#include <linux/init.h>
++#include <linux/netfilter.h>
++#include <linux/netfilter_ipv6.h>
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++#endif
++
++#include <net/neighbour.h>
++#include <net/ipv6.h>
++#include <net/ip6_fib.h>
++#include <net/ip6_route.h>
++#include <net/ndisc.h>
++#include <net/addrconf.h>
++#include <net/neighbour.h>
++
++#include "tunnel_ha.h"
++#include "bcache.h"
++#include "stats.h"
++#include "debug.h"
++#include "util.h"
++#include "ha.h"
++#include "config.h"
++#include "mobhdr.h"
++
++static int mipv6_ha_tunnel_sitelocal = 0;
++
++#ifdef CONFIG_SYSCTL
++
++static struct ctl_table_header *mipv6_ha_sysctl_header;
++
++static struct mipv6_ha_sysctl_table
++{
++ struct ctl_table_header *sysctl_header;
++ ctl_table mipv6_vars[3];
++ ctl_table mipv6_mobility_table[2];
++ ctl_table mipv6_proto_table[2];
++ ctl_table mipv6_root_table[2];
++} mipv6_ha_sysctl = {
++ NULL,
++
++ {{NET_IPV6_MOBILITY_TUNNEL_SITELOCAL, "tunnel_sitelocal",
++ &mipv6_ha_tunnel_sitelocal, sizeof(int), 0644, NULL,
++ &proc_dointvec},
++ {0}},
++
++ {{NET_IPV6_MOBILITY, "mobility", NULL, 0, 0555,
++ mipv6_ha_sysctl.mipv6_vars}, {0}},
++ {{NET_IPV6, "ipv6", NULL, 0, 0555,
++ mipv6_ha_sysctl.mipv6_mobility_table}, {0}},
++ {{CTL_NET, "net", NULL, 0, 0555,
++ mipv6_ha_sysctl.mipv6_proto_table}, {0}}
++};
++
++#endif /* CONFIG_SYSCTL */
++
++
++/* this is defined in kernel IPv6 module (sockglue.c) */
++extern struct packet_type ipv6_packet_type;
++
++/* mipv6_forward: Intercept NS packets destined to home address of MN */
++int mipv6_forward(struct sk_buff *skb)
++{
++ struct ipv6hdr *ipv6h;
++ struct in6_addr *daddr, *saddr;
++ __u8 nexthdr;
++ int nhoff;
++
++ if (skb == NULL) return 0;
++
++ ipv6h = skb->nh.ipv6h;
++ daddr = &ipv6h->daddr;
++ saddr = &ipv6h->saddr;
++
++ nexthdr = ipv6h->nexthdr;
++ nhoff = sizeof(*ipv6h);
++
++ if (ipv6_ext_hdr(nexthdr))
++ nhoff = ipv6_skip_exthdr(skb, nhoff, &nexthdr,
++ skb->len - sizeof(*ipv6h));
++
++ /* Do not forward Neighbor Solicitations destined to the Home Address of MN */
++ if (nexthdr == IPPROTO_ICMPV6) {
++ struct icmp6hdr *icmp6h;
++ int dest_type;
++
++ if (nhoff < 0 || !pskb_may_pull(skb, nhoff +
++ sizeof(struct icmp6hdr))) {
++ kfree_skb(skb);
++ return 0;
++ }
++
++ dest_type = ipv6_addr_type(daddr);
++ icmp6h = (struct icmp6hdr *)&skb->nh.raw[nhoff];
++
++ /* Intercepts NS to HoA of MN */
++
++ if ((icmp6h->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) ||
++ ((dest_type & IPV6_ADDR_MULTICAST) &&
++ (icmp6h->icmp6_type == NDISC_ROUTER_ADVERTISEMENT))) {
++ ip6_input(skb);
++ } else {
++ ip6_forward(skb);
++ }
++ } else {
++ ip6_forward(skb);
++ }
++ return 0;
++}
++
++
++/**
++ * mipv6_proxy_nd_rem - stop acting as a proxy for @home_address
++ * @home_addr: address to remove
++ * @ifindex: interface index of the home link
++ * @linklocal: link-local compatibility bit
++ *
++ * When Home Agent acts as a proxy for an address it must leave the
++ * solicited node multicast group for that address and stop responding
++ * to neighbour solicitations.
++ **/
++static int mipv6_proxy_nd_rem(struct in6_addr *home_addr,
++ int ifindex, int linklocal)
++{
++ /* When MN returns home HA leaves the solicited mcast groups
++ * for MNs home addresses
++ */
++ int err;
++ struct net_device *dev;
++
++ DEBUG_FUNC();
++
++ if ((dev = dev_get_by_index(ifindex)) == NULL) {
++ DEBUG(DBG_ERROR, "couldn't get dev");
++ return -ENODEV;
++ }
++#if 1 /* TEST */
++ /* Remove link-local entry */
++ if (linklocal) {
++ struct in6_addr ll_addr;
++ mipv6_generate_ll_addr(&ll_addr, home_addr);
++ if ((err = pneigh_delete(&nd_tbl, &ll_addr, dev)) < 0) {
++ DEBUG(DBG_INFO,
++ "peigh_delete failed for "
++ "%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&ll_addr));
++ }
++ }
++#endif
++ /* Remove global (or site-local) entry */
++ if ((err = pneigh_delete(&nd_tbl, home_addr, dev)) < 0) {
++ DEBUG(DBG_INFO,
++ "peigh_delete failed for "
++ "%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(home_addr));
++ }
++ dev_put(dev);
++ return err;
++}
++
++/**
++ * mipv6_proxy_nd - join multicast group for this address
++ * @home_addr: address to defend
++ * @ha_addr: home agent's address on home link
++ * @linklocal: link-local compatibility bit
++ *
++ * While Mobile Node is away from home, Home Agent acts as a proxy for
++ * @home_address. HA responds to neighbour solicitations for @home_address
++ * thus getting all packets destined to home address of MN.
++ **/
++static int mipv6_proxy_nd(struct in6_addr *home_addr,
++ int ifindex, int linklocal)
++{
++ /* The HA sends a proxy ndisc_na message to all hosts on MN's
++ * home subnet by sending a neighbor advertisement with the
++ * home address or all addresses of the mobile node if the
++ * prefix is not 0. The addresses are formed by combining the
++ * suffix or the host part of the address with each subnet
++ * prefix that exists in the home subnet
++ */
++
++ /* Since no previous entry for MN exists a proxy_nd advertisement
++ * is sent to the all-nodes link-local multicast address
++ */
++ int err = -1;
++
++ struct net_device *dev;
++ struct in6_addr na_saddr;
++ struct in6_addr ll_addr;
++ struct pneigh_entry *ll_pneigh;
++ struct in6_addr mcdest;
++ int send_ll_na = 0;
++ int inc_opt = 1;
++ int solicited = 0;
++ int override = 1;
++
++ DEBUG_FUNC();
++
++ if ((dev = dev_get_by_index(ifindex)) == NULL) {
++ DEBUG(DBG_ERROR, "couldn't get dev");
++ return -ENODEV;
++ }
++
++ if (!pneigh_lookup(&nd_tbl, home_addr, dev, 1)) {
++ DEBUG(DBG_INFO,
++ "peigh_lookup failed for "
++ "%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(home_addr));
++ goto free_dev;
++ }
++#if 1 /* TEST */
++ if (linklocal) {
++ mipv6_generate_ll_addr(&ll_addr, home_addr);
++
++ if ((ll_pneigh = pneigh_lookup(&nd_tbl, &ll_addr,
++ dev, 1)) == NULL) {
++ DEBUG(DBG_INFO,
++ "peigh_lookup failed for "
++ "%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&ll_addr));
++ pneigh_delete(&nd_tbl, home_addr, dev);
++ goto free_dev;
++ } else {
++ send_ll_na = 1;
++ }
++ } else {
++ ll_pneigh = NULL;
++ }
++#endif
++ /* Proxy neighbor advertisement of MN's home address
++ * to all nodes solicited multicast address
++ */
++ if (!ipv6_get_lladdr(dev, &na_saddr)) {
++ ipv6_addr_all_nodes(&mcdest);
++ ndisc_send_na(dev, NULL, &mcdest, home_addr, 0,
++ solicited, override, inc_opt);
++#if 1 /* TEST */
++ if (send_ll_na) {
++ ndisc_send_na(dev, NULL, &mcdest, &ll_addr,
++ 0, solicited, override, inc_opt);
++ }
++#endif
++ err = 0;
++ } else {
++ DEBUG(DBG_ERROR, "failed to get link local address for sending proxy NA");
++ }
++free_dev:
++ dev_put(dev);
++ return err;
++
++}
++
++struct inet6_ifaddr *is_on_link_ipv6_address(struct in6_addr *mn_haddr,
++ struct in6_addr *ha_addr)
++{
++ struct inet6_ifaddr *ifp;
++ struct inet6_dev *in6_dev;
++ struct inet6_ifaddr *oifp = NULL;
++
++ if ((ifp = ipv6_get_ifaddr(ha_addr, 0)) == NULL)
++ return NULL;
++
++ if ((in6_dev = ifp->idev) != NULL) {
++ in6_dev_hold(in6_dev);
++ oifp = in6_dev->addr_list;
++ while (oifp != NULL) {
++ spin_lock(&oifp->lock);
++ if (mipv6_prefix_compare(&oifp->addr, mn_haddr,
++ oifp->prefix_len) &&
++ !(oifp->flags & IFA_F_TENTATIVE)) {
++ spin_unlock(&oifp->lock);
++ DEBUG(DBG_INFO, "Home Addr Opt: on-link");
++ in6_ifa_hold(oifp);
++ break;
++ }
++ spin_unlock(&oifp->lock);
++ oifp = oifp->if_next;
++ }
++ in6_dev_put(in6_dev);
++ }
++ in6_ifa_put(ifp);
++/* DEBUG(DBG_WARNING, "Home Addr Opt NOT on-link"); */
++ return oifp;
++
++}
++
++/*
++ * Lifetime checks. ifp->valid_lft >= ifp->prefered_lft always (see addrconf.c)
++ * Returned value is in seconds.
++ */
++
++static __u32 get_min_lifetime(struct inet6_ifaddr *ifp, __u32 lifetime)
++{
++ __u32 rem_lifetime = 0;
++ unsigned long now = jiffies;
++
++ if (ifp->valid_lft == 0) {
++ rem_lifetime = lifetime;
++ } else {
++ __u32 valid_lft_left =
++ ifp->valid_lft - ((now - ifp->tstamp) / HZ);
++ rem_lifetime =
++ min_t(unsigned long, valid_lft_left, lifetime);
++ }
++
++ return rem_lifetime;
++}
++
++#define MAX_LIFETIME 1000
++
++/**
++ * mipv6_lifetime_check - check maximum lifetime is not exceeded
++ * @lifetime: lifetime to check
++ *
++ * Checks @lifetime does not exceed %MAX_LIFETIME. Returns @lifetime
++ * if not exceeded, otherwise returns %MAX_LIFETIME.
++ **/
++static int mipv6_lifetime_check(int lifetime)
++{
++ return (lifetime > MAX_LIFETIME) ? MAX_LIFETIME : lifetime;
++}
++
++/* Generic routine handling finish of BU processing */
++void mipv6_bu_finish(struct inet6_ifaddr *ifp, int ifindex, __u8 ba_status,
++ struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u32 ba_lifetime, __u16 sequence, __u8 flags, __u8 *k_bu)
++{
++ int err;
++
++ if (ba_status >= REASON_UNSPECIFIED) {
++ /* DAD failed */
++ goto out;
++ }
++
++ ba_lifetime = get_min_lifetime(ifp, ba_lifetime);
++ ba_lifetime = mipv6_lifetime_check(ba_lifetime);
++
++ if ((err = mipv6_bcache_add(ifindex, daddr, haddr, coa,
++ ba_lifetime, sequence, flags,
++ HOME_REGISTRATION)) != 0 ) {
++ DEBUG(DBG_WARNING, "home reg failed.");
++
++ if (err == -ENOMEDIUM)
++ return;
++
++ ba_status = INSUFFICIENT_RESOURCES;
++ } else {
++ DEBUG(DBG_INFO, "home reg succeeded.");
++ }
++
++ DEBUG(DBG_DATADUMP, "home_addr: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(haddr));
++ DEBUG(DBG_DATADUMP, "coa: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(coa));
++ DEBUG(DBG_DATADUMP, "lifet:%d, seq:%d", ba_lifetime, sequence);
++out:
++ mipv6_send_ba(daddr, haddr, coa, rep_coa, ba_status, sequence,
++ ba_lifetime, k_bu);
++}
++
++static int ha_proxy_create(int flags, int ifindex, struct in6_addr *coa,
++ struct in6_addr *our_addr, struct in6_addr *home_addr)
++{
++ int ret;
++
++ if ((ret = mipv6_add_tnl_to_mn(coa, our_addr, home_addr)) <= 0) {
++ if (ret != -ENOMEDIUM) {
++ DEBUG(DBG_ERROR, "unable to configure tunnel to MN!");
++ }
++ return -1;
++ }
++ if (mipv6_proxy_nd(home_addr, ifindex,
++ flags & MIPV6_BU_F_LLADDR) != 0) {
++ DEBUG(DBG_ERROR, "mipv6_proxy_nd failed!");
++ mipv6_del_tnl_to_mn(coa, our_addr, home_addr);
++ return -2;
++ }
++ return 0;
++}
++
++static void ha_proxy_del(struct in6_addr *home_addr, struct mipv6_bce *entry)
++{
++ if (mipv6_proxy_nd_rem(&entry->home_addr, entry->ifindex,
++ entry->flags & MIPV6_BU_F_LLADDR) == 0) {
++ DEBUG(DBG_INFO, "proxy_nd succ");
++ } else {
++ DEBUG(DBG_INFO, "proxy_nd fail");
++ }
++ mipv6_del_tnl_to_mn(&entry->coa, &entry->our_addr, home_addr);
++}
++
++static void bc_home_add(int ifindex,
++ struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u32 lifetime, __u16 sequence, __u8 flags,
++ __u8 *k_bu)
++{
++ struct inet6_ifaddr *ifp = NULL;
++ __u8 ba_status = SUCCESS;
++
++ DEBUG_FUNC();
++
++ ifp = is_on_link_ipv6_address(haddr, daddr);
++
++ if (ifp == NULL) {
++ ba_status = NOT_HOME_SUBNET;
++ } else if (((ipv6_addr_type(haddr) & IPV6_ADDR_SITELOCAL) ||
++ (ipv6_addr_type(coa) & IPV6_ADDR_SITELOCAL))
++ && !mipv6_ha_tunnel_sitelocal) {
++ /* Site-local home or care-of addresses are not
++ accepted by default */
++ ba_status = ADMINISTRATIVELY_PROHIBITED;
++ } else {
++ int ret;
++
++ ifindex = ifp->idev->dev->ifindex;
++
++ if ((ret = mipv6_dad_start(ifp, ifindex, daddr,
++ haddr, coa, rep_coa, lifetime,
++ sequence, flags)) < 0) {
++ /* An error occurred */
++ ba_status = -ret;
++ } else if (ret) {
++ /* DAD is needed to be performed. */
++ in6_ifa_put(ifp);
++ return;
++ }
++ }
++
++ mipv6_bu_finish(ifp, ifindex, ba_status, daddr, haddr, coa,
++ rep_coa, lifetime, sequence, flags, k_bu);
++ if (ifp)
++ in6_ifa_put(ifp);
++}
++
++static void bc_home_delete(struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u16 sequence, __u8 flags, __u8 *k_bu)
++{
++ __u8 status = SUCCESS;
++ struct mipv6_bce bce;
++
++ /* Primary Care-of Address Deregistration */
++ if (mipv6_bcache_get(haddr, daddr, &bce) < 0) {
++ DEBUG(DBG_INFO, "entry is not in cache");
++ status = NOT_HA_FOR_MN;
++ } else {
++ ha_proxy_del(&bce.home_addr, &bce);
++ mipv6_bcache_delete(haddr, daddr, HOME_REGISTRATION);
++ }
++ mipv6_send_ba(daddr, haddr, coa, rep_coa, status, sequence, 0, k_bu);
++}
++
++extern int mipv6_ra_rcv_ptr(struct sk_buff *skb, struct icmp6hdr *msg);
++
++
++static int
++mipv6_ha_tnl_xmit_stats_hook(struct ip6_tnl *t, struct sk_buff *skb)
++{
++ DEBUG_FUNC();
++ if (is_mip6_tnl(t))
++ MIPV6_INC_STATS(n_encapsulations);
++ return IP6_TNL_ACCEPT;
++}
++
++static struct ip6_tnl_hook_ops mipv6_ha_tnl_xmit_stats_ops = {
++ {NULL, NULL},
++ IP6_TNL_PRE_ENCAP,
++ IP6_TNL_PRI_LAST,
++ mipv6_ha_tnl_xmit_stats_hook
++};
++
++static int
++mipv6_ha_tnl_rcv_stats_hook(struct ip6_tnl *t, struct sk_buff *skb)
++{
++ DEBUG_FUNC();
++ if (is_mip6_tnl(t))
++ MIPV6_INC_STATS(n_decapsulations);
++ return IP6_TNL_ACCEPT;
++}
++
++static struct ip6_tnl_hook_ops mipv6_ha_tnl_rcv_stats_ops = {
++ {NULL, NULL},
++ IP6_TNL_PRE_DECAP,
++ IP6_TNL_PRI_LAST,
++ mipv6_ha_tnl_rcv_stats_hook
++};
++
++static struct mip6_func old;
++
++int __init mipv6_ha_init(void)
++{
++ DEBUG_FUNC();
++
++#ifdef CONFIG_SYSCTL
++ if (!(mipv6_ha_sysctl_header =
++ register_sysctl_table(mipv6_ha_sysctl.mipv6_root_table, 0)))
++ printk(KERN_ERR "Failed to register sysctl handlers!");
++#endif
++ memcpy(&old, &mip6_fn, sizeof(struct mip6_func));
++ mip6_fn.bce_home_add = bc_home_add;
++ mip6_fn.bce_home_del = bc_home_delete;
++ mip6_fn.proxy_del = ha_proxy_del;
++ mip6_fn.proxy_create = ha_proxy_create;
++ /* register packet interception hooks */
++ ip6ip6_tnl_register_hook(&mipv6_ha_tnl_xmit_stats_ops);
++ ip6ip6_tnl_register_hook(&mipv6_ha_tnl_rcv_stats_ops);
++ return 0;
++}
++
++void __exit mipv6_ha_exit(void)
++{
++ DEBUG_FUNC();
++
++#ifdef CONFIG_SYSCTL
++ unregister_sysctl_table(mipv6_ha_sysctl_header);
++#endif
++
++ /* remove packet interception hooks */
++ ip6ip6_tnl_unregister_hook(&mipv6_ha_tnl_rcv_stats_ops);
++ ip6ip6_tnl_unregister_hook(&mipv6_ha_tnl_xmit_stats_ops);
++
++ mip6_fn.bce_home_add = old.bce_home_add;
++ mip6_fn.bce_home_del = old.bce_home_del;
++ mip6_fn.proxy_del = old.proxy_del;
++ mip6_fn.proxy_create = old.proxy_create;
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/ha.h linux-2.4.25/net/ipv6/mobile_ip6/ha.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/ha.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/ha.h 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,39 @@
++/*
++ * MIPL Mobile IPv6 Home Agent header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _HA_H
++#define _HA_H
++
++int mipv6_ha_init(void);
++void mipv6_ha_exit(void);
++
++int mipv6_dad_start(struct inet6_ifaddr *ifp, int ifindex,
++ struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u32 ba_lifetime, __u16 sequence, __u8 flags);
++
++void mipv6_bu_finish(struct inet6_ifaddr *ifp, int ifindex,
++ __u8 ba_status, struct in6_addr *daddr,
++ struct in6_addr *haddr, struct in6_addr *coa,
++ struct in6_addr *rep_coa, __u32 ba_lifetime,
++ __u16 sequence, __u8 flags, __u8 *k_bu);
++
++
++static __inline__ void mipv6_generate_ll_addr(struct in6_addr *ll_addr,
++ struct in6_addr *addr)
++{
++ ll_addr->s6_addr32[0] = htonl(0xfe800000);
++ ll_addr->s6_addr32[1] = 0;
++ ll_addr->s6_addr32[2] = addr->s6_addr32[2];
++ ll_addr->s6_addr32[3] = addr->s6_addr32[3];
++}
++
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/halist.c linux-2.4.25/net/ipv6/mobile_ip6/halist.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/halist.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/halist.c 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,507 @@
++/*
++ * Home Agents List
++ *
++ * Authors:
++ * Antti Tuominen <ajtuomin@tml.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#define PREF_BASE 0xffff /* MAX value for u16 field in RA */
++
++#include <linux/autoconf.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/proc_fs.h>
++#include <linux/init.h>
++#include <net/ipv6.h>
++#include <net/addrconf.h>
++
++#include "hashlist.h"
++#include "util.h"
++#include "debug.h"
++
++struct mipv6_halist {
++ struct hashlist *entries;
++ struct timer_list expire_timer;
++};
++
++static rwlock_t home_agents_lock = RW_LOCK_UNLOCKED;
++
++static struct mipv6_halist home_agents;
++
++struct mipv6_halist_entry {
++ struct hashlist_entry e;
++ int ifindex; /* Link identifier */
++ struct in6_addr link_local_addr; /* HA's link-local address */
++ struct in6_addr global_addr; /* HA's Global address */
++ int plen;
++ long preference; /* The preference for this HA */
++ unsigned long expire; /* expiration time (jiffies) */
++};
++
++static inline void mipv6_ha_ac_add(struct in6_addr *ll_addr, int ifindex,
++ struct in6_addr *glob_addr, int plen)
++{
++ struct net_device *dev;
++
++ if ((dev = __dev_get_by_index(ifindex)) && ipv6_chk_addr(ll_addr, dev)) {
++ struct in6_addr addr;
++ mipv6_ha_anycast(&addr, glob_addr, plen);
++ ipv6_dev_ac_inc(dev, &addr);
++ }
++}
++
++static inline void mipv6_ha_ac_del(struct in6_addr *ll_addr, int ifindex,
++ struct in6_addr *glob_addr, int plen)
++{
++ struct net_device *dev;
++
++ if ((dev = __dev_get_by_index(ifindex)) && ipv6_chk_addr(ll_addr, dev)) {
++ struct in6_addr addr;
++ mipv6_ha_anycast(&addr, glob_addr, plen);
++ ipv6_dev_ac_dec(dev, &addr);
++ }
++}
++
++struct preflist_iterator_args {
++ int count;
++ int requested;
++ int ifindex;
++ struct in6_addr *list;
++};
++
++static int preflist_iterator(void *data, void *args,
++ unsigned long *pref)
++{
++ struct preflist_iterator_args *state =
++ (struct preflist_iterator_args *)args;
++ struct mipv6_halist_entry *entry =
++ (struct mipv6_halist_entry *)data;
++ struct in6_addr *newaddr =
++ (struct in6_addr *)state->list + state->count;
++
++ if (state->count >= state->requested)
++ return ITERATOR_STOP;
++
++ if (time_after(jiffies, entry->expire)) {
++ if (!ipv6_addr_any(&entry->link_local_addr)) {
++ mipv6_ha_ac_del(&entry->link_local_addr,
++ entry->ifindex,
++ &entry->global_addr, entry->plen);
++ }
++ DEBUG(DBG_INFO, "preflist_iterator: Deleting entry with address %x:%x:%x:%x:%x:%x:%x:%x to list", NIPV6ADDR(&entry->global_addr));
++ return ITERATOR_DELETE_ENTRY;
++ }
++ if (state->ifindex != entry->ifindex)
++ return ITERATOR_CONT;
++
++ ipv6_addr_copy(newaddr, &entry->global_addr);
++ DEBUG(DBG_INFO, "preflist_iterator: adding new entry with address %x:%x:%x:%x:%x:%x:%x:%x to list", NIPV6ADDR(&entry->global_addr));
++ state->count++;
++
++ return ITERATOR_CONT;
++}
++
++static int gc_iterator(void *data, void *args,
++ unsigned long *pref)
++{
++ struct mipv6_halist_entry *entry =
++ (struct mipv6_halist_entry *)data;
++
++ int *type = (int *)args;
++
++ if (*type == 1 || time_after(jiffies, entry->expire)) {
++ if (!ipv6_addr_any(&entry->link_local_addr)) {
++ mipv6_ha_ac_del(&entry->link_local_addr,
++ entry->ifindex,
++ &entry->global_addr, entry->plen);
++ }
++ return ITERATOR_DELETE_ENTRY;
++ }
++
++ return ITERATOR_CONT;
++}
++
++static int mipv6_halist_gc(int type)
++{
++ DEBUG_FUNC();
++ hashlist_iterate(home_agents.entries, &type, gc_iterator);
++ return 0;
++}
++
++static void mipv6_halist_expire(unsigned long dummy)
++{
++ DEBUG_FUNC();
++
++ write_lock(&home_agents_lock);
++ mipv6_halist_gc(0);
++ write_unlock(&home_agents_lock);
++}
++
++
++static struct mipv6_halist_entry *mipv6_halist_new_entry(void)
++{
++ struct mipv6_halist_entry *entry;
++
++ DEBUG_FUNC();
++
++ entry = hashlist_alloc(home_agents.entries, SLAB_ATOMIC);
++
++ return entry;
++}
++
++
++
++/**
++ * mipv6_halist_add - Add new home agent to the Home Agents List
++ * @ifindex: interface identifier
++ * @glob_addr: home agent's global address
++ * @ll_addr: home agent's link-local address
++ * @pref: relative preference for this home agent
++ * @lifetime: lifetime for the entry
++ *
++ * Adds new home agent to the Home Agents List. The list is interface
++ * specific and @ifindex tells through which interface the home agent
++ * was heard. Returns zero on success and negative on failure.
++ **/
++
++int mipv6_halist_add(int ifindex, struct in6_addr *glob_addr, int plen,
++ struct in6_addr *ll_addr, unsigned int pref, __u32 lifetime)
++{
++ int update = 0, ret = 0;
++ unsigned int mpref;
++ struct mipv6_halist_entry *entry = NULL;
++
++ DEBUG_FUNC();
++
++ write_lock(&home_agents_lock);
++
++ if (glob_addr == NULL || lifetime <= 0) {
++ DEBUG(DBG_WARNING, "invalid arguments");
++ ret = -EINVAL;
++ goto out;
++ }
++ mpref = PREF_BASE - pref;
++ if ((entry = (struct mipv6_halist_entry *)
++ hashlist_get(home_agents.entries, glob_addr)) != NULL) {
++ if (entry->ifindex == ifindex) {
++ DEBUG(DBG_DATADUMP, "updating old entry with address %x:%x:%x:%x:%x:%x:%x:%x", NIPV6ADDR(glob_addr));
++ update = 1;
++ } else {
++ DEBUG(DBG_INFO, "halist_add : adding new entry with address %x:%x:%x:%x:%x:%x:%x:%x", NIPV6ADDR(glob_addr));
++ update = 0;
++ }
++ }
++ if (update) {
++ entry->expire = jiffies + lifetime * HZ;
++ if (entry->preference != mpref) {
++ entry->preference = mpref;
++ ret = hashlist_reposition(home_agents.entries,
++ (void *)entry, mpref);
++ }
++ } else {
++ entry = mipv6_halist_new_entry();
++ if (entry == NULL) {
++ DEBUG(DBG_INFO, "list full");
++ ret = -ENOMEM;
++ goto out;
++ }
++ entry->ifindex = ifindex;
++ if (ll_addr) {
++ ipv6_addr_copy(&entry->link_local_addr, ll_addr);
++ mipv6_ha_ac_add(ll_addr, ifindex, glob_addr, plen);
++ } else
++ ipv6_addr_set(&entry->link_local_addr, 0, 0, 0, 0);
++
++ ipv6_addr_copy(&entry->global_addr, glob_addr);
++ entry->plen = plen;
++ entry->preference = mpref;
++ entry->expire = jiffies + lifetime * HZ;
++ ret = hashlist_add(home_agents.entries, glob_addr, mpref,
++ entry);
++ }
++out:
++ write_unlock(&home_agents_lock);
++ return ret;
++}
++
++/**
++ * mipv6_halist_delete - delete home agent from Home Agents List
++ * @glob_addr: home agent's global address
++ *
++ * Deletes entry for home agent @glob_addr from the Home Agent List.
++ **/
++int mipv6_halist_delete(struct in6_addr *glob_addr)
++{
++ struct hashlist_entry *e;
++ struct mipv6_halist_entry *entry;
++ DEBUG_FUNC();
++
++ if (glob_addr == NULL) {
++ DEBUG(DBG_WARNING, "invalid glob addr");
++ return -EINVAL;
++ }
++ write_lock(&home_agents_lock);
++ if ((e = hashlist_get(home_agents.entries, glob_addr)) == NULL) {
++ write_unlock(&home_agents_lock);
++ return -ENOENT;
++ }
++ hashlist_delete(home_agents.entries, e);
++ entry = (struct mipv6_halist_entry *)e;
++ if (!ipv6_addr_any(&entry->link_local_addr)) {
++ mipv6_ha_ac_del(&entry->link_local_addr, entry->ifindex,
++ &entry->global_addr, entry->plen);
++ }
++ hashlist_free(home_agents.entries, e);
++ write_unlock(&home_agents_lock);
++ return 0;
++}
++
++/**
++ * mipv6_ha_get_pref_list - Get list of preferred home agents
++ * @ifindex: interface identifier
++ * @addrs: pointer to a buffer to store the list
++ * @max: maximum number of home agents to return
++ *
++ * Creates a list of @max preferred (or all known if less than @max)
++ * home agents. Home Agents List is interface specific so you must
++ * supply @ifindex. Stores list in addrs and returns number of home
++ * agents stored. On failure, returns a negative value.
++ **/
++int mipv6_ha_get_pref_list(int ifindex, struct in6_addr **addrs, int max)
++{
++ struct preflist_iterator_args args;
++
++ DEBUG_FUNC();
++ if (max <= 0) {
++ *addrs = NULL;
++ return 0;
++ }
++
++ args.count = 0;
++ args.requested = max;
++ args.ifindex = ifindex;
++ args.list = kmalloc(max * sizeof(struct in6_addr), GFP_ATOMIC);
++
++ if (args.list == NULL) return -ENOMEM;
++
++ read_lock(&home_agents_lock);
++ hashlist_iterate(home_agents.entries, &args, preflist_iterator);
++ read_unlock(&home_agents_lock);
++
++ if (args.count >= 0) {
++ *addrs = args.list;
++ } else {
++ kfree(args.list);
++ *addrs = NULL;
++ }
++
++ return args.count;
++}
++
++struct getaddr_iterator_args {
++ struct net_device *dev;
++ struct in6_addr *addr;
++};
++
++static int getaddr_iterator(void *data, void *args,
++ unsigned long *pref)
++{
++ struct mipv6_halist_entry *entry =
++ (struct mipv6_halist_entry *)data;
++ struct getaddr_iterator_args *state =
++ (struct getaddr_iterator_args *)args;
++
++ if (entry->ifindex != state->dev->ifindex)
++ return ITERATOR_CONT;
++
++ if (ipv6_chk_addr(&entry->global_addr, state->dev)) {
++ ipv6_addr_copy(state->addr, &entry->global_addr);
++ return ITERATOR_STOP;
++ }
++ return ITERATOR_CONT;
++}
++
++/*
++ * Get Home Agent Address for given interface. If node is not serving
++ * as a HA for this interface returns negative error value.
++ */
++int mipv6_ha_get_addr(int ifindex, struct in6_addr *addr)
++{
++ struct getaddr_iterator_args args;
++ struct net_device *dev;
++
++ if (ifindex <= 0)
++ return -EINVAL;
++
++ if ((dev = dev_get_by_index(ifindex)) == NULL)
++ return -ENODEV;
++
++ memset(addr, 0, sizeof(struct in6_addr));
++ args.dev = dev;
++ args.addr = addr;
++ read_lock(&home_agents_lock);
++ hashlist_iterate(home_agents.entries, &args, getaddr_iterator);
++ read_unlock(&home_agents_lock);
++ dev_put(dev);
++
++ if (ipv6_addr_any(addr))
++ return -ENOENT;
++
++ return 0;
++}
++
++#define HALIST_INFO_LEN 81
++
++struct procinfo_iterator_args {
++ char *buffer;
++ int offset;
++ int length;
++ int skip;
++ int len;
++};
++
++static int procinfo_iterator(void *data, void *args,
++ unsigned long *pref)
++{
++ struct procinfo_iterator_args *arg =
++ (struct procinfo_iterator_args *)args;
++ struct mipv6_halist_entry *entry =
++ (struct mipv6_halist_entry *)data;
++ unsigned long int expire;
++
++ DEBUG_FUNC();
++
++ if (entry == NULL) return ITERATOR_ERR;
++
++ if (time_after(jiffies, entry->expire)) {
++ if (!ipv6_addr_any(&entry->link_local_addr)) {
++ mipv6_ha_ac_del(&entry->link_local_addr,
++ entry->ifindex,
++ &entry->global_addr, entry->plen);
++ }
++ return ITERATOR_DELETE_ENTRY;
++ }
++ if (arg->skip < arg->offset / HALIST_INFO_LEN) {
++ arg->skip++;
++ return ITERATOR_CONT;
++ }
++
++ if (arg->len >= arg->length)
++ return ITERATOR_CONT;
++
++ expire = (entry->expire - jiffies) / HZ;
++
++ arg->len += sprintf(arg->buffer + arg->len,
++ "%02d %08x%08x%08x%08x %08x%08x%08x%08x %05ld %05ld\n",
++ entry->ifindex,
++ ntohl(entry->global_addr.s6_addr32[0]),
++ ntohl(entry->global_addr.s6_addr32[1]),
++ ntohl(entry->global_addr.s6_addr32[2]),
++ ntohl(entry->global_addr.s6_addr32[3]),
++ ntohl(entry->link_local_addr.s6_addr32[0]),
++ ntohl(entry->link_local_addr.s6_addr32[1]),
++ ntohl(entry->link_local_addr.s6_addr32[2]),
++ ntohl(entry->link_local_addr.s6_addr32[3]),
++ -(entry->preference - PREF_BASE), expire);
++
++ return ITERATOR_CONT;
++}
++
++static int halist_proc_info(char *buffer, char **start, off_t offset,
++ int length)
++{
++ struct procinfo_iterator_args args;
++
++ DEBUG_FUNC();
++
++ args.buffer = buffer;
++ args.offset = offset;
++ args.length = length;
++ args.skip = 0;
++ args.len = 0;
++
++ read_lock_bh(&home_agents_lock);
++ hashlist_iterate(home_agents.entries, &args, procinfo_iterator);
++ read_unlock_bh(&home_agents_lock);
++
++ *start = buffer;
++ if (offset)
++ *start += offset % HALIST_INFO_LEN;
++
++ args.len -= offset % HALIST_INFO_LEN;
++
++ if (args.len > length)
++ args.len = length;
++ if (args.len < 0)
++ args.len = 0;
++
++ return args.len;
++}
++
++static int halist_compare(void *data, void *hashkey)
++{
++ struct mipv6_halist_entry *e = (struct mipv6_halist_entry *)data;
++ struct in6_addr *key = (struct in6_addr *)hashkey;
++
++ return ipv6_addr_cmp(&e->global_addr, key);
++}
++
++static __u32 halist_hash(void *hashkey)
++{
++ struct in6_addr *key = (struct in6_addr *)hashkey;
++ __u32 hash;
++
++ hash = key->s6_addr32[0] ^
++ key->s6_addr32[1] ^
++ key->s6_addr32[2] ^
++ key->s6_addr32[3];
++
++ return hash;
++}
++
++int __init mipv6_halist_init(__u32 size)
++{
++ DEBUG_FUNC();
++
++ if (size <= 0) {
++ DEBUG(DBG_ERROR, "size must be at least 1");
++ return -EINVAL;
++ }
++ init_timer(&home_agents.expire_timer);
++ home_agents.expire_timer.data = 0;
++ home_agents.expire_timer.function = mipv6_halist_expire;
++ home_agents_lock = RW_LOCK_UNLOCKED;
++
++ home_agents.entries = hashlist_create(16, size, sizeof(struct mipv6_halist_entry),
++ "mip6_halist", NULL, NULL,
++ halist_compare, halist_hash);
++
++ if (home_agents.entries == NULL) {
++ DEBUG(DBG_ERROR, "Failed to initialize hashlist");
++ return -ENOMEM;
++ }
++
++ proc_net_create("mip6_home_agents", 0, halist_proc_info);
++ DEBUG(DBG_INFO, "Home Agents List initialized");
++ return 0;
++}
++
++void __exit mipv6_halist_exit(void)
++{
++ DEBUG_FUNC();
++ proc_net_remove("mip6_home_agents");
++ write_lock_bh(&home_agents_lock);
++ DEBUG(DBG_INFO, "Stopping the halist timer");
++ del_timer(&home_agents.expire_timer);
++ mipv6_halist_gc(1);
++ write_unlock_bh(&home_agents_lock);
++ hashlist_destroy(home_agents.entries);
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/halist.h linux-2.4.25/net/ipv6/mobile_ip6/halist.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/halist.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/halist.h 2004-06-26 11:29:30.000000000 +0100
+@@ -0,0 +1,28 @@
++/*
++ * MIPL Mobile IPv6 Home Agents List header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _HALIST_H
++#define _HALIST_H
++
++int mipv6_halist_init(__u32 size);
++
++void mipv6_halist_exit(void);
++
++int mipv6_halist_add(int ifindex, struct in6_addr *glob_addr, int plen,
++ struct in6_addr *ll_addr, unsigned int pref, __u32 lifetime);
++
++int mipv6_halist_delete(struct in6_addr *glob_addr);
++
++int mipv6_ha_get_pref_list(int ifindex, struct in6_addr **addrs, int max);
++
++int mipv6_ha_get_addr(int ifindex, struct in6_addr *addr);
++
++#endif /* _HALIST_H */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/hashlist.c linux-2.4.25/net/ipv6/mobile_ip6/hashlist.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/hashlist.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/hashlist.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,351 @@
++/*
++ * Generic hashtable with chaining. Supports secondary sort order
++ * with doubly linked-list.
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ * Antti Tuominen <ajtuomin@tml.hut.fi>
++ *
++ * $Id: s.hashlist.c 1.21 02/10/07 19:31:52+03:00 antti@traci.mipl.mediapoli.com $
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of
++ * the License, or (at your option) any later version.
++ */
++
++#include <linux/slab.h>
++#include "hashlist.h"
++#include "debug.h"
++
++struct hashlist {
++ int count; /* entry count */
++ int maxcount; /* max entries */
++ __u32 bucketnum; /* hash buckets */
++
++ kmem_cache_t *kmem;
++
++ struct list_head *hashtable;
++ struct list_head sortedlist;
++
++ int (*compare)(void *data, void *hashkey);
++ __u32 (*hash_function)(void *hashkey);
++};
++
++/**
++ * hashlist_create - Create new hashlist
++ * @bucketnum: number of hash buckets
++ * @maxentries: maximum number of entries (0 = no limit)
++ * @size: entry size in bytes
++ * @name: name for kmem_cache_t
++ * @ctor: kmem_cache_t constructor
++ * @dtor: kmem_cache_t destructor
++ * @compare: compare function for key
++ * @hash_function: hash function
++ *
++ * Creates a hashlist structure with @max_entries entries of @size
++ * bytes. User must supply @hash_function and @compare function for
++ * the hashlist. User can also supply @ctor and @dtor for kmem_cache.
++ **/
++struct hashlist *hashlist_create(int bucketnum, int max_entries, size_t size,
++ char *name,
++ void (*ctor)(void *, kmem_cache_t *, unsigned long),
++ void (*dtor)(void *, kmem_cache_t *, unsigned long),
++ int (*compare)(void *data, void *hashkey),
++ __u32 (*hash_function)(void *hashkey))
++{
++ int i;
++ struct hashlist *hl;
++
++ if (!compare || !hash_function)
++ goto hlfailed;
++
++ hl = kmalloc(sizeof(struct hashlist), GFP_ATOMIC);
++ if (!hl) goto hlfailed;
++
++ hl->kmem = kmem_cache_create(name, size, 0, 0, ctor, dtor);
++ if (!hl->kmem) goto poolfailed;
++
++ hl->hashtable = kmalloc(
++ sizeof(struct list_head) * bucketnum, GFP_ATOMIC);
++ if (!hl->hashtable) goto hashfailed;
++
++ for (i = 0; i < bucketnum; i++)
++ INIT_LIST_HEAD(&hl->hashtable[i]);
++
++ INIT_LIST_HEAD(&hl->sortedlist);
++
++ hl->maxcount = max_entries;
++ hl->count = 0;
++ hl->bucketnum = bucketnum;
++ hl->compare = compare;
++ hl->hash_function = hash_function;
++
++ return hl;
++
++hashfailed:
++ kmem_cache_destroy(hl->kmem);
++ hl->kmem = NULL;
++
++poolfailed:
++ kfree(hl);
++
++hlfailed:
++ DEBUG(DBG_ERROR, "could not create hashlist");
++
++ return NULL;
++}
++
++/**
++ * hashlist_destroy - Destroy hashlist
++ * @hashlist: hashlist to destroy
++ *
++ * Frees all memory allocated for a hashlist.
++ **/
++void hashlist_destroy(struct hashlist *hashlist)
++{
++ DEBUG_FUNC();
++
++ if (hashlist == NULL) return;
++
++ if (hashlist->hashtable) {
++ kfree(hashlist->hashtable);
++ hashlist->hashtable = NULL;
++ }
++
++ if (hashlist->kmem) {
++ kmem_cache_destroy(hashlist->kmem);
++ hashlist->kmem = NULL;
++ }
++
++ kfree(hashlist);
++
++ return;
++}
++
++/*
++ * Insert a chain of entries to hashlist into correct order. The
++ * entries are assumed to have valid hashkeys. We use time_after_eq
++ * for comparing, since it handles wrap-around correctly, and the
++ * sortkey is usually jiffies.
++ */
++static void sorted_insert(struct list_head *lh, struct hashlist_entry *he)
++{
++ struct list_head *p;
++ struct hashlist_entry *hlp = NULL;
++ unsigned long sortkey = he->sortkey;
++
++ if (list_empty(lh)) {
++ list_add(&he->sorted, lh);
++ return;
++ }
++
++ list_for_each(p, lh) {
++ hlp = list_entry(p, typeof(*hlp), sorted);
++ if (time_after_eq(hlp->sortkey, sortkey)) {
++ list_add(&he->sorted, hlp->sorted.prev);
++ return;
++ }
++ }
++ list_add(&he->sorted, &hlp->sorted);
++}
++
++/**
++ * hashlist_iterate - Apply function for all elements in a hash list
++ * @hashlist: pointer to hashlist
++ * @args: data to pass to the function
++ * @func: pointer to a function
++ *
++ * Apply arbitrary function @func to all elements in a hash list.
++ * @func must be a pointer to a function with the following prototype:
++ * int func(void *entry, void *arg, unsigned long *sortkey).
++ * Function must return %ITERATOR_STOP,
++ * %ITERATOR_CONT or %ITERATOR_DELETE_ENTRY. %ITERATOR_STOP stops
++ * iterator and returns last return value from the function.
++ * %ITERATOR_CONT continues with iteration. %ITERATOR_DELETE_ENTRY
++ * deletes current entry from the hashlist. If function changes
++ * hashlist element's sortkey, iterator automatically schedules
++ * element to be reinserted after all elements have been processed.
++ */
++int hashlist_iterate(
++ struct hashlist *hashlist, void *args,
++ hashlist_iterator_t func)
++{
++ int res = ITERATOR_CONT;
++ unsigned long skey;
++ struct list_head *p, *n, repos;
++ struct hashlist_entry *he;
++
++ DEBUG_FUNC();
++ INIT_LIST_HEAD(&repos);
++
++ list_for_each_safe(p, n, &hashlist->sortedlist) {
++ he = list_entry(p, typeof(*he), sorted);
++ if (res == ITERATOR_STOP)
++ break;
++ skey = he->sortkey;
++ res = func(he, args, &he->sortkey);
++ if (res == ITERATOR_DELETE_ENTRY) {
++ hashlist_delete(hashlist, he);
++ hashlist_free(hashlist, he);
++ } else if (skey != he->sortkey) {
++ /* iterator changed the sortkey, schedule for
++ * repositioning */
++ list_move(&he->sorted, &repos);
++ }
++ }
++ list_for_each_safe(p, n, &repos) {
++ he = list_entry(p, typeof(*he), sorted);
++ sorted_insert(&hashlist->sortedlist, he);
++ }
++ return res;
++}
++
++/**
++ * hashlist_alloc - Allocate memory for a hashlist entry
++ * @hashlist: hashlist for allocated entry
++ * @type: allocation flags (e.g. %GFP_ATOMIC or %GFP_KERNEL)
++ *
++ * Allocates one entry from the @hashlist->kmem cache.
++ **/
++void *hashlist_alloc(struct hashlist *hashlist, int type)
++{
++ if (hashlist == NULL) return NULL;
++ return kmem_cache_alloc(hashlist->kmem, type);
++}
++
++/**
++ * hashlist_free - Free hashlist entry
++ * @hashlist: hashlist where @he is
++ * @he: entry to free
++ *
++ * Frees an allocated hashlist entry.
++ **/
++void hashlist_free(struct hashlist *hashlist, struct hashlist_entry *he)
++{
++ kmem_cache_free(hashlist->kmem, he);
++}
++
++/**
++ * hashlist_add - Add element to hashlist
++ * @hashlist: pointer to hashlist
++ * @hashkey: hashkey for the element
++ * @sortkey: key for sorting
++ * @entry: entry to add (a struct hashlist_entry pointer)
++ *
++ * Add element to hashlist. Hashlist is also sorted in a linked list
++ * by @sortkey.
++ */
++int hashlist_add(struct hashlist *hashlist, void *hashkey,
++ unsigned long sortkey, void *entry)
++{
++ struct hashlist_entry *he = (struct hashlist_entry *)entry;
++ unsigned int hash;
++
++ if (hashlist->count >= hashlist->maxcount)
++ return -1;
++
++ hashlist->count++;
++
++ /* link the entry to sorted order */
++ he->sortkey = sortkey;
++ sorted_insert(&hashlist->sortedlist, he);
++
++ /* hash the entry */
++ hash = hashlist->hash_function(hashkey) % hashlist->bucketnum;
++ list_add(&he->hashlist, &hashlist->hashtable[hash]);
++
++ return 0;
++}
++
++/**
++ * hashlist_get_ex - Get element from hashlist
++ * @hashlist: hashlist
++ * @hashkey: hashkey of the desired entry
++ *
++ * Lookup entry with @hashkey from the hash table using @compare
++ * function for entry comparison. Returns entry on success, otherwise
++ * %NULL.
++ **/
++struct hashlist_entry *hashlist_get_ex(
++ struct hashlist *hashlist, void *hashkey,
++ int (*compare)(void *data, void *hashkey))
++{
++ struct list_head *p, *bkt;
++ __u32 hash;
++
++ hash = hashlist->hash_function(hashkey) % hashlist->bucketnum;
++ bkt = &hashlist->hashtable[hash];
++
++ /* scan the entries within the same hashbucket */
++ list_for_each(p, bkt) {
++ struct hashlist_entry *he = list_entry(p, typeof(*he),
++ hashlist);
++ if (compare(he, hashkey) == 0)
++ return he;
++ }
++
++ return NULL;
++}
++
++/**
++ * hashlist_get - Get element from hashlist
++ * @hashlist: hashlist
++ * @hashkey: hashkey of the desired entry
++ *
++ * Lookup entry with @hashkey from the hash table. Returns entry on
++ * success, otherwise %NULL.
++ **/
++struct hashlist_entry *hashlist_get(struct hashlist *hashlist, void *hashkey)
++{
++ return hashlist_get_ex(hashlist, hashkey, hashlist->compare);
++}
++
++/**
++ * hashlist_reposition - set entry to new position in the list
++ * @hashlist: hashlist
++ * @he: entry to reposition
++ * @sortkey: new sortkey of the entry
++ *
++ * If secondary order sortkey changes, entry must be repositioned in
++ * the sorted list.
++ **/
++int hashlist_reposition(struct hashlist *hashlist, struct hashlist_entry *he,
++ unsigned long sortkey)
++{
++ list_del(&he->sorted);
++ he->sortkey = sortkey;
++ sorted_insert(&hashlist->sortedlist, he);
++
++ return 0;
++}
++
++/**
++ * hashlist_delete - Delete entry from hashlist
++ * @hashlist: hashlist where entry is
++ * @he: entry to delete
++ *
++ * Deletes an entry from the hashlist and sorted list.
++ **/
++void hashlist_delete(struct hashlist *hashlist,
++ struct hashlist_entry *he)
++{
++ list_del_init(&he->hashlist);
++ list_del_init(&he->sorted);
++
++ hashlist->count--;
++}
++
++/**
++ * hashlist_get_first - Get first item from sorted list
++ * @hashlist: pointer to hashlist
++ *
++ * Returns first item in the secondary sort order.
++ **/
++void * hashlist_get_first(struct hashlist *hashlist)
++{
++ if (list_empty(&hashlist->sortedlist))
++ return NULL;
++
++ return list_entry(hashlist->sortedlist.next, struct hashlist_entry, sorted);
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/hashlist.h linux-2.4.25/net/ipv6/mobile_ip6/hashlist.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/hashlist.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/hashlist.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,63 @@
++/*
++ * MIPL Mobile IPv6 Hashlist header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _HASHLIST_H
++#define _HASHLIST_H
++
++#define ITERATOR_ERR -1
++#define ITERATOR_CONT 0
++#define ITERATOR_STOP 1
++#define ITERATOR_DELETE_ENTRY 2
++
++struct kmem_cache_t;
++
++struct hashlist_entry {
++ unsigned long sortkey;
++ struct list_head sorted;
++ struct list_head hashlist;
++};
++
++struct hashlist * hashlist_create(
++ int bucketnum, int max_entries, size_t size, char *name,
++ void (*ctor)(void *, kmem_cache_t *, unsigned long),
++ void (*dtor)(void *, kmem_cache_t *, unsigned long),
++ int (*compare)(void *data, void *hashkey),
++ __u32 (*hash_function)(void *hashkey));
++
++void hashlist_destroy(struct hashlist *hashlist);
++
++void *hashlist_alloc(struct hashlist *hashlist, int type);
++
++void hashlist_free(struct hashlist *hashlist, struct hashlist_entry *he);
++
++struct hashlist_entry *hashlist_get(struct hashlist *hashlist, void *hashkey);
++
++struct hashlist_entry *hashlist_get_ex(
++ struct hashlist *hashlist, void *hashkey,
++ int (*compare)(void *data, void *hashkey));
++
++int hashlist_add(struct hashlist *hashlist, void *hashkey,
++ unsigned long sortkey, void *data);
++
++void hashlist_delete(struct hashlist *hashlist, struct hashlist_entry *he);
++
++/* iterator function */
++typedef int (*hashlist_iterator_t)(void *, void *, unsigned long *);
++
++int hashlist_iterate(struct hashlist *hashlist, void *args,
++ hashlist_iterator_t func);
++
++void * hashlist_get_first(struct hashlist *hashlist);
++
++int hashlist_reposition(struct hashlist *hashlist, struct hashlist_entry *he,
++ unsigned long sortkey);
++
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/hmac.c linux-2.4.25/net/ipv6/mobile_ip6/hmac.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/hmac.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/hmac.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,658 @@
++/* Authentication algorithms
++ *
++ * Authors:
++ * Alexis Olivereau <Alexis.Olivereau@crm.mot.com>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Changes:
++ * Henrik Petander : Cleaned up unused parts
++ *
++ */
++
++#include <linux/sched.h>
++#include <linux/tty.h>
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <linux/in6.h>
++
++#include "hmac.h"
++#define LROLL(x, s) (((x) << (s)) | ((x) >> (32 - (s))))
++
++/* MD5 */
++#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
++#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
++#define H(x, y, z) ((x) ^ (y) ^ (z))
++#define I(x, y, z) ((y) ^ ((x) | ~(z)))
++
++#define FF(a, b, c, d, m, s, t) { \
++ (a) += F ((b), (c), (d)) + (m) + (t); \
++ (a) = LROLL((a), (s)); \
++ (a) += (b); \
++ }
++#define GG(a, b, c, d, m, s, t) { \
++ (a) += G ((b), (c), (d)) + (m) + (t); \
++ (a) = LROLL((a), (s)); \
++ (a) += (b); \
++ }
++#define HH(a, b, c, d, m, s, t) { \
++ (a) += H ((b), (c), (d)) + (m) + (t); \
++ (a) = LROLL((a), (s)); \
++ (a) += (b); \
++ }
++#define II(a, b, c, d, m, s, t) { \
++ (a) += I ((b), (c), (d)) + (m) + (t); \
++ (a) = LROLL((a), (s)); \
++ (a) += (b); \
++ }
++
++#define s11 7
++#define s12 12
++#define s13 17
++#define s14 22
++#define s21 5
++#define s22 9
++#define s23 14
++#define s24 20
++#define s31 4
++#define s32 11
++#define s33 16
++#define s34 23
++#define s41 6
++#define s42 10
++#define s43 15
++#define s44 21
++
++/* SHA-1 */
++#define f(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
++#define g(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
++#define h(x, y, z) ((x) ^ (y) ^ (z))
++
++#define K1 0x5a827999
++#define K2 0x6ed9eba1
++#define K3 0x8f1bbcdc
++#define K4 0xca62c1d6
++
++int ah_hmac_md5_init(struct ah_processing *ahp, u_int8_t *key, u_int32_t key_len)
++{
++ int i;
++ int key_up4;
++ uint32_t ipad = 0x36363636;
++ uint8_t extkey[64];
++
++ ahp->key_auth = key;
++ ahp->key_auth_len = key_len;
++ ahp->context = (void *) kmalloc(sizeof(MD5_CTX), GFP_ATOMIC);
++ if (ahp->context == NULL)
++ return -1;
++ md5_init((MD5_CTX *) ahp->context);
++ if ((64 * sizeof(uint8_t)) < ahp->key_auth_len) {
++ printk("buffer overflow!");
++ return -1;
++ }
++ memcpy(extkey, ahp->key_auth, ahp->key_auth_len);
++ if (ahp->key_auth_len % 4) {
++ memset(extkey + ahp->key_auth_len, 0,
++ 4 - (ahp->key_auth_len % 4));
++ }
++ key_up4 = ((ahp->key_auth_len + 0x3) & 0xFFFFFFFC) / 4;
++
++ for (i = 0; i < key_up4; i++)
++ ((uint32_t *) extkey)[i] = ((uint32_t *) extkey)[i] ^ ipad;
++ for (i = key_up4; i < 16; i++)
++ ((uint32_t *) extkey)[i] = ipad;
++
++ md5_compute((MD5_CTX *) ahp->context, extkey, 64);
++ return 0;
++}
++
++void ah_hmac_md5_loop(struct ah_processing *ahp, void *str, uint32_t len)
++{
++ md5_compute((MD5_CTX *) ahp->context, str, len);
++}
++
++void ah_hmac_md5_result(struct ah_processing *ahp, char *digest)
++{
++ uint8_t inner[HMAC_MD5_HASH_LEN];
++ int i;
++ int key_up4;
++ uint32_t opad = 0x5c5c5c5c;
++ uint8_t extkey[64];
++
++ md5_final((MD5_CTX *) ahp->context, inner);
++ md5_init((MD5_CTX *) ahp->context);
++
++ memcpy(extkey, ahp->key_auth, ahp->key_auth_len);
++ if (ahp->key_auth_len % 4) {
++ memset(extkey + ahp->key_auth_len, 0,
++ 4 - (ahp->key_auth_len % 4));
++ }
++ key_up4 = ((ahp->key_auth_len + 0x3) & 0xFFFFFFFC) / 4;
++
++ for (i = 0; i < key_up4; i++)
++ ((uint32_t *) extkey)[i] = ((uint32_t *) extkey)[i] ^ opad;
++ for (i = key_up4; i < 16; i++)
++ ((uint32_t *) extkey)[i] = opad;
++
++ md5_compute((MD5_CTX *) ahp->context, extkey, 64);
++ md5_compute((MD5_CTX *) ahp->context, inner, HMAC_MD5_HASH_LEN);
++
++ md5_final((MD5_CTX *) ahp->context, digest);
++
++ kfree(ahp->context);
++}
++
++int ah_hmac_sha1_init(struct ah_processing *ahp, u_int8_t *key, u_int32_t key_len)
++{
++ int i;
++ int key_up4;
++ uint32_t ipad = 0x36363636;
++ uint8_t extkey[64];
++
++ ahp->key_auth = key;
++ ahp->key_auth_len = key_len;
++
++ ahp->context = (void *) kmalloc(sizeof(SHA1_CTX), GFP_ATOMIC);
++ //if (ahp->context == NULL)
++ // return -1;
++
++ sha1_init((SHA1_CTX *) ahp->context);
++
++ memcpy(extkey, ahp->key_auth, ahp->key_auth_len);
++ if (ahp->key_auth_len % 4) {
++ memset(extkey + ahp->key_auth_len, 0,
++ 4 - (ahp->key_auth_len % 4));
++ }
++ key_up4 = ((ahp->key_auth_len + 0x3) & 0xFFFFFFFC) / 4;
++
++ for (i = 0; i < key_up4; i++)
++ ((uint32_t *) extkey)[i] = ((uint32_t *) extkey)[i] ^ ipad;
++ for (i = key_up4; i < 16; i++)
++ ((uint32_t *) extkey)[i] = ipad;
++
++ sha1_compute((SHA1_CTX *) ahp->context, extkey, 64);
++ return 0;
++}
++
++void ah_hmac_sha1_loop(struct ah_processing *ahp, void *str, uint32_t len)
++{
++ if (!ahp)
++ return;
++ sha1_compute((SHA1_CTX *) ahp->context, str, len);
++}
++
++void ah_hmac_sha1_result(struct ah_processing *ahp, char *digest)
++{
++ uint8_t inner[HMAC_SHA1_HASH_LEN];
++ int i;
++ int key_up4;
++ uint32_t opad = 0x5c5c5c5c;
++ uint8_t extkey[64];
++
++ if (!ahp)
++ return;
++ sha1_final((SHA1_CTX *) ahp->context, inner);
++ sha1_init((SHA1_CTX *) ahp->context);
++
++ memcpy(extkey, ahp->key_auth, ahp->key_auth_len);
++ if (ahp->key_auth_len % 4) {
++ memset(extkey + ahp->key_auth_len, 0,
++ 4 - (ahp->key_auth_len % 4));
++ }
++ key_up4 = ((ahp->key_auth_len + 0x3) & 0xFFFFFFFC) / 4;
++
++ for (i = 0; i < key_up4; i++)
++ ((uint32_t *) extkey)[i] = ((uint32_t *) extkey)[i] ^ opad;
++ for (i = key_up4; i < 16; i++)
++ ((uint32_t *) extkey)[i] = opad;
++
++ sha1_compute((SHA1_CTX *) ahp->context, extkey, 64);
++ sha1_compute((SHA1_CTX *) ahp->context, inner,
++ HMAC_SHA1_HASH_LEN);
++
++ sha1_final((SHA1_CTX *) ahp->context, digest);
++
++ kfree(ahp->context);
++}
++
++void md5_init(MD5_CTX * ctx)
++{
++ ctx->A = 0x67452301;
++ ctx->B = 0xefcdab89;
++ ctx->C = 0x98badcfe;
++ ctx->D = 0x10325476;
++ ctx->buf_cur = ctx->buf;
++ ctx->bitlen[0] = ctx->bitlen[1] = 0;
++ memset(ctx->buf, 0, 64);
++}
++
++void md5_over_block(MD5_CTX * ctx, uint8_t * data)
++{
++ uint32_t M[16];
++ uint32_t a = ctx->A;
++ uint32_t b = ctx->B;
++ uint32_t c = ctx->C;
++ uint32_t d = ctx->D;
++
++ create_M_blocks(M, data);
++
++ /* Round 1 */
++ FF(a, b, c, d, M[0], s11, 0xd76aa478); /* 1 */
++ FF(d, a, b, c, M[1], s12, 0xe8c7b756); /* 2 */
++ FF(c, d, a, b, M[2], s13, 0x242070db); /* 3 */
++ FF(b, c, d, a, M[3], s14, 0xc1bdceee); /* 4 */
++ FF(a, b, c, d, M[4], s11, 0xf57c0faf); /* 5 */
++ FF(d, a, b, c, M[5], s12, 0x4787c62a); /* 6 */
++ FF(c, d, a, b, M[6], s13, 0xa8304613); /* 7 */
++ FF(b, c, d, a, M[7], s14, 0xfd469501); /* 8 */
++ FF(a, b, c, d, M[8], s11, 0x698098d8); /* 9 */
++ FF(d, a, b, c, M[9], s12, 0x8b44f7af); /* 10 */
++ FF(c, d, a, b, M[10], s13, 0xffff5bb1); /* 11 */
++ FF(b, c, d, a, M[11], s14, 0x895cd7be); /* 12 */
++ FF(a, b, c, d, M[12], s11, 0x6b901122); /* 13 */
++ FF(d, a, b, c, M[13], s12, 0xfd987193); /* 14 */
++ FF(c, d, a, b, M[14], s13, 0xa679438e); /* 15 */
++ FF(b, c, d, a, M[15], s14, 0x49b40821); /* 16 */
++
++ /* Round 2 */
++ GG(a, b, c, d, M[1], s21, 0xf61e2562); /* 17 */
++ GG(d, a, b, c, M[6], s22, 0xc040b340); /* 18 */
++ GG(c, d, a, b, M[11], s23, 0x265e5a51); /* 19 */
++ GG(b, c, d, a, M[0], s24, 0xe9b6c7aa); /* 20 */
++ GG(a, b, c, d, M[5], s21, 0xd62f105d); /* 21 */
++ GG(d, a, b, c, M[10], s22, 0x02441453); /* 22 */
++ GG(c, d, a, b, M[15], s23, 0xd8a1e681); /* 23 */
++ GG(b, c, d, a, M[4], s24, 0xe7d3fbc8); /* 24 */
++ GG(a, b, c, d, M[9], s21, 0x21e1cde6); /* 25 */
++ GG(d, a, b, c, M[14], s22, 0xc33707d6); /* 26 */
++ GG(c, d, a, b, M[3], s23, 0xf4d50d87); /* 27 */
++ GG(b, c, d, a, M[8], s24, 0x455a14ed); /* 28 */
++ GG(a, b, c, d, M[13], s21, 0xa9e3e905); /* 29 */
++ GG(d, a, b, c, M[2], s22, 0xfcefa3f8); /* 30 */
++ GG(c, d, a, b, M[7], s23, 0x676f02d9); /* 31 */
++ GG(b, c, d, a, M[12], s24, 0x8d2a4c8a); /* 32 */
++
++ /* Round 3 */
++ HH(a, b, c, d, M[5], s31, 0xfffa3942); /* 33 */
++ HH(d, a, b, c, M[8], s32, 0x8771f681); /* 34 */
++ HH(c, d, a, b, M[11], s33, 0x6d9d6122); /* 35 */
++ HH(b, c, d, a, M[14], s34, 0xfde5380c); /* 36 */
++ HH(a, b, c, d, M[1], s31, 0xa4beea44); /* 37 */
++ HH(d, a, b, c, M[4], s32, 0x4bdecfa9); /* 38 */
++ HH(c, d, a, b, M[7], s33, 0xf6bb4b60); /* 39 */
++ HH(b, c, d, a, M[10], s34, 0xbebfbc70); /* 40 */
++ HH(a, b, c, d, M[13], s31, 0x289b7ec6); /* 41 */
++ HH(d, a, b, c, M[0], s32, 0xeaa127fa); /* 42 */
++ HH(c, d, a, b, M[3], s33, 0xd4ef3085); /* 43 */
++ HH(b, c, d, a, M[6], s34, 0x4881d05); /* 44 */
++ HH(a, b, c, d, M[9], s31, 0xd9d4d039); /* 45 */
++ HH(d, a, b, c, M[12], s32, 0xe6db99e5); /* 46 */
++ HH(c, d, a, b, M[15], s33, 0x1fa27cf8); /* 47 */
++ HH(b, c, d, a, M[2], s34, 0xc4ac5665); /* 48 */
++
++ /* Round 4 */
++ II(a, b, c, d, M[0], s41, 0xf4292244); /* 49 */
++ II(d, a, b, c, M[7], s42, 0x432aff97); /* 50 */
++ II(c, d, a, b, M[14], s43, 0xab9423a7); /* 51 */
++ II(b, c, d, a, M[5], s44, 0xfc93a039); /* 52 */
++ II(a, b, c, d, M[12], s41, 0x655b59c3); /* 53 */
++ II(d, a, b, c, M[3], s42, 0x8f0ccc92); /* 54 */
++ II(c, d, a, b, M[10], s43, 0xffeff47d); /* 55 */
++ II(b, c, d, a, M[1], s44, 0x85845dd1); /* 56 */
++ II(a, b, c, d, M[8], s41, 0x6fa87e4f); /* 57 */
++ II(d, a, b, c, M[15], s42, 0xfe2ce6e0); /* 58 */
++ II(c, d, a, b, M[6], s43, 0xa3014314); /* 59 */
++ II(b, c, d, a, M[13], s44, 0x4e0811a1); /* 60 */
++ II(a, b, c, d, M[4], s41, 0xf7537e82); /* 61 */
++ II(d, a, b, c, M[11], s42, 0xbd3af235); /* 62 */
++ II(c, d, a, b, M[2], s43, 0x2ad7d2bb); /* 63 */
++ II(b, c, d, a, M[9], s44, 0xeb86d391); /* 64 */
++
++ ctx->A += a;
++ ctx->B += b;
++ ctx->C += c;
++ ctx->D += d;
++}
++
++void create_M_blocks(uint32_t * M, uint8_t * data)
++{
++#ifdef HAVE_LITTLE_ENDIAN
++ memcpy((uint8_t *) M, data, 64);
++#endif /* HAVE_LITTLE_ENDIAN */
++
++#ifdef HAVE_BIG_ENDIAN
++ int i;
++ for (i = 0; i < 16; i++, data += 4) {
++ ((uint8_t *) (&M[i]))[0] = data[3];
++ ((uint8_t *) (&M[i]))[1] = data[2];
++ ((uint8_t *) (&M[i]))[2] = data[1];
++ ((uint8_t *) (&M[i]))[3] = data[0];
++ }
++#endif /* HAVE_BIG_ENDIAN */
++}
++
++void md5_compute(MD5_CTX * ctx, uint8_t * data, uint32_t len)
++{
++ uint8_t pos = ((ctx->bitlen[0] >> 3) & 0x3f);
++
++ /* First we update the bit length */
++ if ((ctx->bitlen[0] += (len << 3)) < (len << 3))
++ ctx->bitlen[1]++;
++ ctx->bitlen[1] += (len >> 29); /* len is expressed in bytes */
++
++ if (pos) {
++ /* Buffer is not empty */
++ if (64 - pos >= len) {
++ memcpy(ctx->buf_cur, data, len);
++ ctx->buf_cur += len;
++ pos += len;
++ if (pos == 64) {
++ /* The current block is over */
++ md5_over_block(ctx, ctx->buf);
++ ctx->buf_cur = ctx->buf;
++ }
++ return;
++ } else {
++ memcpy(ctx->buf_cur, data, 64 - pos);
++ md5_over_block(ctx, ctx->buf);
++ len -= (64 - pos);
++ data += (64 - pos);
++ ctx->buf_cur = ctx->buf;
++ }
++ }
++ while (len >= 64) {
++ md5_over_block(ctx, data);
++ len -= 64;
++ data += 64;
++ }
++ if (len) {
++ memcpy(ctx->buf_cur, data, len);
++ ctx->buf_cur += len;
++ }
++}
++
++void md5_final(MD5_CTX * ctx, uint8_t * digest)
++{
++ uint32_t rem_size;
++ uint8_t *buf_cur = ctx->buf_cur;
++ int i;
++
++ rem_size = 64 - ((ctx->bitlen[0] >> 3) & 0x3f);
++ *(buf_cur++) = 0x80;
++
++ if (rem_size > 8 + 1) {
++ /* We have enough room in the current block */
++ for (i = 0; i < rem_size - 8 - 1; i++) {
++ *(buf_cur++) = 0;
++ }
++ } else {
++ /* We do not have enough room and need therefore to add a new
++ 64-byte block */
++ for (i = 0; i < rem_size - 1; i++) {
++ *(buf_cur++) = 0;
++ }
++ md5_over_block(ctx, ctx->buf);
++
++ buf_cur = ctx->buf;
++ for (i = 0; i < 64 - 8; i++) {
++ *(buf_cur++) = 0;
++ }
++ }
++#ifdef HAVE_LITTLE_ENDIAN
++ memcpy(buf_cur, (uint8_t *) ctx->bitlen, 8);
++#endif /* HAVE_LITTLE_ENDIAN */
++
++#ifdef HAVE_BIG_ENDIAN
++ *(buf_cur++) = (ctx->bitlen[0] >> 24) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[0] >> 16) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[0] >> 8) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[0] >> 0) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[1] >> 24) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[1] >> 16) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[1] >> 8) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[1] >> 0) & 0xff;
++#endif /* HAVE_BIG_ENDIAN */
++
++ md5_over_block(ctx, ctx->buf);
++
++#ifdef HAVE_LITTLE_ENDIAN
++ memcpy(digest + 0, (uint8_t *) (&(ctx->A)), sizeof(uint32_t));
++ memcpy(digest + 4, (uint8_t *) (&(ctx->B)), sizeof(uint32_t));
++ memcpy(digest + 8, (uint8_t *) (&(ctx->C)), sizeof(uint32_t));
++ memcpy(digest + 12, (uint8_t *) (&(ctx->D)), sizeof(uint32_t));
++#endif /* HAVE_LITTLE_ENDIAN */
++
++#ifdef HAVE_BIG_ENDIAN
++ digest[0] = ((ctx->A) >> 24) & 0xff;
++ digest[1] = ((ctx->A) >> 16) & 0xff;
++ digest[2] = ((ctx->A) >> 8) & 0xff;
++ digest[3] = ((ctx->A) >> 0) & 0xff;
++ digest[4] = ((ctx->B) >> 24) & 0xff;
++ digest[5] = ((ctx->B) >> 16) & 0xff;
++ digest[6] = ((ctx->B) >> 8) & 0xff;
++ digest[7] = ((ctx->B) >> 0) & 0xff;
++ digest[8] = ((ctx->C) >> 24) & 0xff;
++ digest[9] = ((ctx->C) >> 16) & 0xff;
++ digest[10] = ((ctx->C) >> 8) & 0xff;
++ digest[11] = ((ctx->C) >> 0) & 0xff;
++ digest[12] = ((ctx->D) >> 24) & 0xff;
++ digest[13] = ((ctx->D) >> 16) & 0xff;
++ digest[14] = ((ctx->D) >> 8) & 0xff;
++ digest[15] = ((ctx->D) >> 0) & 0xff;
++#endif /* HAVE_BIG_ENDIAN */
++}
++
++void sha1_init(SHA1_CTX * ctx)
++{
++ ctx->A = 0x67452301;
++ ctx->B = 0xefcdab89;
++ ctx->C = 0x98badcfe;
++ ctx->D = 0x10325476;
++ ctx->E = 0xc3d2e1f0;
++ ctx->buf_cur = ctx->buf;
++ ctx->bitlen[0] = ctx->bitlen[1] = 0;
++ memset(ctx->buf, 0, 64);
++}
++
++void sha1_over_block(SHA1_CTX * ctx, uint8_t * data)
++{
++ int i;
++ uint32_t W[80];
++ uint32_t a = ctx->A;
++ uint32_t b = ctx->B;
++ uint32_t c = ctx->C;
++ uint32_t d = ctx->D;
++ uint32_t e = ctx->E;
++ uint32_t temp;
++
++ create_W_blocks(W, data);
++
++ /* Round 1 */
++ for (i = 0; i < 20; i++) {
++ temp = LROLL(a, 5) + f(b, c, d) + e + W[i] + K1;
++ e = d;
++ d = c;
++ c = LROLL(b, 30);
++ b = a;
++ a = temp;
++ }
++
++ /* Round 2 */
++ for (i = 20; i < 40; i++) {
++ temp = LROLL(a, 5) + h(b, c, d) + e + W[i] + K2;
++ e = d;
++ d = c;
++ c = LROLL(b, 30);
++ b = a;
++ a = temp;
++ }
++
++ /* Round 3 */
++ for (i = 40; i < 60; i++) {
++ temp = LROLL(a, 5) + g(b, c, d) + e + W[i] + K3;
++ e = d;
++ d = c;
++ c = LROLL(b, 30);
++ b = a;
++ a = temp;
++ }
++
++ /* Round 4 */
++ for (i = 60; i < 80; i++) {
++ temp = LROLL(a, 5) + h(b, c, d) + e + W[i] + K4;
++ e = d;
++ d = c;
++ c = LROLL(b, 30);
++ b = a;
++ a = temp;
++ }
++
++ ctx->A += a;
++ ctx->B += b;
++ ctx->C += c;
++ ctx->D += d;
++ ctx->E += e;
++}
++
++void create_W_blocks(uint32_t * W, uint8_t * data)
++{
++ int i;
++
++#ifdef HAVE_BIG_ENDIAN
++ memcpy((uint8_t *) W, data, 64);
++#endif /* HAVE_BIG_ENDIAN */
++
++#ifdef HAVE_LITTLE_ENDIAN
++ for (i = 0; i < 16; i++, data += 4) {
++ ((uint8_t *) (&W[i]))[0] = data[3];
++ ((uint8_t *) (&W[i]))[1] = data[2];
++ ((uint8_t *) (&W[i]))[2] = data[1];
++ ((uint8_t *) (&W[i]))[3] = data[0];
++ }
++#endif /* HAVE_LITTLE_ENDIAN */
++ for (i = 16; i < 80; i++) {
++ W[i] = W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16];
++ W[i] = LROLL(W[i], 1);
++ }
++}
++
++void sha1_compute(SHA1_CTX * ctx, uint8_t * data, uint32_t len)
++{
++ uint8_t pos = ((ctx->bitlen[0] >> 3) & 0x3f);
++
++ /* First we update the bit length */
++ if ((ctx->bitlen[0] += (len << 3)) < (len << 3))
++ ctx->bitlen[1]++;
++ ctx->bitlen[1] += (len >> 29); /* len is expressed in bytes */
++
++ if (pos) {
++ /* Buffer is not empty */
++ if (64 - pos >= len) {
++ memcpy(ctx->buf_cur, data, len);
++ ctx->buf_cur += len;
++ pos += len;
++ if (pos == 64) {
++ /* The current block is over */
++ sha1_over_block(ctx, ctx->buf);
++ ctx->buf_cur = ctx->buf;
++ }
++ return;
++ } else {
++ memcpy(ctx->buf_cur, data, 64 - pos);
++ sha1_over_block(ctx, ctx->buf);
++ len -= (64 - pos);
++ data += (64 - pos);
++ ctx->buf_cur = ctx->buf;
++ }
++ }
++ while (len >= 64) {
++ sha1_over_block(ctx, data);
++ len -= 64;
++ data += 64;
++ }
++ if (len) {
++ memcpy(ctx->buf_cur, data, len);
++ ctx->buf_cur += len;
++ }
++}
++
++void sha1_final(SHA1_CTX * ctx, uint8_t * digest)
++{
++ uint32_t rem_size;
++ uint8_t *buf_cur = ctx->buf_cur;
++ int i;
++
++ rem_size = 64 - ((ctx->bitlen[0] >> 3) & 0x3f);
++ *(buf_cur++) = 0x80;
++
++ if (rem_size > 8 + 1) {
++ /* We have enough room in the current block */
++ for (i = 0; i < rem_size - 8 - 1; i++) {
++ *(buf_cur++) = 0;
++ }
++ } else {
++ /* We do not have enough room and need therefore to add a new
++ 64-byte block */
++ for (i = 0; i < rem_size - 1; i++) {
++ *(buf_cur++) = 0;
++ }
++ sha1_over_block(ctx, ctx->buf);
++
++ buf_cur = ctx->buf;
++ for (i = 0; i < 64 - 8; i++) {
++ *(buf_cur++) = 0;
++ }
++ }
++#ifdef HAVE_BIG_ENDIAN
++ memcpy(buf_cur, (uint8_t *) ctx->bitlen, 8);
++#endif /* HAVE_BIG_ENDIAN */
++
++#ifdef HAVE_LITTLE_ENDIAN
++ *(buf_cur++) = (ctx->bitlen[1] >> 24) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[1] >> 16) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[1] >> 8) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[1] >> 0) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[0] >> 24) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[0] >> 16) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[0] >> 8) & 0xff;
++ *(buf_cur++) = (ctx->bitlen[0] >> 0) & 0xff;
++#endif /* HAVE_LITTLE_ENDIAN */
++
++ sha1_over_block(ctx, ctx->buf);
++
++#ifdef HAVE_BIG_ENDIAN
++ memcpy(digest + 0, (uint8_t *) (&(ctx->A)), sizeof(uint32_t));
++ memcpy(digest + 4, (uint8_t *) (&(ctx->B)), sizeof(uint32_t));
++ memcpy(digest + 8, (uint8_t *) (&(ctx->C)), sizeof(uint32_t));
++ memcpy(digest + 12, (uint8_t *) (&(ctx->D)), sizeof(uint32_t));
++ memcpy(digest + 16, (uint8_t *) (&(ctx->E)), sizeof(uint32_t));
++#endif /* HAVE_BIG_ENDIAN */
++
++#ifdef HAVE_LITTLE_ENDIAN
++ digest[0] = ((ctx->A) >> 24) & 0xff;
++ digest[1] = ((ctx->A) >> 16) & 0xff;
++ digest[2] = ((ctx->A) >> 8) & 0xff;
++ digest[3] = ((ctx->A) >> 0) & 0xff;
++ digest[4] = ((ctx->B) >> 24) & 0xff;
++ digest[5] = ((ctx->B) >> 16) & 0xff;
++ digest[6] = ((ctx->B) >> 8) & 0xff;
++ digest[7] = ((ctx->B) >> 0) & 0xff;
++ digest[8] = ((ctx->C) >> 24) & 0xff;
++ digest[9] = ((ctx->C) >> 16) & 0xff;
++ digest[10] = ((ctx->C) >> 8) & 0xff;
++ digest[11] = ((ctx->C) >> 0) & 0xff;
++ digest[12] = ((ctx->D) >> 24) & 0xff;
++ digest[13] = ((ctx->D) >> 16) & 0xff;
++ digest[14] = ((ctx->D) >> 8) & 0xff;
++ digest[15] = ((ctx->D) >> 0) & 0xff;
++ digest[16] = ((ctx->E) >> 24) & 0xff;
++ digest[17] = ((ctx->E) >> 16) & 0xff;
++ digest[18] = ((ctx->E) >> 8) & 0xff;
++ digest[19] = ((ctx->E) >> 0) & 0xff;
++#endif /* HAVE_LITTLE_ENDIAN */
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/hmac.h linux-2.4.25/net/ipv6/mobile_ip6/hmac.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/hmac.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/hmac.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,94 @@
++/*
++ * MIPL Mobile IPv6 Message authentication algorithms
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _HMAC_H
++#define _HMAC_H
++
++#include <linux/types.h>
++#include <linux/in6.h>
++
++#define HAVE_LITTLE_ENDIAN
++
++#define NO_EXPIRY 1 /* For sec_as */
++
++#define ALG_AUTH_NONE 0
++#define ALG_AUTH_HMAC_MD5 1
++#define ALG_AUTH_HMAC_SHA1 2
++
++struct sec_as;
++struct ah_processing {
++ void *context;
++ struct sec_as *sas;
++ u_int8_t *key_auth;
++ u_int32_t key_auth_len;
++};
++
++struct antireplay {
++ u_int32_t count;
++ u_int32_t bitmap;
++};
++
++typedef struct {
++ u_int32_t A, B, C, D;
++ u_int32_t bitlen[2];
++ u_int8_t* buf_cur;
++ u_int8_t buf[64];
++} MD5_CTX;
++
++typedef struct {
++ u_int32_t A, B, C, D, E;
++ u_int32_t bitlen[2];
++ u_int8_t* buf_cur;
++ u_int8_t buf[64];
++} SHA1_CTX;
++
++
++
++int ah_hmac_md5_init (struct ah_processing *ahp, u_int8_t *key, u_int32_t key_len);
++void ah_hmac_md5_loop(struct ah_processing*, void*, u_int32_t);
++void ah_hmac_md5_result(struct ah_processing*, char*);
++int ah_hmac_sha1_init(struct ah_processing*, u_int8_t *key, u_int32_t key_len);
++void ah_hmac_sha1_loop(struct ah_processing*, void*, u_int32_t);
++void ah_hmac_sha1_result(struct ah_processing*, char*);
++
++
++#define AH_HDR_LEN 12 /* # of bytes for Next Header, Payload Length,
++ RESERVED, Security Parameters Index and
++
++ Sequence Number Field */
++
++void md5_init(MD5_CTX *ctx);
++void md5_over_block(MD5_CTX *ctx, u_int8_t* data);
++void create_M_blocks(u_int32_t* M, u_int8_t* data);
++void md5_compute(MD5_CTX *ctx, u_int8_t* data, u_int32_t len);
++void md5_final(MD5_CTX *ctx, u_int8_t* digest);
++
++void sha1_init(SHA1_CTX *ctx);
++void sha1_over_block(SHA1_CTX *ctx, u_int8_t* data);
++void create_W_blocks(u_int32_t* W, u_int8_t* data);
++void sha1_compute(SHA1_CTX *ctx, u_int8_t* data, u_int32_t len);
++void sha1_final(SHA1_CTX *ctx, u_int8_t* digest);
++
++struct mipv6_acq {
++ struct in6_addr coa;
++ struct in6_addr haddr;
++ struct in6_addr peer;
++ u_int32_t spi;
++};
++#define MIPV6_MAX_AUTH_DATA 20
++
++#define HMAC_MD5_HASH_LEN 16
++#define HMAC_SHA1_HASH_LEN 20
++#define HMAC_SHA1_KEY_SIZE 20
++#define HMAC_MD5_ICV_LEN 12 /* RFC 2403 */
++#define HMAC_SHA1_ICV_LEN 12 /* RFC 2404 */
++
++#endif /* _HMAC_H */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/ioctl_mn.c linux-2.4.25/net/ipv6/mobile_ip6/ioctl_mn.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/ioctl_mn.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/ioctl_mn.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,142 @@
++/*
++ * Mobile Node IOCTL Control device
++ *
++ * Authors:
++ * Henrik Petander <lpetande@tml.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <linux/ioctl.h>
++#include <net/ipv6.h>
++#include <asm/uaccess.h>
++
++#include "debug.h"
++#include "mdetect.h"
++#include "multiaccess_ctl.h"
++
++/* Reserved for local / experimental use */
++#define MAJOR_NUM 0xf9
++
++/* Get Care-of address information for Mobile Node */
++#define IOCTL_GET_CAREOFADDR _IOWR(MAJOR_NUM, 9, void *)
++
++#define MA_IOCTL_SET_IFACE_PREFERENCE _IOR (MAJOR_NUM, 13, void *)
++
++/* The name of the device file */
++#define CTLFILE "mipv6_dev"
++
++static int inuse = 0;
++
++static int mipv6_open(struct inode *inode, struct file *file)
++{
++ DEBUG(DBG_INFO, "(%p)\n", file);
++
++ if (inuse)
++ return -EBUSY;
++
++ inuse++;
++
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++static int mipv6_close(struct inode *inode, struct file *file)
++{
++ DEBUG(DBG_INFO, "(%p,%p)\n", inode, file);
++ inuse--;
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++int mipv6_ioctl(struct inode *inode, struct file *file,
++ unsigned int ioctl_num, /* The number of the ioctl */
++ unsigned long arg) /* The parameter to it */
++{
++ struct in6_addr careofaddr;
++
++ /* Switch according to the ioctl called */
++ switch (ioctl_num) {
++ case IOCTL_GET_CAREOFADDR:
++ DEBUG(DBG_DATADUMP, "IOCTL_GET_CAREOFADDR");
++ /* First get home address from user and then look up
++ * the care-of address and return it
++ */
++ if (copy_from_user(&careofaddr, (struct in6_addr *)arg,
++ sizeof(struct in6_addr)) < 0) {
++ DEBUG(DBG_WARNING, "Copy from user failed");
++ return -EFAULT;
++ }
++ mipv6_get_care_of_address(&careofaddr, &careofaddr);
++ if (copy_to_user((struct in6_addr *)arg, &careofaddr,
++ sizeof(struct in6_addr)) < 0) {
++ DEBUG(DBG_WARNING, "copy_to_user failed");
++ return -EFAULT;
++ }
++ break;
++ case MA_IOCTL_SET_IFACE_PREFERENCE:
++ DEBUG(DBG_INFO, "MA_IOCTL_SET_IFACE_PREFERENCE");
++ ma_ctl_set_preference(arg);
++ break;
++
++ default:
++ DEBUG(DBG_WARNING, "Unknown ioctl cmd (%d)", ioctl_num);
++ return -ENOENT;
++ }
++ return 0;
++}
++
++struct file_operations Fops = {
++ owner: THIS_MODULE,
++ read: NULL,
++ write: NULL,
++ poll: NULL,
++ ioctl: mipv6_ioctl,
++ open: mipv6_open,
++ release: mipv6_close
++};
++
++
++/* Initialize the module - Register the character device */
++int mipv6_ioctl_mn_init(void)
++{
++ int ret_val;
++
++ /* Register the character device (atleast try) */
++ ret_val = register_chrdev(MAJOR_NUM, CTLFILE, &Fops);
++
++ /* Negative values signify an error */
++ if (ret_val < 0) {
++ DEBUG(DBG_ERROR, "failed registering char device (err=%d)",
++ ret_val);
++ return ret_val;
++ }
++
++ DEBUG(DBG_INFO, "Device number %x, success", MAJOR_NUM);
++ return 0;
++}
++
++
++/* Cleanup - unregister the appropriate file from /proc */
++void mipv6_ioctl_mn_exit(void)
++{
++ int ret;
++ /* Unregister the device */
++ ret = unregister_chrdev(MAJOR_NUM, CTLFILE);
++
++ /* If there's an error, report it */
++ if (ret < 0)
++ DEBUG(DBG_ERROR, "errorcode: %d\n", ret);
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mdetect.c linux-2.4.25/net/ipv6/mobile_ip6/mdetect.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mdetect.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mdetect.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,1153 @@
++/*
++ * Movement Detection Module
++ *
++ * Authors:
++ * Henrik Petander <lpetande@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Handles the L3 movement detection of mobile node and also
++ * changing of its routes.
++ *
++ */
++
++/*
++ * Changes:
++ *
++ * Nanno Langstraat : Locking fixes
++ * Venkata Jagana : Locking fix
++ */
++
++#include <linux/autoconf.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/if_arp.h>
++#include <linux/route.h>
++#include <net/ipv6.h>
++#include <net/ip6_route.h>
++#include <net/addrconf.h>
++#include <net/mipglue.h>
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++#endif /* CONFIG_SYSCTL */
++
++#include "util.h"
++#include "mdetect.h"
++#include "mn.h"
++#include "debug.h"
++#include "multiaccess_ctl.h"
++
++#define START 0
++#define CONTINUE 1
++#define OK 2
++#define DEBUG_MDETECT 7
++
++#define DEF_RTR_POLL_IVAL 5 /* In seconds */
++
++#define NO_RTR 0
++#define RTR_SUSPECT 1
++#define CURR_RTR_OK 2
++
++#define RA_RCVD 0
++#define NA_RCVD 1
++#define TIMEOUT 2
++
++#define MIPV6_MDF_NONE 0x0
++#define MIPV6_MDF_HAS_RTR_PREV 0x1
++
++#define ROUTER_REACHABLE 1
++#define RADV_MISSED 2
++#define NOT_REACHABLE 3
++
++/* R_TIME_OUT paramater is used to make the decision when to change the
++ * default router, if the current one is unreachable. 2s is pretty aggressive
++ * and may result in hopping between two routers. OTOH a small value enhances
++ * the performance
++ */
++#define R_TIME_OUT 30*HZ
++
++/* maximum RA interval for router unreachability detection */
++#define MAX_RADV_INTERVAL 6*HZ /* 6000 ms... */
++
++/* Threshold for exponential resending of router solicitations */
++#define RS_RESEND_LINEAR 10*HZ
++
++#define EAGER_CELL_SWITCHING 1
++#define LAZY_CELL_SWITCHING 0
++#define RESPECT_DAD 1
++
++#define ROUTER_ADDRESS 0x20
++
++/* RA flags */
++#define ND_RA_FLAG_MANAGED 0x80
++#define ND_RA_FLAG_OTHER 0x40
++#define ND_RA_FLAG_HA 0x20
++
++/* DAD flags for global and link local addresses */
++
++#define COA_TENTATIVE 0x10
++#define LLADDR_TENTATIVE 0x01
++
++struct router {
++ struct list_head list;
++ struct in6_addr ll_addr;
++ struct in6_addr raddr; /* Also contains prefix */
++ __u8 link_addr[MAX_ADDR_LEN]; /* link layer address */
++ __u8 link_addr_len;
++ __u8 state;
++ __u8 is_current;
++ __u8 reachable;
++ int ifindex;
++ int pfix_len; /* Length of the network prefix */
++ unsigned long lifetime; /* from ra */
++ __u32 last_ns_sent;
++ __u32 last_ra_rcvd;
++ __u32 interval; /* ra interval in milliseconds, 0 if not set */
++ int glob_addr; /*Whether raddr contains also routers global address*/
++ __u8 flags; /* RA flags, for example ha */
++ struct in6_addr CoA; /* care-off address used with this router */
++ int extra_addr_route;
++};
++
++/* dad could also be RESPECT_DAD for duplicate address detection of
++ new care-of addresses */
++static int dad = 0;
++
++/* Only one choice, nothing else implemented */
++int max_rtr_reach_time = DEF_RTR_POLL_IVAL;
++
++
++int eager_cell_switching = EAGER_CELL_SWITCHING; /* Can be set to 0 via proc */
++static spinlock_t router_lock;
++static spinlock_t ho_lock;
++
++static void coa_timer_handler(unsigned long arg);
++static void timer_handler(unsigned long foo);
++static struct router *curr_router = NULL, *next_router = NULL;
++static struct timer_list r_timer = { function: timer_handler };
++static struct timer_list coa_timer = { function: coa_timer_handler };
++#define MAX_ROUTERS 1000
++static LIST_HEAD(rtr_list);
++static int num_routers = 0;
++static struct handoff *_ho = NULL;
++/*
++ * Functions for handling the default router list, which movement
++ * detection uses for avoiding loops etc.
++ */
++
++/* TODO: Send NS to router after MAX interval has passed from last RA */
++static int mipv6_router_state(struct router *rtr) {
++ if (rtr->interval) {
++ if (time_before(jiffies, (rtr->last_ra_rcvd + (rtr->interval * HZ) / 1000)))
++ return ROUTER_REACHABLE;
++ else
++ return NOT_REACHABLE;
++ }
++ else
++ if (time_after(jiffies, rtr->last_ra_rcvd + (rtr->lifetime * HZ)))
++ return NOT_REACHABLE;
++ return ROUTER_REACHABLE;
++}
++
++/* searches for a specific router or any router that is reachable,
++ * if address is NULL. Also deletes obsolete routers.
++ */
++static void mipv6_router_gc(void)
++{
++ struct router *curr = NULL;
++ struct list_head *lh, *lh_tmp;
++
++ DEBUG_FUNC();
++
++ list_for_each_safe(lh, lh_tmp, &rtr_list) {
++ curr = list_entry(lh, struct router, list);
++ if (mipv6_router_state(curr) == NOT_REACHABLE && !curr->is_current) {
++ num_routers--;
++ list_del_init(&curr->list);
++ DEBUG(DBG_DATADUMP, "Deleting unreachable router %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&curr->raddr));
++ kfree(curr);
++ }
++ else {
++ DEBUG(DBG_DATADUMP, "NOT Deleting router %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&curr->raddr));
++ }
++ }
++}
++
++static struct router *mipv6_rtr_get(struct in6_addr *search_addr)
++{
++ struct router *rtr = NULL;
++ struct list_head *lh;
++
++ DEBUG_FUNC();
++
++ if (search_addr == NULL)
++ return NULL;
++ list_for_each(lh, &rtr_list) {
++ rtr = list_entry(lh, struct router, list);
++ if(!ipv6_addr_cmp(search_addr, &rtr->raddr)) {
++ return rtr;
++ }
++ }
++ return NULL;
++}
++
++/*
++ * Adds router to list
++ */
++static struct router *mipv6_rtr_add(struct router *nrt)
++{
++
++ struct router *rptr;
++
++ DEBUG_FUNC();
++
++ /* check if someone is trying DoS attack, or we just have some
++ memory leaks... */
++ if (num_routers > MAX_ROUTERS) {
++ DEBUG(DBG_CRITICAL,
++ "failed to add new router, MAX_ROUTERS exceeded");
++ return NULL;
++ }
++
++ rptr = kmalloc(sizeof(struct router), GFP_ATOMIC);
++ if (rptr) {
++ memcpy(rptr, nrt, sizeof(struct router));
++ list_add(&rptr->list, &rtr_list);
++ num_routers++;
++ }
++ DEBUG(DBG_INFO, "Adding router: %x:%x:%x:%x:%x:%x:%x:%x, "
++ "lifetime : %d sec, adv.interval: %d millisec",
++ NIPV6ADDR(&rptr->raddr), rptr->lifetime, rptr->interval);
++
++ DEBUG(DBG_INFO, "num_routers after addition: %d", num_routers);
++ return rptr;
++}
++
++/* Cleans up the list */
++static void list_free(struct router **curr_router_p)
++{
++ struct router *tmp;
++ struct list_head *lh, *lh_tmp;
++
++ DEBUG_FUNC();
++
++ DEBUG(DBG_INFO, "Freeing the router list");
++ /* set curr_router->prev_router and curr_router NULL */
++ *curr_router_p = NULL;
++ list_for_each_safe(lh, lh_tmp, &rtr_list) {
++ tmp = list_entry(lh, struct router, list);
++ DEBUG(DBG_INFO, "%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&tmp->ll_addr));
++ list_del(&tmp->list);
++ kfree(tmp);
++ num_routers--;
++ }
++}
++
++int rs_state = START;
++
++/* Sends router solicitations to all valid devices
++ * source = link local address (of sending interface)
++ * dstaddr = all routers multicast address
++ * Solicitations are sent at an exponentially decreasing rate
++ *
++ * TODO: send solicitation first at a normal rate (from ipv6) and
++ * after that use the exponentially increasing intervals
++ */
++static int rs_send(void)
++{
++ struct net_device *dev;
++ struct in6_addr raddr, lladdr;
++ struct inet6_dev *in6_dev = NULL;
++ static int num_rs;
++
++ if (rs_state == START) {
++ num_rs = 0;
++ rs_state = CONTINUE;
++ } else if (num_rs++ > MAX_RTR_SOLICITATIONS)
++ return HZ;
++
++ ipv6_addr_all_routers(&raddr);
++ read_lock(&dev_base_lock);
++
++ /* Send router solicitations to all interfaces */
++ for (dev = dev_base; dev; dev = dev->next) {
++ if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ETHER) {
++ DEBUG(DBG_DATADUMP, "Sending RS to device %s",
++ dev->name);
++ if (!ipv6_get_lladdr(dev, &lladdr)) {
++ ndisc_send_rs(dev, &lladdr, &raddr);
++ in6_dev = in6_dev_get(dev);
++ in6_dev->if_flags |= IF_RS_SENT;
++ in6_dev_put(in6_dev);
++ } else {
++ DEBUG(DBG_DATADUMP, "%s: device doesn't have link-local address!\n", dev->name);
++ continue;
++ }
++ }
++
++ }
++ read_unlock(&dev_base_lock);
++ return RTR_SOLICITATION_INTERVAL;
++}
++
++/* Create a new CoA for MN and also add a route to it if it is still tentative
++ to allow MN to get packets to the address immediately
++ */
++static int form_coa(struct in6_addr *coa, struct in6_addr *pfix,
++ int plen, int ifindex)
++{
++ struct net_device *dev;
++ struct inet6_dev *in6_dev;
++ int ret = 0;
++
++ if ((dev = dev_get_by_index(ifindex)) == NULL) {
++ DEBUG(DBG_WARNING, "Device is not present");
++ return -1;
++ }
++ if ((in6_dev = in6_dev_get(dev)) == NULL) {
++ DEBUG(DBG_WARNING, "inet6_dev is not present");
++ dev_put(dev);
++ return -1;
++ }
++ coa->s6_addr32[0] = pfix->s6_addr32[0];
++ coa->s6_addr32[1] = pfix->s6_addr32[1];
++
++ if (ipv6_generate_eui64(coa->s6_addr + 8, dev) &&
++ ipv6_inherit_eui64(coa->s6_addr + 8, in6_dev)) {
++ in6_dev_put(in6_dev);
++ dev_put(dev);
++ return -1;
++ }
++ if (ipv6_chk_addr(coa, dev) == 0) {
++ DEBUG(DBG_WARNING, "care-of address still tentative");
++ ret = 1;
++ }
++ DEBUG(DBG_INFO, "Formed new CoA: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(coa));
++
++ in6_dev_put(in6_dev);
++ dev_put(dev);
++ return ret;
++}
++
++static inline int rtr_is_gw(struct router *rtr, struct rt6_info *rt)
++{
++ return ((rt->rt6i_flags & RTF_GATEWAY) &&
++ !ipv6_addr_cmp(&rt->rt6i_gateway, &rtr->ll_addr));
++}
++
++static inline int is_prefix_route(struct router *rtr, struct rt6_info *rt)
++{
++ return (!(rt->rt6i_flags & RTF_GATEWAY) &&
++ mipv6_prefix_compare(&rt->rt6i_dst.addr, &rtr->raddr,
++ rtr->pfix_len));
++}
++
++/*
++ * Function that determines whether given rt6_info should be destroyed
++ * (negative => destroy rt6_info, zero or positive => do nothing)
++ */
++static int mn_route_cleaner(struct rt6_info *rt, void *arg)
++{
++ int type;
++
++ struct router *rtr = (struct router *)arg;
++
++ int ret = -1;
++
++ DEBUG_FUNC();
++
++ if (!rt || !rtr) {
++ DEBUG(DBG_ERROR, "mn_route_cleaner: rt or rtr NULL");
++ return 0;
++ }
++
++ /* Do not delete routes to local addresses or to multicast
++ * addresses, since we need them to get router advertisements
++ * etc. Multicast addresses are more tricky, but we don't
++ * delete them in any case. The routing mechanism is not optimal for
++ * multihoming.
++ *
++ * Also keep all new prefix routes, gateway routes through rtr and
++ * all remaining default routes (including those used for reverse
++ * tunneling)
++ */
++ type = ipv6_addr_type(&rt->rt6i_dst.addr);
++
++ if ((type & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) ||
++ rt->rt6i_dev == &loopback_dev || rtr_is_gw(rtr, rt) ||
++ is_prefix_route(rtr, rt) || (rt->rt6i_flags & RTF_DEFAULT))
++ ret = 0;
++
++ /* delete all others */
++
++ if (rt->rt6i_dev != &loopback_dev) {
++ DEBUG(DEBUG_MDETECT,
++ "%s route:\n"
++ "dev: %s,\n"
++ "gw: %x:%x:%x:%x:%x:%x:%x:%x,\n"
++ "flags: %x,\n"
++ "metric: %d,\n"
++ "src: %x:%x:%x:%x:%x:%x:%x:%x,\n"
++ "dst: %x:%x:%x:%x:%x:%x:%x:%x,\n"
++ "plen: %d\n",
++ (ret ? "Deleting" : "Keeping"),
++ rt->rt6i_dev->name,
++ NIPV6ADDR(&rt->rt6i_gateway),
++ rt->rt6i_flags,
++ rt->rt6i_metric,
++ NIPV6ADDR(&rt->rt6i_src.addr),
++ NIPV6ADDR(&rt->rt6i_dst.addr),
++ rt->rt6i_dst.plen);
++ }
++ return ret;
++}
++
++/*
++ * Deletes old routes
++ */
++static __inline__ void delete_routes(struct router *rtr)
++{
++ DEBUG_FUNC();
++
++ /* Routing table is locked to ensure that nobody uses its */
++ write_lock_bh(&rt6_lock);
++ DEBUG(DBG_INFO, "mipv6: Purging routes");
++ /* TODO: Does not prune, should it? */
++ fib6_clean_tree(&ip6_routing_table,
++ mn_route_cleaner, 0, rtr);
++ write_unlock_bh(&rt6_lock);
++
++}
++
++
++static __inline__ void delete_coas(struct router *rtr)
++{
++ struct net_device *dev;
++ struct inet6_dev *idev;
++ struct inet6_ifaddr *ifa;
++
++ dev = dev_get_by_index(rtr->ifindex);
++ if (!dev)
++ return;
++
++ idev = in6_dev_get(dev);
++
++ if (idev) {
++ read_lock_bh(&idev->lock);
++ ifa = idev->addr_list;
++ while (ifa) {
++ int keep;
++ spin_lock(&ifa->lock);
++
++ keep = (ifa->flags&(IFA_F_PERMANENT|IFA_F_HOMEADDR) ||
++ !ipv6_addr_cmp(&ifa->addr, &rtr->CoA));
++
++ spin_unlock(&ifa->lock);
++
++ if (keep)
++ ifa = ifa->if_next;
++ else {
++ in6_ifa_hold(ifa);
++ read_unlock_bh(&idev->lock);
++
++ ipv6_del_addr(ifa);
++
++ read_lock_bh(&idev->lock);
++ ifa = idev->addr_list;
++ }
++ }
++ read_unlock_bh(&idev->lock);
++ in6_dev_put(idev);
++ }
++ dev_put(dev);
++}
++
++int next_mdet_state[3][3] = {{CURR_RTR_OK, NO_RTR, NO_RTR},
++ {CURR_RTR_OK, CURR_RTR_OK, NO_RTR},
++ {CURR_RTR_OK, CURR_RTR_OK, RTR_SUSPECT}};
++
++char *states[3] = {"NO_RTR", "RTR_SUSPECT", "CURR_RTR_OK"};
++char *events[3] = {"RA_RCVD", "NA_RCVD", "TIMEOUT"};
++
++/* State transitions
++ * NO_RTR, RA_RCVD -> CURR_RTR_OK
++ * NO_RTR, NA_RCVD -> NO_RTR
++ * NO_RTR, TIMEOUT -> NO_RTR
++
++ * RTR_SUSPECT, RA_RCVD -> CURR_RTR_OK
++ * RTR_SUSPECT, NA_RCVD -> CURR_RTR_OK
++ * RTR_SUSPECT, TIMEOUT -> NO_RTR
++
++ * CURR_RTR_OK, RA_RCVD -> CURR_RTR_OK
++ * CURR_RTR_OK, NA_RCVD -> CURR_RTR_OK
++ * CURR_RTR_OK, TIMEOUT -> RTR_SUSPECT
++ */
++static int _curr_state = NO_RTR;
++
++#if 0
++static int get_mdet_state(void){
++ int state;
++ spin_lock_bh(&router_lock);
++ state = _curr_state;
++ spin_unlock_bh(&router_lock);
++ return state;
++}
++#endif
++
++/* Needs to be called with router_lock locked */
++static int mdet_statemachine(int event)
++{
++
++ if (event > 2 || _curr_state > 2) {
++ DEBUG(DBG_ERROR, "Got illegal event or curr_state");
++ return -1;
++ }
++
++ DEBUG(DBG_DATADUMP, "Got event %s and curr_state is %s",
++ events[event], states[_curr_state]);
++
++ _curr_state = next_mdet_state[_curr_state][event];
++ DEBUG(DBG_DATADUMP, "Next state is %s", states[_curr_state]);
++ return _curr_state;
++}
++
++static void mipv6_do_ll_dad(int ifindex)
++{
++ struct net_device *dev = dev_get_by_index(ifindex);
++ if (dev) {
++ struct in6_addr lladdr;
++ struct inet6_ifaddr *ifa;
++ if (!ipv6_get_lladdr(dev, &lladdr) &&
++ (ifa = ipv6_get_ifaddr(&lladdr, dev)) != NULL) {
++ spin_lock_bh(&ifa->lock);
++ if (!(ifa->flags & IFA_F_TENTATIVE)) {
++ ifa->flags |= IFA_F_TENTATIVE;
++ spin_unlock_bh(&ifa->lock);
++ addrconf_dad_start(ifa, 0);
++ } else
++ spin_unlock_bh(&ifa->lock);
++
++ }
++ dev_put(dev);
++ }
++}
++/*
++ * Changes the router, called from ndisc.c if mipv6_router_event
++ * returns true.
++ */
++
++static void mipv6_change_router(void)
++{
++ struct in6_addr coa;
++ int ret, ifindex;
++
++ DEBUG_FUNC();
++
++
++ if (next_router == NULL)
++ return;
++
++ spin_lock(&router_lock);
++
++
++ if (curr_router != NULL &&
++ !ipv6_addr_cmp(&curr_router->ll_addr, &next_router->ll_addr)) {
++ DEBUG(DBG_INFO,"Trying to handoff from: "
++ "%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&curr_router->ll_addr));
++ DEBUG(DBG_INFO,"Trying to handoff to: "
++ "%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&next_router->ll_addr));
++ next_router = NULL; /* Let's not leave dangling pointers */
++ spin_unlock(&router_lock);
++ return;
++ }
++ ret = form_coa(&next_router->CoA, &next_router->raddr,
++ next_router->pfix_len, next_router->ifindex);
++ if (ret < 0) {
++ DEBUG(DBG_ERROR, "handoff: Creation of coa failed");
++ spin_unlock(&router_lock);
++ return;
++ } else if (ret > 0)
++ next_router->flags |= COA_TENTATIVE;
++
++ mdet_statemachine(RA_RCVD); /* TODO: What if DAD fails... */
++ if (next_router->interval)
++ mod_timer(&r_timer, jiffies +
++ (next_router->interval * HZ)/1000);
++ else
++ mod_timer(&r_timer, jiffies + max_rtr_reach_time * HZ);
++
++
++ if (ret == 0) {
++ ipv6_addr_copy(&coa, &next_router->CoA);
++ ifindex = next_router->ifindex;
++ spin_unlock(&router_lock);
++ mipv6_mdet_finalize_ho(&coa, ifindex);
++ return;
++ }
++ spin_unlock(&router_lock);
++
++}
++static unsigned long ns_send(void)
++{
++ struct neighbour *neigh;
++ struct net_device *dev;
++ struct in6_addr *raddr;
++
++ DEBUG(DBG_DATADUMP, "Sending Neighbour solicitation to default router to verify its reachability");
++ if (!curr_router)
++ return HZ;
++ if ((dev = dev_get_by_index(curr_router->ifindex)) == NULL)
++ return HZ;
++ if ((neigh = ndisc_get_neigh(dev, &curr_router->ll_addr)) == NULL) {
++ dev_put(dev);
++ return HZ;
++ }
++ if (curr_router->glob_addr)
++ raddr = &curr_router->raddr;
++ else
++ raddr = &curr_router->ll_addr;
++
++ curr_router->last_ns_sent = jiffies;
++ ndisc_send_ns(dev, neigh, raddr, raddr, NULL);
++
++ neigh_release(neigh);
++ dev_put(dev);
++ return HZ/5; /* Wait 200ms for a reply */
++}
++
++static int na_rcvd(void)
++{
++ int neigh_ok = 0;
++ struct neighbour *neigh;
++ struct net_device *dev;
++
++ if (!curr_router)
++ return 0;
++ if ((dev = dev_get_by_index(curr_router->ifindex)) == NULL)
++ return 0;
++ if ((neigh = ndisc_get_neigh(dev, &curr_router->ll_addr)) == NULL) {
++ dev_put(dev);
++ return 0;
++ }
++ if (neigh->flags & NTF_ROUTER &&
++ (time_after(neigh->confirmed, curr_router->last_ns_sent) ||
++ neigh->confirmed == curr_router->last_ns_sent)) {
++ neigh_ok = 1;
++ DEBUG(DBG_DATADUMP, "Mdetect event: NA rcvd from curr rtr");
++ } else
++ DEBUG(DBG_DATADUMP, "Mdetect event: NA NOT rcvd from curr rtr within time limit");
++ neigh_release(neigh);
++ dev_put(dev);
++ return neigh_ok;
++}
++
++static void coa_timer_handler(unsigned long dummy)
++{
++
++ spin_lock_bh(&ho_lock);
++ if (_ho) {
++ DEBUG(DBG_INFO, "Starting handoff after DAD");
++ mipv6_mobile_node_moved(_ho);
++ kfree(_ho);
++ _ho = NULL;
++ }
++ spin_unlock_bh(&ho_lock);
++}
++static void timer_handler(unsigned long foo)
++{
++ unsigned long timeout;
++ int state;
++ spin_lock_bh(&router_lock);
++
++ if (_curr_state != NO_RTR)
++ rs_state = START;
++
++ if (_curr_state == RTR_SUSPECT && na_rcvd()) {
++ state = mdet_statemachine(NA_RCVD);
++ timeout = curr_router->interval ? curr_router->interval : max_rtr_reach_time * HZ;
++ } else {
++ state = mdet_statemachine(TIMEOUT);
++ if (state == NO_RTR)
++ timeout = rs_send();
++ else /* RTR_SUSPECT */
++ timeout = ns_send();
++ }
++ if (!timeout)
++ timeout = HZ;
++
++ mipv6_router_gc();
++ mod_timer(&r_timer, jiffies + timeout);
++ spin_unlock_bh(&router_lock);
++}
++
++/**
++ * mipv6_get_care_of_address - get node's care-of primary address
++ * @homeaddr: one of node's home addresses
++ * @coaddr: buffer to store care-of address
++ *
++ * Stores the current care-of address in the @coaddr, assumes
++ * addresses in EUI-64 format. Since node might have several home
++ * addresses caller MUST supply @homeaddr. If node is at home
++ * @homeaddr is stored in @coaddr. Returns 0 on success, otherwise a
++ * negative value.
++ **/
++int mipv6_get_care_of_address(
++ struct in6_addr *homeaddr, struct in6_addr *coaddr)
++{
++
++ DEBUG_FUNC();
++
++ if (homeaddr == NULL)
++ return -1;
++ spin_lock_bh(&router_lock);
++ if (curr_router == NULL || mipv6_mn_is_at_home(homeaddr) ||
++ mipv6_prefix_compare(homeaddr, &curr_router->raddr, 64) ||
++ curr_router->flags&COA_TENTATIVE) {
++ DEBUG(DBG_INFO,
++ "mipv6_get_care_of_address: returning home address");
++ ipv6_addr_copy(coaddr, homeaddr);
++ spin_unlock_bh(&router_lock);
++ return 0;
++
++ }
++
++ /* At home or address check failure probably due to dad wait */
++ if (mipv6_prefix_compare(&curr_router->raddr, homeaddr,
++ curr_router->pfix_len)
++ || (dad == RESPECT_DAD &&
++ (ipv6_chk_addr(coaddr, NULL) == 0))) {
++ ipv6_addr_copy(coaddr, homeaddr);
++ } else {
++ ipv6_addr_copy(coaddr, &curr_router->CoA);
++ }
++
++ spin_unlock_bh(&router_lock);
++ return 0;
++}
++
++int mipv6_mdet_del_if(int ifindex)
++{
++ struct router *curr = NULL;
++ struct list_head *lh, *lh_tmp;
++
++ spin_lock_bh(&router_lock);
++ list_for_each_safe(lh, lh_tmp, &rtr_list) {
++ curr = list_entry(lh, struct router, list);
++ if (curr->ifindex == ifindex) {
++ num_routers--;
++ list_del_init(&curr->list);
++ DEBUG(DBG_DATADUMP, "Deleting router %x:%x:%x:%x:%x:%x:%x:%x on interface %d",
++ NIPV6ADDR(&curr->raddr), ifindex);
++ if (curr_router == curr)
++ curr_router = NULL;
++ kfree(curr);
++ }
++ }
++ spin_unlock_bh(&router_lock);
++ return 0;
++}
++
++void mipv6_mdet_retrigger_ho(void)
++{
++ struct handoff ho;
++
++ spin_lock_bh(&router_lock);
++ if (curr_router != NULL) {
++ ho.coa = &curr_router->CoA;
++ ho.plen = curr_router->pfix_len;
++ ho.ifindex = curr_router->ifindex;
++ ipv6_addr_copy(&ho.rtr_addr, &curr_router->raddr);
++ ho.home_address = (curr_router->glob_addr &&
++ curr_router->flags&ND_RA_FLAG_HA);
++ }
++ spin_unlock_bh(&router_lock);
++ mipv6_mobile_node_moved(&ho);
++}
++
++void mipv6_mdet_set_curr_rtr_reachable(int reachable)
++{
++ spin_lock_bh(&router_lock);
++ if (curr_router != NULL) {
++ curr_router->reachable = reachable;
++ }
++ spin_unlock_bh(&router_lock);
++
++}
++
++int mipv6_mdet_finalize_ho(const struct in6_addr *coa, const int ifindex)
++{
++ int dummy;
++ struct handoff ho;
++ struct router *tmp;
++ struct net_device *dev;
++ struct in6_addr ll_addr;
++
++ spin_lock_bh(&router_lock);
++
++ if (!next_router) {
++ spin_unlock_bh(&router_lock);
++ return 0;
++ }
++
++ dev = dev_get_by_index(next_router->ifindex);
++
++ if (ipv6_get_lladdr(dev, &ll_addr) == 0) {
++ if (ipv6_addr_cmp(&ll_addr, coa) == 0)
++ DEBUG(DBG_INFO, "DAD for link local address completed");
++ next_router->flags &= ~LLADDR_TENTATIVE;
++ }
++
++ dev_put(dev);
++
++ if (mipv6_prefix_compare(coa, &next_router->CoA,
++ next_router->pfix_len)) {
++ DEBUG(DBG_INFO, "DAD for Care-of address completed");
++ next_router->flags &= ~COA_TENTATIVE;
++ }
++ if (!(next_router->flags&LLADDR_TENTATIVE) && !(next_router->flags&COA_TENTATIVE)) {
++ DEBUG(DBG_INFO, "%s: Proceeding with handoff after DAD\n", __FUNCTION__);
++ tmp = curr_router;
++ curr_router = next_router;
++ curr_router->is_current = 1;
++ next_router = NULL;
++ curr_router->flags &= ~COA_TENTATIVE;
++ delete_routes(curr_router);
++ delete_coas(curr_router);
++ if (tmp) {
++ struct net_device *dev_old = dev_get_by_index(tmp->ifindex);
++ struct rt6_info *rt = NULL;
++ if (dev_old) {
++ rt = rt6_get_dflt_router(&tmp->ll_addr, dev_old);
++ dev_put(dev_old);
++ }
++ if (rt)
++ ip6_del_rt(rt, NULL);
++ tmp->is_current = 0;
++ }
++
++ ma_ctl_upd_iface(curr_router->ifindex, MA_IFACE_CURRENT, &dummy);
++ ma_ctl_upd_iface(curr_router->ifindex, MA_IFACE_CURRENT, &dummy);
++
++
++ ho.coa = &curr_router->CoA;
++ ho.plen = curr_router->pfix_len;
++ ho.ifindex = curr_router->ifindex;
++ ipv6_addr_copy(&ho.rtr_addr, &curr_router->raddr);
++ ho.home_address = (curr_router->glob_addr &&
++ curr_router->flags&ND_RA_FLAG_HA);
++
++ spin_unlock_bh(&router_lock);
++ mipv6_mobile_node_moved(&ho);
++ } else
++ spin_unlock_bh(&router_lock);
++ return 0;
++}
++/* Decides whether router candidate is the same router as current rtr
++ * based on prefix / global addresses of the routers and their link local
++ * addresses
++ */
++static int is_current_rtr(struct router *nrt, struct router *crt)
++{
++ DEBUG_FUNC();
++
++ DEBUG(DEBUG_MDETECT, "Current router: "
++ "%x:%x:%x:%x:%x:%x:%x:%x and", NIPV6ADDR(&crt->raddr));
++ DEBUG(DEBUG_MDETECT, "Candidate router: "
++ "%x:%x:%x:%x:%x:%x:%x:%x", NIPV6ADDR(&nrt->raddr));
++
++ return (!ipv6_addr_cmp(&nrt->raddr,&crt->raddr) &&
++ !ipv6_addr_cmp(&nrt->ll_addr, &crt->ll_addr));
++}
++
++/*
++ * Change next router to nrtr
++ * Returns 1, if router has been changed.
++ */
++
++static int change_next_rtr(struct router *nrtr, struct router *ortr)
++{
++ int changed = 0;
++ DEBUG_FUNC();
++
++ if (!next_router || ipv6_addr_cmp(&nrtr->raddr, &next_router->raddr)) {
++ changed = 1;
++ }
++ next_router = nrtr;
++ return changed;
++}
++static int clean_ncache(struct router *nrt, struct router *ort, int same_if)
++{
++ struct net_device *ortdev;
++ DEBUG_FUNC();
++
++ /* Always call ifdown after a handoff to ensure proper routing */
++
++ if (!ort)
++ return 0;
++ if ((ortdev = dev_get_by_index(ort->ifindex)) == NULL) {
++ DEBUG(DBG_WARNING, "Device is not present");
++ return -1;
++ }
++ neigh_ifdown(&nd_tbl, ortdev);
++ dev_put(ortdev);
++ return 0;
++}
++
++static int mdet_get_if_preference(int ifi)
++{
++ int pref = 0;
++
++ DEBUG_FUNC();
++
++ pref = ma_ctl_get_preference(ifi);
++
++ DEBUG(DEBUG_MDETECT, "ifi: %d preference %d", ifi, pref);
++
++ return pref;
++}
++
++/*
++ * Called from mipv6_mn_ra_rcv to determine whether to do a handoff.
++ */
++static int mipv6_router_event(struct router *rptr)
++{
++ struct router *nrt = NULL;
++ int new_router = 0, same_if = 1;
++ int oldstate = _curr_state;
++ int addrtype = ipv6_addr_type(&rptr->raddr);
++
++ DEBUG_FUNC();
++
++ if (rptr->lifetime == 0)
++ return MIPV6_IGN_RTR;
++ DEBUG(DEBUG_MDETECT, "Received a RA from router: "
++ "%x:%x:%x:%x:%x:%x:%x:%x", NIPV6ADDR(&rptr->raddr));
++ spin_lock(&router_lock);
++
++ /* Add or update router entry */
++ if ((nrt = mipv6_rtr_get(&rptr->raddr)) == NULL) {
++ if (addrtype == IPV6_ADDR_ANY || (nrt = mipv6_rtr_add(rptr)) == NULL) {
++ spin_unlock(&router_lock);
++ return MIPV6_IGN_RTR;
++ }
++ DEBUG(DBG_INFO, "Router not on list,adding it to the list");
++ new_router = 1;
++ }
++ nrt->last_ra_rcvd = jiffies;
++ nrt->state = ROUTER_REACHABLE;
++ nrt->interval = rptr->interval;
++ nrt->lifetime = rptr->lifetime;
++ nrt->ifindex = rptr->ifindex;
++ nrt->flags = rptr->flags;
++ nrt->glob_addr = rptr->glob_addr;
++
++ /* Whether from current router */
++ if (curr_router && curr_router->reachable &&
++ is_current_rtr(nrt, curr_router)) {
++ if (nrt->interval)
++ mod_timer(&r_timer, jiffies + (nrt->interval * HZ)/1000);
++ else
++ mod_timer(&r_timer, jiffies + max_rtr_reach_time * HZ);
++ mdet_statemachine(RA_RCVD);
++ spin_unlock(&router_lock);
++ return MIPV6_ADD_RTR;
++ } else if (oldstate == NO_RTR) {
++ rt6_purge_dflt_routers(0); /* For multiple interface case */
++ DEBUG(DBG_INFO, "No router or router not reachable, switching to new one");
++ goto handoff;
++ }
++ if (!curr_router) {
++ /* Startup */
++ goto handoff;
++ }
++ /* Router behind same interface as current one ?*/
++ same_if = (nrt->ifindex == curr_router->ifindex);
++ /* Switch to new router behind same interface if eager cell
++ * switching is used or if the interface is preferred
++ */
++ if ((new_router && eager_cell_switching && same_if) ||
++ (mdet_get_if_preference(nrt->ifindex) >
++ mdet_get_if_preference(curr_router->ifindex))) {
++ DEBUG(DBG_INFO, "Switching to new router.");
++ goto handoff;
++ }
++
++ /* No handoff, don't add default route */
++ DEBUG(DEBUG_MDETECT, "Ignoring RA");
++ spin_unlock(&router_lock);
++ return MIPV6_IGN_RTR;
++handoff:
++ clean_ncache(nrt, curr_router, same_if);
++ nrt->reachable = 1;
++ if (same_if && change_next_rtr(nrt, curr_router)) {
++ mipv6_do_ll_dad(nrt->ifindex);
++ nrt->flags |= LLADDR_TENTATIVE;
++ }
++ spin_unlock(&router_lock);
++
++ return MIPV6_CHG_RTR;
++}
++
++/*
++ * Called from ndisc.c's router_discovery.
++ */
++
++static inline int ret_to_ha(struct in6_addr *addr)
++{
++ int res = 0;
++ struct mn_info *minfo;
++ read_lock(&mn_info_lock);
++ minfo = mipv6_mninfo_get_by_ha(addr);
++ if (minfo != NULL) {
++ spin_lock(&minfo->lock);
++ if (minfo->has_home_reg) {
++ res = 1;
++ }
++ spin_unlock(&minfo->lock);
++ }
++ read_unlock(&mn_info_lock);
++ return res;
++}
++
++static int mipv6_mn_ra_rcv(struct sk_buff *skb, struct ndisc_options *ndopts)
++{
++ int ifi = ((struct inet6_skb_parm *)skb->cb)->iif;
++ struct ra_msg *ra = (struct ra_msg *) skb->h.raw;
++ struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
++ struct router nrt;
++ struct in6_addr *ha = NULL;
++ u8 *lladdr = NULL;
++ int res;
++ DEBUG_FUNC();
++
++ memset(&nrt, 0, sizeof(struct router));
++
++ if (ra->icmph.icmp6_home_agent) {
++ nrt.flags |= ND_RA_FLAG_HA;
++ DEBUG(DBG_DATADUMP, "RA has ND_RA_FLAG_HA up");
++ }
++
++ if (ra->icmph.icmp6_addrconf_managed) {
++ nrt.flags |= ND_RA_FLAG_MANAGED;
++ DEBUG(DBG_DATADUMP, "RA has ND_RA_FLAG_MANAGED up");
++ }
++
++ if (ra->icmph.icmp6_addrconf_other) {
++ nrt.flags |= ND_RA_FLAG_OTHER;
++ DEBUG(DBG_DATADUMP, "RA has ND_RA_FLAG_OTHER up");
++ }
++
++ ipv6_addr_copy(&nrt.ll_addr, saddr);
++ nrt.ifindex = ifi;
++ nrt.lifetime = ntohs(ra->icmph.icmp6_rt_lifetime);
++
++ if (ndopts->nd_opts_src_lladdr) {
++ lladdr = (u8 *) ndopts->nd_opts_src_lladdr+2;
++ nrt.link_addr_len = skb->dev->addr_len;
++ memcpy(nrt.link_addr, lladdr, nrt.link_addr_len);
++ }
++ if (ndopts->nd_opts_pi) {
++ struct nd_opt_hdr *p;
++ for (p = ndopts->nd_opts_pi;
++ p;
++ p = ndisc_next_option(p, ndopts->nd_opts_pi_end)) {
++ struct prefix_info *pinfo;
++ int update = 0;
++
++ pinfo = (struct prefix_info *) p;
++
++ if (!pinfo->autoconf)
++ continue;
++
++ if ((pinfo->router_address &&
++ (update = ret_to_ha(&pinfo->prefix))) ||
++ ipv6_addr_type(&nrt.raddr) != IPV6_ADDR_UNICAST) {
++ ipv6_addr_copy(&nrt.raddr, &pinfo->prefix);
++ nrt.pfix_len = pinfo->prefix_len;
++ if (pinfo->router_address)
++ nrt.glob_addr = 1;
++ else
++ nrt.glob_addr = 0;
++ if (update)
++ ha = &pinfo->prefix;
++ DEBUG(DBG_DATADUMP, "Address of the received "
++ "prefix info option: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&nrt.raddr));
++ DEBUG(DBG_DATADUMP, "the length of the prefix is %d",
++ nrt.pfix_len);
++ }
++ }
++ }
++ if (ndopts->nd_opts_rai) {
++ nrt.interval = ntohl(*(__u32 *)(ndopts->nd_opts_rai+4));
++ DEBUG(DBG_DATADUMP,
++ "received router interval option with interval : %d ",
++ nrt.interval / HZ);
++
++ if (nrt.interval > MAX_RADV_INTERVAL) {
++ nrt.interval = 0;
++ DEBUG(DBG_DATADUMP, "but we are using: %d, "
++ "because interval>MAX_RADV_INTERVAL",
++ nrt.interval / HZ);
++ }
++ }
++
++ res = mipv6_router_event(&nrt);
++
++ if (ha && lladdr) {
++ mipv6_mn_ha_nd_update(__dev_get_by_index(ifi), ha, lladdr);
++ }
++ return res;
++}
++
++int __init mipv6_initialize_mdetect(void)
++{
++
++ DEBUG_FUNC();
++
++ spin_lock_init(&router_lock);
++ spin_lock_init(&ho_lock);
++ init_timer(&coa_timer);
++ init_timer(&r_timer);
++ r_timer.expires = jiffies + HZ;
++ add_timer(&r_timer);
++
++ /* Actual HO, also deletes old routes after the addition of new ones
++ in ndisc */
++ MIPV6_SETCALL(mipv6_change_router, mipv6_change_router);
++
++ MIPV6_SETCALL(mipv6_ra_rcv, mipv6_mn_ra_rcv);
++
++ return 0;
++}
++
++int __exit mipv6_shutdown_mdetect()
++{
++
++ DEBUG_FUNC();
++
++ MIPV6_RESETCALL(mipv6_ra_rcv);
++ MIPV6_RESETCALL(mipv6_change_router);
++ spin_lock_bh(&router_lock);
++ spin_lock(&ho_lock);
++ del_timer(&coa_timer);
++ del_timer(&r_timer);
++ /* Free the memory allocated by router list */
++ list_free(&curr_router);
++ if (_ho)
++ kfree(_ho);
++ spin_unlock(&ho_lock);
++ spin_unlock_bh(&router_lock);
++ return 0;
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mdetect.h linux-2.4.25/net/ipv6/mobile_ip6/mdetect.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mdetect.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mdetect.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,37 @@
++/*
++ * MIPL Mobile IPv6 Movement detection module header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _MDETECT_H
++#define _MDETECT_H
++
++struct handoff {
++ int home_address; /* Is the coa a home address */
++ int ifindex;
++ int plen;
++ struct in6_addr *coa;
++ struct in6_addr rtr_addr; /* Prefix or rtr address if coa is home address */
++};
++
++int mipv6_initialize_mdetect(void);
++
++int mipv6_shutdown_mdetect(void);
++
++int mipv6_get_care_of_address(struct in6_addr *homeaddr, struct in6_addr *coa);
++
++int mipv6_mdet_del_if(int ifindex);
++
++int mipv6_mdet_finalize_ho(const struct in6_addr *coa, const int ifindex);
++
++void mipv6_mdet_retrigger_ho(void);
++
++void mipv6_mdet_set_curr_rtr_reachable(int reachable);
++
++#endif /* _MDETECT_H */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mipv6_icmp.c linux-2.4.25/net/ipv6/mobile_ip6/mipv6_icmp.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mipv6_icmp.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mipv6_icmp.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,342 @@
++/**
++ * Generic icmp routines
++ *
++ * Authors:
++ * Jaakko Laine <medved@iki.fi>,
++ * Ville Nuorvala <vnuorval@tcs.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/config.h>
++#include <linux/icmpv6.h>
++#include <net/checksum.h>
++#include <net/ipv6.h>
++#include <net/ip6_route.h>
++#include <net/mipv6.h>
++#include <net/mipglue.h>
++
++#include "debug.h"
++#include "bcache.h"
++#include "mipv6_icmp.h"
++#include "config.h"
++
++struct mipv6_icmpv6_msg {
++ struct icmp6hdr icmph;
++ __u8 *data;
++ struct in6_addr *daddr;
++ int len;
++ __u32 csum;
++};
++
++#define MIPV6_ICMP_HOP_LIMIT 64
++
++static struct socket *mipv6_icmpv6_socket = NULL;
++static __u16 identifier = 0;
++
++int mipv6_icmpv6_no_rcv(struct sk_buff *skb)
++{
++ return 0;
++}
++
++static int mipv6_icmpv6_xmit_holder = -1;
++
++static int mipv6_icmpv6_xmit_lock_bh(void)
++{
++ if (!spin_trylock(&mipv6_icmpv6_socket->sk->lock.slock)) {
++ if (mipv6_icmpv6_xmit_holder == smp_processor_id())
++ return -EAGAIN;
++ spin_lock(&mipv6_icmpv6_socket->sk->lock.slock);
++ }
++ mipv6_icmpv6_xmit_holder = smp_processor_id();
++ return 0;
++}
++
++static __inline__ int mipv6_icmpv6_xmit_lock(void)
++{
++ int ret;
++ local_bh_disable();
++ ret = mipv6_icmpv6_xmit_lock_bh();
++ if (ret)
++ local_bh_enable();
++ return ret;
++}
++
++static void mipv6_icmpv6_xmit_unlock_bh(void)
++{
++ mipv6_icmpv6_xmit_holder = -1;
++ spin_unlock(&mipv6_icmpv6_socket->sk->lock.slock);
++}
++
++static __inline__ void mipv6_icmpv6_xmit_unlock(void)
++{
++ mipv6_icmpv6_xmit_unlock_bh();
++ local_bh_enable();
++}
++
++
++/**
++ * mipv6_icmpv6_dest_unreach - Destination Unreachable ICMP error message handler
++ * @skb: buffer containing ICMP error message
++ *
++ * Special Mobile IPv6 ICMP handling. If Correspondent Node receives
++ * persistent ICMP Destination Unreachable messages for a destination
++ * in its Binding Cache, the binding should be deleted. See draft
++ * section 8.8.
++ **/
++static int mipv6_icmpv6_rcv_dest_unreach(struct sk_buff *skb)
++{
++ struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw;
++ struct ipv6hdr *ipv6h = (struct ipv6hdr *) (icmph + 1);
++ int left = (skb->tail - skb->h.raw) - sizeof(*icmph)- sizeof(ipv6h);
++ struct ipv6_opt_hdr *eh;
++ struct rt2_hdr *rt2h = NULL;
++ struct in6_addr *daddr = &ipv6h->daddr;
++ struct in6_addr *saddr = &ipv6h->saddr;
++ int hdrlen, nexthdr = ipv6h->nexthdr;
++ struct mipv6_bce bce;
++ DEBUG_FUNC();
++
++ eh = (struct ipv6_opt_hdr *) (ipv6h + 1);
++
++ while (left > 0) {
++ if (nexthdr != NEXTHDR_HOP && nexthdr != NEXTHDR_DEST &&
++ nexthdr != NEXTHDR_ROUTING)
++ return 0;
++
++ hdrlen = ipv6_optlen(eh);
++ if (hdrlen > left)
++ return 0;
++
++ if (nexthdr == NEXTHDR_ROUTING) {
++ struct ipv6_rt_hdr *rth = (struct ipv6_rt_hdr *) eh;
++
++ if (rth->type == IPV6_SRCRT_TYPE_2) {
++ if (hdrlen != sizeof(struct rt2_hdr))
++ return 0;
++
++ rt2h = (struct rt2_hdr *) rth;
++
++ if (rt2h->rt_hdr.segments_left > 0)
++ daddr = &rt2h->addr;
++ break;
++ }
++ }
++ /* check for home address option in case this node is a MN */
++ if (nexthdr == NEXTHDR_DEST) {
++ __u8 *raw = (__u8 *) eh;
++ __u16 i = 2;
++ while (1) {
++ struct mipv6_dstopt_homeaddr *hao;
++
++ if (i + sizeof (*hao) > hdrlen)
++ break;
++
++ hao = (struct mipv6_dstopt_homeaddr *) &raw[i];
++
++ if (hao->type == MIPV6_TLV_HOMEADDR &&
++ hao->length == sizeof(struct in6_addr)) {
++ saddr = &hao->addr;
++ break;
++ }
++ if (hao->type)
++ i += hao->length + 2;
++ else
++ i++;
++ }
++
++ }
++ nexthdr = eh->nexthdr;
++ eh = (struct ipv6_opt_hdr *) ((u8 *) eh + hdrlen);
++ left -= hdrlen;
++ }
++ if (rt2h == NULL) return 0;
++
++ if (mipv6_bcache_get(daddr, saddr, &bce) == 0 && !(bce.flags&HOME_REGISTRATION)) {
++ /* A primitive algorithm for detecting persistent ICMP destination unreachable messages */
++ if (bce.destunr_count &&
++ time_after(jiffies,
++ bce.last_destunr + MIPV6_DEST_UNR_IVAL*HZ))
++ bce.destunr_count = 0;
++
++ bce.destunr_count++;
++
++ mipv6_bcache_icmp_err(daddr, saddr, bce.destunr_count);
++
++ if (bce.destunr_count > MIPV6_MAX_DESTUNREACH && mipv6_bcache_delete(daddr, saddr, CACHE_ENTRY) == 0) {
++ DEBUG(DBG_INFO, "Deleted bcache entry "
++ "%x:%x:%x:%x:%x:%x:%x:%x "
++ "%x:%x:%x:%x:%x:%x:%x:%x (reason: "
++ "%d dest unreachables) ",
++ NIPV6ADDR(daddr), NIPV6ADDR(saddr), bce.destunr_count);
++ }
++ }
++ return 0;
++}
++
++static int mipv6_icmpv6_getfrag(const void *data, struct in6_addr *saddr,
++ char *buff, unsigned int offset,
++ unsigned int len)
++{
++ struct mipv6_icmpv6_msg *msg = (struct mipv6_icmpv6_msg *) data;
++ struct icmp6hdr *icmph;
++ __u32 csum;
++
++ if (offset) {
++ msg->csum = csum_partial_copy_nocheck(msg->data + offset -
++ sizeof(*icmph), buff,
++ len, msg->csum);
++ return 0;
++ }
++
++ csum = csum_partial_copy_nocheck((__u8 *) &msg->icmph, buff,
++ sizeof(*icmph), msg->csum);
++
++ csum = csum_partial_copy_nocheck(msg->data, buff + sizeof(*icmph),
++ len - sizeof(*icmph), csum);
++
++ icmph = (struct icmp6hdr *) buff;
++
++ icmph->icmp6_cksum = csum_ipv6_magic(saddr, msg->daddr, msg->len,
++ IPPROTO_ICMPV6, csum);
++ return 0;
++}
++
++/**
++ * mipv6_icmpv6_send - generic icmpv6 message send
++ * @daddr: destination address
++ * @saddr: source address
++ * @type: icmp type
++ * @code: icmp code
++ * @id: packet identifier. If null, uses internal counter to get new id
++ * @data: packet data
++ * @datalen: length of data in bytes
++ */
++void mipv6_icmpv6_send(struct in6_addr *daddr, struct in6_addr *saddr, int type,
++ int code, __u16 *id, __u16 flags, void *data, int datalen)
++{
++ struct sock *sk = mipv6_icmpv6_socket->sk;
++ struct flowi fl;
++ struct mipv6_icmpv6_msg msg;
++
++ DEBUG_FUNC();
++
++ fl.proto = IPPROTO_ICMPV6;
++ fl.fl6_dst = daddr;
++ fl.fl6_src = saddr;
++ fl.fl6_flowlabel = 0;
++ fl.uli_u.icmpt.type = type;
++ fl.uli_u.icmpt.code = code;
++
++ msg.icmph.icmp6_type = type;
++ msg.icmph.icmp6_code = code;
++ msg.icmph.icmp6_cksum = 0;
++
++ if (id)
++ msg.icmph.icmp6_identifier = htons(*id);
++ else
++ msg.icmph.icmp6_identifier = htons(identifier++);
++
++ msg.icmph.icmp6_sequence = htons(flags);
++ msg.data = data;
++ msg.csum = 0;
++ msg.len = datalen + sizeof(struct icmp6hdr);
++ msg.daddr = daddr;
++
++ if (mipv6_icmpv6_xmit_lock())
++ return;
++
++ ip6_build_xmit(sk, mipv6_icmpv6_getfrag, &msg, &fl, msg.len, NULL, -1,
++ MSG_DONTWAIT);
++
++ ICMP6_INC_STATS_BH(Icmp6OutMsgs);
++ mipv6_icmpv6_xmit_unlock();
++}
++
++/**
++ * icmp6_rcv - ICMPv6 receive and multiplex
++ * @skb: buffer containing ICMP message
++ *
++ * Generic ICMPv6 receive function to multiplex messages to approriate
++ * handlers. Only used for ICMP messages with special handling in
++ * Mobile IPv6.
++ **/
++static void icmp6_rcv(struct sk_buff *skb)
++{
++ struct icmp6hdr *hdr;
++
++ if (skb_is_nonlinear(skb) &&
++ skb_linearize(skb, GFP_ATOMIC) != 0) {
++ kfree_skb(skb);
++ return;
++ }
++ __skb_push(skb, skb->data-skb->h.raw);
++
++ hdr = (struct icmp6hdr *) skb->h.raw;
++
++ switch (hdr->icmp6_type) {
++ case ICMPV6_DEST_UNREACH:
++ mipv6_icmpv6_rcv_dest_unreach(skb);
++ break;
++
++ case ICMPV6_PARAMPROB:
++ mip6_fn.icmpv6_paramprob_rcv(skb);
++ break;
++
++ case MIPV6_DHAAD_REPLY:
++ mip6_fn.icmpv6_dhaad_rep_rcv(skb);
++ break;
++
++ case MIPV6_PREFIX_ADV:
++ mip6_fn.icmpv6_pfxadv_rcv(skb);
++ break;
++
++ case MIPV6_DHAAD_REQUEST:
++ mip6_fn.icmpv6_dhaad_req_rcv(skb);
++ break;
++
++ case MIPV6_PREFIX_SOLICIT:
++ mip6_fn.icmpv6_pfxsol_rcv(skb);
++ break;
++ }
++}
++
++int mipv6_icmpv6_init(void)
++{
++ struct sock *sk;
++ int err;
++
++ if ((mipv6_icmpv6_socket = sock_alloc()) == NULL) {
++ DEBUG(DBG_ERROR, "Cannot allocate mipv6_icmpv6_socket");
++ return -1;
++ }
++ mipv6_icmpv6_socket->type = SOCK_RAW;
++
++ if ((err = sock_create(PF_INET6, SOCK_RAW, IPPROTO_ICMP,
++ &mipv6_icmpv6_socket)) < 0) {
++ DEBUG(DBG_ERROR, "Cannot initialize mipv6_icmpv6_socket");
++ sock_release(mipv6_icmpv6_socket);
++ mipv6_icmpv6_socket = NULL; /* For safety */
++ return err;
++ }
++ sk = mipv6_icmpv6_socket->sk;
++ sk->allocation = GFP_ATOMIC;
++ sk->prot->unhash(sk);
++
++ /* Register our ICMP handler */
++ MIPV6_SETCALL(mipv6_icmp_rcv, icmp6_rcv);
++ return 0;
++}
++
++void mipv6_icmpv6_exit(void)
++{
++ MIPV6_RESETCALL(mipv6_icmp_rcv);
++ if (mipv6_icmpv6_socket)
++ sock_release(mipv6_icmpv6_socket);
++ mipv6_icmpv6_socket = NULL; /* For safety */
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mipv6_icmp.h linux-2.4.25/net/ipv6/mobile_ip6/mipv6_icmp.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mipv6_icmp.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mipv6_icmp.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,43 @@
++/*
++ * MIPL Mobile IPv6 ICMP send and receive prototypes
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _MIPV6_ICMP
++#define _MIPV6_ICMP
++
++#include <linux/config.h>
++#include <linux/in6.h>
++
++void mipv6_icmpv6_send(struct in6_addr *daddr, struct in6_addr *saddr,
++ int type, int code, __u16 *id, __u16 flags,
++ void *data, int datalen);
++
++void mipv6_icmpv6_send_dhaad_req(struct in6_addr *home_addr, int plen, __u16 dhaad_id);
++
++void mipv6_icmpv6_send_dhaad_rep(int ifindex, __u16 id, struct in6_addr *daddr);
++/* No handling */
++int mipv6_icmpv6_no_rcv(struct sk_buff *skb);
++
++/* Receive DHAAD Reply message */
++int mipv6_icmpv6_rcv_dhaad_rep(struct sk_buff *skb);
++/* Receive Parameter Problem message */
++int mipv6_icmpv6_rcv_paramprob(struct sk_buff *skb);
++/* Receive prefix advertisements */
++int mipv6_icmpv6_rcv_pfx_adv(struct sk_buff *skb);
++
++/* Receive DHAAD Request message */
++int mipv6_icmpv6_rcv_dhaad_req(struct sk_buff *skb);
++/* Receive prefix solicitations */
++int mipv6_icmpv6_rcv_pfx_sol(struct sk_buff *skb);
++
++int mipv6_icmpv6_init(void);
++void mipv6_icmpv6_exit(void);
++
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mipv6_icmp_ha.c linux-2.4.25/net/ipv6/mobile_ip6/mipv6_icmp_ha.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mipv6_icmp_ha.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mipv6_icmp_ha.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,158 @@
++/*
++ * Home Agent specific ICMP routines
++ *
++ * Authors:
++ * Antti Tuominen <ajtuomin@tml.hut.fi>
++ * Jaakko Laine <medved@iki.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/autoconf.h>
++#include <linux/sched.h>
++#include <net/ipv6.h>
++#include <net/addrconf.h>
++#include <net/ip6_route.h>
++#include <net/mipv6.h>
++
++#include "halist.h"
++#include "debug.h"
++#include "mipv6_icmp.h"
++//#include "prefix.h"
++
++/* Is this the easiest way of checking on
++ * which interface an anycast address is ?
++ */
++static int find_ac_dev(struct in6_addr *addr)
++{
++ int ifindex = 0;
++ struct net_device *dev;
++ read_lock(&dev_base_lock);
++ for (dev=dev_base; dev; dev=dev->next) {
++ if (ipv6_chk_acast_addr(dev, addr)) {
++ ifindex = dev->ifindex;
++ break;
++ }
++ }
++ read_unlock(&dev_base_lock);
++ return ifindex;
++}
++
++/**
++ * mipv6_icmpv6_send_dhaad_rep - Reply to DHAAD Request
++ * @ifindex: index of interface request was received from
++ * @id: request's identification number
++ * @daddr: requester's IPv6 address
++ *
++ * When Home Agent receives Dynamic Home Agent Address Discovery
++ * request, it replies with a list of home agents available on the
++ * home link.
++ */
++void mipv6_icmpv6_send_dhaad_rep(int ifindex, __u16 id, struct in6_addr *daddr)
++{
++ __u8 *data = NULL;
++ struct in6_addr home, *ha_addrs = NULL;
++ int addr_count, max_addrs, size = 0;
++
++ if (daddr == NULL)
++ return;
++
++ if (mipv6_ha_get_addr(ifindex, &home) < 0) {
++ DEBUG(DBG_INFO, "Not Home Agent in this interface");
++ return;
++ }
++
++ /* We send all available HA addresses, not exceeding a maximum
++ * number we can fit in a packet with minimum IPv6 MTU (to
++ * avoid fragmentation).
++ */
++ max_addrs = 76;
++ addr_count = mipv6_ha_get_pref_list(ifindex, &ha_addrs, max_addrs);
++
++ if (addr_count < 0) return;
++
++ if (addr_count != 0 && ha_addrs == NULL) {
++ DEBUG(DBG_ERROR, "addr_count = %d but return no addresses",
++ addr_count);
++ return;
++ }
++ data = (u8 *)ha_addrs;
++
++ size = addr_count * sizeof(struct in6_addr);
++
++ mipv6_icmpv6_send(daddr, &home, MIPV6_DHAAD_REPLY,
++ 0, &id, 0, data, size);
++ if (ha_addrs) {
++ data = NULL;
++ kfree(ha_addrs);
++ }
++}
++
++/**
++ * mipv6_icmpv6_dhaad_req - Home Agent Address Discovery Request ICMP handler
++ * @skb: buffer containing ICMP information message
++ *
++ * Special Mobile IPv6 ICMP message. Handles Dynamic Home Agent
++ * Address Discovery Request messages.
++ **/
++int mipv6_icmpv6_rcv_dhaad_req(struct sk_buff *skb)
++{
++ struct icmp6hdr *phdr = (struct icmp6hdr *) skb->h.raw;
++ struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
++ struct in6_addr *daddr = &skb->nh.ipv6h->daddr;
++ __u16 identifier;
++ int ifindex = 0;
++
++ DEBUG_FUNC();
++
++ /* Invalid packet checks. */
++ if (phdr->icmp6_code != 0)
++ return 0;
++
++ identifier = ntohs(phdr->icmp6_identifier);
++
++ /*
++ * Make sure we have the right ifindex (if the
++ * req came through another interface.
++ */
++ ifindex = find_ac_dev(daddr);
++ if (ifindex == 0) {
++ DEBUG(DBG_WARNING, "received dhaad request to anycast address %x:%x:%x:%x:%x:%x:%x:%x"
++ " on which prefix we are not HA",
++ NIPV6ADDR(daddr));
++ return 0;
++ }
++
++ /*
++ * send reply with list
++ */
++ mipv6_icmpv6_send_dhaad_rep(ifindex, identifier, saddr);
++ return 1;
++}
++#if 0
++/**
++ * mipv6_icmpv6_handle_pfx_sol - handle prefix solicitations
++ * @skb: sk_buff including the icmp6 message
++ */
++int mipv6_icmpv6_rcv_pfx_sol(struct sk_buff *skb)
++{
++ struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
++ struct in6_addr *daddr = &skb->nh.ipv6h->daddr;
++ struct inet6_ifaddr *ifp;
++
++ DEBUG_FUNC();
++
++ if (!(ifp = ipv6_get_ifaddr(daddr, NULL)))
++ return -1;
++
++ in6_ifa_put(ifp);
++ mipv6_pfx_cancel_send(saddr, -1);
++
++ return 0;
++}
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mipv6_icmp_mn.c linux-2.4.25/net/ipv6/mobile_ip6/mipv6_icmp_mn.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mipv6_icmp_mn.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mipv6_icmp_mn.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,273 @@
++/*
++ * Mobile Node specific ICMP routines
++ *
++ * Authors:
++ * Antti Tuominen <ajtuomin@tml.hut.fi>
++ * Jaakko Laine <medved@iki.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/sched.h>
++#include <net/ipv6.h>
++#include <net/ip6_route.h>
++#include <net/addrconf.h>
++#include <net/mipv6.h>
++
++#include "mn.h"
++#include "bul.h"
++#include "mdetect.h"
++#include "debug.h"
++#include "mipv6_icmp.h"
++#include "util.h"
++//#include "prefix.h"
++
++#define INFINITY 0xffffffff
++
++/**
++ * mipv6_icmpv6_paramprob - Parameter Problem ICMP error message handler
++ * @skb: buffer containing ICMP error message
++ *
++ * Special Mobile IPv6 ICMP handling. If Mobile Node receives ICMP
++ * Parameter Problem message when using a Home Address Option,
++ * offending node should be logged and error message dropped. If
++ * error is received because of a Binding Update, offending node
++ * should be recorded in Binding Update List and no more Binding
++ * Updates should be sent to this destination. See RFC 3775 section
++ * 10.15.
++ **/
++int mipv6_icmpv6_rcv_paramprob(struct sk_buff *skb)
++{
++ struct icmp6hdr *phdr = (struct icmp6hdr *) skb->h.raw;
++ struct in6_addr *saddr = skb ? &skb->nh.ipv6h->saddr : NULL;
++ struct in6_addr *daddr = skb ? &skb->nh.ipv6h->daddr : NULL;
++ struct ipv6hdr *hdr = (struct ipv6hdr *) (phdr + 1);
++ int ulen = (skb->tail - (unsigned char *) (phdr + 1));
++
++ int errptr;
++ __u8 *off_octet;
++
++ DEBUG_FUNC();
++
++ /* We only handle code 1 & 2 messages. */
++ if (phdr->icmp6_code != ICMPV6_UNK_NEXTHDR &&
++ phdr->icmp6_code != ICMPV6_UNK_OPTION)
++ return 0;
++
++ /* Find offending octet in the original packet. */
++ errptr = ntohl(phdr->icmp6_pointer);
++
++ /* There is not enough of the original packet left to figure
++ * out what went wrong. Bail out. */
++ if (ulen <= errptr)
++ return 0;
++
++ off_octet = ((__u8 *) hdr + errptr);
++ DEBUG(DBG_INFO, "Parameter problem: offending octet %d [0x%2x]",
++ errptr, *off_octet);
++
++ /* If CN did not understand Mobility Header, set BUL entry to
++ * ACK_ERROR so no further BUs are sumbitted to this CN. */
++ if (phdr->icmp6_code == ICMPV6_UNK_NEXTHDR &&
++ *off_octet == IPPROTO_MOBILITY) {
++ struct bul_inval_args args;
++ args.all_rr_states = 1;
++ args.cn = saddr;
++ args.mn = daddr;
++ write_lock(&bul_lock);
++ mipv6_bul_iterate(mn_bul_invalidate, &args);
++ write_unlock(&bul_lock);
++ }
++
++ /* If CN did not understand Home Address Option, we log an
++ * error and discard the error message. */
++ if (phdr->icmp6_code == ICMPV6_UNK_OPTION &&
++ *off_octet == MIPV6_TLV_HOMEADDR) {
++ DEBUG(DBG_WARNING, "Correspondent node does not "
++ "implement Home Address Option receipt.");
++ return 1;
++ }
++ return 0;
++}
++
++/**
++ * mipv6_mn_dhaad_send_req - Send DHAAD Request to home network
++ * @home_addr: address to do DHAAD for
++ * @plen: prefix length for @home_addr
++ *
++ * Send Dynamic Home Agent Address Discovery Request to the Home
++ * Agents anycast address in the nodes home network.
++ **/
++void
++mipv6_icmpv6_send_dhaad_req(struct in6_addr *home_addr, int plen, __u16 dhaad_id)
++{
++ struct in6_addr ha_anycast;
++ struct in6_addr careofaddr;
++
++ if (mipv6_get_care_of_address(home_addr, &careofaddr) < 0) {
++ DEBUG(DBG_WARNING, "Could not get node's Care-of Address");
++ return;
++ }
++
++ if (mipv6_ha_anycast(&ha_anycast, home_addr, plen) < 0) {
++ DEBUG(DBG_WARNING,
++ "Could not get Home Agent Anycast address for home address %x:%x.%x:%x:%x:%x:%x:%x/%d",
++ NIPV6ADDR(home_addr), plen);
++ return;
++ }
++
++ mipv6_icmpv6_send(&ha_anycast, &careofaddr, MIPV6_DHAAD_REQUEST, 0,
++ &dhaad_id, 0, NULL, 0);
++
++}
++
++/**
++ * mipv6_icmpv6_dhaad_rep - Home Agent Address Discovery Reply ICMP handler
++ * @skb: buffer containing ICMP information message
++ *
++ * Special Mobile IPv6 ICMP message. Handles Dynamic Home Agent
++ * Address Discovery Reply messages.
++ **/
++int mipv6_icmpv6_rcv_dhaad_rep(struct sk_buff *skb)
++{
++ struct icmp6hdr *phdr = (struct icmp6hdr *) skb->h.raw;
++ struct in6_addr *address;
++ struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
++ __u16 identifier;
++ int ulen = (skb->tail - (unsigned char *) ((__u32 *) phdr + 2));
++ int i;
++ struct in6_addr home_addr, coa;
++ struct in6_addr *first_ha = NULL;
++ struct mn_info *minfo;
++ int n_addr = ulen / sizeof(struct in6_addr);
++
++ DEBUG_FUNC();
++
++ /* Invalid packet checks. */
++ if (ulen % sizeof(struct in6_addr) != 0)
++ return 0;
++
++ if (phdr->icmp6_code != 0)
++ return 0;
++
++ identifier = ntohs(phdr->icmp6_identifier);
++ if (ulen > 0) {
++ address = (struct in6_addr *) ((__u32 *) phdr + 2);
++ } else {
++ address = saddr;
++ n_addr = 1;
++ }
++
++ /* receive list of home agent addresses
++ * add to home agents list
++ */
++ DEBUG(DBG_INFO, "DHAAD: got %d home agents", n_addr);
++
++ first_ha = address;
++
++ /* lookup H@ with identifier */
++ read_lock(&mn_info_lock);
++ minfo = mipv6_mninfo_get_by_id(identifier);
++ if (!minfo) {
++ read_unlock(&mn_info_lock);
++ DEBUG(DBG_INFO, "no mninfo with id %d",
++ identifier);
++ return 0;
++ }
++ spin_lock(&minfo->lock);
++
++ /* Logic:
++ * 1. if old HA on list, prefer it
++ * 2. otherwise first HA on list prefered
++ */
++ for (i = 0; i < n_addr; i++) {
++ DEBUG(DBG_INFO, "HA[%d] %x:%x:%x:%x:%x:%x:%x:%x",
++ i, NIPV6ADDR(address));
++ if (ipv6_addr_cmp(&minfo->ha, address) == 0) {
++ spin_unlock(&minfo->lock);
++ read_unlock(&mn_info_lock);
++ return 0;
++ }
++ address++;
++ }
++ ipv6_addr_copy(&minfo->ha, first_ha);
++ spin_unlock(&minfo->lock);
++ ipv6_addr_copy(&home_addr, &minfo->home_addr);
++ read_unlock(&mn_info_lock);
++
++ mipv6_get_care_of_address(&home_addr, &coa);
++ init_home_registration(&home_addr, &coa);
++
++ return 1;
++}
++#if 0
++/**
++ * mipv6_icmpv6_handle_pfx_adv - handle prefix advertisements
++ * @skb: sk_buff including the icmp6 message
++ */
++int mipv6_icmpv6_rcv_pfx_adv(struct sk_buff *skb)
++{
++ struct icmp6hdr *hdr = (struct icmp6hdr *) skb->h.raw;
++ struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
++ struct in6_addr *daddr = &skb->nh.ipv6h->daddr;
++ __u8 *opt = (__u8 *) (hdr + 1);
++ int optlen = (skb->tail - opt);
++ unsigned long min_expire = INFINITY;
++ struct inet6_skb_parm *parm = (struct inet6_skb_parm *) skb->cb;
++
++ DEBUG_FUNC();
++
++ while (optlen > 0) {
++ int len = opt[1] << 3;
++ if (len == 0)
++ goto set_timer;
++
++ if (opt[0] == ND_OPT_PREFIX_INFO) {
++ int ifindex;
++ unsigned long expire;
++ struct prefix_info *pinfo =
++ (struct prefix_info *) opt;
++ struct net_device *dev;
++ struct mn_info *mninfo;
++
++ read_lock(&mn_info_lock);
++ mninfo = mipv6_mninfo_get_by_ha(saddr);
++ if (mninfo == NULL) {
++ ifindex = 0;
++ } else {
++ spin_lock(&mninfo->lock);
++ ifindex = mninfo->ifindex;
++ spin_unlock(&mninfo->lock);
++ mninfo = NULL;
++ }
++ read_unlock(&mn_info_lock);
++
++ if (!(dev = dev_get_by_index(ifindex))) {
++ DEBUG(DBG_WARNING, "Cannot find device by index %d", parm->iif);
++ goto nextopt;
++ }
++
++ expire = ntohl(pinfo->valid);
++ expire = expire == 0 ? INFINITY : expire;
++
++ min_expire = expire < min_expire ? expire : min_expire;
++
++ dev_put(dev);
++ }
++
++nextopt:
++ optlen -= len;
++ opt += len;
++ }
++
++set_timer:
++
++ mipv6_pfx_add_home(parm->iif, saddr, daddr, min_expire);
++ return 0;
++}
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mn.c linux-2.4.25/net/ipv6/mobile_ip6/mn.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mn.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mn.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,1521 @@
++/*
++ * Mobile-node functionality
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#include <linux/autoconf.h>
++#include <linux/sched.h>
++#include <linux/ipv6.h>
++#include <linux/net.h>
++#include <linux/init.h>
++#include <linux/skbuff.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_arp.h>
++#include <linux/ipsec.h>
++#include <linux/notifier.h>
++#include <linux/list.h>
++#include <linux/route.h>
++#include <linux/netfilter.h>
++#include <linux/netfilter_ipv6.h>
++#include <linux/tqueue.h>
++#include <linux/proc_fs.h>
++
++#include <asm/uaccess.h>
++
++#include <net/ipv6.h>
++#include <net/addrconf.h>
++#include <net/neighbour.h>
++#include <net/ndisc.h>
++#include <net/ip6_route.h>
++#include <net/mipglue.h>
++
++#include "util.h"
++#include "mdetect.h"
++#include "bul.h"
++#include "mobhdr.h"
++#include "debug.h"
++#include "mn.h"
++#include "mipv6_icmp.h"
++#include "multiaccess_ctl.h"
++//#include "prefix.h"
++#include "tunnel_mn.h"
++#include "stats.h"
++#include "config.h"
++
++#define MIPV6_BUL_SIZE 128
++
++static LIST_HEAD(mn_info_list);
++
++/* Lock for list of MN infos */
++rwlock_t mn_info_lock = RW_LOCK_UNLOCKED;
++
++static spinlock_t ifrh_lock = SPIN_LOCK_UNLOCKED;
++
++struct ifr_holder {
++ struct list_head list;
++ struct in6_ifreq ifr;
++ int old_ifi;
++ struct handoff *ho;
++};
++
++LIST_HEAD(ifrh_list);
++
++static struct tq_struct mv_home_addr_task;
++
++/* Determines whether manually configured home addresses are preferred as
++ * source addresses over dynamically configured ones
++ */
++int mipv6_use_preconfigured_hoaddr = 1;
++
++/* Determines whether home addresses, which are at home are preferred as
++ * source addresses over other home addresses
++ */
++int mipv6_use_topol_corr_hoaddr = 0;
++
++static spinlock_t icmpv6_id_lock = SPIN_LOCK_UNLOCKED;
++static __u16 icmpv6_id = 0;
++
++static inline __u16 mipv6_get_dhaad_id(void)
++{
++ __u16 ret;
++ spin_lock_bh(&icmpv6_id_lock);
++ ret = ++icmpv6_id;
++ spin_unlock_bh(&icmpv6_id_lock);
++ return ret;
++}
++
++/**
++ * mipv6_mninfo_get_by_home - Returns mn_info for a home address
++ * @haddr: home address of MN
++ *
++ * Returns mn_info on success %NULL otherwise. Caller MUST hold
++ * @mn_info_lock (read or write).
++ **/
++struct mn_info *mipv6_mninfo_get_by_home(struct in6_addr *haddr)
++{
++ struct list_head *lh;
++ struct mn_info *minfo;
++
++ DEBUG_FUNC();
++
++ if (!haddr)
++ return NULL;
++
++ list_for_each(lh, &mn_info_list) {
++ minfo = list_entry(lh, struct mn_info, list);
++ spin_lock(&minfo->lock);
++ if (!ipv6_addr_cmp(&minfo->home_addr, haddr)) {
++ spin_unlock(&minfo->lock);
++ return minfo;
++ }
++ spin_unlock(&minfo->lock);
++ }
++ return NULL;
++}
++
++/**
++ * mipv6_mninfo_get_by_ha - Lookup mn_info with Home Agent address
++ * @home_agent: Home Agent address
++ *
++ * Searches for a mn_info entry with @ha set to @home_agent. You MUST
++ * hold @mn_info_lock when calling this function. Returns pointer to
++ * mn_info entry or %NULL on failure.
++ **/
++struct mn_info *mipv6_mninfo_get_by_ha(struct in6_addr *home_agent)
++{
++ struct list_head *lh;
++ struct mn_info *minfo;
++
++ if (!home_agent)
++ return NULL;
++
++ list_for_each(lh, &mn_info_list) {
++ minfo = list_entry(lh, struct mn_info, list);
++ spin_lock(&minfo->lock);
++ if (!ipv6_addr_cmp(&minfo->ha, home_agent)) {
++ spin_unlock(&minfo->lock);
++ return minfo;
++ }
++ spin_unlock(&minfo->lock);
++ }
++ return NULL;
++}
++
++/**
++ * mipv6_mninfo_get_by_id - Lookup mn_info with id
++ * @id: DHAAD identifier
++ *
++ * Searches for a mn_info entry with @dhaad_id set to @id. You MUST
++ * hold @mn_info_lock when calling this function. Returns pointer to
++ * mn_info entry or %NULL on failure.
++ **/
++struct mn_info *mipv6_mninfo_get_by_id(unsigned short id)
++{
++ struct list_head *lh;
++ struct mn_info *minfo = 0;
++
++ list_for_each(lh, &mn_info_list) {
++ minfo = list_entry(lh, struct mn_info, list);
++ spin_lock(&minfo->lock);
++ if (minfo->dhaad_id == id) {
++ spin_unlock(&minfo->lock);
++ return minfo;
++ }
++ spin_unlock(&minfo->lock);
++ }
++ return NULL;
++}
++
++/**
++ * mipv6_mninfo_add - Adds a new home info for MN
++ * @ifindex: Interface for home address
++ * @home_addr: Home address of MN, must be set
++ * @plen: prefix length of the home address, must be set
++ * @isathome : home address at home
++ * @lifetime: lifetime of the home address, 0 is infinite
++ * @ha: home agent for the home address
++ * @ha_plen: prefix length of home agent's address, can be zero
++ * @ha_lifetime: Lifetime of the home address, 0 is infinite
++ *
++ * The function adds a new home info entry for MN, allowing it to
++ * register the home address with the home agent. Starts home
++ * registration process. If @ha is %ADDRANY, DHAAD is performed to
++ * find a home agent. Returns 0 on success, a negative value
++ * otherwise. Caller MUST NOT hold @mn_info_lock or
++ * @addrconf_hash_lock.
++ **/
++void mipv6_mninfo_add(int ifindex, struct in6_addr *home_addr, int plen,
++ int isathome, unsigned long lifetime, struct in6_addr *ha,
++ int ha_plen, unsigned long ha_lifetime, int man_conf)
++{
++ struct mn_info *minfo;
++ struct in6_addr coa;
++
++ DEBUG_FUNC();
++
++ write_lock_bh(&mn_info_lock);
++ if ((minfo = mipv6_mninfo_get_by_home(home_addr)) != NULL){
++ DEBUG(1, "MN info already exists");
++ write_unlock_bh(&mn_info_lock);
++ return;
++ }
++ minfo = kmalloc(sizeof(struct mn_info), GFP_ATOMIC);
++ if (!minfo) {
++ write_unlock_bh(&mn_info_lock);
++ return;
++ }
++ memset(minfo, 0, sizeof(struct mn_info));
++ spin_lock_init(&minfo->lock);
++
++
++ ipv6_addr_copy(&minfo->home_addr, home_addr);
++
++ if (ha)
++ ipv6_addr_copy(&minfo->ha, ha);
++ if (ha_plen < 128 && ha_plen > 0)
++ minfo->home_plen = ha_plen;
++ else minfo->home_plen = 64;
++
++ minfo->ifindex_user = ifindex; /* Ifindex for tunnel interface */
++ minfo->ifindex = ifindex; /* Interface on which home address is currently conf'd */
++ /* TODO: we should get home address lifetime from somewhere */
++ /* minfo->home_addr_expires = jiffies + lifetime * HZ; */
++
++ /* manual configuration flag cannot be unset by dynamic updates
++ * from prefix advertisements
++ */
++ if (!minfo->man_conf) minfo->man_conf = man_conf;
++ minfo->is_at_home = isathome;
++
++ list_add(&minfo->list, &mn_info_list);
++ write_unlock_bh(&mn_info_lock);
++
++ if (mipv6_get_care_of_address(home_addr, &coa) == 0)
++ init_home_registration(home_addr, &coa);
++}
++
++/**
++ * mipv6_mninfo_del - Delete home info for MN
++ * @home_addr : Home address or prefix
++ * @del_dyn_only : Delete only dynamically created home entries
++ *
++ * Deletes every mn_info entry that matches the first plen bits of
++ * @home_addr. Returns number of deleted entries on success and a
++ * negative value otherwise. Caller MUST NOT hold @mn_info_lock.
++ **/
++int mipv6_mninfo_del(struct in6_addr *home_addr, int del_dyn_only)
++{
++ struct list_head *lh, *next;
++ struct mn_info *minfo;
++ int ret = -1;
++ if (!home_addr)
++ return -1;
++
++ write_lock(&mn_info_lock);
++
++ list_for_each_safe(lh, next, &mn_info_list) {
++ minfo = list_entry(lh, struct mn_info, list);
++ if (ipv6_addr_cmp(&minfo->home_addr, home_addr) == 0
++ && ((!minfo->man_conf && del_dyn_only) || !del_dyn_only)){
++ list_del(&minfo->list);
++ kfree(minfo);
++ ret++;
++ }
++ }
++ write_unlock(&mn_info_lock);
++ return ret;
++}
++
++void mipv6_mn_set_home(int ifindex, struct in6_addr *homeaddr, int plen,
++ struct in6_addr *homeagent, int ha_plen)
++{
++ mipv6_mninfo_add(ifindex, homeaddr, plen, 0, 0,
++ homeagent, ha_plen, 0, 1);
++}
++
++static int skip_dad(struct in6_addr *addr)
++{
++ struct mn_info *minfo;
++ int ret = 0;
++
++ if (addr == NULL) {
++ DEBUG(DBG_CRITICAL, "Null argument");
++ return 0;
++ }
++ read_lock_bh(&mn_info_lock);
++ if ((minfo = mipv6_mninfo_get_by_home(addr)) != NULL) {
++ if ((minfo->is_at_home != MN_NOT_AT_HOME) && (minfo->has_home_reg))
++ ret = 1;
++ DEBUG(DBG_INFO, "minfo->is_at_home = %d, minfo->has_home_reg = %d",
++ minfo->is_at_home, minfo->has_home_reg);
++ }
++ read_unlock_bh(&mn_info_lock);
++
++ return ret;
++}
++/**
++ * mipv6_mn_is_home_addr - Determines if addr is node's home address
++ * @addr: IPv6 address
++ *
++ * Returns 1 if addr is node's home address. Otherwise returns zero.
++ **/
++int mipv6_mn_is_home_addr(struct in6_addr *addr)
++{
++ int ret = 0;
++
++ if (addr == NULL) {
++ DEBUG(DBG_CRITICAL, "Null argument");
++ return -1;
++ }
++ read_lock_bh(&mn_info_lock);
++ if (mipv6_mninfo_get_by_home(addr))
++ ret = 1;
++ read_unlock_bh(&mn_info_lock);
++
++ return (ret);
++}
++
++/**
++ * mipv6_mn_is_at_home - determine if node is home for a home address
++ * @home_addr : home address of MN
++ *
++ * Returns 1 if home address in question is in the home network, 0
++ * otherwise. Caller MUST NOT not hold @mn_info_lock.
++ **/
++int mipv6_mn_is_at_home(struct in6_addr *home_addr)
++{
++ struct mn_info *minfo;
++ int ret = 0;
++ read_lock_bh(&mn_info_lock);
++ if ((minfo = mipv6_mninfo_get_by_home(home_addr)) != NULL) {
++ spin_lock(&minfo->lock);
++ ret = (minfo->is_at_home == MN_AT_HOME);
++ spin_unlock(&minfo->lock);
++ }
++ read_unlock_bh(&mn_info_lock);
++ return ret;
++}
++void mipv6_mn_set_home_reg(struct in6_addr *home_addr, int has_home_reg)
++{
++ struct mn_info *minfo;
++ read_lock_bh(&mn_info_lock);
++
++ if ((minfo = mipv6_mninfo_get_by_home(home_addr)) != NULL) {
++ spin_lock(&minfo->lock);
++ minfo->has_home_reg = has_home_reg;
++ spin_unlock(&minfo->lock);
++ }
++ read_unlock_bh(&mn_info_lock);
++}
++
++static int mn_inet6addr_event(
++ struct notifier_block *nb, unsigned long event, void *ptr)
++{
++ struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)ptr;
++
++ switch (event) {
++ case NETDEV_UP:
++ /* Is address a valid coa ?*/
++ if (!(ifp->flags & IFA_F_TENTATIVE))
++ mipv6_mdet_finalize_ho(&ifp->addr,
++ ifp->idev->dev->ifindex);
++ else if(skip_dad(&ifp->addr))
++ ifp->flags &= ~IFA_F_TENTATIVE;
++ break;
++ case NETDEV_DOWN:
++#if 0
++ /* This is useless with manually configured home
++ addresses, which will not expire
++ */
++ mipv6_mninfo_del(&ifp->addr, 0);
++#endif
++ break;
++
++ }
++
++ return NOTIFY_DONE;
++}
++
++struct notifier_block mipv6_mn_inet6addr_notifier = {
++ mn_inet6addr_event,
++ NULL,
++ 0 /* check if using zero is ok */
++};
++
++static void mipv6_get_saddr_hook(struct in6_addr *homeaddr)
++{
++ int found = 0, reiter = 0;
++ struct list_head *lh;
++ struct mn_info *minfo = NULL;
++ struct in6_addr coa;
++
++ read_lock_bh(&mn_info_lock);
++restart:
++ list_for_each(lh, &mn_info_list) {
++ minfo = list_entry(lh, struct mn_info, list);
++ if ((ipv6_addr_scope(homeaddr) != ipv6_addr_scope(&minfo->home_addr))
++ || ipv6_chk_addr(&minfo->home_addr, NULL) == 0)
++ continue;
++
++ spin_lock(&minfo->lock);
++ if (minfo->is_at_home == MN_AT_HOME || minfo->has_home_reg) {
++ if ((mipv6_use_topol_corr_hoaddr &&
++ minfo->is_at_home == MN_AT_HOME) ||
++ (mipv6_use_preconfigured_hoaddr &&
++ minfo->man_conf) ||
++ (!(mipv6_use_preconfigured_hoaddr ||
++ mipv6_use_topol_corr_hoaddr) || reiter)) {
++ spin_unlock(&minfo->lock);
++ ipv6_addr_copy(homeaddr, &minfo->home_addr);
++ found = 1;
++ break;
++ }
++ }
++ spin_unlock(&minfo->lock);
++ }
++ if (!found && !reiter) {
++ reiter = 1;
++ goto restart;
++ }
++
++ if (!found && minfo &&
++ !mipv6_get_care_of_address(&minfo->home_addr, &coa)) {
++ ipv6_addr_copy(homeaddr, &coa);
++ }
++ read_unlock_bh(&mn_info_lock);
++
++ DEBUG(DBG_DATADUMP, "Source address selection: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(homeaddr));
++ return;
++}
++
++static void mv_home_addr(void *arg)
++{
++ mm_segment_t oldfs;
++ int err = 0, new_if = 0;
++ struct list_head *lh, *next;
++ struct ifr_holder *ifrh;
++ LIST_HEAD(list);
++
++ DEBUG(DBG_INFO, "mipv6 move home address task");
++
++ spin_lock_bh(&ifrh_lock);
++ list_splice_init(&ifrh_list, &list);
++ spin_unlock_bh(&ifrh_lock);
++
++ oldfs = get_fs(); set_fs(KERNEL_DS);
++ list_for_each_safe(lh, next, &list) {
++ ifrh = list_entry(lh, struct ifr_holder, list);
++ if (ifrh->old_ifi) {
++ new_if = ifrh->ifr.ifr6_ifindex;
++ ifrh->ifr.ifr6_ifindex = ifrh->old_ifi;
++ err = addrconf_del_ifaddr(&ifrh->ifr);
++ ifrh->ifr.ifr6_ifindex = new_if;
++ if (err < 0)
++ DEBUG(DBG_WARNING, "removal of home address %x:%x:%x:%x:%x:%x:%x:%x from"
++ " old interface %d failed with status %d",
++ NIPV6ADDR(&ifrh->ifr.ifr6_addr), ifrh->old_ifi, err);
++ }
++ if(!err) {
++ err = addrconf_add_ifaddr(&ifrh->ifr);
++ }
++ if (ifrh->ho) {
++ DEBUG(DBG_INFO, "Calling mobile_node moved after moving home address to new if");
++ mipv6_mobile_node_moved(ifrh->ho);
++ }
++ list_del(&ifrh->list);
++ kfree(ifrh);
++ }
++ set_fs(oldfs);
++
++ if (err < 0)
++ DEBUG(DBG_WARNING, "adding of home address to a new interface %d failed %d", new_if, err);
++ else {
++ DEBUG(DBG_WARNING, "adding of home address to a new interface OK");
++ }
++}
++
++struct dhaad_halist {
++ struct list_head list;
++ struct in6_addr addr;
++ int retry;
++};
++
++/* clear all has from candidate list. do this when a new dhaad reply
++ * is received. */
++int mipv6_mn_flush_ha_candidate(struct list_head *ha)
++{
++ struct list_head *p, *tmp;
++ struct dhaad_halist *e;
++
++ list_for_each_safe(p, tmp, ha) {
++ e = list_entry(p, struct dhaad_halist, list);
++ list_del(p);
++ kfree(e);
++ e = NULL;
++ }
++ return 0;
++}
++
++/* add new ha to candidates. only done when dhaad reply is received. */
++int mipv6_mn_add_ha_candidate(struct list_head *ha, struct in6_addr *addr)
++{
++ struct dhaad_halist *e;
++
++ e = kmalloc(sizeof(*e), GFP_ATOMIC);
++ memset(e, 0, sizeof(*e));
++ ipv6_addr_copy(&e->addr, addr);
++
++ list_add_tail(&e->list, ha);
++ return 0;
++}
++
++#define MAX_RETRIES_PER_HA 3
++
++/* get next ha candidate. this is done when dhaad reply has been
++ * received and we want to register with the best available ha. */
++int mipv6_mn_get_ha_candidate(struct list_head *ha, struct in6_addr *addr)
++{
++ struct list_head *p;
++
++ list_for_each(p, ha) {
++ struct dhaad_halist *e;
++ e = list_entry(p, typeof(*e), list);
++ if (e->retry >= 0 && e->retry < MAX_RETRIES_PER_HA) {
++ ipv6_addr_copy(addr, &e->addr);
++ return 0;
++ }
++ }
++ return -1;
++}
++
++/* change candidate status. if registration with ha fails, we
++ * increase retry for ha candidate. if retry is >= 3 we set it to -1
++ * (failed), do get_ha_candidate() again */
++int mipv6_mn_try_ha_candidate(struct list_head *ha, struct in6_addr *addr)
++{
++ struct list_head *p;
++
++ list_for_each(p, ha) {
++ struct dhaad_halist *e;
++ e = list_entry(p, typeof(*e), list);
++ if (ipv6_addr_cmp(addr, &e->addr) == 0) {
++ if (e->retry >= MAX_RETRIES_PER_HA) e->retry = -1;
++ else if (e->retry >= 0) e->retry++;
++ return 0;
++ }
++ }
++ return -1;
++}
++
++/**
++ * mipv6_mn_get_bulifetime - Get lifetime for a binding update
++ * @home_addr: home address for BU
++ * @coa: care-of address for BU
++ * @flags: flags used for BU
++ *
++ * Returns maximum lifetime for BUs determined by the lifetime of
++ * care-of address and the lifetime of home address.
++ **/
++__u32 mipv6_mn_get_bulifetime(struct in6_addr *home_addr, struct in6_addr *coa,
++ __u8 flags)
++{
++ struct inet6_ifaddr *ifp_hoa, *ifp_coa;
++ __u32 lifetime = (flags & MIPV6_BU_F_HOME ?
++ HA_BU_DEF_LIFETIME : CN_BU_DEF_LIFETIME);
++
++ ifp_hoa = ipv6_get_ifaddr(home_addr, NULL);
++ if(!ifp_hoa) {
++ DEBUG(DBG_INFO, "home address missing");
++ return 0;
++ }
++ if (!(ifp_hoa->flags & IFA_F_PERMANENT)){
++ if (ifp_hoa->valid_lft)
++ lifetime = min_t(__u32, lifetime, ifp_hoa->valid_lft);
++ else
++ DEBUG(DBG_ERROR, "Zero lifetime for home address");
++ }
++ in6_ifa_put(ifp_hoa);
++
++ ifp_coa = ipv6_get_ifaddr(coa, NULL);
++ if (!ifp_coa) {
++ DEBUG(DBG_INFO, "care-of address missing");
++ return 0;
++ }
++ if (!(ifp_coa->flags & IFA_F_PERMANENT)) {
++ if(ifp_coa->valid_lft)
++ lifetime = min_t(__u32, lifetime, ifp_coa->valid_lft);
++ else
++ DEBUG(DBG_ERROR,
++ "Zero lifetime for care-of address");
++ }
++ in6_ifa_put(ifp_coa);
++
++ DEBUG(DBG_INFO, "Lifetime for binding is %ld", lifetime);
++ return lifetime;
++}
++
++static int
++mipv6_mn_tnl_rcv_send_bu_hook(struct ip6_tnl *t, struct sk_buff *skb)
++{
++ struct ipv6hdr *inner;
++ struct ipv6hdr *outer = skb->nh.ipv6h;
++ struct mn_info *minfo = NULL;
++ __u32 lifetime;
++ __u8 user_flags = 0;
++
++ DEBUG_FUNC();
++
++ if (!is_mip6_tnl(t))
++ return IP6_TNL_ACCEPT;
++
++ if (!mip6node_cnf.accept_ret_rout) {
++ DEBUG(DBG_INFO, "Return routability administratively disabled"
++ " not doing route optimization");
++ return IP6_TNL_ACCEPT;
++ }
++ if (!pskb_may_pull(skb, skb->h.raw-skb->data+sizeof(*inner)))
++ return IP6_TNL_DROP;
++
++ inner = (struct ipv6hdr *)skb->h.raw;
++
++ read_lock(&mn_info_lock);
++ minfo = mipv6_mninfo_get_by_home(&inner->daddr);
++
++ if (!minfo) {
++ DEBUG(DBG_WARNING, "MN info missing");
++ read_unlock(&mn_info_lock);
++ return IP6_TNL_ACCEPT;
++ }
++ DEBUG(DBG_DATADUMP, "MIPV6 MN: Received a tunneled IPv6 packet"
++ " to %x:%x:%x:%x:%x:%x:%x:%x,"
++ " from %x:%x:%x:%x:%x:%x:%x:%x with\n tunnel header"
++ "daddr: %x:%x:%x:%x:%x:%x:%x:%x,"
++ "saddr: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&inner->daddr), NIPV6ADDR(&inner->saddr),
++ NIPV6ADDR(&outer->daddr), NIPV6ADDR(&outer->saddr));
++
++ spin_lock(&minfo->lock);
++
++ /* We don't send bus in response to all tunneled packets */
++
++ if (!ipv6_addr_cmp(&minfo->ha, &inner->saddr)) {
++ spin_unlock(&minfo->lock);
++ read_unlock(&mn_info_lock);
++ DEBUG(DBG_ERROR, "HA BUG: Received a tunneled packet "
++ "originally sent by home agent, not sending BU");
++ return IP6_TNL_ACCEPT;
++ }
++ spin_unlock(&minfo->lock);
++ read_unlock(&mn_info_lock);
++
++ DEBUG(DBG_DATADUMP, "Sending BU to correspondent node");
++
++ user_flags |= mip6node_cnf.bu_cn_ack ? MIPV6_BU_F_ACK : 0;
++
++ if (inner->nexthdr != IPPROTO_DSTOPTS &&
++ inner->nexthdr != IPPROTO_MOBILITY) {
++ struct in6_addr coa;
++ /* Don't start RR when receiving ICMP error messages */
++ if (inner->nexthdr == IPPROTO_ICMPV6) {
++ int ptr = (u8*)(inner+1) - skb->data;
++ u8 type;
++
++ if (skb_copy_bits(skb,
++ ptr+offsetof(struct icmp6hdr,
++ icmp6_type),
++ &type, 1)
++ || !(type & ICMPV6_INFOMSG_MASK)) {
++ return IP6_TNL_ACCEPT;
++ }
++ }
++ lifetime = mipv6_mn_get_bulifetime(&inner->daddr,
++ &outer->daddr, 0);
++ if (lifetime &&
++ !mipv6_get_care_of_address(&inner->daddr, &coa)) {
++ write_lock(&bul_lock);
++ mipv6_send_bu(&inner->daddr, &inner->saddr, &coa,
++ INITIAL_BINDACK_TIMEOUT,
++ MAX_BINDACK_TIMEOUT, 1,
++ user_flags,
++ lifetime, NULL);
++ write_unlock(&bul_lock);
++ }
++ }
++ DEBUG(DBG_DATADUMP, "setting rcv_tunnel flag in skb");
++ skb->security |= MIPV6_RCV_TUNNEL;
++ return IP6_TNL_ACCEPT;
++}
++
++static struct ip6_tnl_hook_ops mipv6_mn_tnl_rcv_send_bu_ops = {
++ {NULL, NULL},
++ IP6_TNL_PRE_DECAP,
++ IP6_TNL_PRI_FIRST,
++ mipv6_mn_tnl_rcv_send_bu_hook
++};
++
++static int
++mipv6_mn_tnl_xmit_stats_hook(struct ip6_tnl *t, struct sk_buff *skb)
++{
++ DEBUG_FUNC();
++ if (is_mip6_tnl(t))
++ MIPV6_INC_STATS(n_encapsulations);
++ return IP6_TNL_ACCEPT;
++}
++
++static struct ip6_tnl_hook_ops mipv6_mn_tnl_xmit_stats_ops = {
++ {NULL, NULL},
++ IP6_TNL_PRE_ENCAP,
++ IP6_TNL_PRI_LAST,
++ mipv6_mn_tnl_xmit_stats_hook
++};
++
++static int
++mipv6_mn_tnl_rcv_stats_hook(struct ip6_tnl *t, struct sk_buff *skb)
++{
++ DEBUG_FUNC();
++ if (is_mip6_tnl(t))
++ MIPV6_INC_STATS(n_decapsulations);
++ return IP6_TNL_ACCEPT;
++}
++
++static struct ip6_tnl_hook_ops mipv6_mn_tnl_rcv_stats_ops = {
++ {NULL, NULL},
++ IP6_TNL_PRE_DECAP,
++ IP6_TNL_PRI_LAST,
++ mipv6_mn_tnl_rcv_stats_hook
++};
++
++static void mn_check_tunneled_packet(struct sk_buff *skb)
++{
++ DEBUG_FUNC();
++ /* If tunnel flag was set */
++ if (skb->security & MIPV6_RCV_TUNNEL) {
++ struct in6_addr coa;
++ __u32 lifetime;
++ __u8 user_flags = 0;
++ int ptr = (u8*)(skb->nh.ipv6h+1) - skb->data;
++ int len = skb->len - ptr;
++ __u8 nexthdr = skb->nh.ipv6h->nexthdr;
++
++ if (len < 0)
++ return;
++
++ ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, len);
++ if (ptr < 0)
++ return;
++
++ if (!mip6node_cnf.accept_ret_rout) {
++ DEBUG(DBG_INFO, "Return routability administratively disabled");
++ return;
++ }
++ if (nexthdr == IPPROTO_MOBILITY)
++ return;
++
++ /* Don't start RR when receiving ICMP error messages */
++ if (nexthdr == IPPROTO_ICMPV6) {
++ u8 type;
++
++ if (skb_copy_bits(skb,
++ ptr+offsetof(struct icmp6hdr,
++ icmp6_type),
++ &type, 1)
++ || !(type & ICMPV6_INFOMSG_MASK)) {
++ return;
++ }
++ }
++ user_flags |= mip6node_cnf.bu_cn_ack ? MIPV6_BU_F_ACK : 0;
++ mipv6_get_care_of_address(&skb->nh.ipv6h->daddr, &coa);
++ lifetime = mipv6_mn_get_bulifetime(&skb->nh.ipv6h->daddr,
++ &coa, 0);
++
++ DEBUG(DBG_WARNING, "packet to address %x:%x:%x:%x:%x:%x:%x:%x"
++ "was tunneled. Sending BU to CN"
++ "%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&skb->nh.ipv6h->daddr),
++ NIPV6ADDR(&skb->nh.ipv6h->saddr));
++ /* This should work also with home address option */
++
++ write_lock(&bul_lock);
++ mipv6_send_bu(&skb->nh.ipv6h->daddr, &skb->nh.ipv6h->saddr,
++ &coa, INITIAL_BINDACK_TIMEOUT,
++ MAX_BINDACK_TIMEOUT, 1, user_flags,
++ lifetime, NULL);
++ write_unlock(&bul_lock);
++ }
++}
++
++static int sched_mv_home_addr_task(struct in6_addr *haddr, int plen_new,
++ int newif, int oldif, struct handoff *ho)
++{
++ int alloc_size;
++ struct ifr_holder *ifrh;
++
++ alloc_size = sizeof(*ifrh) + (ho ? sizeof(*ho): 0);
++ if ((ifrh = kmalloc(alloc_size, GFP_ATOMIC)) == NULL) {
++ DEBUG(DBG_ERROR, "Out of memory");
++ return -1;
++ }
++ if (ho) {
++ ifrh->ho = (struct handoff *)((struct ifr_holder *)(ifrh + 1));
++ memcpy(ifrh->ho, ho, sizeof(*ho));
++ } else
++ ifrh->ho = NULL;
++
++ /* must queue task to avoid deadlock with rtnl */
++ ifrh->ifr.ifr6_ifindex = newif;
++ ifrh->ifr.ifr6_prefixlen = plen_new;
++ ipv6_addr_copy(&ifrh->ifr.ifr6_addr, haddr);
++ ifrh->old_ifi = oldif;
++
++ spin_lock_bh(&ifrh_lock);
++ list_add_tail(&ifrh->list, &ifrh_list);
++ spin_unlock_bh(&ifrh_lock);
++
++ schedule_task(&mv_home_addr_task);
++
++ return 0;
++}
++
++static void send_ret_home_ns(struct in6_addr *ha_addr,
++ struct in6_addr *home_addr,
++ int ifindex)
++{
++ struct in6_addr nil;
++ struct in6_addr mcaddr;
++ struct net_device *dev = dev_get_by_index(ifindex);
++ if (!dev)
++ return;
++ memset(&nil, 0, sizeof(nil));
++ addrconf_addr_solict_mult(home_addr, &mcaddr);
++ ndisc_send_ns(dev, NULL, home_addr, &mcaddr, &nil);
++ dev_put(dev);
++}
++
++static inline int ha_is_reachable(int ifindex, struct in6_addr *ha)
++{
++ struct net_device *dev;
++ int reachable = 0;
++
++ dev = dev_get_by_index(ifindex);
++ if (dev) {
++ struct neighbour *neigh;
++ if ((neigh = ndisc_get_neigh(dev, ha)) != NULL) {
++ read_lock_bh(&neigh->lock);
++ if (neigh->nud_state&NUD_VALID)
++ reachable = 1;
++ read_unlock_bh(&neigh->lock);
++ neigh_release(neigh);
++ }
++ dev_put(dev);
++ }
++ return reachable;
++}
++
++static int mn_ha_handoff(struct handoff *ho)
++{
++ struct list_head *lh;
++ struct mn_info *minfo;
++ struct in6_addr *coa= ho->coa;
++ int wait_mv_home = 0;
++
++ read_lock_bh(&mn_info_lock);
++ list_for_each(lh, &mn_info_list) {
++ __u8 has_home_reg;
++ int ifindex;
++ struct in6_addr ha;
++ __u8 athome;
++ __u32 lifetime;
++ struct mipv6_bul_entry *entry = NULL;
++
++ minfo = list_entry(lh, struct mn_info, list);
++ spin_lock(&minfo->lock);
++ has_home_reg = minfo->has_home_reg;
++ ifindex = minfo->ifindex;
++ ipv6_addr_copy(&ha, &minfo->ha);
++
++ if (mipv6_prefix_compare(&ho->rtr_addr, &minfo->home_addr,
++ ho->plen)) {
++ if (minfo->has_home_reg)
++ athome = minfo->is_at_home = MN_RETURNING_HOME;
++ else
++ athome = minfo->is_at_home = MN_AT_HOME;
++ coa = &minfo->home_addr;
++
++ spin_unlock(&minfo->lock);
++#if 0
++ /* Cancel prefix solicitation, rtr is our HA */
++ mipv6_pfx_cancel_send(&ho->rtr_addr, ifindex);
++#endif
++ minfo->ifindex = ho->ifindex;
++
++ if (minfo->has_home_reg &&
++ !ha_is_reachable(ho->ifindex, &minfo->ha)) {
++ send_ret_home_ns(&minfo->ha,
++ &minfo->home_addr,
++ ho->ifindex);
++ mipv6_mdet_set_curr_rtr_reachable(0);
++ wait_mv_home++;
++ }
++ if (ifindex != ho->ifindex){
++ wait_mv_home++;
++ DEBUG(DBG_INFO,
++ "Moving home address back to "
++ "the home interface");
++ sched_mv_home_addr_task(&minfo->home_addr,
++ 128,
++ ho->ifindex,
++ ifindex, ho);
++ }
++ if (!has_home_reg || wait_mv_home)
++ continue;
++
++ lifetime = 0;
++
++ } else {
++ athome = minfo->is_at_home = MN_NOT_AT_HOME;
++ if (minfo->ifindex_user != minfo->ifindex) {
++ DEBUG(DBG_INFO, "Scheduling home address move to virtual interface");
++ sched_mv_home_addr_task(&minfo->home_addr,
++ 128,
++ minfo->ifindex_user,
++ minfo->ifindex, ho); /* Is minfo->ifindex correct */
++
++ wait_mv_home++;
++ }
++ minfo->ifindex = minfo->ifindex_user;
++ spin_unlock(&minfo->lock);
++ if (wait_mv_home)
++ continue;
++ if (!has_home_reg &&
++ init_home_registration(&minfo->home_addr,
++ ho->coa)) {
++ continue;
++ }
++ lifetime = mipv6_mn_get_bulifetime(&minfo->home_addr,
++ ho->coa,
++ MIPV6_BU_F_HOME);
++
++ }
++ write_lock(&bul_lock);
++ if (!(entry = mipv6_bul_get(&ha, &minfo->home_addr)) ||
++ !(entry->flags & MIPV6_BU_F_HOME)) {
++ DEBUG(DBG_ERROR,
++ "Unable to find home registration for "
++ "home address: %x:%x:%x:%x:%x:%x:%x:%x!\n",
++ NIPV6ADDR(&minfo->home_addr));
++ write_unlock(&bul_lock);
++ continue;
++ }
++ DEBUG(DBG_INFO, "Sending home de ? %d registration for "
++ "home address: %x:%x:%x:%x:%x:%x:%x:%x\n"
++ "to home agent %x:%x:%x:%x:%x:%x:%x:%x, "
++ "with lifetime %ld",
++ (athome != MN_NOT_AT_HOME),
++ NIPV6ADDR(&entry->home_addr),
++ NIPV6ADDR(&entry->cn_addr), lifetime);
++ mipv6_send_bu(&entry->home_addr, &entry->cn_addr,
++ coa, INITIAL_BINDACK_TIMEOUT,
++ MAX_BINDACK_TIMEOUT, 1, entry->flags,
++ lifetime, NULL);
++ write_unlock(&bul_lock);
++
++ }
++ read_unlock_bh(&mn_info_lock);
++ return wait_mv_home;
++}
++/**
++ * mn_cn_handoff - called for every bul entry to send BU to CN
++ * @rawentry: bul entry
++ * @args: handoff event
++ * @sortkey:
++ *
++ * Since MN can have many home addresses and home networks, every BUL
++ * entry needs to be checked
++ **/
++int mn_cn_handoff(void *rawentry, void *args, unsigned long *sortkey)
++{
++ struct mipv6_bul_entry *entry = (struct mipv6_bul_entry *)rawentry;
++ struct in6_addr *coa = (struct in6_addr *)args;
++
++ DEBUG_FUNC();
++
++ /* Home registrations already handled by mn_ha_handoff */
++ if (entry->flags & MIPV6_BU_F_HOME)
++ return ITERATOR_CONT;
++
++ /* BUL is locked by mipv6_mobile_node_moved which calls us
++ through mipv6_bul_iterate */
++
++ if (mipv6_prefix_compare(coa,
++ &entry->home_addr,
++ 64)) {
++ mipv6_send_bu(&entry->home_addr, &entry->cn_addr,
++ &entry->home_addr, INITIAL_BINDACK_TIMEOUT,
++ MAX_BINDACK_TIMEOUT, 1, entry->flags, 0,
++ NULL);
++ } else {
++ u32 lifetime = mipv6_mn_get_bulifetime(&entry->home_addr,
++ coa,
++ entry->flags);
++ mipv6_send_bu(&entry->home_addr, &entry->cn_addr,
++ coa, INITIAL_BINDACK_TIMEOUT,
++ MAX_BINDACK_TIMEOUT, 1, entry->flags,
++ lifetime, NULL);
++ }
++ return ITERATOR_CONT;
++}
++
++
++int mn_bul_invalidate(void *rawentry, void *args, unsigned long *sortkey)
++{
++ struct mipv6_bul_entry *bul = (struct mipv6_bul_entry *)rawentry;
++ struct bul_inval_args *arg = (struct bul_inval_args *)args;
++
++ DEBUG_FUNC();
++
++ if (!ipv6_addr_cmp(arg->cn, &bul->cn_addr) &&
++ (!ipv6_addr_cmp(arg->mn, &bul->home_addr) ||
++ !ipv6_addr_cmp(arg->mn, &bul->coa))) {
++ if (arg->all_rr_states || !bul->rr ||
++ (bul->rr->rr_state != RR_INIT &&
++ bul->rr->rr_state != RR_DONE)) {
++ bul->state = ACK_ERROR;
++ bul->callback = bul_entry_expired;
++ bul->callback_time = jiffies +
++ DUMB_CN_BU_LIFETIME * HZ;
++ bul->expire = bul->callback_time;
++ DEBUG(DBG_INFO, "BUL entry set to ACK_ERROR");
++ mipv6_bul_reschedule(bul);
++ }
++ }
++ return ITERATOR_CONT;
++}
++/**
++ * init_home_registration - start Home Registration process
++ * @home_addr: home address
++ * @coa: care-of address
++ *
++ * Checks whether we have a Home Agent address for this home address.
++ * If not starts Dynamic Home Agent Address Discovery. Otherwise
++ * tries to register with home agent if not already registered.
++ * Returns 1, if home registration process is started and 0 otherwise
++ **/
++int init_home_registration(struct in6_addr *home_addr, struct in6_addr *coa)
++{
++ struct mn_info *hinfo;
++ struct in6_addr ha;
++ __u8 man_conf;
++ int ifindex;
++ __u32 lifetime;
++ __u8 user_flags = 0, flags;
++
++ DEBUG_FUNC();
++
++ read_lock_bh(&mn_info_lock);
++ if ((hinfo = mipv6_mninfo_get_by_home(home_addr)) == NULL) {
++ DEBUG(DBG_ERROR, "No mn_info found for address: "
++ "%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(home_addr));
++ read_unlock_bh(&mn_info_lock);
++ return -ENOENT;
++ }
++ spin_lock(&hinfo->lock);
++ if (mipv6_prefix_compare(&hinfo->home_addr, coa, hinfo->home_plen)) {
++ spin_unlock(&hinfo->lock);
++ read_unlock_bh(&mn_info_lock);
++ DEBUG(DBG_INFO, "Adding home address, MN at home");
++ return 1;
++ }
++ if (ipv6_addr_any(&hinfo->ha)) {
++ int dhaad_id = mipv6_get_dhaad_id();
++ hinfo->dhaad_id = dhaad_id;
++ spin_unlock(&hinfo->lock);
++ mipv6_icmpv6_send_dhaad_req(home_addr, hinfo->home_plen, dhaad_id);
++ read_unlock_bh(&mn_info_lock);
++ DEBUG(DBG_INFO,
++ "Home Agent address not set, initiating DHAAD");
++ return 1;
++ }
++ ipv6_addr_copy(&ha, &hinfo->ha);
++ man_conf = hinfo->man_conf;
++ ifindex = hinfo->ifindex;
++ spin_unlock(&hinfo->lock);
++ read_unlock_bh(&mn_info_lock);
++#if 0
++ if (man_conf)
++ mipv6_pfx_add_ha(&ha, coa, ifindex);
++#endif
++ if (mipv6_bul_exists(&ha, home_addr)) {
++ DEBUG(DBG_INFO, "BU already sent to HA");
++ return 0;
++ }
++ /* user flags received through sysctl */
++ user_flags |= mip6node_cnf.bu_lladdr ? MIPV6_BU_F_LLADDR : 0;
++ user_flags |= mip6node_cnf.bu_keymgm ? MIPV6_BU_F_KEYMGM : 0;
++
++ flags = MIPV6_BU_F_HOME | MIPV6_BU_F_ACK | user_flags;
++
++ lifetime = mipv6_mn_get_bulifetime(home_addr, coa, flags);
++
++ DEBUG(DBG_INFO, "Sending initial home registration for "
++ "home address: %x:%x:%x:%x:%x:%x:%x:%x\n"
++ "to home agent %x:%x:%x:%x:%x:%x:%x:%x, "
++ "with lifetime %ld, prefixlength %d",
++ NIPV6ADDR(home_addr), NIPV6ADDR(&ha), lifetime, 0);
++
++ write_lock_bh(&bul_lock);
++ mipv6_send_bu(home_addr, &ha, coa, INITIAL_BINDACK_DAD_TIMEOUT,
++ MAX_BINDACK_TIMEOUT, 1, flags, lifetime, NULL);
++ write_unlock_bh(&bul_lock);
++
++ return 1;
++}
++
++/**
++ * mipv6_mobile_node_moved - Send BUs to all HAs and CNs
++ * @ho: handoff structure contains the new and previous routers
++ *
++ * Event for handoff. Sends BUs everyone on Binding Update List.
++ **/
++int mipv6_mobile_node_moved(struct handoff *ho)
++{
++#if 0
++ int bu_to_prev_router = 1;
++#endif
++ int dummy;
++
++ DEBUG_FUNC();
++
++ ma_ctl_upd_iface(ho->ifindex,
++ MA_IFACE_CURRENT | MA_IFACE_HAS_ROUTER, &dummy);
++
++ /* First send BU to HA, then to all other nodes that are on BU list */
++ if (mn_ha_handoff(ho) != 0)
++ return 0; /* Wait for move home address task */
++#if 0
++ /* Add current care-of address to mn_info list, if current router acts
++ as a HA.*/
++
++ if (ho->home_address && bu_to_prev_router)
++ mipv6_mninfo_add(ho->coa, ho->plen,
++ MN_AT_HOME, 0, &ho->rtr_addr,
++ ho->plen, ROUTER_BU_DEF_LIFETIME,
++ 0);
++
++#endif
++ return 0;
++}
++
++/**
++ * mipv6_mn_send_home_na - send NA when returning home
++ * @haddr: home address to advertise
++ *
++ * After returning home, MN must advertise all its valid addresses in
++ * home link to all nodes.
++ **/
++void mipv6_mn_send_home_na(struct in6_addr *haddr)
++{
++ struct net_device *dev = NULL;
++ struct in6_addr mc_allnodes;
++ struct mn_info *hinfo = NULL;
++
++ read_lock(&mn_info_lock);
++ hinfo = mipv6_mninfo_get_by_home(haddr);
++ if (!hinfo) {
++ read_unlock(&mn_info_lock);
++ return;
++ }
++ spin_lock(&hinfo->lock);
++ hinfo->is_at_home = MN_AT_HOME;
++ dev = dev_get_by_index(hinfo->ifindex);
++ spin_unlock(&hinfo->lock);
++ read_unlock(&mn_info_lock);
++ if (dev == NULL) {
++ DEBUG(DBG_ERROR, "Send home_na: device not found.");
++ return;
++ }
++
++ ipv6_addr_all_nodes(&mc_allnodes);
++ ndisc_send_na(dev, NULL, &mc_allnodes, haddr, 0, 0, 1, 1);
++ dev_put(dev);
++}
++
++static int mn_use_hao(struct in6_addr *daddr, struct in6_addr *saddr)
++{
++ struct mipv6_bul_entry *entry;
++ struct mn_info *minfo = NULL;
++ int add_ha = 0;
++
++ read_lock_bh(&mn_info_lock);
++ minfo = mipv6_mninfo_get_by_home(saddr);
++ if (minfo && minfo->is_at_home != MN_AT_HOME) {
++ read_lock_bh(&bul_lock);
++ if ((entry = mipv6_bul_get(daddr, saddr)) == NULL) {
++ read_unlock_bh(&bul_lock);
++ read_unlock_bh(&mn_info_lock);
++ return add_ha;
++ }
++ add_ha = (entry->state != ACK_ERROR &&
++ (!entry->rr || entry->rr->rr_state == RR_DONE ||
++ entry->flags & MIPV6_BU_F_HOME));
++ read_unlock_bh(&bul_lock);
++ }
++ read_unlock_bh(&mn_info_lock);
++ return add_ha;
++}
++
++static int
++mn_dev_event(struct notifier_block *nb, unsigned long event, void *ptr)
++{
++ struct net_device *dev = ptr;
++ struct list_head *lh;
++ struct mn_info *minfo;
++ int newif = 0;
++
++ /* here are probably the events we need to worry about */
++ switch (event) {
++ case NETDEV_UP:
++ DEBUG(DBG_DATADUMP, "New netdevice %s registered.", dev->name);
++ if (dev->type != ARPHRD_LOOPBACK && !dev_is_mip6_tnl(dev))
++ ma_ctl_add_iface(dev->ifindex);
++
++ break;
++ case NETDEV_GOING_DOWN:
++ DEBUG(DBG_DATADUMP, "Netdevice %s disappeared.", dev->name);
++ /*
++ * Go through mn_info list and move all home addresses on the
++ * netdev going down to a new device. This will make it
++ * practically impossible for the home address to return home,
++ * but allow MN to retain its connections using the address.
++ */
++
++ read_lock_bh(&mn_info_lock);
++ list_for_each(lh, &mn_info_list) {
++ minfo = list_entry(lh, struct mn_info, list);
++ spin_lock(&minfo->lock);
++ if (minfo->ifindex == dev->ifindex) {
++ if (sched_mv_home_addr_task(&minfo->home_addr, 128,
++ minfo->ifindex_user,
++ 0, NULL) < 0) {
++ minfo->ifindex = 0;
++ spin_unlock(&minfo->lock);
++ read_unlock_bh(&mn_info_lock);
++ return NOTIFY_DONE;
++ } else {
++ minfo->ifindex = minfo->ifindex_user;
++ if (minfo->is_at_home) {
++ minfo->is_at_home = 0;
++
++ }
++ newif = minfo->ifindex_user;
++ }
++ }
++ spin_unlock(&minfo->lock);
++ }
++
++ read_unlock_bh(&mn_info_lock);
++ }
++ ma_ctl_upd_iface(dev->ifindex, MA_IFACE_NOT_PRESENT, &newif);
++ mipv6_mdet_del_if(dev->ifindex);
++
++ return NOTIFY_DONE;
++}
++
++struct notifier_block mipv6_mn_dev_notifier = {
++ mn_dev_event,
++ NULL,
++ 0 /* check if using zero is ok */
++};
++
++static void deprecate_addr(struct mn_info *minfo)
++{
++ /*
++ * Lookup address from IPv6 address list and set deprecated flag
++ */
++
++}
++
++/*
++ * Required because we can only modify addresses after the packet is
++ * constructed. We otherwise mess with higher level protocol
++ * pseudoheaders. With strict protocol layering life would be SO much
++ * easier!
++ */
++static unsigned int modify_xmit_addrs(unsigned int hooknum,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn) (struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++
++ DEBUG_FUNC();
++
++ if (skb) {
++ struct ipv6hdr *hdr = skb->nh.ipv6h;
++ struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
++ struct mipv6_bul_entry *bule;
++ struct in6_addr *daddr;
++
++ if (!ipv6_addr_any(&opt->hoa))
++ daddr = &opt->hoa;
++ else
++ daddr = &hdr->daddr;
++
++ /* We don't consult bul when sending a BU to avoid deadlock, since
++ * BUL is already locked.
++ */
++
++
++ if (opt->mipv6_flags & MIPV6_SND_HAO &&
++ !(opt->mipv6_flags & MIPV6_SND_BU)) {
++ write_lock(&bul_lock);
++ bule = mipv6_bul_get(daddr, &hdr->saddr);
++ if (!bule) {
++ write_unlock(&bul_lock);
++ return NF_ACCEPT;
++ }
++ if (!bule->rr || bule->rr->rr_state == RR_DONE ||
++ bule->flags & MIPV6_BU_F_HOME) {
++ DEBUG(DBG_DATADUMP,
++ "Replace source address with CoA and reroute");
++ ipv6_addr_copy(&hdr->saddr, &bule->coa);
++ skb->nfcache |= NFC_ALTERED;
++ }
++ write_unlock(&bul_lock);
++ } else if (opt->mipv6_flags & MIPV6_SND_HAO) {
++ mipv6_get_care_of_address(&hdr->saddr, &hdr->saddr);
++ skb->nfcache |= NFC_ALTERED;
++ }
++ }
++ return NF_ACCEPT;
++}
++
++/* We set a netfilter hook so that we can modify outgoing packet's
++ * source addresses
++ */
++struct nf_hook_ops addr_modify_hook_ops = {
++ {NULL, NULL}, /* List head, no predecessor, no successor */
++ modify_xmit_addrs,
++ PF_INET6,
++ NF_IP6_LOCAL_OUT,
++ NF_IP6_PRI_FIRST /* Should be of EXTREMELY high priority since we
++ * do not want to mess with IPSec (possibly
++ * implemented as packet filter)
++ */
++};
++
++#define MN_INFO_LEN 77
++
++static int mn_proc_info(char *buffer, char **start, off_t offset,
++ int length)
++{
++ struct list_head *p;
++ struct mn_info *minfo;
++ int len = 0, skip = 0;
++
++ DEBUG_FUNC();
++
++ read_lock_bh(&mn_info_lock);
++ list_for_each(p, &mn_info_list) {
++ if (len < offset / MN_INFO_LEN) {
++ skip++;
++ continue;
++ }
++ if (len >= length)
++ break;
++ minfo = list_entry(p, struct mn_info, list);
++ spin_lock(&minfo->lock);
++ len += sprintf(buffer + len, "%02d %08x%08x%08x%08x %02x "
++ "%08x%08x%08x%08x %d %d\n",
++ minfo->ifindex,
++ ntohl(minfo->home_addr.s6_addr32[0]),
++ ntohl(minfo->home_addr.s6_addr32[1]),
++ ntohl(minfo->home_addr.s6_addr32[2]),
++ ntohl(minfo->home_addr.s6_addr32[3]),
++ minfo->home_plen,
++ ntohl(minfo->ha.s6_addr32[0]),
++ ntohl(minfo->ha.s6_addr32[1]),
++ ntohl(minfo->ha.s6_addr32[2]),
++ ntohl(minfo->ha.s6_addr32[3]),
++ minfo->is_at_home, minfo->has_home_reg);
++ spin_unlock(&minfo->lock);
++ }
++ read_unlock_bh(&mn_info_lock);
++
++ *start = buffer;
++ if (offset)
++ *start += offset % MN_INFO_LEN;
++
++ len -= offset % MN_INFO_LEN;
++
++ if (len > length)
++ len = length;
++ if (len < 0)
++ len = 0;
++
++ return len;
++}
++
++int mipv6_mn_ha_nd_update(struct net_device *dev,
++ struct in6_addr *ha, u8 *lladdr)
++{
++ int valid = 0;
++ struct neighbour *neigh;
++ if ((neigh = ndisc_get_neigh(dev, ha))) {
++ read_lock(&neigh->lock);
++ valid = neigh->nud_state & NUD_VALID;
++ read_unlock(&neigh->lock);
++ if (!valid && lladdr)
++ neigh_update(neigh, lladdr, NUD_REACHABLE, 0, 1);
++ neigh_release(neigh);
++ }
++ return valid;
++}
++
++int mipv6_mn_ha_probe(struct inet6_ifaddr *ifp, u8 *lladdr)
++{
++ struct mn_info *minfo;
++
++ if (!(minfo = mipv6_mninfo_get_by_home(&ifp->addr)) ||
++ ipv6_addr_any(&minfo->ha))
++ return 0;
++
++ if (mipv6_mn_ha_nd_update(ifp->idev->dev, &minfo->ha, lladdr))
++ mipv6_mdet_retrigger_ho();
++ return 1;
++}
++
++int __init mipv6_mn_init(void)
++{
++ struct net_device *dev;
++
++ DEBUG_FUNC();
++
++ if (mipv6_add_tnl_to_ha())
++ return -ENODEV;
++
++ mipv6_bul_init(MIPV6_BUL_SIZE);
++ mip6_fn.mn_use_hao = mn_use_hao;
++ mip6_fn.mn_check_tunneled_packet = mn_check_tunneled_packet;
++ INIT_TQUEUE(&mv_home_addr_task, mv_home_addr, NULL);
++
++ ma_ctl_init();
++ for (dev = dev_base; dev; dev = dev->next) {
++ if (dev->flags & IFF_UP &&
++ dev->type != ARPHRD_LOOPBACK && !dev_is_mip6_tnl(dev)) {
++ ma_ctl_add_iface(dev->ifindex);
++ }
++ }
++ DEBUG(DBG_INFO, "Multiaccess support initialized");
++
++ register_netdevice_notifier(&mipv6_mn_dev_notifier);
++ register_inet6addr_notifier(&mipv6_mn_inet6addr_notifier);
++
++ ip6ip6_tnl_register_hook(&mipv6_mn_tnl_rcv_send_bu_ops);
++ ip6ip6_tnl_register_hook(&mipv6_mn_tnl_xmit_stats_ops);
++ ip6ip6_tnl_register_hook(&mipv6_mn_tnl_rcv_stats_ops);
++
++ MIPV6_SETCALL(mipv6_set_home, mipv6_mn_set_home);
++
++ mipv6_initialize_mdetect();
++
++ /* COA to home transformation hook */
++ MIPV6_SETCALL(mipv6_get_home_address, mipv6_get_saddr_hook);
++ MIPV6_SETCALL(mipv6_mn_ha_probe, mipv6_mn_ha_probe);
++ MIPV6_SETCALL(mipv6_is_home_addr, mipv6_mn_is_home_addr);
++ proc_net_create("mip6_mninfo", 0, mn_proc_info);
++ /* Set packet modification hook (source addresses) */
++ nf_register_hook(&addr_modify_hook_ops);
++
++ return 0;
++}
++
++void __exit mipv6_mn_exit(void)
++{
++ struct list_head *lh, *tmp;
++ struct mn_info *minfo;
++ DEBUG_FUNC();
++
++ mip6_fn.mn_use_hao = NULL;
++ mip6_fn.mn_check_tunneled_packet = NULL;
++
++ MIPV6_RESETCALL(mipv6_set_home);
++ MIPV6_RESETCALL(mipv6_get_home_address);
++ MIPV6_RESETCALL(mipv6_mn_ha_probe);
++ MIPV6_RESETCALL(mipv6_is_home_addr);
++ nf_unregister_hook(&addr_modify_hook_ops);
++ proc_net_remove("mip6_mninfo");
++ mipv6_shutdown_mdetect();
++ ip6ip6_tnl_unregister_hook(&mipv6_mn_tnl_rcv_stats_ops);
++ ip6ip6_tnl_unregister_hook(&mipv6_mn_tnl_xmit_stats_ops);
++ ip6ip6_tnl_unregister_hook(&mipv6_mn_tnl_rcv_send_bu_ops);
++ ma_ctl_clean();
++
++ unregister_inet6addr_notifier(&mipv6_mn_inet6addr_notifier);
++ unregister_netdevice_notifier(&mipv6_mn_dev_notifier);
++ write_lock_bh(&mn_info_lock);
++
++ list_for_each_safe(lh, tmp, &mn_info_list) {
++ minfo = list_entry(lh, struct mn_info, list);
++ if (minfo->is_at_home == MN_NOT_AT_HOME)
++ deprecate_addr(minfo);
++ list_del(&minfo->list);
++ kfree(minfo);
++ }
++ write_unlock_bh(&mn_info_lock);
++ mipv6_bul_exit();
++ flush_scheduled_tasks();
++ mipv6_del_tnl_to_ha();
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mn.h linux-2.4.25/net/ipv6/mobile_ip6/mn.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mn.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mn.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,96 @@
++/*
++ * MIPL Mobile IPv6 Mobile Node header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _MN_H
++#define _MN_H
++
++#include <linux/in6.h>
++
++/* constants for sending BUs */
++#define HA_BU_DEF_LIFETIME 10000
++#define CN_BU_DEF_LIFETIME 420 /* Max lifetime for RR bindings from RFC 3775 */
++#define DUMB_CN_BU_LIFETIME 600 /* BUL entry lifetime in case of dumb CN */
++#define ROUTER_BU_DEF_LIFETIME 30 /* For packet forwarding from previous coa */
++#define ERROR_DEF_LIFETIME DUMB_CN_BU_LIFETIME
++
++extern rwlock_t mn_info_lock;
++
++#define MN_NOT_AT_HOME 0
++#define MN_RETURNING_HOME 1
++#define MN_AT_HOME 2
++
++/*
++ * Mobile Node information record
++ */
++struct mn_info {
++ struct in6_addr home_addr;
++ struct in6_addr ha;
++ __u8 home_plen;
++ __u8 is_at_home;
++ __u8 has_home_reg;
++ __u8 man_conf;
++ int ifindex;
++ int ifindex_user;
++ unsigned long home_addr_expires;
++ unsigned short dhaad_id;
++ struct list_head list;
++ spinlock_t lock;
++};
++
++/* prototypes for interface functions */
++int mipv6_mn_init(void);
++void mipv6_mn_exit(void);
++
++struct handoff;
++
++/* Interface to movement detection */
++int mipv6_mobile_node_moved(struct handoff *ho);
++
++void mipv6_mn_send_home_na(struct in6_addr *haddr);
++/* Init home reg. with coa */
++int init_home_registration(struct in6_addr *home_addr, struct in6_addr *coa);
++
++/* mn_info functions that require locking by caller */
++struct mn_info *mipv6_mninfo_get_by_home(struct in6_addr *haddr);
++
++struct mn_info *mipv6_mninfo_get_by_ha(struct in6_addr *home_agent);
++
++struct mn_info *mipv6_mninfo_get_by_id(unsigned short id);
++
++/* "safe" mn_info functions */
++void mipv6_mninfo_add(int ifindex, struct in6_addr *home_addr, int plen,
++ int isathome, unsigned long lifetime, struct in6_addr *ha,
++ int ha_plen, unsigned long ha_lifetime, int man_conf);
++
++int mipv6_mninfo_del(struct in6_addr *home_addr, int del_dyn_only);
++
++void mipv6_mn_set_home_reg(struct in6_addr *home_addr, int has_home_reg);
++
++int mipv6_mn_is_at_home(struct in6_addr *addr);
++
++int mipv6_mn_is_home_addr(struct in6_addr *addr);
++
++__u32 mipv6_mn_get_bulifetime(struct in6_addr *home_addr,
++ struct in6_addr *coa, __u8 flags);
++int mn_cn_handoff(void *rawentry, void *args, unsigned long *sortkey);
++
++int mipv6_mn_ha_nd_update(struct net_device *dev,
++ struct in6_addr *ha, u8 *lladdr);
++
++struct bul_inval_args {
++ int all_rr_states;
++ struct in6_addr *cn;
++ struct in6_addr *mn;
++};
++
++int mn_bul_invalidate(void *rawentry, void *args, unsigned long *sortkey);
++
++#endif /* _MN_H */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mobhdr.h linux-2.4.25/net/ipv6/mobile_ip6/mobhdr.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mobhdr.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mobhdr.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,101 @@
++/*
++ * MIPL Mobile IPv6 Mobility Header send and receive
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _MOBHDR_H
++#define _MOBHDR_H
++
++#include <net/mipv6.h>
++
++/* RR states for mipv6_send_bu() */
++#define RR_INIT 0x00
++#define RR_WAITH 0x01
++#define RR_WAITC 0x02
++#define RR_WAITHC 0x13
++#define RR_DONE 0x10
++
++#define MH_UNKNOWN_CN 1
++#define MH_AUTH_FAILED 2
++#define MH_SEQUENCE_MISMATCH 3
++
++struct mipv6_bul_entry;
++struct sk_buff;
++
++int mipv6_mh_common_init(void);
++void mipv6_mh_common_exit(void);
++int mipv6_mh_mn_init(void);
++void mipv6_mh_mn_exit(void);
++
++struct mipv6_mh_opt {
++ struct mipv6_mo_alt_coa *alt_coa;
++ struct mipv6_mo_nonce_indices *nonce_indices;
++ struct mipv6_mo_bauth_data *auth_data;
++ struct mipv6_mo_br_advice *br_advice;
++ int freelen;
++ int totlen;
++ u8 *next_free;
++ u8 data[0];
++};
++
++struct mobopt {
++ struct mipv6_mo_alt_coa *alt_coa;
++ struct mipv6_mo_nonce_indices *nonce_indices;
++ struct mipv6_mo_bauth_data *auth_data;
++ struct mipv6_mo_br_advice *br_advice;
++};
++
++struct mipv6_mh_opt *alloc_mh_opts(int totlen);
++int append_mh_opt(struct mipv6_mh_opt *ops, u8 type, u8 len, void *data);
++int parse_mo_tlv(void *mos, int len, struct mobopt *opts);
++int mipv6_add_pad(u8 *data, int n);
++
++struct mipv6_auth_parm {
++ struct in6_addr *coa;
++ struct in6_addr *cn_addr;
++ __u8 *k_bu;
++};
++
++int send_mh(struct in6_addr *daddr, struct in6_addr *saddr,
++ u8 msg_type, u8 msg_len, u8 *msg,
++ struct in6_addr *hao_addr, struct in6_addr *rth_addr,
++ struct mipv6_mh_opt *ops, struct mipv6_auth_parm *parm);
++
++int mipv6_mh_register(int type, int (*func)(struct sk_buff *,
++ struct in6_addr *, struct in6_addr *,
++ struct in6_addr *, struct in6_addr *, struct mipv6_mh *));
++
++void mipv6_mh_unregister(int type);
++
++int mipv6_send_brr(struct in6_addr *saddr, struct in6_addr *daddr,
++ struct mipv6_mh_opt *ops);
++
++int mipv6_send_bu(struct in6_addr *saddr, struct in6_addr *daddr,
++ struct in6_addr *coa, __u32 initdelay,
++ __u32 maxackdelay, __u8 exp, __u8 flags,
++ __u32 lifetime, struct mipv6_mh_opt *ops);
++
++int mipv6_send_be(struct in6_addr *saddr, struct in6_addr *daddr,
++ struct in6_addr *home, __u8 status);
++
++int mipv6_send_ba(struct in6_addr *saddr, struct in6_addr *daddr,
++ struct in6_addr *auth_coa, struct in6_addr *rep_coa,
++ u8 status, u16 sequence, u32 lifetime, u8 *k_bu);
++
++/* Binding Authentication Data Option routines */
++#define MAX_HASH_LENGTH 20
++#define MIPV6_RR_MAC_LENGTH 12
++
++int mipv6_auth_build(struct in6_addr *cn_addr, struct in6_addr *coa,
++ __u8 *opt, __u8 *aud_data, __u8 *k_bu);
++
++int mipv6_auth_check(struct in6_addr *cn_addr, struct in6_addr *coa,
++ __u8 *opt, __u8 optlen, struct mipv6_mo_bauth_data *aud,
++ __u8 *k_bu);
++#endif /* _MOBHDR_H */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mobhdr_common.c linux-2.4.25/net/ipv6/mobile_ip6/mobhdr_common.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mobhdr_common.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mobhdr_common.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,1210 @@
++/*
++ * Mobile IPv6 Mobility Header Common Functions
++ *
++ * Authors:
++ * Antti Tuominen <ajtuomin@tml.hut.fi>
++ *
++ * $Id: s.mh_recv.c 1.159 02/10/16 15:01:29+03:00 antti@traci.mipl.mediapoli.com $
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#include <linux/autoconf.h>
++#include <linux/types.h>
++#include <linux/in6.h>
++#include <linux/skbuff.h>
++#include <linux/ipsec.h>
++#include <linux/init.h>
++#include <net/ipv6.h>
++#include <net/ip6_route.h>
++#include <net/addrconf.h>
++#include <net/mipv6.h>
++#include <net/checksum.h>
++#include <net/protocol.h>
++
++#include "stats.h"
++#include "debug.h"
++#include "mobhdr.h"
++#include "bcache.h"
++
++#include "rr_crypto.h"
++#include "exthdrs.h"
++#include "config.h"
++
++#define MIPV6_MH_MAX MIPV6_MH_BE
++struct mh_proto {
++	int (*func) (struct sk_buff *,
++		     struct in6_addr *, struct in6_addr *,
++		     struct in6_addr *, struct in6_addr *,
++		     struct mipv6_mh *);
++};
++
++static struct mh_proto mh_rcv[MIPV6_MH_MAX + 1]; /* indexed by MH type, 0..MIPV6_MH_MAX inclusive */
++
++int mipv6_mh_register(int type, int (*func)(struct sk_buff *,
++ struct in6_addr *, struct in6_addr *,
++ struct in6_addr *, struct in6_addr *, struct mipv6_mh *))
++{
++ if (mh_rcv[type].func != NULL)
++ return -1;
++
++ mh_rcv[type].func = func;
++
++ return 0;
++}
++
++void mipv6_mh_unregister(int type)
++{
++ if (type < 0 || type > MIPV6_MH_MAX)
++ return;
++
++ mh_rcv[type].func = NULL;
++}
++
++struct socket *mipv6_mh_socket = NULL;
++
++/* TODO: Fix fragmentation */
++static int dstopts_getfrag(
++ const void *data, struct in6_addr *addr,
++ char *buff, unsigned int offset, unsigned int len)
++{
++ memcpy(buff, data + offset, len);
++ return 0;
++}
++
++struct mipv6_mh_opt *alloc_mh_opts(int totlen)
++{
++ struct mipv6_mh_opt *ops;
++
++ ops = kmalloc(sizeof(*ops) + totlen, GFP_ATOMIC);
++ if (ops == NULL)
++ return NULL;
++
++ memset(ops, 0, sizeof(*ops));
++ ops->next_free = ops->data;
++ ops->freelen = totlen;
++
++ return ops;
++}
++
++int append_mh_opt(struct mipv6_mh_opt *ops, u8 type, u8 len, void *data)
++{
++	struct mipv6_mo *mo;
++
++	if (ops->next_free == NULL) {
++		DEBUG(DBG_ERROR, "No free room for option");
++		return -ENOMEM;
++	}
++	if (ops->freelen < len + 2) {
++		DEBUG(DBG_ERROR, "No free room for option");
++		return -ENOMEM;
++	}
++	else {
++		ops->freelen -= (len + 2);
++		ops->totlen += (len + 2);
++	}
++
++	mo = (struct mipv6_mo *)ops->next_free;
++	mo->type = type;
++	mo->length = len;
++
++	switch (type) {
++	case MIPV6_OPT_ALTERNATE_COA:
++		ops->alt_coa = (struct mipv6_mo_alt_coa *)mo;
++		ipv6_addr_copy(&ops->alt_coa->addr, (struct in6_addr *)data);
++		break;
++	case MIPV6_OPT_NONCE_INDICES:
++		DEBUG(DBG_INFO, "Added nonce indices pointer");
++		ops->nonce_indices = (struct mipv6_mo_nonce_indices *)mo;
++		ops->nonce_indices->home_nonce_i = *(__u16 *)data;
++		ops->nonce_indices->careof_nonce_i = *((__u16 *)data + 1);
++		break;
++	case MIPV6_OPT_AUTH_DATA:
++		DEBUG(DBG_INFO, "Added opt auth_data pointer");
++		ops->auth_data = (struct mipv6_mo_bauth_data *)mo;
++		break;
++	case MIPV6_OPT_BIND_REFRESH_ADVICE:
++		ops->br_advice = (struct mipv6_mo_br_advice *)mo;
++		ops->br_advice->refresh_interval = htons(*(u16 *)data);
++		break;
++	default:
++		DEBUG(DBG_ERROR, "Unknown option type");
++		break;
++	}
++
++	if (ops->freelen == 0)
++		ops->next_free = NULL;
++	else
++		ops->next_free += (len + 2);
++
++	return 0;
++}
++
++/*
++ * Calculates required padding with xn + y requirement with offset
++ */
++static inline int optpad(int xn, int y, int offset)
++{
++ return ((y - offset) & (xn - 1));
++}
++
++static int option_pad(int type, int offset)
++{
++ if (type == MIPV6_OPT_ALTERNATE_COA)
++ return optpad(8, 6, offset); /* 8n + 6 */
++ if (type == MIPV6_OPT_BIND_REFRESH_ADVICE ||
++ type == MIPV6_OPT_NONCE_INDICES)
++ return optpad(2, 0, offset); /* 2n */
++ return 0;
++}
++
++/*
++ * Add Pad1 or PadN option to data
++ */
++int mipv6_add_pad(u8 *data, int n)
++{
++ struct mipv6_mo_padn *padn;
++
++ if (n <= 0) return 0;
++ if (n == 1) {
++ *data = MIPV6_OPT_PAD1;
++ return 1;
++ }
++ padn = (struct mipv6_mo_padn *)data;
++ padn->type = MIPV6_OPT_PADN;
++ padn->length = n - 2;
++ memset(padn->data, 0, n - 2);
++ return n;
++}
++
++/*
++ * Write options to mobility header buffer
++ */
++static int prepare_mh_opts(u8 *optdata, int off, struct mipv6_mh_opt *ops)
++{
++ u8 *nextopt = optdata;
++ int offset = off, pad = 0;
++
++ if (ops == NULL) {
++ nextopt = NULL;
++ return -1;
++ }
++
++ if (ops->alt_coa) {
++ pad = option_pad(MIPV6_OPT_ALTERNATE_COA, offset);
++ nextopt += mipv6_add_pad(nextopt, pad);
++ memcpy(nextopt, ops->alt_coa, sizeof(struct mipv6_mo_alt_coa));
++ nextopt += sizeof(struct mipv6_mo_alt_coa);
++ offset += pad + sizeof(struct mipv6_mo_alt_coa);
++ }
++
++ if (ops->br_advice) {
++ pad = option_pad(MIPV6_OPT_BIND_REFRESH_ADVICE, offset);
++ nextopt += mipv6_add_pad(nextopt, pad);
++ memcpy(nextopt, ops->br_advice, sizeof(struct mipv6_mo_br_advice));
++ nextopt += sizeof(struct mipv6_mo_br_advice);
++ offset += pad + sizeof(struct mipv6_mo_br_advice);
++ }
++
++ if (ops->nonce_indices) {
++ pad = option_pad(MIPV6_OPT_NONCE_INDICES, offset);
++ nextopt += mipv6_add_pad(nextopt, pad);
++ memcpy(nextopt, ops->nonce_indices, sizeof(struct mipv6_mo_nonce_indices));
++ nextopt += sizeof(struct mipv6_mo_nonce_indices);
++ offset += pad + sizeof(struct mipv6_mo_nonce_indices);
++ }
++
++ if (ops->auth_data) {
++ /* This option should always be the last. Header
++	 * length must be a multiple of 8 octets, so we pad
++ * if necessary. */
++ pad = optpad(8, 0, offset + ops->auth_data->length + 2);
++ nextopt += mipv6_add_pad(nextopt, pad);
++ memcpy(nextopt, ops->auth_data, ops->auth_data->length + 2);
++ nextopt += ops->auth_data->length + 2;
++ }
++ nextopt = NULL;
++
++ return 0;
++}
++
++static int calculate_mh_opts(struct mipv6_mh_opt *ops, int mh_len)
++{
++ int offset = mh_len;
++
++ if (ops == NULL)
++ return 0;
++
++ if (ops->alt_coa)
++ offset += sizeof(struct mipv6_mo_alt_coa)
++ + option_pad(MIPV6_OPT_ALTERNATE_COA, offset);
++
++ if (ops->br_advice)
++ offset += sizeof(struct mipv6_mo_br_advice)
++ + option_pad(MIPV6_OPT_BIND_REFRESH_ADVICE, offset);
++
++ if (ops->nonce_indices)
++ offset += sizeof(struct mipv6_mo_nonce_indices)
++ + option_pad(MIPV6_OPT_NONCE_INDICES, offset);
++
++ if (ops->auth_data) /* no alignment */
++ offset += ops->auth_data->length + 2;
++
++ return offset - mh_len;
++}
++
++/*
++ *
++ * Mobility Header Message send functions
++ *
++ */
++
++/**
++ * send_mh - builds and sends a MH msg
++ *
++ * @daddr: destination address for packet
++ * @saddr: source address for packet
++ * @msg_type: type of MH
++ * @msg_len: message length
++ * @msg: MH type specific data
++ * @hao_addr: home address for home address option
++ * @rth_addr: routing header address
++ * @ops: mobility options
++ * @parm: auth data
++ *
++ * Builds MH, appends the type specific msg data to the header and
++ * sends the packet with a home address option, if a home address was
++ * given. Returns 0, if everything succeeded and a negative error code
++ * otherwise.
++ **/
++int send_mh(struct in6_addr *daddr,
++ struct in6_addr *saddr,
++ u8 msg_type, u8 msg_len, u8 *msg,
++ struct in6_addr *hao_addr,
++ struct in6_addr *rth_addr,
++ struct mipv6_mh_opt *ops,
++ struct mipv6_auth_parm *parm)
++{
++ struct flowi fl;
++ struct mipv6_mh *mh;
++ struct sock *sk = mipv6_mh_socket->sk;
++ struct ipv6_txoptions *txopt = NULL;
++ int tot_len = sizeof(struct mipv6_mh) + msg_len;
++ int padded_len = 0, txopt_len = 0;
++
++ DEBUG_FUNC();
++ /* Add length of options */
++ tot_len += calculate_mh_opts(ops, tot_len);
++ /* Needs to be a multiple of 8 octets */
++ padded_len = tot_len + optpad(8, 0, tot_len);
++
++ mh = sock_kmalloc(sk, padded_len, GFP_ATOMIC);
++ if (!mh) {
++ DEBUG(DBG_ERROR, "memory allocation failed");
++ return -ENOMEM;
++ }
++
++ memset(&fl, 0, sizeof(fl));
++ fl.proto = IPPROTO_MOBILITY;
++ fl.fl6_dst = daddr;
++ fl.fl6_src = saddr;
++ fl.fl6_flowlabel = 0;
++ fl.oif = sk->bound_dev_if;
++
++ if (hao_addr || rth_addr) {
++ __u8 *opt_ptr;
++
++ if (hao_addr)
++ txopt_len += sizeof(struct mipv6_dstopt_homeaddr) + 6;
++ if (rth_addr)
++ txopt_len += sizeof(struct rt2_hdr);
++
++ txopt_len += sizeof(*txopt);
++ txopt = sock_kmalloc(sk, txopt_len, GFP_ATOMIC);
++ if (txopt == NULL) {
++ DEBUG(DBG_ERROR, "No socket space left");
++ sock_kfree_s(sk, mh, padded_len);
++ return -ENOMEM;
++ }
++ memset(txopt, 0, txopt_len);
++ txopt->tot_len = txopt_len;
++ opt_ptr = (__u8 *) (txopt + 1);
++ if (hao_addr) {
++ int holen = sizeof(struct mipv6_dstopt_homeaddr) + 6;
++ txopt->dst1opt = (struct ipv6_opt_hdr *) opt_ptr;
++ txopt->opt_flen += holen;
++ opt_ptr += holen;
++ mipv6_append_dst1opts(txopt->dst1opt, saddr,
++ NULL, holen);
++ txopt->mipv6_flags = MIPV6_SND_HAO | MIPV6_SND_BU;
++ }
++ if (rth_addr) {
++ int rtlen = sizeof(struct rt2_hdr);
++ txopt->srcrt2 = (struct ipv6_rt_hdr *) opt_ptr;
++ txopt->opt_nflen += rtlen;
++ opt_ptr += rtlen;
++ mipv6_append_rt2hdr(txopt->srcrt2, rth_addr);
++ }
++ }
++
++ /* Fill in the fields of MH */
++ mh->payload = NEXTHDR_NONE;
++ mh->length = (padded_len >> 3) - 1; /* Units of 8 octets - 1 */
++ mh->type = msg_type;
++ mh->reserved = 0;
++ mh->checksum = 0;
++
++ memcpy(mh->data, msg, msg_len);
++ prepare_mh_opts(mh->data + msg_len, msg_len + sizeof(*mh), ops);
++ /* If BAD is present, this is already done. */
++ mipv6_add_pad((u8 *)mh + tot_len, padded_len - tot_len);
++
++ if (parm && parm->k_bu && ops && ops->auth_data) {
++ /* Calculate the position of the authorization data before adding checksum*/
++ mipv6_auth_build(parm->cn_addr, parm->coa, (__u8 *)mh,
++ (__u8 *)mh + padded_len - MIPV6_RR_MAC_LENGTH, parm->k_bu);
++ }
++ /* Calculate the MH checksum */
++ mh->checksum = csum_ipv6_magic(fl.fl6_src, fl.fl6_dst,
++ padded_len, IPPROTO_MOBILITY,
++ csum_partial((char *)mh, padded_len, 0));
++ ip6_build_xmit(sk, dstopts_getfrag, mh, &fl, padded_len, txopt, 255,
++ MSG_DONTWAIT);
++ /* dst cache must be cleared so RR messages can be routed through
++ different interfaces */
++ sk_dst_reset(sk);
++
++ if (txopt_len)
++ sock_kfree_s(sk, txopt, txopt_len);
++ sock_kfree_s(sk, mh, padded_len);
++ return 0;
++}
++
++/**
++ * mipv6_send_brr - send a Binding Refresh Request
++ * @saddr: source address for BRR
++ * @daddr: destination address for BRR
++ * @ops: mobility options
++ *
++ * Sends a binding request. On a mobile node, use the mobile node's
++ * home address for @saddr. Returns 0 on success, negative on
++ * failure.
++ **/
++int mipv6_send_brr(struct in6_addr *saddr, struct in6_addr *daddr,
++ struct mipv6_mh_opt *ops)
++{
++ struct mipv6_mh_brr br;
++
++ memset(&br, 0, sizeof(br));
++ /* We don't need to explicitly add a RH to brr, since it will be
++ * included automatically, if a BCE exists
++ */
++ MIPV6_INC_STATS(n_brr_sent);
++ return send_mh(daddr, saddr, MIPV6_MH_BRR, sizeof(br), (u8 *)&br,
++ NULL, NULL, ops, NULL);
++}
++
++/**
++ * mipv6_send_ba - send a Binding Acknowledgement
++ * @saddr: source address for BA
++ * @daddr: destination address for BA
++ * @rep_coa: destination care-of address of MN
++ * @auth_coa: care-of address of MN used for authentication
++ * @status: status field value
++ * @sequence: sequence number from BU
++ * @lifetime: granted lifetime for binding in seconds
++ * @k_bu: binding management key for authenticating the BA (NULL if none)
++ *
++ * Send a binding acknowledgement. On a mobile node, use the mobile
++ * node's home address for saddr. Returns 0 on success, non-zero on
++ * failure.
++ **/
++int mipv6_send_ba(struct in6_addr *saddr, struct in6_addr *daddr,
++		  struct in6_addr *auth_coa, struct in6_addr *rep_coa,
++		  u8 status, u16 sequence, u32 lifetime, u8 *k_bu)
++{
++	struct mipv6_mh_ba ba;
++	struct mipv6_auth_parm parm;
++	struct mipv6_mh_opt *ops = NULL;
++	int ops_len = 0, ret = 0;
++	struct mipv6_bce bc_entry;
++	int coming_home = 0;
++	int bypass_tnl = 0;
++
++	memset(&ba, 0, sizeof(ba));
++
++	ba.status = status;
++	ba.sequence = htons(sequence);
++	ba.lifetime = htons(lifetime >> 2);
++
++	DEBUG(DBG_INFO, "sending a status %d BA %s authenticator to MN \n"
++	      "%x:%x:%x:%x:%x:%x:%x:%x at care of address \n"
++	      "%x:%x:%x:%x:%x:%x:%x:%x : with lifetime %d and \n"
++	      " sequence number %d",
++	      status, k_bu ? "with" : "without",
++	      NIPV6ADDR(daddr), NIPV6ADDR(auth_coa), lifetime, sequence);
++
++	memset(&parm, 0, sizeof(parm));
++	parm.coa = auth_coa;
++	parm.cn_addr = saddr;
++
++	if (k_bu) {
++		ops_len += sizeof(struct mipv6_mo_bauth_data) +
++			MIPV6_RR_MAC_LENGTH;
++		parm.k_bu = k_bu;
++	}
++
++	if (mip6node_cnf.binding_refresh_advice) {
++		ops_len += sizeof(struct mipv6_mo_br_advice);
++	}
++	if (ops_len) {
++		ops = alloc_mh_opts(ops_len);
++		if (ops == NULL) {
++			DEBUG(DBG_WARNING, "Out of memory");
++			return -ENOMEM;
++		}
++		if (mip6node_cnf.binding_refresh_advice > 0) {
++			if (append_mh_opt(ops, MIPV6_OPT_BIND_REFRESH_ADVICE, 2,
++					  &mip6node_cnf.binding_refresh_advice) < 0) {
++				DEBUG(DBG_WARNING, "Adding BRA failed");
++				if (ops)
++					kfree(ops);
++				return -ENOMEM;
++			}
++		}
++		if (k_bu) {
++			if (append_mh_opt(ops, MIPV6_OPT_AUTH_DATA,
++					  MIPV6_RR_MAC_LENGTH, NULL) < 0) {
++				DEBUG(DBG_WARNING, "Adding BAD failed");
++				if (ops)
++					kfree(ops);
++				return -ENOMEM;
++			}
++		}
++	}
++	coming_home = !ipv6_addr_cmp(rep_coa, daddr);
++
++	bypass_tnl = (coming_home &&
++		      !mipv6_bcache_get(daddr, saddr, &bc_entry) &&
++		      bc_entry.flags&MIPV6_BU_F_HOME &&
++		      status >= 128);
++
++	if (bypass_tnl && mip6_fn.bce_tnl_rt_del)
++		mip6_fn.bce_tnl_rt_del(&bc_entry.coa,
++				       &bc_entry.our_addr,
++				       &bc_entry.home_addr);
++
++	if (coming_home)
++		ret = send_mh(daddr, saddr, MIPV6_MH_BA, sizeof(ba), (u8 *)&ba,
++			      NULL, NULL, ops, &parm);
++	else
++		ret = send_mh(daddr, saddr, MIPV6_MH_BA, sizeof(ba), (u8 *)&ba,
++			      NULL, rep_coa, ops, &parm);
++
++	if (bypass_tnl && mip6_fn.bce_tnl_rt_add)
++		mip6_fn.bce_tnl_rt_add(&bc_entry.coa,
++				       &bc_entry.our_addr,
++				       &bc_entry.home_addr);
++
++	if (ret == 0) {
++		if (status < 128) {
++			MIPV6_INC_STATS(n_ba_sent);
++		} else {
++			MIPV6_INC_STATS(n_ban_sent);
++		}
++	}
++
++	if (ops)
++		kfree(ops);
++
++	return ret;
++}
++
++/**
++ * mipv6_send_be - send a Binding Error message
++ * @saddr: source address for BE
++ * @daddr: destination address for BE
++ * @home: Home Address in offending packet (if any)
++ *
++ * Sends a binding error. On a mobile node, use the mobile node's
++ * home address for @saddr. Returns 0 on success, negative on
++ * failure.
++ **/
++int mipv6_send_be(struct in6_addr *saddr, struct in6_addr *daddr,
++ struct in6_addr *home, __u8 status)
++{
++ struct mipv6_mh_be be;
++ int ret = 0;
++ struct mipv6_bce bc_entry;
++ int bypass_tnl = 0;
++
++ if (ipv6_addr_is_multicast(daddr))
++ return -EINVAL;
++
++ memset(&be, 0, sizeof(be));
++ be.status = status;
++ if (home)
++ ipv6_addr_copy(&be.home_addr, home);
++
++ if (mipv6_bcache_get(daddr, saddr, &bc_entry) == 0 &&
++ bc_entry.flags&MIPV6_BU_F_HOME)
++ bypass_tnl = 1;
++
++ if (bypass_tnl && mip6_fn.bce_tnl_rt_del)
++ mip6_fn.bce_tnl_rt_del(&bc_entry.coa,
++ &bc_entry.our_addr,
++ &bc_entry.home_addr);
++
++ ret = send_mh(daddr, saddr, MIPV6_MH_BE, sizeof(be), (u8 *)&be,
++ NULL, NULL, NULL, NULL);
++
++ if (bypass_tnl && mip6_fn.bce_tnl_rt_add)
++ mip6_fn.bce_tnl_rt_add(&bc_entry.coa,
++ &bc_entry.our_addr,
++ &bc_entry.home_addr);
++
++ if (ret == 0)
++ MIPV6_INC_STATS(n_be_sent);
++
++ return ret;
++}
++
++/**
++ * mipv6_send_addr_test - send a HoT or CoT message
++ * @saddr: source address
++ * @daddr: destination address
++ * @msg_type: HoT or CoT message
++ * @init: HoTI or CoTI message
++ *
++ * Send a reply to a HoTI or CoTI message.
++ **/
++static int mipv6_send_addr_test(struct in6_addr *saddr,
++ struct in6_addr *daddr,
++ int msg_type,
++ struct mipv6_mh_addr_ti *init)
++{
++ u_int8_t *kgen_token = NULL;
++ struct mipv6_mh_addr_test addr_test;
++ struct mipv6_rr_nonce *nonce;
++ struct mipv6_mh_opt *ops = NULL;
++ int ret = 0;
++
++ DEBUG_FUNC();
++
++ if ((nonce = mipv6_rr_get_new_nonce())== NULL) {
++ DEBUG(DBG_WARNING, "Nonce creation failed");
++ return 0;
++ }
++ if (mipv6_rr_cookie_create(daddr, &kgen_token, nonce->index)) {
++ DEBUG(DBG_WARNING, "No cookie");
++ return 0;
++ }
++
++ addr_test.nonce_index = nonce->index;
++ memcpy(addr_test.init_cookie, init->init_cookie,
++ MIPV6_RR_COOKIE_LENGTH);
++ memcpy(addr_test.kgen_token, kgen_token,
++ MIPV6_RR_COOKIE_LENGTH);
++
++ /* No options defined */
++ ret = send_mh(daddr, saddr, msg_type, sizeof(addr_test),
++ (u8 *)&addr_test, NULL, NULL, ops, NULL);
++
++ if (ret == 0) {
++ if (msg_type == MIPV6_MH_HOT) {
++ MIPV6_INC_STATS(n_hot_sent);
++ } else {
++ MIPV6_INC_STATS(n_cot_sent);
++ }
++ }
++
++ return 0;
++}
++
++static void bc_cache_add(int ifindex, struct in6_addr *daddr,
++ struct in6_addr *haddr, struct in6_addr *coa,
++ struct in6_addr *rep_coa, __u32 lifetime,
++ __u16 sequence, __u8 flags, __u8 *k_bu)
++{
++ __u8 ba_status = SUCCESS;
++
++ if (lifetime > MAX_RR_BINDING_LIFE)
++ lifetime = MAX_RR_BINDING_LIFE;
++
++ if (mipv6_bcache_add(ifindex, daddr, haddr, coa, lifetime,
++ sequence, flags, CACHE_ENTRY) != 0) {
++ DEBUG(DBG_ERROR, "binding failed.");
++ ba_status = INSUFFICIENT_RESOURCES;
++ }
++
++ if (flags & MIPV6_BU_F_ACK) {
++ DEBUG(DBG_INFO, "sending ack (code=%d)", ba_status);
++ mipv6_send_ba(daddr, haddr, coa, rep_coa, ba_status, sequence,
++ lifetime, k_bu);
++ }
++}
++
++static void bc_cn_home_add(int ifindex, struct in6_addr *daddr,
++ struct in6_addr *haddr, struct in6_addr *coa,
++ struct in6_addr *rep_coa, __u32 lifetime,
++ __u16 sequence, __u8 flags, __u8 *k_bu)
++{
++ mipv6_send_ba(daddr, haddr, coa, rep_coa,
++ HOME_REGISTRATION_NOT_SUPPORTED,
++ sequence, lifetime, k_bu);
++}
++
++static void bc_cache_delete(struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u16 sequence, __u8 flags,
++ __u8 *k_bu)
++{
++ __u8 status = SUCCESS;
++
++ /* Cached Care-of Address Deregistration */
++ if (mipv6_bcache_exists(haddr, daddr) == CACHE_ENTRY) {
++ mipv6_bcache_delete(haddr, daddr, CACHE_ENTRY);
++ } else {
++ DEBUG(DBG_INFO, "entry is not in cache");
++ status = REASON_UNSPECIFIED;
++ }
++ if (flags & MIPV6_BU_F_ACK) {
++ mipv6_send_ba(daddr, haddr, coa, rep_coa, status, sequence,
++ 0, k_bu);
++ }
++}
++
++static void bc_cn_home_delete(struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u16 sequence, __u8 flags,
++ __u8 *k_bu)
++{
++}
++
++/**
++ * parse_mo_tlv - Parse TLV-encoded Mobility Options
++ * @mos: pointer to Mobility Options
++ * @len: total length of options
++ * @opts: structure to store option pointers
++ *
++ * Parses Mobility Options passed in @mos. Stores pointers in @opts
++ * to all valid mobility options found in @mos. Unknown options and
++ * padding (%MIPV6_OPT_PAD1 and %MIPV6_OPT_PADN) are ignored and
++ * skipped.
++ **/
++int parse_mo_tlv(void *mos, int len, struct mobopt *opts)
++{
++ struct mipv6_mo *curr = (struct mipv6_mo *)mos;
++ int left = len;
++
++ while (left > 0) {
++ int optlen = 0;
++ if (curr->type == MIPV6_OPT_PAD1)
++ optlen = 1;
++ else
++ optlen = 2 + curr->length;
++
++ if (optlen > left)
++ goto bad;
++
++ switch (curr->type) {
++ case MIPV6_OPT_PAD1:
++ DEBUG(DBG_DATADUMP, "MIPV6_OPT_PAD1 at %x", curr);
++ break;
++ case MIPV6_OPT_PADN:
++ DEBUG(DBG_DATADUMP, "MIPV6_OPT_PADN at %x", curr);
++ break;
++ case MIPV6_OPT_ALTERNATE_COA:
++ DEBUG(DBG_DATADUMP, "MIPV6_OPT_ACOA at %x", curr);
++ opts->alt_coa = (struct mipv6_mo_alt_coa *)curr;
++ break;
++ case MIPV6_OPT_NONCE_INDICES:
++ DEBUG(DBG_DATADUMP, "MIPV6_OPT_NONCE_INDICES at %x", curr);
++ opts->nonce_indices =
++ (struct mipv6_mo_nonce_indices *)curr;
++ break;
++ case MIPV6_OPT_AUTH_DATA:
++ DEBUG(DBG_DATADUMP, "MIPV6_OPT_AUTH_DATA at %x", curr);
++ opts->auth_data = (struct mipv6_mo_bauth_data *)curr;
++ break;
++ case MIPV6_OPT_BIND_REFRESH_ADVICE:
++ DEBUG(DBG_DATADUMP, "MIPV6_OPT_BIND_REFRESH_ADVICE at %x", curr);
++ opts->br_advice = (struct mipv6_mo_br_advice *)curr;
++ break;
++ default:
++ DEBUG(DBG_INFO, "MO Unknown option type %d at %x, ignoring.",
++ curr->type, curr);
++ /* unknown mobility option, ignore and skip */
++ }
++
++ (u8 *)curr += optlen;
++ left -= optlen;
++ }
++
++ if (left == 0)
++ return 0;
++ bad:
++ return -1;
++}
++
++/*
++ *
++ * Mobility Header Message handlers
++ *
++ */
++
++static int mipv6_handle_mh_testinit(struct sk_buff *skb,
++ struct in6_addr *cn,
++ struct in6_addr *lcoa,
++ struct in6_addr *saddr,
++ struct in6_addr *fcoa,
++ struct mipv6_mh *mh)
++{
++ struct mipv6_mh_addr_ti *ti = (struct mipv6_mh_addr_ti *)mh->data;
++ int msg_len = (mh->length+1) << 3;
++ int opt_len;
++ DEBUG_FUNC();
++
++ if (msg_len > skb->len)
++ return -1;
++
++ opt_len = msg_len - sizeof(*mh) - sizeof(*ti);
++
++ if (opt_len < 0) {
++ __u32 pos = (__u32)&mh->length - (__u32)skb->nh.raw;
++ icmpv6_send(skb, ICMPV6_PARAMPROB,
++ ICMPV6_HDR_FIELD, pos, skb->dev);
++
++ DEBUG(DBG_INFO, "Mobility Header length less than H/C TestInit");
++ return -1;
++ }
++ if (!mip6node_cnf.accept_ret_rout) {
++ DEBUG(DBG_INFO, "Return routability administratively disabled");
++ return -1;
++ }
++ if (lcoa || fcoa) {
++ DEBUG(DBG_INFO, "H/C TestInit has HAO or RTH2, dropped.");
++ return -1;
++ }
++
++ if (mh->type == MIPV6_MH_HOTI) {
++ MIPV6_INC_STATS(n_hoti_rcvd);
++ return mipv6_send_addr_test(cn, saddr, MIPV6_MH_HOT, ti);
++ } else if (mh->type == MIPV6_MH_COTI) {
++ MIPV6_INC_STATS(n_coti_rcvd);
++ return mipv6_send_addr_test(cn, saddr, MIPV6_MH_COT, ti);
++ } else
++ return -1; /* Impossible to get here */
++}
++
++/**
++ * mipv6_handle_mh_bu - Binding Update handler
++ * @src: care-of address of sender
++ * @dst: our address
++ * @haddr: home address of sender
++ * @mh: pointer to the beginning of the Mobility Header
++ *
++ * Handles Binding Update. Packet and offset to option are passed.
++ * Returns 0 on success, otherwise negative.
++ **/
++static int mipv6_handle_mh_bu(struct sk_buff *skb,
++ struct in6_addr *dst,
++ struct in6_addr *unused,
++ struct in6_addr *haddr,
++ struct in6_addr *coaddr,
++ struct mipv6_mh *mh)
++{
++ struct mipv6_mh_bu *bu = (struct mipv6_mh_bu *)mh->data;
++ int msg_len = (mh->length+1) << 3;
++ int opt_len;
++ int auth = 0;
++ int dereg; /* Is this deregistration? */
++ int addr_type;
++
++ struct mipv6_bce bc_entry;
++ struct in6_addr *coa, *reply_coa;
++ __u8 *key_bu = NULL; /* RR BU authentication key */
++ __u8 flags = bu->flags;
++ __u16 sequence;
++ __u32 lifetime;
++ __u16 nonce_ind = (__u16) -1;
++
++ if (msg_len > skb->len)
++ return -1;
++
++ opt_len = msg_len - sizeof(*mh) - sizeof(*bu);
++
++ if (opt_len < 0) {
++ __u32 pos = (__u32)&mh->length - (__u32)skb->nh.raw;
++ icmpv6_send(skb, ICMPV6_PARAMPROB,
++ ICMPV6_HDR_FIELD, pos, skb->dev);
++
++ DEBUG(DBG_INFO, "Mobility Header length less than BU");
++ MIPV6_INC_STATS(n_bu_drop.invalid);
++ return -1;
++ }
++
++ addr_type = ipv6_addr_type(haddr);
++ if (addr_type&IPV6_ADDR_LINKLOCAL || !(addr_type&IPV6_ADDR_UNICAST))
++ return -EINVAL;
++
++ /* If HAO not present, CoA == HAddr */
++ if (coaddr == NULL)
++ coa = haddr;
++ else {
++ coa = coaddr;
++ addr_type = ipv6_addr_type(coa);
++ if (addr_type&IPV6_ADDR_LINKLOCAL ||
++ !(addr_type&IPV6_ADDR_UNICAST))
++ return -EINVAL;
++ }
++ reply_coa = coa;
++
++ sequence = ntohs(bu->sequence);
++ if (bu->lifetime == 0xffff)
++ lifetime = 0xffffffff;
++ else
++ lifetime = ntohs(bu->lifetime) << 2;
++
++ dereg = (ipv6_addr_cmp(haddr, coa) == 0 || lifetime == 0);
++
++ if (opt_len > 0) {
++ struct mobopt opts;
++ memset(&opts, 0, sizeof(opts));
++ if (parse_mo_tlv(bu + 1, opt_len, &opts) < 0) {
++ MIPV6_INC_STATS(n_bu_drop.invalid);
++ return -1;
++ }
++ /*
++ * MIPV6_OPT_AUTH_DATA, MIPV6_OPT_NONCE_INDICES,
++ * MIPV6_OPT_ALT_COA
++ */
++ if (opts.alt_coa) {
++ coa = &opts.alt_coa->addr;
++ dereg = (ipv6_addr_cmp(haddr, coa) == 0 || lifetime == 0);
++ }
++ addr_type = ipv6_addr_type(coa);
++ if (addr_type&IPV6_ADDR_LINKLOCAL ||
++ !(addr_type&IPV6_ADDR_UNICAST))
++ return -EINVAL;
++
++ if (flags & MIPV6_BU_F_HOME) {
++ if (opts.nonce_indices)
++ return -1;
++ } else {
++ u8 ba_status = 0;
++ u8 *h_ckie = NULL, *c_ckie = NULL; /* Home and care-of cookies */
++
++ /* BUs to CN MUST include authorization data and nonce indices options */
++ if (!opts.auth_data || !opts.nonce_indices) {
++ DEBUG(DBG_WARNING,
++ "Route optimization BU without authorization material, aborting processing");
++ return MH_AUTH_FAILED;
++ }
++ if (mipv6_rr_cookie_create(
++ haddr, &h_ckie, opts.nonce_indices->home_nonce_i) < 0) {
++ DEBUG(DBG_WARNING,
++ "mipv6_rr_cookie_create failed for home cookie");
++ ba_status = EXPIRED_HOME_NONCE_INDEX;
++ }
++ nonce_ind = opts.nonce_indices->home_nonce_i;
++ /* Don't create the care-of cookie, if MN deregisters */
++ if (!dereg && mipv6_rr_cookie_create(
++ coa, &c_ckie,
++ opts.nonce_indices->careof_nonce_i) < 0) {
++ DEBUG(DBG_WARNING,
++ "mipv6_rr_cookie_create failed for coa cookie");
++ if (ba_status == 0)
++ ba_status = EXPIRED_CAREOF_NONCE_INDEX;
++ else
++ ba_status = EXPIRED_NONCES;
++ }
++ if (ba_status == 0) {
++ if (dereg)
++ key_bu = mipv6_rr_key_calc(h_ckie, NULL);
++ else
++ key_bu = mipv6_rr_key_calc(h_ckie, c_ckie);
++ mh->checksum = 0;/* TODO: Don't mangle the packet */
++ if (key_bu && mipv6_auth_check(
++ dst, coa, (__u8 *)mh, msg_len + sizeof(*mh), opts.auth_data, key_bu) == 0) {
++ DEBUG(DBG_INFO, "mipv6_auth_check OK for BU");
++ auth = 1;
++ } else {
++ DEBUG(DBG_WARNING,
++ "BU Authentication failed");
++ }
++ }
++ if (h_ckie)
++ kfree(h_ckie);
++ if (c_ckie)
++ kfree(c_ckie);
++ if (ba_status != 0) {
++ MIPV6_INC_STATS(n_bu_drop.auth);
++ mipv6_send_ba(dst, haddr, coa,
++ reply_coa, ba_status,
++ sequence, 0, NULL);
++ goto out;
++ }
++ }
++
++ }
++ /* Require authorization option for RO, home reg is protected by IPsec */
++ if (!(flags & MIPV6_BU_F_HOME) && !auth) {
++ MIPV6_INC_STATS(n_bu_drop.auth);
++ if (key_bu)
++ kfree(key_bu);
++ return MH_AUTH_FAILED;
++ }
++
++ if (mipv6_bcache_get(haddr, dst, &bc_entry) == 0) {
++ if ((bc_entry.flags&MIPV6_BU_F_HOME) !=
++ (flags&MIPV6_BU_F_HOME)) {
++ DEBUG(DBG_INFO,
++ "Registration type change. Sending BA REG_TYPE_CHANGE_FORBIDDEN");
++ mipv6_send_ba(dst, haddr, coa, reply_coa,
++ REG_TYPE_CHANGE_FORBIDDEN,
++ sequence, lifetime, key_bu);
++ goto out;
++ }
++ if (!MIPV6_SEQ_GT(sequence, bc_entry.seq)) {
++ DEBUG(DBG_INFO,
++ "Sequence number mismatch. Sending BA SEQUENCE_NUMBER_OUT_OF_WINDOW");
++ mipv6_send_ba(dst, haddr, coa, reply_coa,
++ SEQUENCE_NUMBER_OUT_OF_WINDOW,
++ bc_entry.seq, lifetime, key_bu);
++ goto out;
++ }
++ }
++
++ if (!dereg) {
++ int ifindex;
++ struct rt6_info *rt;
++
++ /* Avoid looping binding cache entries */
++ if (mipv6_bcache_get(coa, dst, &bc_entry) == 0) {
++ DEBUG(DBG_WARNING, "Looped BU, dropping the packet");
++ goto out;
++ }
++ DEBUG(DBG_INFO, "calling bu_add.");
++ if ((rt = rt6_lookup(haddr, dst, 0, 0)) != NULL) {
++ ifindex = rt->rt6i_dev->ifindex;
++ dst_release(&rt->u.dst);
++ } else {
++ /*
++ * Can't process the BU since the right interface is
++ * not found.
++ */
++ DEBUG(DBG_WARNING, "No route entry found for handling "
++ "a BU request, (using 0 as index)");
++ ifindex = 0;
++ }
++ if (flags & MIPV6_BU_F_HOME)
++ mip6_fn.bce_home_add(ifindex, dst, haddr, coa,
++ reply_coa, lifetime, sequence,
++ flags, key_bu);
++ else
++ mip6_fn.bce_cache_add(ifindex, dst, haddr, coa,
++ reply_coa, lifetime, sequence,
++ flags, key_bu);
++ } else {
++ DEBUG(DBG_INFO, "calling BCE delete.");
++
++ if (flags & MIPV6_BU_F_HOME)
++ mip6_fn.bce_home_del(dst, haddr, coa, reply_coa,
++ sequence, flags, key_bu);
++ else {
++ mipv6_rr_invalidate_nonce(nonce_ind);
++ mip6_fn.bce_cache_del(dst, haddr, coa, reply_coa,
++ sequence, flags, key_bu);
++ }
++ }
++ out:
++ MIPV6_INC_STATS(n_bu_rcvd);
++ if (key_bu)
++ kfree(key_bu);
++ return 0;
++}
++
++static int mipv6_mh_rcv(struct sk_buff *skb)
++{
++ struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
++ struct mipv6_mh *mh;
++ struct in6_addr *lhome, *fhome, *lcoa = NULL, *fcoa = NULL;
++ int ret = 0;
++
++ fhome = &skb->nh.ipv6h->saddr;
++ lhome = &skb->nh.ipv6h->daddr;
++
++ if (opt->hao != 0) {
++ struct mipv6_dstopt_homeaddr *hao;
++ hao = (struct mipv6_dstopt_homeaddr *)(skb->nh.raw + opt->hao);
++ fcoa = &hao->addr;
++ }
++
++ if (opt->srcrt2 != 0) {
++ struct rt2_hdr *rt2;
++ rt2 = (struct rt2_hdr *)((u8 *)skb->nh.raw + opt->srcrt2);
++ lcoa = &rt2->addr;
++ }
++
++ /* Verify checksum is correct */
++ if (skb->ip_summed == CHECKSUM_HW) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ if (csum_ipv6_magic(fhome, lhome, skb->len, IPPROTO_MOBILITY,
++ skb->csum)) {
++ if (net_ratelimit())
++ printk(KERN_WARNING "MIPv6 MH hw checksum failed\n");
++ skb->ip_summed = CHECKSUM_NONE;
++ }
++ }
++ if (skb->ip_summed == CHECKSUM_NONE) {
++ if (csum_ipv6_magic(fhome, lhome, skb->len, IPPROTO_MOBILITY,
++ skb_checksum(skb, 0, skb->len, 0))) {
++ if (net_ratelimit())
++ printk(KERN_WARNING "MIPv6 MH checksum failed\n");
++ goto bad;
++ }
++ }
++
++ if (!pskb_may_pull(skb, skb->h.raw-skb->data+sizeof(*mh)) ||
++ !pskb_may_pull(skb,
++ skb->h.raw-skb->data+((skb->h.raw[1]+1)<<3))) {
++ DEBUG(DBG_INFO, "MIPv6 MH invalid length");
++ kfree_skb(skb);
++ return 0;
++ }
++
++ mh = (struct mipv6_mh *) skb->h.raw;
++
++ /* Verify there are no more headers after the MH */
++ if (mh->payload != NEXTHDR_NONE) {
++ __u32 pos = (__u32)&mh->payload - (__u32)skb->nh.raw;
++ icmpv6_send(skb, ICMPV6_PARAMPROB,
++ ICMPV6_HDR_FIELD, pos, skb->dev);
++
++ DEBUG(DBG_INFO, "MIPv6 MH error");
++ goto bad;
++ }
++
++ if (mh->type > MIPV6_MH_MAX) {
++ /* send binding error */
++ printk("Invalid mobility header type (%d)\n", mh->type);
++ mipv6_send_be(lhome, fcoa ? fcoa : fhome,
++ fcoa ? fhome : NULL,
++ MIPV6_BE_UNKNOWN_MH_TYPE);
++ goto bad;
++ }
++ if (mh_rcv[mh->type].func != NULL) {
++ ret = mh_rcv[mh->type].func(skb, lhome, lcoa, fhome, fcoa, mh);
++ } else {
++ DEBUG(DBG_INFO, "No handler for MH Type %d", mh->type);
++ goto bad;
++ }
++
++ kfree_skb(skb);
++ return 0;
++
++bad:
++ MIPV6_INC_STATS(n_mh_in_error);
++ kfree_skb(skb);
++ return 0;
++
++}
++
++#if LINUX_VERSION_CODE >= 0x2052a
++struct inet6_protocol mipv6_mh_protocol =
++{
++ mipv6_mh_rcv, /* handler */
++ NULL /* error control */
++};
++#else
++struct inet6_protocol mipv6_mh_protocol =
++{
++ mipv6_mh_rcv, /* handler */
++ NULL, /* error control */
++ NULL, /* next */
++ IPPROTO_MOBILITY, /* protocol ID */
++ 0, /* copy */
++ NULL, /* data */
++ "MIPv6 MH" /* name */
++};
++#endif
++
++/*
++ *
++ * Code module init/exit functions
++ *
++ */
++
++int __init mipv6_mh_common_init(void)
++{
++ struct sock *sk;
++ int err;
++
++ mip6_fn.bce_home_add = bc_cn_home_add;
++ mip6_fn.bce_cache_add = bc_cache_add;
++ mip6_fn.bce_home_del = bc_cn_home_delete;
++ mip6_fn.bce_cache_del = bc_cache_delete;
++
++ mipv6_mh_socket = sock_alloc();
++ if (mipv6_mh_socket == NULL) {
++ printk(KERN_ERR
++ "Failed to create the MIP6 MH control socket.\n");
++ return -1;
++ }
++ mipv6_mh_socket->type = SOCK_RAW;
++
++ if ((err = sock_create(PF_INET6, SOCK_RAW, IPPROTO_MOBILITY,
++ &mipv6_mh_socket)) < 0) {
++ printk(KERN_ERR
++ "Failed to initialize the MIP6 MH control socket (err %d).\n",
++ err);
++ sock_release(mipv6_mh_socket);
++ mipv6_mh_socket = NULL; /* for safety */
++ return err;
++ }
++
++ sk = mipv6_mh_socket->sk;
++ sk->allocation = GFP_ATOMIC;
++ sk->sndbuf = 64 * 1024 + sizeof(struct sk_buff);
++ sk->prot->unhash(sk);
++
++ memset(&mh_rcv, 0, sizeof(mh_rcv));
++ mh_rcv[MIPV6_MH_HOTI].func = mipv6_handle_mh_testinit;
++ mh_rcv[MIPV6_MH_COTI].func = mipv6_handle_mh_testinit;
++ mh_rcv[MIPV6_MH_BU].func = mipv6_handle_mh_bu;
++
++#if LINUX_VERSION_CODE >= 0x2052a
++ if (inet6_add_protocol(&mipv6_mh_protocol, IPPROTO_MOBILITY) < 0) {
++ printk(KERN_ERR "Failed to register MOBILITY protocol\n");
++ sock_release(mipv6_mh_socket);
++ mipv6_mh_socket = NULL;
++ return -EAGAIN;
++ }
++#else
++ inet6_add_protocol(&mipv6_mh_protocol);
++#endif
++ /* To disable the use of dst_cache,
++ * which slows down the sending of BUs ??
++ */
++ sk->dst_cache=NULL;
++
++ return 0;
++}
++
++void __exit mipv6_mh_common_exit(void)
++{
++ if (mipv6_mh_socket) sock_release(mipv6_mh_socket);
++ mipv6_mh_socket = NULL; /* For safety. */
++
++#if LINUX_VERSION_CODE >= 0x2052a
++ inet6_del_protocol(&mipv6_mh_protocol, IPPROTO_MOBILITY);
++#else
++ inet6_del_protocol(&mipv6_mh_protocol);
++#endif
++ memset(&mh_rcv, 0, sizeof(mh_rcv));
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/mobhdr_mn.c linux-2.4.25/net/ipv6/mobile_ip6/mobhdr_mn.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/mobhdr_mn.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/mobhdr_mn.c 2004-06-26 11:29:32.000000000 +0100
+@@ -0,0 +1,1155 @@
++/*
++ * Mobile IPv6 Mobility Header Functions for Mobile Node
++ *
++ * Authors:
++ * Antti Tuominen <ajtuomin@tml.hut.fi>
++ * Niklas Kämpe <nhkampe@cc.hut.fi>
++ * Henrik Petander <henrik.petander@hut.fi>
++ *
++ * $Id:$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of
++ * the License, or (at your option) any later version.
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/init.h>
++#include <net/ipv6.h>
++#include <net/addrconf.h>
++#include <net/mipv6.h>
++
++#include "mobhdr.h"
++#include "mn.h"
++#include "bul.h"
++#include "rr_crypto.h"
++#include "debug.h"
++#include "util.h"
++#include "stats.h"
++
++int rr_configured = 1;
++
++/* Return value of mipv6_rr_state() */
++#define NO_RR 0
++#define DO_RR 1
++#define RR_FOR_COA 2
++#define INPROGRESS_RR 3
++
++/**
++ * send_bu_msg - sends a Binding Update
++ * @bulentry : BUL entry with the information for building a BU
++ *
++ * Function builds a BU msg based on the contents of a bul entry.
++ * Does not change the bul entry.
++ **/
++static int send_bu_msg(struct mipv6_bul_entry *binding)
++{
++ int auth = 0; /* Use auth */
++ int ret = 0;
++ struct mipv6_auth_parm parm;
++ struct mipv6_mh_bu bu;
++
++ if (!binding) {
++ DEBUG(DBG_ERROR, "called with a null bul entry");
++ return -1;
++ }
++
++ memset(&parm, 0, sizeof(parm));
++ if (mipv6_prefix_compare(&binding->coa, &binding->home_addr, 64))
++ parm.coa = &binding->home_addr;
++ else
++ parm.coa = &binding->coa;
++ parm.cn_addr = &binding->cn_addr;
++
++ if (binding->rr && binding->rr->kbu) {
++ DEBUG(DBG_INFO, "Binding with key");
++ auth = 1;
++ parm.k_bu = binding->rr->kbu;
++ }
++ memset(&bu, 0, sizeof(bu));
++ bu.flags = binding->flags;
++ bu.sequence = htons(binding->seq);
++ bu.lifetime = htons(binding->lifetime >> 2);
++ bu.reserved = 0;
++
++ ret = send_mh(&binding->cn_addr, &binding->home_addr,
++ MIPV6_MH_BU, sizeof(bu), (u8 *)&bu,
++ &binding->home_addr, NULL,
++ binding->ops, &parm);
++
++ if (ret == 0)
++ MIPV6_INC_STATS(n_bu_sent);
++
++ return ret;
++}
++
++/**
++ * mipv6_send_addr_test_init - send a HoTI or CoTI message
++ * @saddr: source address for H/CoTI
++ * @daddr: destination address for H/CoTI
++ * @msg_type: Identifies whether HoTI or CoTI
++ * @init_cookie: the HoTi or CoTi init cookie
++ *
++ * The message will be retransmitted till we get a HoT or CoT message, since
++ * our caller (mipv6_RR_start) has entered this message in the BUL with
++ * exponential backoff retransmission set.
++ */
++static int mipv6_send_addr_test_init(struct in6_addr *saddr,
++ struct in6_addr *daddr,
++ u8 msg_type,
++ u8 *init_cookie)
++{
++ struct mipv6_mh_addr_ti ti;
++ struct mipv6_mh_opt *ops = NULL;
++ int ret = 0;
++
++ /* Set reserved and copy the cookie from address test init msg */
++ ti.reserved = 0;
++ mipv6_rr_mn_cookie_create(init_cookie);
++ memcpy(ti.init_cookie, init_cookie, MIPV6_RR_COOKIE_LENGTH);
++
++ ret = send_mh(daddr, saddr, msg_type, sizeof(ti), (u8 *)&ti,
++ NULL, NULL, ops, NULL);
++ if (ret == 0) {
++ if (msg_type == MIPV6_MH_HOTI) {
++ MIPV6_INC_STATS(n_hoti_sent);
++ } else {
++ MIPV6_INC_STATS(n_coti_sent);
++ }
++ }
++
++ return ret;
++}
++
++/*
++ *
++ * Callback handlers for binding update list
++ *
++ */
++
++/* Return value 0 means keep entry, non-zero means discard entry. */
++
++/* Callback for BUs not requiring acknowledgement
++ */
++int bul_entry_expired(struct mipv6_bul_entry *bulentry)
++{
++ /* Lifetime expired, delete entry. */
++ DEBUG(DBG_INFO, "bul entry 0x%p lifetime expired, deleting entry",
++ bulentry);
++ return 1;
++}
++
++/* Callback for BUs requiring acknowledgement with exponential resending
++ * scheme */
++static int bul_resend_exp(struct mipv6_bul_entry *bulentry)
++{
++ unsigned long now = jiffies;
++
++ DEBUG(DBG_INFO, "(0x%x) resending bu", (int) bulentry);
++
++
++ /* If sending a de-registration, do not care about the
++ * lifetime value, as de-registrations are normally sent with
++ * a zero lifetime value. If the entry is a home entry get the
++ * current lifetime.
++ */
++
++ if (bulentry->lifetime != 0) {
++ bulentry->lifetime = mipv6_mn_get_bulifetime(
++ &bulentry->home_addr, &bulentry->coa, bulentry->flags);
++
++ bulentry->expire = now + bulentry->lifetime * HZ;
++ } else {
++ bulentry->expire = now + HOME_RESEND_EXPIRE * HZ;
++ }
++ if (bulentry->rr) {
++ /* Redo RR, if cookies have expired */
++ if (time_after(jiffies, bulentry->rr->home_time + MAX_TOKEN_LIFE * HZ))
++ bulentry->rr->rr_state |= RR_WAITH;
++ if (time_after(jiffies, bulentry->rr->careof_time + MAX_NONCE_LIFE * HZ))
++ bulentry->rr->rr_state |= RR_WAITC;
++
++ if (bulentry->rr->rr_state & RR_WAITH) {
++ /* Resend HoTI directly */
++ mipv6_send_addr_test_init(&bulentry->home_addr,
++ &bulentry->cn_addr, MIPV6_MH_HOTI,
++ bulentry->rr->hot_cookie);
++ }
++ if (bulentry->rr->rr_state & RR_WAITC) {
++ /* Resend CoTI directly */
++ mipv6_send_addr_test_init(&bulentry->coa,
++ &bulentry->cn_addr, MIPV6_MH_COTI,
++ bulentry->rr->cot_cookie);
++ }
++ goto out;
++ }
++
++ bulentry->seq++;
++
++ if (send_bu_msg(bulentry) < 0)
++ DEBUG(DBG_ERROR, "Resending of BU failed");
++
++out:
++ /* Schedule next retransmission */
++ if (bulentry->delay < bulentry->maxdelay) {
++ bulentry->delay = 2 * bulentry->delay;
++ if (bulentry->delay > bulentry->maxdelay) {
++ /* can happen if maxdelay is not power(mindelay, 2) */
++ bulentry->delay = bulentry->maxdelay;
++ }
++ } else if (bulentry->flags & MIPV6_BU_F_HOME) {
++ /* Home registration - continue sending BU at maxdelay rate */
++ DEBUG(DBG_INFO, "Sending BU to HA after max ack wait time "
++ "reached(0x%x)", (int) bulentry);
++ bulentry->delay = bulentry->maxdelay;
++ } else if (!(bulentry->flags & MIPV6_BU_F_HOME)) {
++ /* Failed to get BA from a CN */
++ bulentry->callback_time = now;
++ return -1;
++ }
++
++ bulentry->callback_time = now + bulentry->delay * HZ;
++ return 0;
++}
++
++
++
++/* Callback for sending a registration refresh BU
++ */
++static int bul_refresh(struct mipv6_bul_entry *bulentry)
++{
++ unsigned long now = jiffies;
++
++ /* Refresh interval passed, send new BU */
++ DEBUG(DBG_INFO, "bul entry 0x%x refresh interval passed, sending new BU", (int) bulentry);
++ if (bulentry->lifetime == 0)
++ return 0;
++
++ /* Set new maximum lifetime and expiration time */
++ bulentry->lifetime = mipv6_mn_get_bulifetime(&bulentry->home_addr,
++ &bulentry->coa,
++ bulentry->flags);
++ bulentry->expire = now + bulentry->lifetime * HZ;
++ bulentry->seq++;
++ /* Send update */
++ if (send_bu_msg(bulentry) < 0)
++ DEBUG(DBG_ERROR, "Resending of BU failed");
++
++ if (time_after_eq(now, bulentry->expire)) {
++ /* Sanity check */
++ DEBUG(DBG_ERROR, "bul entry expire time in history - setting expire to %u secs", ERROR_DEF_LIFETIME);
++ bulentry->lifetime = ERROR_DEF_LIFETIME;
++ bulentry->expire = now + ERROR_DEF_LIFETIME*HZ;
++ }
++
++ /* Set up retransmission */
++ bulentry->state = RESEND_EXP;
++ bulentry->callback = bul_resend_exp;
++ bulentry->callback_time = now + INITIAL_BINDACK_TIMEOUT*HZ;
++ bulentry->delay = INITIAL_BINDACK_TIMEOUT;
++ bulentry->maxdelay = MAX_BINDACK_TIMEOUT;
++
++ return 0;
++}
++
++static int mipv6_send_RR_bu(struct mipv6_bul_entry *bulentry)
++{
++ int ret;
++ int ops_len = 0;
++ u16 nonces[2];
++
++ DEBUG(DBG_INFO, "Sending BU to CN %x:%x:%x:%x:%x:%x:%x:%x "
++ "for home address %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(&bulentry->cn_addr), NIPV6ADDR(&bulentry->home_addr));
++ nonces[0] = bulentry->rr->home_nonce_index;
++ nonces[1] = bulentry->rr->careof_nonce_index;
++ ops_len = sizeof(struct mipv6_mo_bauth_data) + MIPV6_RR_MAC_LENGTH +
++ sizeof(struct mipv6_mo_nonce_indices);
++ if (bulentry->ops) {
++ DEBUG(DBG_WARNING, "Bul entry had existing mobility options, freeing them");
++ kfree(bulentry->ops);
++ }
++ bulentry->ops = alloc_mh_opts(ops_len);
++
++ if (!bulentry->ops)
++ return -ENOMEM;
++ if (append_mh_opt(bulentry->ops, MIPV6_OPT_NONCE_INDICES,
++ sizeof(struct mipv6_mo_nonce_indices) - 2, nonces) < 0)
++ return -ENOMEM;
++
++ if (append_mh_opt(bulentry->ops, MIPV6_OPT_AUTH_DATA,
++ MIPV6_RR_MAC_LENGTH, NULL) < 0)
++ return -ENOMEM;
++ /* RR procedure is over, send a BU */
++ if (!(bulentry->flags & MIPV6_BU_F_ACK)) {
++ DEBUG(DBG_INFO, "Setting bul callback to bul_entry_expired");
++ bulentry->state = ACK_OK;
++ bulentry->callback = bul_entry_expired;
++ bulentry->callback_time = jiffies + HZ * bulentry->lifetime;
++ bulentry->expire = jiffies + HZ * bulentry->lifetime;
++ }
++ else {
++ bulentry->callback_time = jiffies + HZ;
++ bulentry->expire = jiffies + HZ * bulentry->lifetime;
++ }
++
++ ret = send_bu_msg(bulentry);
++ mipv6_bul_reschedule(bulentry);
++ return ret;
++}
++
++static int mipv6_rr_state(struct mipv6_bul_entry *bul, struct in6_addr *saddr,
++ struct in6_addr *coa, __u8 flags)
++{
++ if (!rr_configured)
++ return NO_RR;
++ if (flags & MIPV6_BU_F_HOME) {
++ /* We don't need RR, this is a Home Registration */
++ return NO_RR;
++ }
++ if (!bul || !bul->rr) {
++ /* First time BU to CN, need RR */
++ return DO_RR;
++ }
++
++ switch (bul->rr->rr_state) {
++ case RR_INIT:
++ /* Need RR if first BU to CN */
++ return DO_RR;
++ case RR_DONE:
++ /* If MN moves to a new coa, do RR for it */
++ if (!ipv6_addr_cmp(&bul->coa, coa))
++ return NO_RR;
++ else
++ return DO_RR;
++ default:
++ /*
++ * We are in the middle of RR, the HoTI and CoTI have been
++ * sent. But we haven't got HoT and CoT from the CN, so
++ * don't do anything more at this time.
++ */
++ return INPROGRESS_RR;
++ }
++}
++
++/**
++ * mipv6_RR_start - Start Return Routability procedure
++ * @home_addr: home address
++ * @cn_addr: correspondent address
++ * @coa: care-of address
++ * @entry: binding update list entry (if any)
++ * @initdelay: initial ack timeout
++ * @maxackdelay: maximum ack timeout
++ * @flags: flags
++ * @lifetime: lifetime of binding
++ * @ops: mobility options
++ *
++ * Caller must hold @bul_lock (write).
++ **/
++static int mipv6_RR_start(struct in6_addr *home_addr, struct in6_addr *cn_addr,
++ struct in6_addr *coa, struct mipv6_bul_entry *entry,
++ __u32 initdelay, __u32 maxackdelay, __u8 flags,
++ __u32 lifetime, struct mipv6_mh_opt *ops)
++{
++ int ret = -1;
++ struct mipv6_bul_entry *bulentry = entry;
++ struct mipv6_rr_info *rr = NULL;
++ int seq = 0;
++ DEBUG_FUNC();
++
++ /* Do RR procedure only for care-of address after handoff,
++ if home cookie is still valid */
++ if (bulentry && bulentry->rr) {
++ if (time_before(jiffies, bulentry->rr->home_time + MAX_NONCE_LIFE * HZ) &&
++ lifetime && !(ipv6_addr_cmp(home_addr, coa) == 0)) {
++ mipv6_rr_mn_cookie_create(bulentry->rr->cot_cookie);
++ DEBUG(DBG_INFO, "Bul entry and rr info exist, only doing RR for CoA");
++ ipv6_addr_copy(&bulentry->coa, coa);
++ bulentry->rr->rr_state |= RR_WAITC;
++ } else if (!lifetime) { /* Send only HoTi when returning home */
++ mipv6_rr_mn_cookie_create(bulentry->rr->hot_cookie);
++ DEBUG(DBG_INFO, "Bul entry and rr info exist, only doing RR for HoA");
++ ipv6_addr_copy(&bulentry->coa, coa); /* Home address as CoA */
++ bulentry->rr->rr_state |= RR_WAITH;
++ }
++ } else {
++ DEBUG(DBG_INFO, "Doing RR for both HoA and CoA");
++ rr = kmalloc(sizeof(*rr), GFP_ATOMIC);
++ memset(rr, 0, sizeof(*rr));
++ rr->rr_state = RR_WAITHC;
++ }
++ if (bulentry) {
++ if (bulentry->state == ACK_ERROR)
++ goto out;
++ seq = bulentry->seq + 1;
++ } else
++ seq = 0;
++ /* Save the info in the BUL to retransmit the BU after RR is done */
++ /* Caller must hold bul_lock (write) since we don't */
++
++ if ((bulentry = mipv6_bul_add(cn_addr, home_addr, coa,
++ min_t(__u32, lifetime, MAX_RR_BINDING_LIFE),
++ seq, flags, bul_resend_exp, initdelay,
++ RESEND_EXP, initdelay,
++ maxackdelay, ops,
++ rr)) == NULL) {
++ DEBUG(DBG_INFO, "couldn't update BUL for HoTi");
++ goto out;
++ }
++
++ rr = bulentry->rr;
++ if (rr->rr_state&RR_WAITH)
++ mipv6_send_addr_test_init(home_addr, cn_addr, MIPV6_MH_HOTI,
++ rr->hot_cookie);
++ if (ipv6_addr_cmp(home_addr, coa) && lifetime)
++ mipv6_send_addr_test_init(coa, cn_addr, MIPV6_MH_COTI, rr->cot_cookie);
++ else {
++ bulentry->rr->rr_state &= ~RR_WAITC;
++ }
++ ret = 0;
++out:
++ return ret;
++}
++
++/*
++ * Status codes for mipv6_ba_rcvd()
++ */
++#define STATUS_UPDATE 0
++#define STATUS_REMOVE 1
++
++/**
++ * mipv6_ba_rcvd - Update BUL for this Binding Acknowledgement
++ * @ifindex: interface BA came from
++ * @cnaddr: sender IPv6 address
++ * @home_addr: home address
++ * @sequence: sequence number
++ * @lifetime: lifetime granted by Home Agent in seconds
++ * @refresh: recommended resend interval
++ * @status: %STATUS_UPDATE (ack) or %STATUS_REMOVE (nack)
++ *
++ * This function must be called to notify the module of the receipt of
++ * a binding acknowledgement so that it can cease retransmitting the
++ * option. The caller must have validated the acknowledgement before calling
++ * this function. 'status' can be either STATUS_UPDATE in which case the
++ * binding acknowledgement is assumed to be valid and the corresponding
++ * binding update list entry is updated, or STATUS_REMOVE in which case
++ * the corresponding binding update list entry is removed (this can be
++ * used upon receiving a negative acknowledgement).
++ * Returns 0 if a matching binding update has been sent or non-zero if
++ * not.
++ */
++static int mipv6_ba_rcvd(int ifindex, struct in6_addr *cnaddr,
++ struct in6_addr *home_addr,
++ u16 sequence, u32 lifetime,
++ u32 refresh, int status)
++{
++ struct mipv6_bul_entry *bulentry;
++ unsigned long now = jiffies;
++ struct in6_addr coa;
++
++ DEBUG(DBG_INFO, "BA received with sequence number 0x%x, status: %d",
++ (int) sequence, status);
++
++ /* Find corresponding entry in binding update list. */
++ write_lock(&bul_lock);
++ if ((bulentry = mipv6_bul_get(cnaddr, home_addr)) == NULL) {
++ DEBUG(DBG_INFO, "- discarded, no entry in bul matches BA source address");
++ write_unlock(&bul_lock);
++ return -1;
++ }
++
++ ipv6_addr_copy(&coa, &bulentry->coa);
++ if (status == SEQUENCE_NUMBER_OUT_OF_WINDOW) {
++ __u32 lifetime = mipv6_mn_get_bulifetime(&bulentry->home_addr,
++ &bulentry->coa,
++ bulentry->flags);
++ bulentry->seq = sequence;
++
++ mipv6_send_bu(&bulentry->home_addr, &bulentry->cn_addr,
++ &bulentry->coa, INITIAL_BINDACK_TIMEOUT,
++ MAX_BINDACK_TIMEOUT, 1, bulentry->flags,
++ lifetime, NULL);
++ write_unlock(&bul_lock);
++ return 0;
++ } else if (status >= REASON_UNSPECIFIED) {
++ int err;
++ int at_home = MN_NOT_AT_HOME;
++ DEBUG(DBG_WARNING, "- NACK - BA status: %d, deleting bul entry", status);
++ if (bulentry->flags & MIPV6_BU_F_HOME) {
++ struct mn_info *minfo;
++ read_lock(&mn_info_lock);
++ minfo = mipv6_mninfo_get_by_home(home_addr);
++ if (minfo) {
++ spin_lock(&minfo->lock);
++ if (minfo->is_at_home != MN_NOT_AT_HOME)
++ minfo->is_at_home = MN_AT_HOME;
++ at_home = minfo->is_at_home;
++ minfo->has_home_reg = 0;
++ spin_unlock(&minfo->lock);
++ }
++ read_unlock(&mn_info_lock);
++ DEBUG(DBG_ERROR, "Home registration failed: BA status: %d, deleting bul entry", status);
++ }
++ write_unlock(&bul_lock);
++ err = mipv6_bul_delete(cnaddr, home_addr);
++ if (at_home == MN_AT_HOME) {
++ mipv6_mn_send_home_na(home_addr);
++ write_lock_bh(&bul_lock);
++ mipv6_bul_iterate(mn_cn_handoff, &coa);
++ write_unlock_bh(&bul_lock);
++ }
++ return err;
++ }
++ bulentry->state = ACK_OK;
++
++ if (bulentry->flags & MIPV6_BU_F_HOME && lifetime > 0) {
++ /* For home registrations: schedule a refresh binding update.
++ * Use the refresh interval given by home agent or 80%
++ * of lifetime, whichever is less.
++ *
++ * Adjust binding lifetime if 'granted' lifetime
++ * (lifetime value in received binding acknowledgement)
++ * is shorter than 'requested' lifetime (lifetime
++ * value sent in corresponding binding update).
++ * max((L_remain - (L_update - L_ack)), 0)
++ */
++ if (lifetime * HZ < (bulentry->expire - bulentry->lastsend)) {
++ bulentry->expire =
++ max_t(__u32, bulentry->expire -
++ ((bulentry->expire - bulentry->lastsend) -
++ lifetime * HZ), jiffies +
++ ERROR_DEF_LIFETIME * HZ);
++ }
++ if (refresh > lifetime || refresh == 0)
++ refresh = 4 * lifetime / 5;
++ DEBUG(DBG_INFO, "setting callback for expiration of"
++ " a Home Registration: lifetime:%d, refresh:%d",
++ lifetime, refresh);
++ bulentry->callback = bul_refresh;
++ bulentry->callback_time = now + refresh * HZ;
++ bulentry->expire = now + lifetime * HZ;
++ bulentry->lifetime = lifetime;
++ if (time_after_eq(jiffies, bulentry->expire)) {
++ /* Sanity check */
++ DEBUG(DBG_ERROR, "bul entry expire time in history - setting expire to %u secs",
++ ERROR_DEF_LIFETIME);
++ bulentry->expire = jiffies + ERROR_DEF_LIFETIME * HZ;
++ }
++ mipv6_mn_set_home_reg(home_addr, 1);
++ mipv6_bul_iterate(mn_cn_handoff, &coa);
++ } else if ((bulentry->flags & MIPV6_BU_F_HOME) && bulentry->lifetime == 0) {
++ write_unlock(&bul_lock);
++ DEBUG(DBG_INFO, "Got BA for deregistration BU");
++ mipv6_mn_set_home_reg(home_addr, 0);
++ mipv6_bul_delete(cnaddr, home_addr);
++ mipv6_mn_send_home_na(home_addr);
++
++ write_lock_bh(&bul_lock);
++ mipv6_bul_iterate(mn_cn_handoff, &coa);
++ write_unlock_bh(&bul_lock);
++ return 0;
++ }
++
++ mipv6_bul_reschedule(bulentry);
++ write_unlock(&bul_lock);
++
++ return 0;
++}
++
++static int mipv6_handle_mh_HC_test(struct sk_buff *skb,
++ struct in6_addr *saddr,
++ struct in6_addr *fcoa,
++ struct in6_addr *cn,
++ struct in6_addr *lcoa,
++ struct mipv6_mh *mh)
++{
++ int ret = 0;
++ int msg_len = (mh->length+1) << 3;
++ int opt_len;
++
++ struct mipv6_mh_addr_test *tm = (struct mipv6_mh_addr_test *)mh->data;
++ struct mipv6_bul_entry *bulentry;
++
++ DEBUG_FUNC();
++
++ if (msg_len > skb->len)
++ return -1;
++
++ opt_len = msg_len - sizeof(*mh) - sizeof(*tm);
++
++ if (opt_len < 0) {
++ __u32 pos = (__u32)&mh->length - (__u32)skb->nh.raw;
++ icmpv6_send(skb, ICMPV6_PARAMPROB,
++ ICMPV6_HDR_FIELD, pos, skb->dev);
++
++ DEBUG(DBG_INFO, "Mobility Header length less than H/C Test");
++ return -1;
++ }
++ if (fcoa || lcoa) {
++ DEBUG(DBG_INFO, "H/C Test has HAO or RTH2, dropped.");
++ return -1;
++ }
++ write_lock(&bul_lock);
++
++ /* We need to get the home address, since CoT only has the CoA*/
++ if (mh->type == MIPV6_MH_COT) {
++ if ((bulentry = mipv6_bul_get_by_ccookie(cn, tm->init_cookie)) == NULL) {
++ DEBUG(DBG_ERROR, "has no BUL or RR state for "
++ "source:%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(cn));
++ write_unlock(&bul_lock);
++ return -1;
++ }
++ } else { /* HoT has the home address */
++ if (((bulentry = mipv6_bul_get(cn, saddr)) == NULL) || !bulentry->rr) {
++ DEBUG(DBG_ERROR, "has no BUL or RR state for "
++ "source:%x:%x:%x:%x:%x:%x:%x:%x "
++ "dest:%x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(cn), NIPV6ADDR(saddr));
++ write_unlock(&bul_lock);
++ return -1;
++ }
++ }
++
++ switch (mh->type) {
++ case MIPV6_MH_HOT:
++ if ((bulentry->rr->rr_state & RR_WAITH) == 0) {
++ DEBUG(DBG_ERROR, "Not waiting for a Home Test message");
++ goto out;
++ }
++ /*
++ * Make sure no home cookies have been received yet.
++ * TODO: Check not being put in at this time since subsequent
++ * BU's after this time will have home cookie stored.
++ */
++
++ /* Check if the cookie received is the right one */
++ if (!mipv6_equal_cookies(tm->init_cookie,
++ bulentry->rr->hot_cookie)) {
++ /* Invalid cookie, might be an old cookie */
++ DEBUG(DBG_WARNING, "Received HoT cookie does not match stored cookie");
++ goto out;
++ }
++ DEBUG(DBG_INFO, "Got Care-of Test message");
++ bulentry->rr->rr_state &= ~RR_WAITH;
++ memcpy(bulentry->rr->home_cookie, tm->kgen_token, MIPV6_COOKIE_LEN);
++ bulentry->rr->home_nonce_index = tm->nonce_index;
++ bulentry->rr->home_time = jiffies;
++ ret = 1;
++ break;
++
++ case MIPV6_MH_COT:
++ if ((bulentry->rr->rr_state & RR_WAITC) == 0) {
++ DEBUG(DBG_ERROR, "Not waiting for a Home Test message");
++ goto out;
++ }
++ /*
++ * Make sure no home cookies have been received yet.
++ * TODO: Check not being put in at this time since subsequent
++ * BU's at this time will have careof cookie stored.
++ */
++
++ /* Check if the cookie received is the right one */
++ if (!mipv6_equal_cookies(tm->init_cookie,
++ bulentry->rr->cot_cookie)) {
++ DEBUG(DBG_INFO, "Received CoT cookie does not match stored cookie");
++ goto out;
++ }
++ bulentry->rr->rr_state &= ~RR_WAITC;
++ memcpy(bulentry->rr->careof_cookie, tm->kgen_token, MIPV6_COOKIE_LEN);
++ bulentry->rr->careof_nonce_index = tm->nonce_index;
++ bulentry->rr->careof_time = jiffies;
++ ret = 1;
++ break;
++ default:
++ /* Impossible to get here */
++ break;
++ }
++out:
++ if (bulentry->rr->rr_state == RR_DONE) {
++ if (bulentry->rr->kbu) /* First free any old keys */
++ kfree(bulentry->rr->kbu);
++ /* Store the session key to be used in BU's */
++ if (ipv6_addr_cmp(&bulentry->coa, &bulentry->home_addr) && bulentry->lifetime)
++ bulentry->rr->kbu = mipv6_rr_key_calc(bulentry->rr->home_cookie,
++ bulentry->rr->careof_cookie);
++ else
++ bulentry->rr->kbu = mipv6_rr_key_calc(bulentry->rr->home_cookie,
++ NULL);
++ /* RR procedure is over, send a BU */
++ mipv6_send_RR_bu(bulentry);
++ }
++ write_unlock(&bul_lock);
++ return ret;
++}
++
++/**
++ * mipv6_handle_mh_brr - Binding Refresh Request handler
++ * @home: home address
++ * @coa: care-of address
++ * @cn: source of this packet
++ * @mh: pointer to the beginning of the Mobility Header
++ *
++ * Handles Binding Refresh Request. Packet and offset to option are
++ * passed. Returns 0 on success, otherwise negative.
++ **/
++static int mipv6_handle_mh_brr(struct sk_buff *skb,
++ struct in6_addr *home,
++ struct in6_addr *unused1,
++ struct in6_addr *cn,
++ struct in6_addr *unused2,
++ struct mipv6_mh *mh)
++{
++ struct mipv6_mh_brr *brr = (struct mipv6_mh_brr *)mh->data;
++ struct mipv6_bul_entry *binding;
++ int msg_len = (mh->length+1) << 3;
++ int opt_len;
++
++ if (msg_len > skb->len)
++ return -1;
++
++ opt_len = msg_len - sizeof(*mh) - sizeof(*brr);
++
++ if (opt_len < 0) {
++ __u32 pos = (__u32)&mh->length - (__u32)skb->nh.raw;
++ icmpv6_send(skb, ICMPV6_PARAMPROB,
++ ICMPV6_HDR_FIELD, pos, skb->dev);
++
++ DEBUG(DBG_WARNING, "Mobility Header length less than BRR");
++ MIPV6_INC_STATS(n_brr_drop.invalid);
++ return -1;
++ }
++
++ /* check we know src, else drop */
++ write_lock(&bul_lock);
++ if ((binding = mipv6_bul_get(cn, home)) == NULL) {
++ MIPV6_INC_STATS(n_brr_drop.misc);
++ write_unlock(&bul_lock);
++ return MH_UNKNOWN_CN;
++ }
++
++ MIPV6_INC_STATS(n_brr_rcvd);
++
++ if (opt_len > 0) {
++ struct mobopt opts;
++ memset(&opts, 0, sizeof(opts));
++ if (parse_mo_tlv(brr + 1, opt_len, &opts) < 0) {
++ write_unlock(&bul_lock);
++ return -1;
++ }
++ /*
++ * MIPV6_OPT_AUTH_DATA
++ */
++ }
++
++ /* must hold bul_lock (write) */
++ mipv6_RR_start(home, cn, &binding->coa, binding, binding->delay,
++ binding->maxdelay, binding->flags,
++ binding->lifetime, binding->ops);
++
++ write_unlock(&bul_lock);
++ /* MAY also decide to delete binding and send zero lifetime BU
++ with alt-coa set to home address */
++
++ return 0;
++}
++
++/**
++ * mipv6_handle_mh_ba - Binding Acknowledgement handler
++ * @src: source of this packet
++ * @coa: care-of address
++ * @home: home address
++ * @mh: pointer to the beginning of the Mobility Header
++ *
++ **/
++static int mipv6_handle_mh_ba(struct sk_buff *skb,
++ struct in6_addr *home,
++ struct in6_addr *coa,
++ struct in6_addr *src,
++ struct in6_addr *unused,
++ struct mipv6_mh *mh)
++{
++ struct mipv6_mh_ba *ba = (struct mipv6_mh_ba *)mh->data;
++ struct mipv6_bul_entry *binding = NULL;
++ struct mobopt opts;
++ int msg_len = (mh->length+1) << 3;
++ int opt_len;
++
++ int auth = 1, req_auth = 1, refresh = -1, ifindex = 0;
++ u32 lifetime, sequence;
++
++ if (msg_len > skb->len)
++ return -1;
++
++ opt_len = msg_len - sizeof(*mh) - sizeof(*ba);
++
++ if (opt_len < 0) {
++ __u32 pos = (__u32)&mh->length - (__u32)skb->nh.raw;
++ icmpv6_send(skb, ICMPV6_PARAMPROB,
++ ICMPV6_HDR_FIELD, pos, skb->dev);
++
++ DEBUG(DBG_WARNING, "Mobility Header length less than BA");
++ MIPV6_INC_STATS(n_ba_drop.invalid);
++ return -1;
++ }
++
++ lifetime = ntohs(ba->lifetime) << 2;
++ sequence = ntohs(ba->sequence);
++
++ if (opt_len > 0) {
++ memset(&opts, 0, sizeof(opts));
++ if (parse_mo_tlv(ba + 1, opt_len, &opts) < 0)
++ return -1;
++ /*
++ * MIPV6_OPT_AUTH_DATA, MIPV6_OPT_BR_ADVICE
++ */
++ if (opts.br_advice)
++ refresh = ntohs(opts.br_advice->refresh_interval);
++ }
++
++ if (ba->status >= EXPIRED_HOME_NONCE_INDEX &&
++ ba->status <= EXPIRED_NONCES)
++ req_auth = 0;
++
++ write_lock(&bul_lock);
++ binding = mipv6_bul_get(src, home);
++ if (!binding) {
++ DEBUG(DBG_INFO, "No binding, BA dropped.");
++ write_unlock(&bul_lock);
++ return -1;
++ }
++
++ if (opts.auth_data && binding->rr &&
++ (mipv6_auth_check(src, coa, (__u8 *)mh, msg_len,
++ opts.auth_data, binding->rr->kbu) == 0))
++ auth = 1;
++
++ if (req_auth && binding->rr && !auth) {
++ DEBUG(DBG_INFO, "BA Authentication failed.");
++ MIPV6_INC_STATS(n_ba_drop.auth);
++ write_unlock(&bul_lock);
++ return MH_AUTH_FAILED;
++ }
++
++ if (ba->status == SEQUENCE_NUMBER_OUT_OF_WINDOW) {
++ DEBUG(DBG_INFO,
++ "Sequence number out of window, setting seq to %d",
++ sequence);
++ } else if (binding->seq != sequence) {
++ DEBUG(DBG_INFO, "BU/BA Sequence Number mismatch %d != %d",
++ binding->seq, sequence);
++ MIPV6_INC_STATS(n_ba_drop.invalid);
++ write_unlock(&bul_lock);
++ return MH_SEQUENCE_MISMATCH;
++ }
++ if (ba->status == EXPIRED_HOME_NONCE_INDEX || ba->status == EXPIRED_NONCES) {
++ if (binding->rr) {
++ /* Need to resend home test init to CN */
++ binding->rr->rr_state |= RR_WAITH;
++ mipv6_send_addr_test_init(&binding->home_addr,
++ &binding->cn_addr,
++ MIPV6_MH_HOTI,
++ binding->rr->hot_cookie);
++ MIPV6_INC_STATS(n_ban_rcvd);
++ } else {
++ DEBUG(DBG_WARNING, "Got BA with status EXPIRED_HOME_NONCE_INDEX"
++ "for non-RR BU");
++ MIPV6_INC_STATS(n_ba_drop.invalid);
++ }
++ write_unlock(&bul_lock);
++ return 0;
++ }
++ if (ba->status == EXPIRED_CAREOF_NONCE_INDEX || ba->status == EXPIRED_NONCES) {
++ if (binding->rr) {
++ /* Need to resend care-of test init to CN */
++ binding->rr->rr_state |= RR_WAITC;
++ mipv6_send_addr_test_init(&binding->coa,
++ &binding->cn_addr,
++ MIPV6_MH_COTI,
++ binding->rr->cot_cookie);
++ MIPV6_INC_STATS(n_ban_rcvd);
++ } else {
++ DEBUG(DBG_WARNING, "Got BA with status EXPIRED_HOME_CAREOF_INDEX"
++ "for non-RR BU");
++ MIPV6_INC_STATS(n_ba_drop.invalid);
++ }
++ write_unlock(&bul_lock);
++ return 0;
++ }
++ write_unlock(&bul_lock);
++
++ if (ba->status >= REASON_UNSPECIFIED) {
++ DEBUG(DBG_INFO, "Binding Ack status : %d indicates error", ba->status);
++ mipv6_ba_rcvd(ifindex, src, home, sequence, lifetime,
++ refresh, ba->status);
++ MIPV6_INC_STATS(n_ban_rcvd);
++ return 0;
++ }
++ MIPV6_INC_STATS(n_ba_rcvd);
++ if (mipv6_ba_rcvd(ifindex, src, home, ntohs(ba->sequence), lifetime,
++ refresh, ba->status)) {
++ DEBUG(DBG_WARNING, "mipv6_ba_rcvd failed");
++ }
++
++ return 0;
++}
++
++/**
++ * mipv6_handle_mh_be - Binding Error handler
++ * @cn: source of this packet
++ * @coa: care-of address
++ * @home: home address
++ * @mh: pointer to the beginning of the Mobility Header
++ *
++ **/
++
++static int mipv6_handle_mh_be(struct sk_buff *skb,
++ struct in6_addr *home,
++ struct in6_addr *coa,
++ struct in6_addr *cn,
++ struct in6_addr *unused,
++ struct mipv6_mh *mh)
++{
++ struct mipv6_mh_be *be = (struct mipv6_mh_be *)mh->data;
++ int msg_len = (mh->length+1) << 3;
++ int opt_len;
++ struct in6_addr *hoa;
++ struct bul_inval_args args;
++
++ DEBUG_FUNC();
++
++ if (msg_len > skb->len)
++ return -1;
++
++ opt_len = msg_len - sizeof(*mh) - sizeof(*be);
++
++ if (opt_len < 0) {
++ __u32 pos = (__u32)&mh->length - (__u32)skb->nh.raw;
++ icmpv6_send(skb, ICMPV6_PARAMPROB,
++ ICMPV6_HDR_FIELD, pos, skb->dev);
++
++ DEBUG(DBG_WARNING, "Mobility Header length less than BE");
++ MIPV6_INC_STATS(n_be_drop.invalid);
++ return -1;
++ }
++
++
++ if (!ipv6_addr_any(&be->home_addr))
++ hoa = &be->home_addr;
++ else
++ hoa = home;
++
++ MIPV6_INC_STATS(n_be_rcvd);
++
++ args.all_rr_states = 0;
++ args.cn = cn;
++ args.mn = hoa;
++
++ switch (be->status) {
++ case 1: /* Home Address Option used without a binding */
++ /* Get ULP information about CN-MN communication. If
++ nothing in progress, MUST delete. Otherwise MAY
++ ignore. */
++ args.all_rr_states = 1;
++ case 2: /* Received unknown MH type */
++ /* If not expecting ack, SHOULD ignore. If MH
++ extension in use, stop it. If not, stop RO for
++ this CN. */
++ write_lock(&bul_lock);
++ mipv6_bul_iterate(mn_bul_invalidate, &args);
++ write_unlock(&bul_lock);
++ break;
++ }
++
++ return 0;
++}
++
++/*
++ * mipv6_bu_rate_limit() : Takes a bulentry, a COA and 'flags' to check
++ * whether BU being sent is for Home Registration or not.
++ *
++ * If the number of BU's sent is fewer than MAX_FAST_UPDATES, this BU
++ * is allowed to be sent at the MAX_UPDATE_RATE.
++ * If the number of BU's sent is greater than or equal to MAX_FAST_UPDATES,
++ * this BU is allowed to be sent at the SLOW_UPDATE_RATE.
++ *
++ * Assumption : This function is not re-entrant. and the caller holds the
++ * bulentry lock (by calling mipv6_bul_get()) to stop races with other
++ * CPU's executing this same function.
++ *
++ * Side-Effects. Either of the following could on success :
++ * 1. Sets consecutive_sends to 1 if the entry is a Home agent
++ * registration or the COA has changed.
++ * 2. Increments consecutive_sends if the number of BU's sent so
++ * far is less than MAX_FAST_UPDATES, and this BU is being sent
++ * atleast MAX_UPDATE_RATE after previous one.
++ *
++ * Return Value : 0 on Success, -1 on Failure
++ */
++static int mipv6_bu_rate_limit(struct mipv6_bul_entry *bulentry,
++ struct in6_addr *coa, __u8 flags)
++{
++ if ((flags & MIPV6_BU_F_HOME) || ipv6_addr_cmp(&bulentry->coa, coa)) {
++ /* Home Agent Registration or different COA - restart from 1 */
++ bulentry->consecutive_sends = 1;
++ return 0;
++ }
++
++ if (bulentry->consecutive_sends < MAX_FAST_UPDATES) {
++ /* First MAX_FAST_UPDATES can be sent at MAX_UPDATE_RATE */
++ if (jiffies - bulentry->lastsend < MAX_UPDATE_RATE * HZ) {
++ return -1;
++ }
++ bulentry->consecutive_sends ++;
++ } else {
++ /* Remaining updates SHOULD be sent at SLOW_UPDATE_RATE */
++ if (jiffies - bulentry->lastsend < SLOW_UPDATE_RATE * HZ) {
++ return -1;
++ }
++ /* Don't inc 'consecutive_sends' to avoid overflow to zero */
++ }
++ /* OK to send a BU */
++ return 0;
++}
++
++/**
++ * mipv6_send_bu - send a Binding Update
++ * @saddr: source address for BU
++ * @daddr: destination address for BU
++ * @coa: care-of address for MN
++ * @initdelay: initial BA wait timeout
++ * @maxackdelay: maximum BA wait timeout
++ * @exp: exponention back off
++ * @flags: flags for BU
++ * @lifetime: granted lifetime for binding
++ * @ops: mobility options
++ *
++ * Send a binding update. 'flags' may contain any of %MIPV6_BU_F_ACK,
++ * %MIPV6_BU_F_HOME, %MIPV6_BU_F_ROUTER bitwise ORed. If
++ * %MIPV6_BU_F_ACK is included retransmission will be attempted until
++ * the update has been acknowledged. Retransmission is done if no
++ * acknowledgement is received within @initdelay seconds. @exp
++ * specifies whether to use exponential backoff (@exp != 0) or linear
++ * backoff (@exp == 0). For exponential backoff the time to wait for
++ * an acknowledgement is doubled on each retransmission until a delay
++ * of @maxackdelay, after which retransmission is no longer attempted.
++ * For linear backoff the delay is kept constant and @maxackdelay
++ * specifies the maximum number of retransmissions instead. If
++ * sub-options are present ops must contain all sub-options to be
++ * added. On a mobile node, use the mobile node's home address for
++ * @saddr. Returns 0 on success, non-zero on failure.
++ *
++ * Caller may not hold @bul_lock.
++ **/
++int mipv6_send_bu(struct in6_addr *saddr, struct in6_addr *daddr,
++ struct in6_addr *coa, u32 initdelay,
++ u32 maxackdelay, u8 exp, u8 flags, u32 lifetime,
++ struct mipv6_mh_opt *ops)
++{
++ int ret;
++ __u8 state;
++ __u16 seq = 0;
++ int (*callback)(struct mipv6_bul_entry *);
++ __u32 callback_time;
++ struct mipv6_bul_entry *bulentry;
++
++ /* First a sanity check: don't send BU to local addresses */
++ if(ipv6_chk_addr(daddr, NULL)) {
++ DEBUG(DBG_ERROR, "BUG: Trying to send BU to local address");
++ return -1;
++ }
++ DEBUG(DBG_INFO, "Sending BU to CN %x:%x:%x:%x:%x:%x:%x:%x "
++ "for home address %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(daddr), NIPV6ADDR(saddr));
++
++ if ((bulentry = mipv6_bul_get(daddr, saddr)) != NULL) {
++ if (bulentry->state == ACK_ERROR) {
++ /*
++ * Don't send any more BU's to nodes which don't
++ * understanding one.
++ */
++ DEBUG(DBG_INFO, "Not sending BU to node which doesn't"
++ " understand one");
++ return -1;
++ }
++ if (mipv6_bu_rate_limit(bulentry, coa, flags) < 0) {
++ DEBUG(DBG_DATADUMP, "Limiting BU sent.");
++ return 0;
++ }
++ }
++
++ switch (mipv6_rr_state(bulentry, saddr, coa, flags)) {
++ case INPROGRESS_RR:
++ /* We are already doing RR, don't do BU at this time, it is
++ * done automatically later */
++ DEBUG(DBG_INFO, "RR in progress not sending BU");
++ return 0;
++
++ case DO_RR:
++ /* Just do RR and return, BU is done automatically later */
++ DEBUG(DBG_INFO, "starting RR" );
++ mipv6_RR_start(saddr, daddr, coa, bulentry, initdelay,
++ maxackdelay, flags, lifetime, ops);
++ return 0;
++
++ case NO_RR:
++ DEBUG(DBG_DATADUMP, "No RR necessary" );
++ default:
++ break;
++ }
++
++ if (bulentry)
++ seq = bulentry->seq + 1;
++
++ /* Add to binding update list */
++
++ if (flags & MIPV6_BU_F_ACK) {
++ DEBUG(DBG_INFO, "Setting bul callback to bul_resend_exp");
++ /* Send using exponential backoff */
++ state = RESEND_EXP;
++ callback = bul_resend_exp;
++ callback_time = initdelay;
++ } else {
++ DEBUG(DBG_INFO, "Setting bul callback to bul_entry_expired");
++ /* No acknowledgement/resending required */
++ state = ACK_OK; /* pretend we got an ack */
++ callback = bul_entry_expired;
++ callback_time = lifetime;
++ }
++
++ /* BU only for the home address */
++ /* We must hold bul_lock (write) while calling add */
++ if ((bulentry = mipv6_bul_add(daddr, saddr, coa, lifetime, seq,
++ flags, callback, callback_time,
++ state, initdelay, maxackdelay, ops,
++ NULL)) == NULL) {
++ DEBUG(DBG_INFO, "couldn't update BUL");
++ return 0;
++ }
++ ret = send_bu_msg(bulentry);
++
++ return ret;
++}
++
++int __init mipv6_mh_mn_init(void)
++{
++ mipv6_mh_register(MIPV6_MH_HOT, mipv6_handle_mh_HC_test);
++ mipv6_mh_register(MIPV6_MH_COT, mipv6_handle_mh_HC_test);
++ mipv6_mh_register(MIPV6_MH_BA, mipv6_handle_mh_ba);
++ mipv6_mh_register(MIPV6_MH_BRR, mipv6_handle_mh_brr);
++ mipv6_mh_register(MIPV6_MH_BE, mipv6_handle_mh_be);
++
++ return 0;
++}
++
++void __exit mipv6_mh_mn_exit(void)
++{
++ mipv6_mh_unregister(MIPV6_MH_HOT);
++ mipv6_mh_unregister(MIPV6_MH_COT);
++ mipv6_mh_unregister(MIPV6_MH_BA);
++ mipv6_mh_unregister(MIPV6_MH_BRR);
++ mipv6_mh_unregister(MIPV6_MH_BE);
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/module_cn.c linux-2.4.25/net/ipv6/mobile_ip6/module_cn.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/module_cn.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/module_cn.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,167 @@
++/*
++ * Mobile IPv6 Common Module
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ * Antti Tuominen <ajtuomin@tml.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/init.h>
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++#endif /* CONFIG_SYSCTL */
++
++#include <net/mipglue.h>
++
++#include "bcache.h"
++#include "mipv6_icmp.h"
++#include "stats.h"
++#include "mobhdr.h"
++#include "exthdrs.h"
++
++int mipv6_debug = 1;
++
++#if defined(MODULE) && LINUX_VERSION_CODE > 0x20115
++MODULE_AUTHOR("MIPL Team");
++MODULE_DESCRIPTION("Mobile IPv6");
++MODULE_LICENSE("GPL");
++MODULE_PARM(mipv6_debug, "i");
++#endif
++
++#include "config.h"
++
++struct mip6_func mip6_fn;
++struct mip6_conf mip6node_cnf = {
++ capabilities: CAP_CN,
++ accept_ret_rout: 1,
++ max_rtr_reachable_time: 0,
++ eager_cell_switching: 0,
++ max_num_tunnels: 0,
++ min_num_tunnels: 0,
++ binding_refresh_advice: 0,
++ bu_lladdr: 0,
++ bu_keymgm: 0,
++ bu_cn_ack: 0
++};
++
++#define MIPV6_BCACHE_SIZE 128
++
++/**********************************************************************
++ *
++ * MIPv6 CN Module Init / Cleanup
++ *
++ **********************************************************************/
++
++#ifdef CONFIG_SYSCTL
++/* Sysctl table */
++ctl_table mipv6_mobility_table[] = {
++ {NET_IPV6_MOBILITY_DEBUG, "debuglevel",
++ &mipv6_debug, sizeof(int), 0644, NULL,
++ &proc_dointvec},
++ {NET_IPV6_MOBILITY_RETROUT, "accept_return_routability",
++ &mip6node_cnf.accept_ret_rout, sizeof(int), 0644, NULL,
++ &proc_dointvec},
++ {0}
++};
++ctl_table mipv6_table[] = {
++ {NET_IPV6_MOBILITY, "mobility", NULL, 0, 0555, mipv6_mobility_table},
++ {0}
++};
++
++static struct ctl_table_header *mipv6_sysctl_header;
++static struct ctl_table mipv6_net_table[];
++static struct ctl_table mipv6_root_table[];
++
++ctl_table mipv6_net_table[] = {
++ {NET_IPV6, "ipv6", NULL, 0, 0555, mipv6_table},
++ {0}
++};
++
++ctl_table mipv6_root_table[] = {
++ {CTL_NET, "net", NULL, 0, 0555, mipv6_net_table},
++ {0}
++};
++#endif /* CONFIG_SYSCTL */
++
++extern void mipv6_rr_init(void);
++
++/* Initialize the module */
++static int __init mip6_init(void)
++{
++ int err = 0;
++
++ printk(KERN_INFO "MIPL Mobile IPv6 for Linux Correspondent Node %s (%s)\n",
++ MIPLVERSION, MIPV6VERSION);
++
++#ifdef CONFIG_IPV6_MOBILITY_DEBUG
++ printk(KERN_INFO "Debug-level: %d\n", mipv6_debug);
++#endif
++
++ if ((err = mipv6_bcache_init(MIPV6_BCACHE_SIZE)) < 0)
++ goto bcache_fail;
++
++ if ((err = mipv6_icmpv6_init()) < 0)
++ goto icmp_fail;
++
++ if ((err = mipv6_stats_init()) < 0)
++ goto stats_fail;
++ mipv6_rr_init();
++
++#ifdef CONFIG_SYSCTL
++ mipv6_sysctl_header = register_sysctl_table(mipv6_root_table, 0);
++#endif
++
++ if ((err = mipv6_mh_common_init()) < 0)
++ goto mh_fail;
++
++ MIPV6_SETCALL(mipv6_modify_txoptions, mipv6_modify_txoptions);
++
++ MIPV6_SETCALL(mipv6_handle_homeaddr, mipv6_handle_homeaddr);
++ MIPV6_SETCALL(mipv6_icmp_swap_addrs, mipv6_icmp_swap_addrs);
++
++ return 0;
++
++mh_fail:
++#ifdef CONFIG_SYSCTL
++ unregister_sysctl_table(mipv6_sysctl_header);
++#endif
++ mipv6_stats_exit();
++stats_fail:
++ mipv6_icmpv6_exit();
++icmp_fail:
++ mipv6_bcache_exit();
++bcache_fail:
++ return err;
++}
++module_init(mip6_init);
++
++#ifdef MODULE
++/* Cleanup module */
++static void __exit mip6_exit(void)
++{
++ printk(KERN_INFO "mip6_base.o exiting.\n");
++#ifdef CONFIG_SYSCTL
++ unregister_sysctl_table(mipv6_sysctl_header);
++#endif
++
++ /* Invalidate all custom kernel hooks. No need to do this
++ separately for all hooks. */
++ mipv6_invalidate_calls();
++
++ mipv6_mh_common_exit();
++ mipv6_stats_exit();
++ mipv6_icmpv6_exit();
++ mipv6_bcache_exit();
++}
++module_exit(mip6_exit);
++#endif /* MODULE */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/module_ha.c linux-2.4.25/net/ipv6/mobile_ip6/module_ha.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/module_ha.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/module_ha.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,264 @@
++/*
++ * Mobile IPv6 Home Agent Module
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ * Antti Tuominen <ajtuomin@tml.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/init.h>
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++#endif /* CONFIG_SYSCTL */
++
++#include <net/mipglue.h>
++#include <net/addrconf.h>
++
++#include "mobhdr.h"
++#include "tunnel_ha.h"
++#include "ha.h"
++#include "halist.h"
++#include "mipv6_icmp.h"
++//#include "prefix.h"
++#include "bcache.h"
++#include "debug.h"
++
++int mipv6_use_auth = 0;
++
++#if defined(MODULE) && LINUX_VERSION_CODE > 0x20115
++MODULE_AUTHOR("MIPL Team");
++MODULE_DESCRIPTION("Mobile IPv6 Home Agent");
++MODULE_LICENSE("GPL");
++#endif
++
++#include "config.h"
++
++#define MIPV6_HALIST_SIZE 128
++struct ha_info_opt {
++ u8 type;
++ u8 len;
++ u16 res;
++ u16 pref;
++ u16 ltime;
++};
++/*
++ * Called from ndisc.c's router_discovery.
++ */
++static int mipv6_ha_ra_rcv(struct sk_buff *skb, struct ndisc_options *ndopts)
++{
++ unsigned int ha_info_pref = 0, ha_info_lifetime;
++ int ifi = ((struct inet6_skb_parm *)skb->cb)->iif;
++ struct ra_msg *ra = (struct ra_msg *) skb->h.raw;
++ struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
++ struct in6_addr ll_addr;
++ struct hal {
++ struct in6_addr prefix;
++ int plen;
++ struct hal *next;
++ };
++
++ DEBUG_FUNC();
++
++ ha_info_lifetime = ntohs(ra->icmph.icmp6_rt_lifetime);
++ ipv6_addr_copy(&ll_addr, saddr);
++
++ if (ndopts->nd_opts_hai) {
++ struct ha_info_opt *hai = (struct ha_info_opt *)ndopts->nd_opts_hai;
++ ha_info_pref = ntohs(hai->pref);
++ ha_info_lifetime = ntohs(hai->ltime);
++ DEBUG(DBG_DATADUMP,
++ "received home agent info with preference : %d and lifetime : %d",
++ ha_info_pref, ha_info_lifetime);
++ }
++ if (ndopts->nd_opts_pi) {
++ struct nd_opt_hdr *p;
++ for (p = ndopts->nd_opts_pi;
++ p;
++ p = ndisc_next_option(p, ndopts->nd_opts_pi_end)) {
++ struct prefix_info *pinfo;
++
++ pinfo = (struct prefix_info *) p;
++
++ if (pinfo->router_address) {
++ DEBUG(DBG_DATADUMP, "Adding router address to "
++ "ha queue \n");
++ /* If RA has H bit set and Prefix Info
++ * Option R bit set, queue this
++ * address to be added to Home Agents
++ * List.
++ */
++ if (ipv6_addr_type(&pinfo->prefix) &
++ IPV6_ADDR_LINKLOCAL)
++ continue;
++ if (!ra->icmph.icmp6_home_agent || !ha_info_lifetime) {
++ mipv6_halist_delete(&pinfo->prefix);
++ continue;
++ } else {
++
++ mipv6_halist_add(ifi, &pinfo->prefix,
++ pinfo->prefix_len, &ll_addr,
++ ha_info_pref, ha_info_lifetime);
++ }
++
++ }
++
++ }
++ }
++ return MIPV6_ADD_RTR;
++}
++
++/**********************************************************************
++ *
++ * MIPv6 Module Init / Cleanup
++ *
++ **********************************************************************/
++
++#ifdef CONFIG_SYSCTL
++/* Sysctl table */
++extern int
++mipv6_max_tnls_sysctl(ctl_table *, int, struct file *, void *, size_t *);
++
++extern int
++mipv6_min_tnls_sysctl(ctl_table *, int, struct file *, void *, size_t *);
++
++int max_adv = ~(u16)0;
++int min_zero = 0;
++ctl_table mipv6_mobility_table[] = {
++ {NET_IPV6_MOBILITY_BINDING_REFRESH, "binding_refresh_advice",
++ &mip6node_cnf.binding_refresh_advice, sizeof(int), 0644, NULL,
++ &proc_dointvec_minmax, &sysctl_intvec, 0, &min_zero, &max_adv},
++
++ {NET_IPV6_MOBILITY_MAX_TNLS, "max_tnls", &mipv6_max_tnls, sizeof(int),
++ 0644, NULL, &mipv6_max_tnls_sysctl},
++ {NET_IPV6_MOBILITY_MIN_TNLS, "min_tnls", &mipv6_min_tnls, sizeof(int),
++ 0644, NULL, &mipv6_min_tnls_sysctl},
++ {0}
++};
++ctl_table mipv6_table[] = {
++ {NET_IPV6_MOBILITY, "mobility", NULL, 0, 0555, mipv6_mobility_table},
++ {0}
++};
++
++static struct ctl_table_header *mipv6_sysctl_header;
++static struct ctl_table mipv6_net_table[];
++static struct ctl_table mipv6_root_table[];
++
++ctl_table mipv6_net_table[] = {
++ {NET_IPV6, "ipv6", NULL, 0, 0555, mipv6_table},
++ {0}
++};
++
++ctl_table mipv6_root_table[] = {
++ {CTL_NET, "net", NULL, 0, 0555, mipv6_net_table},
++ {0}
++};
++#endif /* CONFIG_SYSCTL */
++
++extern void mipv6_check_dad(struct in6_addr *haddr);
++extern void mipv6_dad_init(void);
++extern void mipv6_dad_exit(void);
++extern int mipv6_forward(struct sk_buff *);
++
++/* Initialize the module */
++static int __init mip6_ha_init(void)
++{
++ int err = 0;
++
++ printk(KERN_INFO "MIPL Mobile IPv6 for Linux Home Agent %s (%s)\n",
++ MIPLVERSION, MIPV6VERSION);
++ mip6node_cnf.capabilities = CAP_CN | CAP_HA;
++
++ mip6_fn.icmpv6_dhaad_rep_rcv = mipv6_icmpv6_no_rcv;
++ mip6_fn.icmpv6_dhaad_req_rcv = mipv6_icmpv6_rcv_dhaad_req;
++ mip6_fn.icmpv6_pfxadv_rcv = mipv6_icmpv6_no_rcv;
++ mip6_fn.icmpv6_pfxsol_rcv = mipv6_icmpv6_no_rcv;
++ mip6_fn.icmpv6_paramprob_rcv = mipv6_icmpv6_no_rcv;
++
++#ifdef CONFIG_IPV6_MOBILITY_DEBUG
++ printk(KERN_INFO "Debug-level: %d\n", mipv6_debug);
++#endif
++
++#ifdef CONFIG_SYSCTL
++ mipv6_sysctl_header = register_sysctl_table(mipv6_root_table, 0);
++#endif
++ mipv6_initialize_tunnel();
++
++ if ((err = mipv6_ha_init()) < 0)
++ goto ha_fail;
++
++ MIPV6_SETCALL(mipv6_ra_rcv, mipv6_ha_ra_rcv);
++ MIPV6_SETCALL(mipv6_forward, mipv6_forward);
++ mipv6_dad_init();
++ MIPV6_SETCALL(mipv6_check_dad, mipv6_check_dad);
++
++ if ((err = mipv6_halist_init(MIPV6_HALIST_SIZE)) < 0)
++ goto halist_fail;
++
++// mipv6_initialize_pfx_icmpv6();
++
++ return 0;
++
++halist_fail:
++ mipv6_dad_exit();
++ mipv6_ha_exit();
++ha_fail:
++ mipv6_shutdown_tunnel();
++
++ mip6_fn.icmpv6_dhaad_rep_rcv = NULL;
++ mip6_fn.icmpv6_dhaad_req_rcv = NULL;
++ mip6_fn.icmpv6_pfxadv_rcv = NULL;
++ mip6_fn.icmpv6_pfxsol_rcv = NULL;
++ mip6_fn.icmpv6_paramprob_rcv = NULL;
++
++ MIPV6_RESETCALL(mipv6_ra_rcv);
++ MIPV6_RESETCALL(mipv6_forward);
++ MIPV6_RESETCALL(mipv6_check_dad);
++
++#ifdef CONFIG_SYSCTL
++ unregister_sysctl_table(mipv6_sysctl_header);
++#endif
++ return err;
++}
++module_init(mip6_ha_init);
++
++#ifdef MODULE
++/* Cleanup module */
++static void __exit mip6_ha_exit(void)
++{
++ printk(KERN_INFO "mip6_ha.o exiting.\n");
++ mip6node_cnf.capabilities &= ~(int)CAP_HA;
++
++ mipv6_bcache_cleanup(HOME_REGISTRATION);
++
++ MIPV6_RESETCALL(mipv6_ra_rcv);
++ MIPV6_RESETCALL(mipv6_forward);
++ MIPV6_RESETCALL(mipv6_check_dad);
++
++ mipv6_halist_exit();
++// mipv6_shutdown_pfx_icmpv6();
++
++ mip6_fn.icmpv6_dhaad_rep_rcv = NULL;
++ mip6_fn.icmpv6_dhaad_req_rcv = NULL;
++ mip6_fn.icmpv6_pfxadv_rcv = NULL;
++ mip6_fn.icmpv6_pfxsol_rcv = NULL;
++ mip6_fn.icmpv6_paramprob_rcv = NULL;
++
++ mipv6_dad_exit();
++ mipv6_ha_exit();
++ mipv6_shutdown_tunnel();
++#ifdef CONFIG_SYSCTL
++ unregister_sysctl_table(mipv6_sysctl_header);
++#endif
++}
++module_exit(mip6_ha_exit);
++#endif /* MODULE */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/module_mn.c linux-2.4.25/net/ipv6/mobile_ip6/module_mn.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/module_mn.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/module_mn.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,188 @@
++/*
++ * Mobile IPv6 Mobile Node Module
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ * Antti Tuominen <ajtuomin@tml.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/init.h>
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++#endif /* CONFIG_SYSCTL */
++
++#include <net/mipglue.h>
++
++extern int mipv6_debug;
++int mipv6_use_auth = 0;
++
++#if defined(MODULE) && LINUX_VERSION_CODE > 0x20115
++MODULE_AUTHOR("MIPL Team");
++MODULE_DESCRIPTION("Mobile IPv6 Mobile Node");
++MODULE_LICENSE("GPL");
++MODULE_PARM(mipv6_debug, "i");
++#endif
++
++#include "config.h"
++
++#include "mobhdr.h"
++#include "mn.h"
++#include "mipv6_icmp.h"
++//#include "prefix.h"
++
++/* TODO: These will go as soon as we get rid of the last two ioctls */
++extern int mipv6_ioctl_mn_init(void);
++extern void mipv6_ioctl_mn_exit(void);
++
++/**********************************************************************
++ *
++ * MIPv6 Module Init / Cleanup
++ *
++ **********************************************************************/
++
++#ifdef CONFIG_SYSCTL
++/* Sysctl table */
++
++extern int max_rtr_reach_time;
++extern int eager_cell_switching;
++
++static int max_reach = 1000;
++static int min_reach = 1;
++static int max_one = 1;
++static int min_zero = 0;
++
++extern int
++mipv6_mdetect_mech_sysctl(ctl_table *, int, struct file *, void *, size_t *);
++
++extern int
++mipv6_router_reach_sysctl(ctl_table *, int, struct file *, void *, size_t *);
++
++ctl_table mipv6_mobility_table[] = {
++ {NET_IPV6_MOBILITY_BU_F_LLADDR, "bu_flag_lladdr",
++ &mip6node_cnf.bu_lladdr, sizeof(int), 0644, NULL,
++ &proc_dointvec_minmax, &sysctl_intvec, 0, &min_zero, &max_one},
++ {NET_IPV6_MOBILITY_BU_F_KEYMGM, "bu_flag_keymgm",
++ &mip6node_cnf.bu_keymgm, sizeof(int), 0644, NULL,
++ &proc_dointvec_minmax, &sysctl_intvec, 0, &min_zero, &max_one},
++ {NET_IPV6_MOBILITY_BU_F_CN_ACK, "bu_flag_cn_ack",
++ &mip6node_cnf.bu_cn_ack, sizeof(int), 0644, NULL,
++ &proc_dointvec_minmax, &sysctl_intvec, 0, &min_zero, &max_one},
++
++ {NET_IPV6_MOBILITY_ROUTER_REACH, "max_router_reachable_time",
++ &max_rtr_reach_time, sizeof(int), 0644, NULL,
++ &proc_dointvec_minmax, &sysctl_intvec, 0, &min_reach, &max_reach},
++
++ {NET_IPV6_MOBILITY_MDETECT_MECHANISM, "eager_cell_switching",
++ &eager_cell_switching, sizeof(int), 0644, NULL,
++ &proc_dointvec_minmax, &sysctl_intvec, 0, &min_zero, &max_one},
++
++ {0}
++};
++ctl_table mipv6_table[] = {
++ {NET_IPV6_MOBILITY, "mobility", NULL, 0, 0555, mipv6_mobility_table},
++ {0}
++};
++
++static struct ctl_table_header *mipv6_sysctl_header;
++static struct ctl_table mipv6_net_table[];
++static struct ctl_table mipv6_root_table[];
++
++ctl_table mipv6_net_table[] = {
++ {NET_IPV6, "ipv6", NULL, 0, 0555, mipv6_table},
++ {0}
++};
++
++ctl_table mipv6_root_table[] = {
++ {CTL_NET, "net", NULL, 0, 0555, mipv6_net_table},
++ {0}
++};
++#endif /* CONFIG_SYSCTL */
++
++/* Initialize the module */
++static int __init mip6_mn_init(void)
++{
++ int err = 0;
++
++ printk(KERN_INFO "MIPL Mobile IPv6 for Linux Mobile Node %s (%s)\n",
++ MIPLVERSION, MIPV6VERSION);
++ mip6node_cnf.capabilities = CAP_CN | CAP_MN;
++
++#ifdef CONFIG_IPV6_MOBILITY_DEBUG
++ printk(KERN_INFO "Debug-level: %d\n", mipv6_debug);
++#endif
++
++#ifdef CONFIG_SYSCTL
++ mipv6_sysctl_header = register_sysctl_table(mipv6_root_table, 0);
++#endif
++ if ((err = mipv6_mn_init()) < 0)
++ goto mn_fail;
++
++ mipv6_mh_mn_init();
++
++ mip6_fn.icmpv6_dhaad_rep_rcv = mipv6_icmpv6_rcv_dhaad_rep;
++ mip6_fn.icmpv6_dhaad_req_rcv = mipv6_icmpv6_no_rcv;
++ mip6_fn.icmpv6_pfxadv_rcv = mipv6_icmpv6_no_rcv;
++ mip6_fn.icmpv6_pfxsol_rcv = mipv6_icmpv6_no_rcv;
++ mip6_fn.icmpv6_paramprob_rcv = mipv6_icmpv6_rcv_paramprob;
++
++// mipv6_initialize_pfx_icmpv6();
++
++ if ((err = mipv6_ioctl_mn_init()) < 0)
++ goto ioctl_fail;
++
++ return 0;
++
++ioctl_fail:
++// mipv6_shutdown_pfx_icmpv6();
++
++ mip6_fn.icmpv6_dhaad_rep_rcv = NULL;
++ mip6_fn.icmpv6_dhaad_req_rcv = NULL;
++ mip6_fn.icmpv6_pfxadv_rcv = NULL;
++ mip6_fn.icmpv6_pfxsol_rcv = NULL;
++ mip6_fn.icmpv6_paramprob_rcv = NULL;
++
++ mipv6_mh_mn_exit();
++ mipv6_mn_exit();
++mn_fail:
++#ifdef CONFIG_SYSCTL
++ unregister_sysctl_table(mipv6_sysctl_header);
++#endif
++ return err;
++}
++module_init(mip6_mn_init);
++
++#ifdef MODULE
++/* Cleanup module */
++static void __exit mip6_mn_exit(void)
++{
++ printk(KERN_INFO "mip6_mn.o exiting.\n");
++ mip6node_cnf.capabilities &= ~(int)CAP_MN;
++
++ mipv6_ioctl_mn_exit();
++// mipv6_shutdown_pfx_icmpv6();
++
++ mip6_fn.icmpv6_dhaad_rep_rcv = NULL;
++ mip6_fn.icmpv6_dhaad_req_rcv = NULL;
++ mip6_fn.icmpv6_pfxadv_rcv = NULL;
++ mip6_fn.icmpv6_pfxsol_rcv = NULL;
++ mip6_fn.icmpv6_paramprob_rcv = NULL;
++
++ mipv6_mn_exit();
++
++/* common cleanup */
++#ifdef CONFIG_SYSCTL
++ unregister_sysctl_table(mipv6_sysctl_header);
++#endif
++}
++module_exit(mip6_mn_exit);
++#endif /* MODULE */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/multiaccess_ctl.c linux-2.4.25/net/ipv6/mobile_ip6/multiaccess_ctl.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/multiaccess_ctl.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/multiaccess_ctl.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,287 @@
++/*
++ * 2001 (c) Oy L M Ericsson Ab
++ *
++ * Author: NomadicLab / Ericsson Research <ipv6@nomadiclab.com>
++ *
++ * $Id$
++ *
++ */
++
++/*
++ * Vertical hand-off information manager
++ */
++
++#include <linux/netdevice.h>
++#include <linux/in6.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <linux/list.h>
++#include "multiaccess_ctl.h"
++#include "debug.h"
++
++/*
++ * Local variables
++ */
++static LIST_HEAD(if_list);
++
++/* Internal interface information list */
++struct ma_if_info {
++ struct list_head list;
++ int interface_id;
++ int preference;
++ __u8 status;
++};
++
++/**
++ * ma_ctl_get_preference - get preference value for interface
++ * @ifi: interface index
++ *
++ * Returns integer value preference for given interface.
++ **/
++int ma_ctl_get_preference(int ifi)
++{
++ struct list_head *lh;
++ struct ma_if_info *info;
++ int pref = 0;
++
++ list_for_each(lh, &if_list) {
++ info = list_entry(lh, struct ma_if_info, list);
++ if (info->interface_id == ifi) {
++ pref = info->preference;
++ return pref;
++ }
++ }
++ return -1;
++}
++/**
++ * ma_ctl_get_preference - get preference value for interface
++ * @ifi: interface index
++ *
++ * Returns integer value interface index for interface with highest preference.
++ **/
++int ma_ctl_get_preferred_if(void)
++{
++ struct list_head *lh;
++ struct ma_if_info *info, *pref_if = NULL;
++
++ list_for_each(lh, &if_list) {
++ info = list_entry(lh, struct ma_if_info, list);
++ if (!pref_if || (info->preference > pref_if->preference)) {
++ pref_if = info;
++ }
++ }
++ if (pref_if) return pref_if->interface_id;
++ return 0;
++}
++/**
++ * ma_ctl_set_preference - set preference for interface
++ * @arg: ioctl args
++ *
++ * Sets preference of an existing interface (called by ioctl).
++ **/
++void ma_ctl_set_preference(unsigned long arg)
++{
++	struct list_head *lh;
++	struct ma_if_info *info;
++	struct ma_if_uinfo uinfo;
++
++	memset(&uinfo, 0, sizeof(struct ma_if_uinfo));
++	if (copy_from_user(&uinfo, (struct ma_if_uinfo *)arg,
++			   sizeof(struct ma_if_uinfo)) != 0) {
++		DEBUG(DBG_WARNING, "copy_from_user failed");
++		return;
++	}
++
++	/* check if the interface exists */
++	list_for_each(lh, &if_list) {
++		info = list_entry(lh, struct ma_if_info, list);
++		if (info->interface_id == uinfo.interface_id) {
++			info->preference = uinfo.preference;
++			return;
++		}
++	}
++}
++
++/**
++ * ma_ctl_add_iface - add new interface to list
++ * @if_index: interface index
++ *
++ * Adds new interface entry to preference list. Preference is set to
++ * the same value as @if_index. Entry @status is set to
++ * %MA_IFACE_NOT_USED.
++ **/
++void ma_ctl_add_iface(int if_index)
++{
++ struct list_head *lh;
++ struct ma_if_info *info;
++
++ DEBUG_FUNC();
++
++ /* check if the interface already exists */
++ list_for_each(lh, &if_list) {
++ info = list_entry(lh, struct ma_if_info, list);
++ if (info->interface_id == if_index) {
++ info->status = MA_IFACE_NOT_USED;
++ info->preference = if_index;
++ return;
++ }
++ }
++
++ info = kmalloc(sizeof(struct ma_if_info), GFP_ATOMIC);
++ if (info == NULL) {
++ DEBUG(DBG_ERROR, "Out of memory");
++ return;
++ }
++ memset(info, 0, sizeof(struct ma_if_info));
++ info->interface_id = if_index;
++ info->preference = if_index;
++ info->status = MA_IFACE_NOT_USED;
++ list_add(&info->list, &if_list);
++}
++
++/**
++ * ma_ctl_del_iface - remove entry from the list
++ * @if_index: interface index
++ *
++ * Removes entry for interface @if_index from preference list.
++ **/
++int ma_ctl_del_iface(int if_index)
++{
++ struct list_head *lh, *next;
++ struct ma_if_info *info;
++
++ DEBUG_FUNC();
++
++ /* if the iface exists, change availability to 0 */
++ list_for_each_safe(lh, next, &if_list) {
++ info = list_entry(lh, struct ma_if_info, list);
++ if (info->interface_id == if_index) {
++ list_del(&info->list);
++ kfree(info);
++ return 0;
++ }
++ }
++
++ return -1;
++}
++
++/**
++ * ma_ctl_upd_iface - update entry (and list)
++ * @if_index: interface to update
++ * @status: new status for interface
++ * @change_if_index: new interface
++ *
++ * Updates @if_index entry on preference list. Entry status is set to
++ * @status. If new @status is %MA_IFACE_CURRENT, updates list to have
++ * only one current device. If @status is %MA_IFACE_NOT_PRESENT,
++ * entry is deleted and further if entry had %MA_IFACE_CURRENT set,
++ * new current device is looked up and returned in @change_if_index.
++ * New preferred interface is also returned if current device changes
++ * to %MA_IFACE_NOT_USED. Returns 0 on success, otherwise negative.
++ **/
++int ma_ctl_upd_iface(int if_index, int status, int *change_if_index)
++{
++	struct list_head *lh, *tmp;
++	struct ma_if_info *info, *pref = NULL;
++	int found = 0;
++
++	DEBUG_FUNC();
++
++	*change_if_index = 0;
++
++	/* check if the interface exists */
++	list_for_each_safe(lh, tmp, &if_list) {
++		info = list_entry(lh, struct ma_if_info, list);
++		if (status == MA_IFACE_NOT_PRESENT) {
++			if (info->interface_id == if_index) {
++				list_del_init(&info->list);
++				kfree(info);
++				found = 1;
++				break;
++			}
++		} else if (status == MA_IFACE_CURRENT) {
++			if (info->interface_id == if_index) {
++				info->status |= MA_IFACE_CURRENT;
++				found = 1;
++			} else {
++				info->status |= MA_IFACE_NOT_USED;
++			}
++		} else if (status == MA_IFACE_NOT_USED) {
++			if (info->interface_id == if_index) {
++				if (info->status & MA_IFACE_CURRENT) {
++					found = 1;
++				}
++				info->status &= ~MA_IFACE_CURRENT;
++				info->status |= MA_IFACE_NOT_USED;
++				info->status &= ~MA_IFACE_HAS_ROUTER;
++				break;
++			}
++		} else if (status == MA_IFACE_HAS_ROUTER) {
++			if (info->interface_id == if_index) {
++				info->status |= MA_IFACE_HAS_ROUTER;
++			}
++			return 0;
++		}
++	}
++
++	if (status & (MA_IFACE_NOT_USED|MA_IFACE_NOT_PRESENT) && found) {
++		/* select new interface */
++		list_for_each(lh, &if_list) {
++			info = list_entry(lh, struct ma_if_info, list);
++			if (pref == NULL || ((info->preference > pref->preference) &&
++			    info->status & MA_IFACE_HAS_ROUTER))
++				pref = info;
++		}
++		if (pref) {
++			*change_if_index = pref->interface_id;
++			pref->status |= MA_IFACE_CURRENT;
++		} else {
++			*change_if_index = -1;
++		}
++		return 0;
++	}
++
++	if (found) return 0;
++
++	return -1;
++}
++
++static int if_proc_info(char *buffer, char **start, off_t offset,
++ int length)
++{
++ struct list_head *lh;
++ struct ma_if_info *info;
++ int len = 0;
++
++ list_for_each(lh, &if_list) {
++ info = list_entry(lh, struct ma_if_info, list);
++ len += sprintf(buffer + len, "%02d %010d %1d %1d\n",
++ info->interface_id, info->preference,
++ !!(info->status & MA_IFACE_HAS_ROUTER),
++ !!(info->status & MA_IFACE_CURRENT));
++ }
++
++ *start = buffer + offset;
++
++ len -= offset;
++
++ if (len > length) len = length;
++
++ return len;
++
++}
++
++void ma_ctl_init(void)
++{
++ proc_net_create("mip6_iface", 0, if_proc_info);
++}
++
++void ma_ctl_clean(void)
++{
++ proc_net_remove("mip6_iface");
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/multiaccess_ctl.h linux-2.4.25/net/ipv6/mobile_ip6/multiaccess_ctl.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/multiaccess_ctl.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/multiaccess_ctl.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,77 @@
++/*
++ * 2001 (c) Oy L M Ericsson Ab
++ *
++ * Author: NomadicLab / Ericsson Research <ipv6@nomadiclab.com>
++ *
++ * $Id$
++ *
++ */
++
++#ifndef _MULTIACCESS_CTL_H
++#define _MULTIACCESS_CTL_H
++
++/* status */
++#define MA_IFACE_NOT_PRESENT 0x01
++#define MA_IFACE_NOT_USED 0x02
++#define MA_IFACE_HAS_ROUTER 0x04
++#define MA_IFACE_CURRENT 0x10
++
++struct ma_if_uinfo {
++ int interface_id;
++ int preference;
++ __u8 status;
++};
++/*
++ * @ma_ctl_get_preferred_id: returns most preferred interface id
++ */
++int ma_ctl_get_preferred_if(void);
++
++/* @ma_ctl_get_preference: returns preference for an interface
++ * @name: name of the interface (dev->name)
++ */
++int ma_ctl_get_preference(int ifi);
++
++/*
++ * Public function: ma_ctl_set_preference
++ * Description: Set preference of an existing interface (called by ioctl)
++ * Returns:
++ */
++void ma_ctl_set_preference(unsigned long);
++
++/*
++ * Public function: ma_ctl_add_iface
++ * Description: Inform control module to insert a new interface
++ * Returns: 0 if success, any other number means an error
++ */
++void ma_ctl_add_iface(int);
++
++/*
++ * Public function: ma_ctl_del_iface
++ * Description: Inform control module to remove an obsolete interface
++ * Returns: 0 if success, any other number means an error
++ */
++int ma_ctl_del_iface(int);
++
++/*
++ * Public function: ma_ctl_upd_iface
++ * Description: Inform control module of status change.
++ * Returns: 0 if success, any other number means an error
++ */
++int ma_ctl_upd_iface(int, int, int *);
++
++/*
++ * Public function: ma_ctl_init
++ * Description: XXX
++ * Returns: XXX
++ */
++void ma_ctl_init(void);
++
++/*
++ * Public function: ma_ctl_clean
++ * Description: XXX
++ * Returns: -
++ */
++void ma_ctl_clean(void);
++
++
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/ndisc_ha.c linux-2.4.25/net/ipv6/mobile_ip6/ndisc_ha.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/ndisc_ha.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/ndisc_ha.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,596 @@
++/*
++ * Mobile IPv6 Duplicate Address Detection Functions
++ *
++ * Authors:
++ * Krishna Kumar <krkumar@us.ibm.com>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#include <linux/autoconf.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/skbuff.h>
++#include <linux/in6.h>
++#include <net/ipv6.h>
++#include <net/addrconf.h>
++#include <net/mipv6.h>
++
++#include "debug.h"
++#include "bcache.h"
++#include "ha.h" /* mipv6_generate_ll_addr */
++
++/*
++ * Binding Updates from MN are cached in this structure till DAD is performed.
++ * This structure is used to retrieve a pending Binding Update for the HA to
++ * reply to after performing DAD. The first cell is different from the rest as
++ * follows :
++ * 1. The first cell is used to chain the remaining cells.
++ * 2. The timeout of the first cell is used to delete expired entries
++ * in the list of cells, while the timeout of the other cells are
++ * used for timing out a NS request so as to reply to a BU.
++ * 3. The only elements of the first cell that are used are :
++ * next, prev, and callback_timer.
++ *
++ * TODO : Don't we need to do pneigh_lookup on the Link Local address ?
++ */
++struct mipv6_dad_cell {
++ /* Information needed for DAD management */
++ struct mipv6_dad_cell *next; /* Next element on the DAD list */
++ struct mipv6_dad_cell *prev; /* Prev element on the DAD list */
++ __u16 probes; /* Number of times to probe for addr */
++ __u16 flags; /* Entry flags - see below */
++ struct timer_list callback_timer; /* timeout for entry */
++
++ /* Information needed for performing DAD */
++ struct inet6_ifaddr *ifp;
++ int ifindex;
++ struct in6_addr daddr;
++ struct in6_addr haddr; /* home address */
++ struct in6_addr ll_haddr; /* Link Local value of haddr */
++ struct in6_addr coa;
++ struct in6_addr rep_coa;
++ __u32 ba_lifetime;
++ __u16 sequence;
++ __u8 bu_flags;
++};
++
++/* Values for the 'flags' field in the mipv6_dad_cell */
++#define DAD_INIT_ENTRY 0
++#define DAD_DUPLICATE_ADDRESS 1
++#define DAD_UNIQUE_ADDRESS 2
++
++/* Head of the pending DAD list */
++static struct mipv6_dad_cell dad_cell_head;
++
++/* Lock to access the pending DAD list */
++static rwlock_t dad_lock = RW_LOCK_UNLOCKED;
++
++/* Timer routine which deletes 'expired' entries in the DAD list */
++static void mipv6_dad_delete_old_entries(unsigned long unused)
++{
++ struct mipv6_dad_cell *curr, *next;
++ unsigned long next_time = 0;
++
++ write_lock(&dad_lock);
++ curr = dad_cell_head.next;
++ while (curr != &dad_cell_head) {
++ next = curr->next;
++ if (curr->flags != DAD_INIT_ENTRY) {
++ if (curr->callback_timer.expires <= jiffies) {
++ /* Entry has expired, free it up. */
++ curr->next->prev = curr->prev;
++ curr->prev->next = curr->next;
++ in6_ifa_put(curr->ifp);
++ kfree(curr);
++ } else if (next_time <
++ curr->callback_timer.expires) {
++ next_time = curr->callback_timer.expires;
++ }
++ }
++ curr = next;
++ }
++ write_unlock(&dad_lock);
++ if (next_time) {
++ /*
++ * Start another timer if more cells need to be removed at
++ * a later stage.
++ */
++ dad_cell_head.callback_timer.expires = next_time;
++ add_timer(&dad_cell_head.callback_timer);
++ }
++}
++
++/*
++ * Queue a timeout routine to clean up 'expired' DAD entries.
++ */
++static void mipv6_start_dad_head_timer(struct mipv6_dad_cell *cell)
++{
++ unsigned long expire = jiffies +
++ cell->ifp->idev->nd_parms->retrans_time * 10;
++
++ if (!timer_pending(&dad_cell_head.callback_timer) ||
++ expire < dad_cell_head.callback_timer.expires) {
++ /*
++ * Add timer if none pending, or mod the timer if new
++ * cell needs to be expired before existing timer runs.
++ *
++ * We let the cell remain as long as possible, so that
++ * new BU's as part of retransmissions don't have to go
++ * through DAD before replying.
++ */
++ dad_cell_head.callback_timer.expires = expire;
++
++ /*
++ * Keep the cell around for atleast some time to handle
++ * retransmissions or BU's due to fast MN movement. This
++ * is needed otherwise a previous timeout can delete all
++ * expired entries including this new one.
++ */
++ cell->callback_timer.expires = jiffies +
++ cell->ifp->idev->nd_parms->retrans_time * 5;
++ if (!timer_pending(&dad_cell_head.callback_timer)) {
++ add_timer(&dad_cell_head.callback_timer);
++ } else {
++ mod_timer(&dad_cell_head.callback_timer, expire);
++ }
++ }
++}
++
++
++/* Join solicited node MC address */
++static inline void mipv6_join_sol_mc_addr(struct in6_addr *addr,
++ struct net_device *dev)
++{
++ struct in6_addr maddr;
++
++ /* Join solicited node MC address */
++ addrconf_addr_solict_mult(addr, &maddr);
++ ipv6_dev_mc_inc(dev, &maddr);
++}
++
++/* Leave solicited node MC address */
++static inline void mipv6_leave_sol_mc_addr(struct in6_addr *addr,
++ struct net_device *dev)
++{
++ struct in6_addr maddr;
++
++ addrconf_addr_solict_mult(addr, &maddr);
++ ipv6_dev_mc_dec(dev, &maddr);
++}
++
++/* Send a NS */
++static inline void mipv6_dad_send_ns(struct inet6_ifaddr *ifp,
++ struct in6_addr *haddr)
++{
++ struct in6_addr unspec;
++ struct in6_addr mcaddr;
++
++ ipv6_addr_set(&unspec, 0, 0, 0, 0);
++ addrconf_addr_solict_mult(haddr, &mcaddr);
++
++ /* addr is 'unspec' since we treat this address as transient */
++ ndisc_send_ns(ifp->idev->dev, NULL, haddr, &mcaddr, &unspec);
++}
++
++/*
++ * Search for a home address in the list of pending DAD's. Called from
++ * Neighbor Advertisement
++ * Return values :
++ * -1 : No DAD entry found for this advertisement, or entry already
++ * finished processing.
++ * 0 : Entry found waiting for DAD to finish.
++ */
++static int dad_search_haddr(struct in6_addr *ll_haddr,
++ struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u16 * seq, struct inet6_ifaddr **ifp)
++{
++ struct mipv6_dad_cell *cell;
++
++ read_lock(&dad_lock);
++ cell = dad_cell_head.next;
++ while (cell != &dad_cell_head &&
++ ipv6_addr_cmp(&cell->ll_haddr, ll_haddr) &&
++ ipv6_addr_cmp(&cell->haddr, ll_haddr)) {
++ cell = cell->next;
++ }
++ if (cell == &dad_cell_head || cell->flags != DAD_INIT_ENTRY) {
++ /* Not found element, or element already finished processing */
++ if (cell != &dad_cell_head) {
++ /*
++ * Set the state to DUPLICATE, even if it was UNIQUE
++ * earlier. It is not needed to setup timer via
++ * mipv6_start_dad_head_timer since this must have
++ * already been done.
++ */
++ cell->flags = DAD_DUPLICATE_ADDRESS;
++ }
++ read_unlock(&dad_lock);
++ return -1;
++ }
++
++ /*
++ * The NA found an unprocessed entry in the DAD list. Expire this
++ * entry since another node advertised this address. Caller should
++ * reject BU (DAD failed).
++ */
++ ipv6_addr_copy(daddr, &cell->daddr);
++ ipv6_addr_copy(haddr, &cell->haddr);
++ ipv6_addr_copy(coa, &cell->coa);
++ ipv6_addr_copy(rep_coa, &cell->rep_coa);
++ *seq = cell->sequence;
++ *ifp = cell->ifp;
++
++ if (del_timer(&cell->callback_timer) == 0) {
++ /* Timer already deleted, race with Timeout Handler */
++ /* No action needed */
++ }
++
++ cell->flags = DAD_DUPLICATE_ADDRESS;
++
++ /* Now leave this address to avoid future processing of NA's */
++ mipv6_leave_sol_mc_addr(&cell->ll_haddr, cell->ifp->idev->dev);
++ /* Leave also global address, if link local address was in use */
++ if (ipv6_addr_cmp(&cell->ll_haddr, &cell->haddr))
++ mipv6_leave_sol_mc_addr(&cell->haddr, cell->ifp->idev->dev);
++ /* Start dad_head timer to remove this entry */
++ mipv6_start_dad_head_timer(cell);
++
++ read_unlock(&dad_lock);
++
++ return 0;
++}
++
++/* ENTRY routine called via Neighbor Advertisement */
++void mipv6_check_dad(struct in6_addr *ll_haddr)
++{
++ struct in6_addr daddr, haddr, coa, rep_coa;
++ struct inet6_ifaddr *ifp;
++ __u16 seq;
++
++ if (dad_search_haddr(ll_haddr, &daddr, &haddr, &coa, &rep_coa, &seq,
++ &ifp) < 0) {
++ /*
++ * Didn't find entry, or no action needed (the action has
++ * already been performed).
++ */
++ return;
++ }
++
++ /*
++ * A DAD cell was present, meaning that there is a pending BU
++ * request for 'haddr' - reject the BU.
++ */
++ mipv6_bu_finish(ifp, 0, DUPLICATE_ADDR_DETECT_FAIL,
++ &daddr, &haddr, &coa, &rep_coa, 0, seq, 0, NULL);
++ return;
++}
++
++/*
++ * Check if the passed 'cell' is in the list of pending DAD's. Called from
++ * the Timeout Handler.
++ *
++ * Assumes that the caller is holding the dad_lock in reader mode.
++ */
++static int dad_search_cell(struct mipv6_dad_cell *cell)
++{
++	struct mipv6_dad_cell *tmp;
++
++	tmp = dad_cell_head.next;
++	while (tmp != &dad_cell_head && tmp != cell) {
++		tmp = tmp->next;
++	}
++	if (tmp == cell) {
++		if (cell->flags == DAD_INIT_ENTRY) {
++			/* Found valid entry */
++			if (--cell->probes == 0) {
++				/*
++				 * Retransmission's are over - return success.
++				 */
++				cell->flags = DAD_UNIQUE_ADDRESS;
++
++				/*
++				 * Leave this address to avoid future
++				 * processing of NA's.
++				 */
++				mipv6_leave_sol_mc_addr(&cell->ll_haddr,
++							cell->ifp->idev->
++							dev);
++				if (ipv6_addr_cmp(&cell->ll_haddr, &cell->haddr))
++					mipv6_leave_sol_mc_addr(&cell->haddr,
++							cell->ifp->idev->dev);
++				/* start timeout to delete this cell. */
++				mipv6_start_dad_head_timer(cell);
++				return 0;
++			}
++			/*
++			 * Retransmission not finished, send another NS and
++			 * return failure.
++			 */
++			mipv6_dad_send_ns(cell->ifp, &cell->ll_haddr);
++			if (ipv6_addr_cmp(&cell->ll_haddr, &cell->haddr))
++				mipv6_dad_send_ns(cell->ifp,
++						&cell->haddr);
++			cell->callback_timer.expires = jiffies +
++				cell->ifp->idev->nd_parms->retrans_time;
++			add_timer(&cell->callback_timer);
++		} else {
++			/*
++			 * This means that an NA was received before the
++			 * timeout and when the state changed from
++			 * DAD_INIT_ENTRY, the BU got failed as a result.
++			 * There is nothing to be done.
++			 */
++		}
++	}
++	return -1;
++}
++
++/* ENTRY routine called via Timeout */
++static void mipv6_dad_timeout(unsigned long arg)
++{
++ __u8 ba_status = SUCCESS;
++ struct in6_addr daddr;
++ struct in6_addr haddr;
++ struct in6_addr coa;
++ struct in6_addr rep_coa;
++ struct inet6_ifaddr *ifp;
++ int ifindex;
++ __u32 ba_lifetime;
++ __u16 sequence;
++ __u8 flags;
++ struct mipv6_dad_cell *cell = (struct mipv6_dad_cell *) arg;
++
++ /*
++ * If entry is not in the list, we have already sent BU Failure
++ * after getting a NA.
++ */
++ read_lock(&dad_lock);
++ if (dad_search_cell(cell) < 0) {
++ /*
++ * 'cell' is no longer valid (may not be in the list or
++ * is already processed, due to NA processing), or NS
++ * retransmissions are not yet over.
++ */
++ read_unlock(&dad_lock);
++ return;
++ }
++
++ /* This is the final Timeout. Send Bind Ack Success */
++
++ ifp = cell->ifp;
++ ifindex = cell->ifindex;
++ ba_lifetime = cell->ba_lifetime;
++ sequence = cell->sequence;
++ flags = cell->bu_flags;
++
++ ipv6_addr_copy(&daddr, &cell->daddr);
++ ipv6_addr_copy(&haddr, &cell->haddr);
++ ipv6_addr_copy(&coa, &cell->coa);
++ ipv6_addr_copy(&rep_coa, &cell->rep_coa);
++ read_unlock(&dad_lock);
++
++ /* Send BU Acknowledgement Success */
++ mipv6_bu_finish(ifp, ifindex, ba_status,
++ &daddr, &haddr, &coa, &rep_coa,
++ ba_lifetime, sequence, flags, NULL);
++ return;
++}
++
++/*
++ * Check if original home address exists in our DAD pending list, if so return
++ * the cell.
++ *
++ * Assumes that the caller is holding the dad_lock in writer mode.
++ */
++static struct mipv6_dad_cell *mipv6_dad_get_cell(struct in6_addr *haddr)
++{
++ struct mipv6_dad_cell *cell;
++
++ cell = dad_cell_head.next;
++ while (cell != &dad_cell_head
++ && ipv6_addr_cmp(&cell->haddr, haddr)) {
++ cell = cell->next;
++ }
++ if (cell == &dad_cell_head) {
++ /* Not found element */
++ return NULL;
++ }
++ return cell;
++}
++
++/*
++ * Save all parameters needed for doing a Bind Ack in the mipv6_dad_cell
++ * structure.
++ */
++static void mipv6_dad_save_cell(struct mipv6_dad_cell *cell,
++ struct inet6_ifaddr *ifp, int ifindex,
++ struct in6_addr *daddr,
++ struct in6_addr *haddr,
++ struct in6_addr *coa,
++ struct in6_addr *rep_coa,
++ __u32 ba_lifetime,
++ __u16 sequence, __u8 flags)
++{
++ in6_ifa_hold(ifp);
++ cell->ifp = ifp;
++ cell->ifindex = ifindex;
++
++ ipv6_addr_copy(&cell->daddr, daddr);
++ ipv6_addr_copy(&cell->haddr, haddr);
++ ipv6_addr_copy(&cell->coa, coa);
++ ipv6_addr_copy(&cell->rep_coa, rep_coa);
++
++ /* Convert cell->ll_haddr to Link Local address */
++ if (flags & MIPV6_BU_F_LLADDR)
++ mipv6_generate_ll_addr(&cell->ll_haddr, haddr);
++ else
++ ipv6_addr_copy(&cell->ll_haddr, haddr);
++
++ cell->ba_lifetime = ba_lifetime;
++ cell->sequence = sequence;
++ cell->bu_flags = flags;
++}
++
++/*
++ * Top level DAD routine for performing DAD.
++ *
++ * Return values
++ * 0 : Don't need to do DAD.
++ * 1 : Need to do DAD.
++ * -n : Error, where 'n' is the reason for the error.
++ *
++ * Assumption : DAD process has been optimized by using cached values upto
++ * some time. However sometimes this can cause problems. Eg. when the first
++ * BU was received, DAD might have failed. Before the second BU arrived,
++ * the node using MN's home address might have stopped using it, but still
++ * we will return DAD_DUPLICATE_ADDRESS based on the first DAD's result. Or
++ * this can go the other way around. However, it is a very small possibility
++ * and thus optimization is turned on by default. It is possible to change
++ * this feature (needs a little code-rewriting in this routine), but
++ * currently DAD result is being cached for performance reasons.
++ */
++int mipv6_dad_start(struct inet6_ifaddr *ifp, int ifindex,
++ struct in6_addr *daddr, struct in6_addr *haddr,
++ struct in6_addr *coa, struct in6_addr *rep_coa,
++ __u32 ba_lifetime, __u16 sequence, __u8 flags)
++{
++ int found;
++ struct mipv6_dad_cell *cell;
++ struct mipv6_bce bc_entry;
++
++ if (ifp->idev->cnf.dad_transmits == 0) {
++ /* DAD is not configured on the HA, return SUCCESS */
++ return 0;
++ }
++
++ if (mipv6_bcache_get(haddr, daddr, &bc_entry) == 0) {
++ /*
++ * We already have an entry in our cache - don't need to
++ * do DAD as we are already defending this home address.
++ */
++ return 0;
++ }
++
++ write_lock(&dad_lock);
++ if ((cell = mipv6_dad_get_cell(haddr)) != NULL) {
++ /*
++ * An existing entry for BU was found in our cache due
++ * to retransmission of the BU or a new COA registration.
++ */
++ switch (cell->flags) {
++ case DAD_INIT_ENTRY:
++ /* Old entry is waiting for DAD to complete */
++ break;
++ case DAD_UNIQUE_ADDRESS:
++ /* DAD is finished successfully - return success. */
++ write_unlock(&dad_lock);
++ return 0;
++ case DAD_DUPLICATE_ADDRESS:
++ /*
++ * DAD is finished and we got a NA while doing BU -
++ * return failure.
++ */
++ write_unlock(&dad_lock);
++ return -DUPLICATE_ADDR_DETECT_FAIL;
++ default:
++ /* Unknown state - should never happen */
++ DEBUG(DBG_WARNING,
++ "cell entry in unknown state : %d",
++ cell->flags);
++ write_unlock(&dad_lock);
++ return -REASON_UNSPECIFIED;
++ }
++ found = 1;
++ } else {
++ if ((cell = (struct mipv6_dad_cell *)
++ kmalloc(sizeof(struct mipv6_dad_cell), GFP_ATOMIC))
++ == NULL) {
++ return -INSUFFICIENT_RESOURCES;
++ }
++ found = 0;
++ }
++
++ mipv6_dad_save_cell(cell, ifp, ifindex, daddr, haddr, coa, rep_coa,
++ ba_lifetime, sequence, flags);
++
++ if (!found) {
++ cell->flags = DAD_INIT_ENTRY;
++ cell->probes = ifp->idev->cnf.dad_transmits;
++
++ /* Insert element on dad_cell_head list */
++ dad_cell_head.prev->next = cell;
++ cell->next = &dad_cell_head;
++ cell->prev = dad_cell_head.prev;
++ dad_cell_head.prev = cell;
++ write_unlock(&dad_lock);
++ if (flags & MIPV6_BU_F_LLADDR) {
++ /* join the solicited node MC of the global homeaddr.*/
++ mipv6_join_sol_mc_addr(&cell->haddr, ifp->idev->dev);
++ /* Send a NS */
++ mipv6_dad_send_ns(ifp, &cell->haddr);
++ }
++ /* join the solicited node MC of the homeaddr. */
++ mipv6_join_sol_mc_addr(&cell->ll_haddr, ifp->idev->dev);
++
++ /* Send a NS */
++ mipv6_dad_send_ns(ifp, &cell->ll_haddr);
++
++ /* Initialize timer for this cell to timeout the NS. */
++ init_timer(&cell->callback_timer);
++ cell->callback_timer.data = (unsigned long) cell;
++ cell->callback_timer.function = mipv6_dad_timeout;
++ cell->callback_timer.expires = jiffies +
++ ifp->idev->nd_parms->retrans_time;
++ add_timer(&cell->callback_timer);
++ } else {
++ write_unlock(&dad_lock);
++ }
++ return 1;
++}
++
++void __init mipv6_dad_init(void)
++{
++ dad_cell_head.next = dad_cell_head.prev = &dad_cell_head;
++ init_timer(&dad_cell_head.callback_timer);
++ dad_cell_head.callback_timer.data = 0;
++ dad_cell_head.callback_timer.function =
++ mipv6_dad_delete_old_entries;
++}
++
++void __exit mipv6_dad_exit(void)
++{
++ struct mipv6_dad_cell *curr, *next;
++
++ write_lock_bh(&dad_lock);
++ del_timer(&dad_cell_head.callback_timer);
++
++ curr = dad_cell_head.next;
++ while (curr != &dad_cell_head) {
++ next = curr->next;
++ del_timer(&curr->callback_timer);
++ if (curr->flags == DAD_INIT_ENTRY) {
++ /*
++ * We were in DAD_INIT state and listening to the
++ * solicited node MC address - need to stop that.
++ */
++ mipv6_leave_sol_mc_addr(&curr->ll_haddr,
++ curr->ifp->idev->dev);
++ if (ipv6_addr_cmp(&curr->ll_haddr, &curr->haddr))
++ mipv6_leave_sol_mc_addr(&curr->haddr,
++ curr->ifp->idev->dev);
++ }
++ in6_ifa_put(curr->ifp);
++ kfree(curr);
++ curr = next;
++ }
++ dad_cell_head.next = dad_cell_head.prev = &dad_cell_head;
++ write_unlock_bh(&dad_lock);
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/prefix.c linux-2.4.25/net/ipv6/mobile_ip6/prefix.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/prefix.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/prefix.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,217 @@
++/**
++ * Prefix solicitation and advertisement
++ *
++ * Authors:
++ * Jaakko Laine <medved@iki.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/config.h>
++#include <linux/icmpv6.h>
++#include <linux/net.h>
++#include <linux/spinlock.h>
++#include <linux/timer.h>
++#include <linux/netdevice.h>
++#include <net/ipv6.h>
++#include <net/addrconf.h>
++#include <net/ip6_route.h>
++#include <net/mipv6.h>
++
++#include "mipv6_icmp.h"
++#include "debug.h"
++#include "sortedlist.h"
++#include "prefix.h"
++#include "config.h"
++
++#define INFINITY 0xffffffff
++
++struct timer_list pfx_timer;
++
++struct list_head pfx_list;
++rwlock_t pfx_list_lock = RW_LOCK_UNLOCKED;
++
++int compare_pfx_list_entry(const void *data1, const void *data2,
++ int datalen)
++{
++ struct pfx_list_entry *e1 = (struct pfx_list_entry *) data1;
++ struct pfx_list_entry *e2 = (struct pfx_list_entry *) data2;
++
++ return ((ipv6_addr_cmp(&e1->daddr, &e2->daddr) == 0)
++ && (e2->ifindex == -1 || e1->ifindex == e2->ifindex));
++}
++
++/**
++ * mipv6_pfx_cancel_send - cancel pending items to daddr from saddr
++ * @daddr: Destination address
++ * @ifindex: pending items on this interface will be canceled
++ *
++ * if ifindex == -1, all items to daddr will be removed
++ */
++void mipv6_pfx_cancel_send(struct in6_addr *daddr, int ifindex)
++{
++ unsigned long tmp;
++ struct pfx_list_entry entry;
++
++ DEBUG_FUNC();
++
++ /* We'll just be comparing these parts... */
++ memcpy(&entry.daddr, daddr, sizeof(struct in6_addr));
++ entry.ifindex = ifindex;
++
++ write_lock_bh(&pfx_list_lock);
++
++ while (mipv6_slist_del_item(&pfx_list, &entry,
++ compare_pfx_list_entry) == 0)
++ ;
++
++ if ((tmp = mipv6_slist_get_first_key(&pfx_list)))
++ mod_timer(&pfx_timer, tmp);
++
++ write_unlock_bh(&pfx_list_lock);
++}
++
++/**
++ * mipv6_pfx_add_ha - add a new HA to send prefix solicitations to
++ * @daddr: address of HA
++ * @saddr: our address to use as source address
++ * @ifindex: interface index
++ */
++void mipv6_pfx_add_ha(struct in6_addr *daddr, struct in6_addr *saddr,
++ int ifindex)
++{
++ unsigned long tmp;
++ struct pfx_list_entry entry;
++
++ DEBUG_FUNC();
++
++ memcpy(&entry.daddr, daddr, sizeof(struct in6_addr));
++ memcpy(&entry.saddr, saddr, sizeof(struct in6_addr));
++ entry.retries = 0;
++ entry.ifindex = ifindex;
++
++ write_lock_bh(&pfx_list_lock);
++ if (mipv6_slist_modify(&pfx_list, &entry, sizeof(struct pfx_list_entry),
++ jiffies + INITIAL_SOLICIT_TIMER * HZ,
++ compare_pfx_list_entry))
++ DEBUG(DBG_WARNING, "Cannot add new HA to pfx list");
++
++ if ((tmp = mipv6_slist_get_first_key(&pfx_list)))
++ mod_timer(&pfx_timer, tmp);
++ write_unlock_bh(&pfx_list_lock);
++}
++
++int mipv6_pfx_add_home(int ifindex, struct in6_addr *saddr,
++ struct in6_addr *daddr, unsigned long min_expire)
++{
++ unsigned long tmp;
++
++ write_lock(&pfx_list_lock);
++
++ if (min_expire != INFINITY) {
++ unsigned long expire;
++ struct pfx_list_entry entry;
++
++ memcpy(&entry.daddr, saddr, sizeof(struct in6_addr));
++ memcpy(&entry.saddr, daddr, sizeof(struct in6_addr));
++ entry.retries = 0;
++ entry.ifindex = ifindex;
++
++ /* This is against the RFC 3775, but we need to set
++ * a minimum interval for a prefix solicitation.
++ * Otherwise a prefix solicitation storm will
++ * result if valid lifetime of the prefix is
++ * smaller than MAX_PFX_ADV_DELAY
++ */
++ min_expire -= MAX_PFX_ADV_DELAY;
++ min_expire = min_expire < MIN_PFX_SOL_DELAY ? MIN_PFX_SOL_DELAY : min_expire;
++
++ expire = jiffies + min_expire * HZ;
++
++ if (mipv6_slist_modify(&pfx_list, &entry,
++ sizeof(struct pfx_list_entry),
++ expire,
++ compare_pfx_list_entry) != 0)
++ DEBUG(DBG_WARNING, "Cannot add new entry to pfx_list");
++ }
++
++ if ((tmp = mipv6_slist_get_first_key(&pfx_list)))
++ mod_timer(&pfx_timer, tmp);
++
++ write_unlock(&pfx_list_lock);
++
++ return 0;
++}
++
++/**
++ * set_ha_pfx_list - manipulate pfx_list for HA when timer goes off
++ * @entry: pfx_list_entry that is due
++ */
++static void set_ha_pfx_list(struct pfx_list_entry *entry)
++{
++}
++
++/**
++ * set_mn_pfx_list - manipulate pfx_list for MN when timer goes off
++ * @entry: pfx_list_entry that is due
++ */
++static void set_mn_pfx_list(struct pfx_list_entry *entry)
++{
++}
++
++/**
++ * pfx_timer_handler - general timer handler
++ * @dummy: dummy
++ *
++ * calls set_ha_pfx_list and set_mn_pfx_list to do the thing when
++ * a timer goes off
++ */
++static void pfx_timer_handler(unsigned long dummy)
++{
++ unsigned long tmp;
++ struct pfx_list_entry *entry;
++
++ DEBUG_FUNC();
++
++ write_lock(&pfx_list_lock);
++ if (!(entry = mipv6_slist_get_first(&pfx_list)))
++ goto out;
++
++ if (mip6node_cnf.capabilities & CAP_HA)
++ set_ha_pfx_list(entry);
++ if (mip6node_cnf.capabilities & CAP_MN)
++ set_mn_pfx_list(entry);
++ if ((tmp = mipv6_slist_get_first_key(&pfx_list)))
++ mod_timer(&pfx_timer, tmp);
++
++ out:
++ write_unlock(&pfx_list_lock);
++}
++
++int mipv6_initialize_pfx_icmpv6(void)
++{
++ INIT_LIST_HEAD(&pfx_list);
++
++ init_timer(&pfx_timer);
++ pfx_timer.function = pfx_timer_handler;
++
++ return 0;
++}
++
++void mipv6_shutdown_pfx_icmpv6(void)
++{
++	struct pfx_list_entry *tmp;
++
++ if (timer_pending(&pfx_timer))
++ del_timer(&pfx_timer);
++
++ write_lock_bh(&pfx_list_lock);
++ while ((tmp = mipv6_slist_del_first(&pfx_list)))
++ kfree(tmp);
++ write_unlock_bh(&pfx_list_lock);
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/prefix.h linux-2.4.25/net/ipv6/mobile_ip6/prefix.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/prefix.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/prefix.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,57 @@
++/*
++ * MIPL Mobile IPv6 Prefix solicitation and advertisement
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _PREFIX_H
++#define _PREFIX_H
++
++#include <net/addrconf.h>
++
++struct pfx_list_entry {
++ struct in6_addr daddr;
++ struct in6_addr saddr;
++ int retries;
++ int ifindex;
++};
++
++extern struct list_head pfx_list;
++extern rwlock_t pfx_list_lock;
++extern struct timer_list pfx_timer;
++
++int compare_pfx_list_entry(const void *data1, const void *data2,
++ int datalen);
++
++/**
++ * mipv6_pfx_cancel_send - cancel pending pfx_advs/sols to daddr
++ * @daddr: destination address
++ * @ifindex: pending items on this interface will be canceled
++ *
++ * if ifindex == -1, all items to daddr will be removed
++ */
++void mipv6_pfx_cancel_send(struct in6_addr *daddr, int ifindex);
++
++/**
++ * mipv6_pfx_add_ha - add a new HA to send prefix solicitations to
++ * @daddr: address of HA
++ * @saddr: our address to use as source address
++ * @ifindex: interface index
++ */
++void mipv6_pfx_add_ha(struct in6_addr *daddr, struct in6_addr *saddr,
++ int ifindex);
++
++void mipv6_pfxs_modified(struct prefix_info *pinfo, int ifindex);
++
++int mipv6_pfx_add_home(int ifindex, struct in6_addr *daddr,
++ struct in6_addr *saddr, unsigned long min_expire);
++
++int mipv6_initialize_pfx_icmpv6(void);
++void mipv6_shutdown_pfx_icmpv6(void);
++
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/prefix_ha.c linux-2.4.25/net/ipv6/mobile_ip6/prefix_ha.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/prefix_ha.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/prefix_ha.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,122 @@
++/**
++ * Prefix advertisement for Home Agent
++ *
++ * Authors:
++ * Jaakko Laine <medved@iki.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/config.h>
++#include <linux/icmpv6.h>
++#include <linux/net.h>
++#include <linux/spinlock.h>
++#include <linux/timer.h>
++#include <linux/netdevice.h>
++#include <net/ipv6.h>
++#include <net/addrconf.h>
++#include <net/ip6_route.h>
++#include <net/mipv6.h>
++
++#include "mipv6_icmp.h"
++#include "debug.h"
++#include "sortedlist.h"
++#include "util.h"
++#include "bcache.h"
++#include "config.h"
++#include "prefix.h"
++
++/**
++ * pfx_adv_iterator - modify pfx_list entries according to new prefix info
++ * @data: MN's home registration bcache_entry
++ * @args: new prefix info
++ * @sortkey: ignored
++ */
++static int pfx_adv_iterator(void *data, void *args, unsigned long sortkey)
++{
++ struct mipv6_bce *bc_entry = (struct mipv6_bce *) data;
++ struct prefix_info *pinfo = (struct prefix_info *) args;
++
++ if (mipv6_prefix_compare(&bc_entry->coa, &pinfo->prefix,
++ pinfo->prefix_len) == 0) {
++ struct pfx_list_entry pfx_entry;
++
++ memcpy(&pfx_entry.daddr, &bc_entry->coa,
++ sizeof(struct in6_addr));
++		memcpy(&pfx_entry.saddr, &bc_entry->our_addr,
++ sizeof(struct in6_addr));
++ pfx_entry.retries = 0;
++ pfx_entry.ifindex = bc_entry->ifindex;
++
++ mipv6_slist_modify(&pfx_list, &pfx_entry,
++ sizeof(struct pfx_list_entry),
++ jiffies +
++ net_random() % (MAX_PFX_ADV_DELAY * HZ),
++ compare_pfx_list_entry);
++ }
++
++ return 0;
++}
++
++struct homereg_iterator_args {
++ struct list_head *head;
++ int count;
++};
++
++static int homereg_iterator(void *data, void *args, unsigned long *sortkey)
++{
++ struct mipv6_bce *entry = (struct mipv6_bce *) data;
++ struct homereg_iterator_args *state =
++ (struct homereg_iterator_args *) args;
++
++ if (entry->type == HOME_REGISTRATION) {
++ mipv6_slist_add(state->head, entry,
++ sizeof(struct mipv6_bce),
++ state->count);
++ state->count++;
++ }
++ return 0;
++}
++
++static int mipv6_bcache_get_homeregs(struct list_head *head)
++{
++ struct homereg_iterator_args args;
++
++ DEBUG_FUNC();
++
++ args.count = 0;
++ args.head = head;
++
++ mipv6_bcache_iterate(homereg_iterator, &args);
++ return args.count;
++}
++
++/**
++ * mipv6_prefix_added - prefix was added to interface, act accordingly
++ * @pinfo: prefix_info that was added
++ * @ifindex: interface index
++ */
++void mipv6_pfxs_modified(struct prefix_info *pinfo, int ifindex)
++{
++ int count;
++ unsigned long tmp;
++ struct list_head home_regs;
++
++ DEBUG_FUNC();
++
++ INIT_LIST_HEAD(&home_regs);
++
++ if (!(count = mipv6_bcache_get_homeregs(&home_regs)))
++ return;
++
++ write_lock_bh(&pfx_list_lock);
++ mipv6_slist_for_each(&home_regs, pinfo, pfx_adv_iterator);
++ if ((tmp = mipv6_slist_get_first_key(&pfx_list)))
++ mod_timer(&pfx_timer, tmp);
++ write_unlock_bh(&pfx_list_lock);
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/rr_crypto.c linux-2.4.25/net/ipv6/mobile_ip6/rr_crypto.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/rr_crypto.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/rr_crypto.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,255 @@
++/*
++ * rr_crypto.c - Mobile IPv6 return routability crypto
++ * Author : Henrik Petander <henrik.petander@hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ *
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/in6.h>
++#include <linux/init.h>
++#include <linux/random.h>
++
++#include <net/ipv6.h>
++
++#include "debug.h"
++#include "hmac.h"
++#include "rr_crypto.h"
++
++#define DBG_RR 5
++
++u8 k_CN[HMAC_SHA1_KEY_SIZE]; // secret key of CN
++
++u16 curr_index = 0;
++
++struct nonce_timestamp nonce_table[MAX_NONCES];
++spinlock_t nonce_lock = SPIN_LOCK_UNLOCKED;
++void update_nonces(void);
++
++/** nonce_is_fresh - whether the nonce was generated recently
++ *
++ * @non_ts : table entry containing the nonce and a timestamp
++ * @interval : if nonce was generated within interval seconds it is fresh
++ *
++ * Returns 1 if the nonce is fresh, 0 otherwise.
++ */
++static int nonce_is_fresh(struct nonce_timestamp *non_ts, unsigned long interval)
++{
++ if (time_before(jiffies, non_ts->timestamp + interval * HZ) && !non_ts->invalid)
++ return 1;
++ return 0;
++}
++void mipv6_rr_invalidate_nonce(u16 nonce_ind)
++{
++ spin_lock_bh(&nonce_lock);
++	if (nonce_ind >= MAX_NONCES) {
++ spin_unlock_bh(&nonce_lock);
++ return;
++ }
++ nonce_table[nonce_ind].invalid = 1;
++ spin_unlock_bh(&nonce_lock);
++}
++/* Returns a pointer to a new nonce */
++struct mipv6_rr_nonce * mipv6_rr_get_new_nonce(void)
++{
++ struct mipv6_rr_nonce *nce = kmalloc(sizeof(*nce), GFP_ATOMIC);
++
++ if (!nce)
++ return NULL;
++ // Lock nonces here
++ spin_lock_bh(&nonce_lock);
++ // If nonce is not fresh create new one
++ if (!nonce_is_fresh(&nonce_table[curr_index], MIPV6_RR_NONCE_LIFETIME)) {
++ // increment the last nonce pointer and create new nonce
++ curr_index++;
++ // Wrap around
++ if (curr_index == MAX_NONCES)
++ curr_index = 0;
++ // Get random data to fill the nonce data
++ get_random_bytes(nonce_table[curr_index].nonce.data, MIPV6_RR_NONCE_DATA_LENGTH);
++ // Fill the index field
++ nonce_table[curr_index].nonce.index = curr_index;
++ nonce_table[curr_index].invalid = 0;
++ nonce_table[curr_index].timestamp = jiffies;
++ }
++	memcpy(nce, &nonce_table[curr_index].nonce, sizeof(*nce));
++	spin_unlock_bh(&nonce_lock);
++ // Unlock nonces
++ return nce;
++}
++/** mipv6_rr_nonce_get_by_index - returns a nonce for index
++ * @nonce_ind : index of the nonce
++ *
++ * Returns a nonce or NULL if the nonce index was invalid or the nonce
++ * for the index was not fresh.
++ */
++struct mipv6_rr_nonce * mipv6_rr_nonce_get_by_index(u16 nonce_ind)
++{
++ struct mipv6_rr_nonce *nce = NULL;
++
++ spin_lock_bh(&nonce_lock);
++ if (nonce_ind >= MAX_NONCES) {
++ DEBUG(DBG_WARNING, "Nonce index field from BU invalid");
++
++ /* Here a double of the nonce_lifetime is used for freshness
++ * verification, since the nonces
++ * are not created in response to every initiator packet
++ */
++ } else if (nonce_is_fresh(&nonce_table[nonce_ind], 2 * MIPV6_RR_NONCE_LIFETIME)) {
++		if ((nce = kmalloc(sizeof(*nce), GFP_ATOMIC)) != NULL)
++			memcpy(nce, &nonce_table[nonce_ind].nonce, sizeof(*nce));
++ }
++ spin_unlock_bh(&nonce_lock);
++
++ return nce;
++}
++
++/* Fills rr test init cookies with random bytes */
++void mipv6_rr_mn_cookie_create(u8 *cookie)
++{
++ get_random_bytes(cookie, MIPV6_RR_COOKIE_LENGTH);
++}
++
++/** mipv6_rr_cookie_create - builds a home or care-of cookie
++ *
++ * @addr : the home or care-of address from HoTI or CoTI
++ * @ckie : memory where the cookie is copied to
++ * @nce : pointer to a nonce used for the calculation, nce is freed during the function
++ *
++ */
++int mipv6_rr_cookie_create(struct in6_addr *addr, u8 **ckie,
++ u16 nonce_index)
++{
++ struct ah_processing ah_proc;
++ u8 digest[HMAC_SHA1_HASH_LEN];
++ struct mipv6_rr_nonce *nce;
++
++ if ((nce = mipv6_rr_nonce_get_by_index(nonce_index))== NULL)
++ return -1;
++
++ if (*ckie == NULL && (*ckie = kmalloc(MIPV6_RR_COOKIE_LENGTH,
++ GFP_ATOMIC)) == NULL) {
++ kfree(nce);
++ return -1;
++ }
++ /* Calculate the full hmac-sha1 digest from address and nonce using the secret key of cn */
++
++ if (ah_hmac_sha1_init(&ah_proc, k_CN, HMAC_SHA1_KEY_SIZE) < 0) {
++ DEBUG(DBG_ERROR, "Hmac sha1 initialization failed");
++ kfree(nce);
++ return -1;
++ }
++
++ ah_hmac_sha1_loop(&ah_proc, addr, sizeof(*addr));
++ ah_hmac_sha1_loop(&ah_proc, nce->data, MIPV6_RR_NONCE_DATA_LENGTH);
++ ah_hmac_sha1_result(&ah_proc, digest);
++
++
++ /* clean up nonce */
++ kfree(nce);
++
++ /* Copy first 64 bits of hash target to the cookie */
++ memcpy(*ckie, digest, MIPV6_RR_COOKIE_LENGTH);
++ return 0;
++}
++
++/** mipv6_rr_key_calc - creates BU authentication key
++ *
++ * @hoc : Home Cookie
++ * @coc : Care-of Cookie
++ *
++ * Returns BU authentication key of length HMAC_SHA1_KEY_SIZE or NULL in error cases,
++ * caller needs to free the key.
++ */
++u8 *mipv6_rr_key_calc(u8 *hoc, u8 *coc)
++{
++
++ u8 *key_bu = kmalloc(HMAC_SHA1_KEY_SIZE, GFP_ATOMIC);
++ SHA1_CTX c;
++
++ if (!key_bu) {
++		DEBUG(DBG_CRITICAL, "Memory allocation failed, could not create BU authentication key");
++ return NULL;
++ }
++
++ /* Calculate the key from home and care-of cookies
++ * Kbu = sha1(home_cookie | care-of cookie)
++ * or KBu = sha1(home_cookie), if MN deregisters
++ */
++ sha1_init(&c);
++ sha1_compute(&c, hoc, MIPV6_RR_COOKIE_LENGTH);
++ if (coc)
++ sha1_compute(&c, coc, MIPV6_RR_COOKIE_LENGTH);
++ sha1_final(&c, key_bu);
++ DEBUG(DBG_RR, "Home and Care-of cookies used for calculating key ");
++ debug_print_buffer(DBG_RR, hoc, MIPV6_RR_COOKIE_LENGTH);
++ if (coc)
++ debug_print_buffer(DBG_RR, coc, MIPV6_RR_COOKIE_LENGTH);
++
++ return key_bu;
++}
++
++void mipv6_rr_init(void)
++{
++ get_random_bytes(k_CN, HMAC_SHA1_KEY_SIZE);
++ memset(nonce_table, 0, MAX_NONCES * sizeof(struct nonce_timestamp));
++}
++
++#ifdef TEST_MIPV6_RR_CRYPTO
++void mipv6_test_rr(void)
++{
++ struct mipv6_rr_nonce *nonce;
++ struct in6_addr a1, a2;
++ int ind1, ind2;
++ u8 *ckie1 = NULL, *ckie2 = NULL;
++ u8 *key_mn = NULL, *key_cn = NULL;
++ mipv6_init_rr();
++
++ nonce = mipv6_rr_get_new_nonce();
++ if (!nonce) {
++ printk("mipv6_rr_get_new_nonce() failed, at 1! \n");
++ return;
++ }
++ mipv6_rr_cookie_create(&a1, &ckie1, nonce->index);
++ ind1 = nonce->index;
++ kfree(nonce);
++
++ nonce = mipv6_rr_get_new_nonce();
++ if (!nonce) {
++ printk("mipv6_rr_get_new_nonce() failed, at 2! \n");
++ return;
++ }
++
++ mipv6_rr_cookie_create(&a2, &ckie2, nonce->index);
++ ind2 = nonce->index;
++ key_mn = mipv6_rr_key_calc(ckie1, ckie2);
++
++ /* Create home and coa cookies based on indices */
++ mipv6_rr_cookie_create(&a1, &ckie1, ind1);
++ mipv6_rr_cookie_create(&a2, &ckie2, ind2);
++ key_cn = mipv6_rr_key_calc(ckie1, ckie2);
++ if (!key_cn || !key_mn) {
++ printk("creation of secret key failed!\n");
++ return;
++ }
++ if(memcmp(key_cn, key_mn, HMAC_SHA1_KEY_SIZE))
++ printk("mipv6_rr_key_calc produced different keys for MN and CN \n");
++ else
++ printk("mipv6_rr_crypto test OK\n");
++ kfree(nonce);
++ kfree(key_cn);
++ kfree(key_mn);
++}
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/rr_crypto.h linux-2.4.25/net/ipv6/mobile_ip6/rr_crypto.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/rr_crypto.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/rr_crypto.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,72 @@
++/*
++ * MIPL Mobile IPv6 Return routability crypto prototypes
++ *
++ * $Id:$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _RR_CRYPTO
++#define _RR_CRYPTO
++
++#include <linux/in6.h>
++
++/* Macros and data structures */
++
++#define MIPV6_RR_NONCE_LIFETIME 60
++#define MIPV6_RR_NONCE_DATA_LENGTH 8
++#define MIPV6_RR_COOKIE_LENGTH 8
++#define COOKIE_SIZE 8
++#define MAX_NONCES 4
++#define HMAC_SHA1_KEY_SIZE 20
++
++struct mipv6_rr_nonce {
++ u_int16_t index;
++ u_int8_t data[MIPV6_RR_NONCE_DATA_LENGTH];
++};
++
++struct nonce_timestamp {
++ struct mipv6_rr_nonce nonce;
++ unsigned long timestamp;
++ u_int8_t invalid;
++};
++
++/* Function definitions */
++
++/* Return 1 if equal, 0 if not */
++static __inline__ int mipv6_equal_cookies(u8 *c1, u8 *c2)
++{
++ return (memcmp(c1, c2, MIPV6_RR_COOKIE_LENGTH) == 0);
++}
++
++/* Function declarations */
++
++/* Create cookie for HoTi and CoTi */
++extern void mipv6_rr_mn_cookie_create(u8 *cookie);
++
++/* Create cookie for HoT and CoT */
++extern int mipv6_rr_cookie_create(struct in6_addr *addr, u8 **ckie, u16 nonce_index);
++
++/* Calculate return routability key from home and care-of cookies, key length is
++ * HMAC_SHA1_KEY_SIZE
++ */
++extern u_int8_t *mipv6_rr_key_calc(u8 *hoc, u8 *coc);
++
++extern struct mipv6_rr_nonce *mipv6_rr_get_new_nonce(void);
++
++/* For avoiding replay attacks when MN deregisters */
++extern void mipv6_rr_invalidate_nonce(u16 nonce_index);
++/*
++ * initializes the return routability crypto
++ */
++
++void mipv6_rr_init(void);
++
++#ifdef TEST_MIPV6_RR_CRYPTO
++void mipv6_test_rr(void);
++#endif /* TEST_MIPV6_RR_CRYPTO */
++
++#endif /* _RR_CRYPTO */
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/sortedlist.c linux-2.4.25/net/ipv6/mobile_ip6/sortedlist.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/sortedlist.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/sortedlist.c 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,349 @@
++/**
++ * Sorted list - linked list with sortkey.
++ *
++ * Authors:
++ * Jaakko Laine <medved@iki.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++
++struct mipv6_sorted_list_entry {
++ struct list_head list;
++ void *data;
++ int datalen;
++ unsigned long sortkey;
++};
++
++/**
++ * compare - compares two arbitrary data items
++ * @data1: first data item
++ * @data2: second data item
++ * @datalen: length of data items in bits
++ *
++ * datalen is in bits!
++ */
++int mipv6_bitwise_compare(const void *data1, const void *data2, int datalen)
++{
++ int n = datalen;
++ __u8 * ptr1 = (__u8 *)data1;
++ __u8 * ptr2 = (__u8 *)data2;
++
++	for (; n > 0; n-=8, ptr1++, ptr2++) {
++ if (n >= 8) {
++ if (*ptr1 != *ptr2)
++ return 0;
++ } else {
++ if ((*ptr1 ^ *ptr2) & ((~0) << (8 - n)))
++ return 0;
++ }
++ }
++
++ return 1;
++}
++
++/**
++ * mipv6_slist_add - add an entry to sorted list
++ * @head: list_head of the sorted list
++ * @data: item to store
++ * @datalen: length of data (in bytes)
++ * @key: sortkey of item
++ *
++ * Allocates memory for entry and data
++ */
++int mipv6_slist_add(struct list_head *head, void *data, int datalen,
++ unsigned long sortkey)
++{
++ struct list_head *pos;
++ struct mipv6_sorted_list_entry *entry, *tmp, *next;
++
++ entry = kmalloc(sizeof(struct mipv6_sorted_list_entry), GFP_ATOMIC);
++
++ if (!entry)
++ return -1;
++
++ entry->data = kmalloc(datalen, GFP_ATOMIC);
++
++ if (!entry->data) {
++ kfree(entry);
++ return -1;
++ }
++
++ memcpy(entry->data, data, datalen);
++ entry->datalen = datalen;
++ entry->sortkey = sortkey;
++
++ if ((pos = head->next) == head) {
++ list_add(&entry->list, head);
++ return 0;
++ }
++
++ tmp = list_entry(pos, struct mipv6_sorted_list_entry, list);
++ if (entry->sortkey < tmp->sortkey) {
++ list_add(&entry->list, head);
++ return 0;
++ }
++
++ for (; pos != head; pos = pos->next) {
++ tmp = list_entry(pos, struct mipv6_sorted_list_entry, list);
++ if (pos->next == head) {
++ list_add(&entry->list, &tmp->list);
++ return 0;
++ }
++ next = list_entry(pos->next, struct mipv6_sorted_list_entry, list);
++ if (entry->sortkey >= tmp->sortkey && entry->sortkey < next->sortkey) {
++ list_add(&entry->list, &tmp->list);
++ return 0;
++ }
++ }
++
++ /* never reached */
++ return -1;
++}
++
++/**
++ * mipv6_slist_get_first - get the first data item in the list
++ * @head: list_head of the sorted list
++ *
++ * Returns the actual data item, not copy, so don't kfree it
++ */
++void *mipv6_slist_get_first(struct list_head *head)
++{
++ struct mipv6_sorted_list_entry *entry;
++
++ if (list_empty(head))
++ return NULL;
++
++ entry = list_entry(head->next, struct mipv6_sorted_list_entry, list);
++ return entry->data;
++}
++
++/**
++ * mipv6_slist_del_first - delete (and get) the first item in list
++ * @head: list_head of the sorted list
++ *
++ * Remember to kfree the item
++ */
++void *mipv6_slist_del_first(struct list_head *head)
++{
++ void *tmp;
++ struct mipv6_sorted_list_entry *entry;
++
++ if (list_empty(head))
++ return NULL;
++
++ entry = list_entry(head->next, struct mipv6_sorted_list_entry, list);
++ tmp = entry->data;
++
++ list_del(head->next);
++ kfree(entry);
++
++ return tmp;
++}
++
++/**
++ * mipv6_slist_del_item - delete entry
++ * @head: list_head of the sorted list
++ * @data: item to delete
++ * @compare: function used for comparing the data items
++ *
++ * compare function needs to have prototype
++ * int (*compare)(const void *data1, const void *data2, int datalen)
++ */
++int mipv6_slist_del_item(struct list_head *head, void *data,
++ int (*compare)(const void *data1, const void *data2,
++ int datalen))
++{
++ struct list_head *pos;
++ struct mipv6_sorted_list_entry *entry;
++
++ for(pos = head->next; pos != head; pos = pos->next) {
++ entry = list_entry(pos, struct mipv6_sorted_list_entry, list);
++ if (compare(data, entry->data, entry->datalen)) {
++ list_del(pos);
++ kfree(entry->data);
++ kfree(entry);
++ return 0;
++ }
++ }
++
++ return -1;
++}
++
++/**
++ * mipv6_slist_get_first_key - get sortkey of the first item
++ * @head: list_head of the sorted list
++ */
++unsigned long mipv6_slist_get_first_key(struct list_head *head)
++{
++ struct mipv6_sorted_list_entry *entry;
++
++ if (list_empty(head))
++ return 0;
++
++ entry = list_entry(head->next, struct mipv6_sorted_list_entry, list);
++ return entry->sortkey;
++}
++
++/**
++ * mipv6_slist_get_key - get sortkey of the data item
++ * @head: list_head of the sorted list
++ * @data: the item to search for
++ * @compare: function used for comparing the data items
++ *
++ * compare function needs to have prototype
++ * int (*compare)(const void *data1, const void *data2, int datalen)
++ */
++unsigned long mipv6_slist_get_key(struct list_head *head, void *data,
++ int (*compare)(const void *data1,
++ const void *data2,
++ int datalen))
++{
++ struct list_head *pos;
++ struct mipv6_sorted_list_entry *entry;
++
++ for(pos = head->next; pos != head; pos = pos->next) {
++ entry = list_entry(pos, struct mipv6_sorted_list_entry, list);
++ if (compare(data, entry->data, entry->datalen))
++ return entry->sortkey;
++ }
++
++ return 0;
++}
++
++/**
++ * mipv6_slist_get_data - get the data item identified by sortkey
++ * @head: list_head of the sorted list
++ * @key: sortkey of the item
++ *
++ * Returns the actual data item, not copy, so don't kfree it
++ */
++void *mipv6_slist_get_data(struct list_head *head, unsigned long sortkey)
++{
++ struct list_head *pos;
++ struct mipv6_sorted_list_entry *entry;
++
++ list_for_each(pos, head) {
++ entry = list_entry(pos, struct mipv6_sorted_list_entry, list);
++ if (entry->sortkey == sortkey)
++ return entry->data;
++ }
++
++ return NULL;
++}
++
++/**
++ * reorder_entry - move an entry to a new position according to sortkey
++ * @head: list_head of the sorted list
++ * @entry_pos: current place of the entry
++ * @key: new sortkey
++ */
++static void reorder_entry(struct list_head *head, struct list_head *entry_pos,
++ unsigned long sortkey)
++{
++ struct list_head *pos;
++ struct mipv6_sorted_list_entry *entry;
++
++ list_del(entry_pos);
++
++ for (pos = head->next; pos != head; pos = pos->next) {
++ entry = list_entry(pos, struct mipv6_sorted_list_entry, list);
++ if (sortkey >= entry->sortkey) {
++ list_add(entry_pos, &entry->list);
++ return;
++ }
++ }
++
++ list_add(entry_pos, head);
++}
++
++/**
++ * mipv6_slist_modify - modify data item
++ * @head: list_head of the sorted list
++ * @data: item, whose sortkey is to be modified
++ * @datalen: datalen in bytes
++ * @new_key: new sortkey
++ * @compare: function used for comparing the data items
++ *
++ * Copies the new data on top of the old one, if compare function returns
++ * true. If there's no matching entry, new one will be created.
++ * Compare function needs to have prototype
++ * int (*compare)(const void *data1, const void *data2, int datalen)
++ */
++int mipv6_slist_modify(struct list_head *head, void *data, int datalen,
++ unsigned long new_key,
++ int (*compare)(const void *data1, const void *data2,
++ int datalen))
++{
++ struct list_head *pos;
++ struct mipv6_sorted_list_entry *entry;
++
++ for (pos = head->next; pos != head; pos = pos->next) {
++ entry = list_entry(pos, struct mipv6_sorted_list_entry, list);
++ if (compare(data, entry->data, datalen)) {
++ memcpy(entry->data, data, datalen);
++ entry->sortkey = new_key;
++ reorder_entry(head, &entry->list, new_key);
++ return 0;
++ }
++ }
++
++ return mipv6_slist_add(head, data, datalen, new_key);
++}
++
++/**
++ * mipv6_slist_push_first - move the first entry to place indicated by new_key
++ * @head: list_head of the sorted list
++ * @new_key: new sortkey
++ */
++int mipv6_slist_push_first(struct list_head *head, unsigned long new_key)
++{
++ struct mipv6_sorted_list_entry *entry;
++
++ if (list_empty(head))
++ return -1;
++
++ entry = list_entry(head->next, struct mipv6_sorted_list_entry, list);
++ entry->sortkey = new_key;
++
++ reorder_entry(head, head->next, new_key);
++ return 0;
++}
++
++/**
++ * mipv6_slist_for_each - apply func to every item in list
++ * @head: list_head of the sorted list
++ * @args: args to pass to func
++ * @func: function to use
++ *
++ * function must be of type
++ * int (*func)(void *data, void *args, unsigned long sortkey)
++ * List iteration will stop once func has been applied to every item
++ * or when func returns true
++ */
++int mipv6_slist_for_each(struct list_head *head, void *args,
++ int (*func)(void *data, void *args,
++ unsigned long sortkey))
++{
++ struct list_head *pos;
++ struct mipv6_sorted_list_entry *entry;
++
++ list_for_each(pos, head) {
++ entry = list_entry(pos, struct mipv6_sorted_list_entry, list);
++ if (func(entry->data, args, entry->sortkey))
++ break;
++ }
++
++ return 0;
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/sortedlist.h linux-2.4.25/net/ipv6/mobile_ip6/sortedlist.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/sortedlist.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/sortedlist.h 2004-06-26 11:29:31.000000000 +0100
+@@ -0,0 +1,133 @@
++/*
++ * Sorted list - linked list with sortkey
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/**
++ * compare - compares two arbitrary data items
++ * @data1: first data item
++ * @data2: second data item
++ * @datalen: length of data items in bits
++ *
++ * datalen is in bits!
++ */
++int mipv6_bitwise_compare(const void *data1, const void *data2, int datalen);
++
++/**
++ * mipv6_slist_add - add an entry to sorted list
++ * @head: list_head of the sorted list
++ * @data: item to store
++ * @datalen: length of data (in bytes)
++ * @key: sortkey of item
++ *
++ * Allocates memory for entry and data
++ */
++int mipv6_slist_add(struct list_head *head, void *data, int datalen,
++ unsigned long sortkey);
++
++/**
++ * mipv6_slist_get_first - get the first data item in the list
++ * @head: list_head of the sorted list
++ *
++ * Returns the actual data item, not copy, so don't kfree it
++ */
++void *mipv6_slist_get_first(struct list_head *head);
++
++/**
++ * mipv6_slist_del_first - delete (and get) the first item in list
++ * @head: list_head of the sorted list
++ *
++ * Remember to kfree the item
++ */
++void *mipv6_slist_del_first(struct list_head *head);
++
++/**
++ * mipv6_slist_del_item - delete entry
++ * @head: list_head of the sorted list
++ * @data: item to delete
++ * @compare: function used for comparing the data items
++ *
++ * compare function needs to have prototype
++ * int (*compare)(const void *data1, const void *data2, int datalen) where
++ * datalen is in bits
++ */
++int mipv6_slist_del_item(struct list_head *head, void *data,
++ int (*compare)(const void *data1, const void *data2,
++ int datalen));
++
++/**
++ * mipv6_slist_get_first_key - get sortkey of the first item
++ * @head: list_head of the sorted list
++ */
++unsigned long mipv6_slist_get_first_key(struct list_head *head);
++
++/**
++ * mipv6_slist_get_key - get sortkey of the data item
++ * @head: list_head of the sorted list
++ * @data: the item to search for
++ * @compare: function used for comparing the data items
++ *
++ * compare function needs to have prototype
++ * int (*compare)(const void *data1, const void *data2, int datalen) where
++ * datalen is in bits
++ */
++unsigned long mipv6_slist_get_key(struct list_head *head, void *data,
++ int (*compare)(const void *data1,
++ const void *data2,
++ int datalen));
++
++/**
++ * mipv6_slist_get_data - get the data item identified by sortkey
++ * @head: list_head of the sorted list
++ * @key: sortkey of the item
++ *
++ * Returns the actual data item, not copy, so don't kfree it
++ */
++void *mipv6_slist_get_data(struct list_head *head, unsigned long sortkey);
++
++/**
++ * mipv6_slist_modify - modify data item
++ * @head: list_head of the sorted list
++ * @data: item, whose sortkey is to be modified
++ * @datalen: datalen in bytes
++ * @new_key: new sortkey
++ * @compare: function used for comparing the data items
++ *
++ * Copies the new data on top of the old one, if compare function returns
++ * non-zero. If there's no matching entry, new one will be created.
++ * Compare function needs to have prototype
++ * int (*compare)(const void *data1, const void *data2, int datalen) where
++ * datalen is in bits.
++ */
++int mipv6_slist_modify(struct list_head *head, void *data, int datalen,
++ unsigned long new_key,
++ int (*compare)(const void *data1, const void *data2,
++ int datalen));
++
++/**
++ * mipv6_slist_push_first - move the first entry to place indicated by new_key
++ * @head: list_head of the sorted list
++ * @new_key: new sortkey
++ */
++int mipv6_slist_push_first(struct list_head *head, unsigned long new_key);
++
++/**
++ * mipv6_slist_for_each - apply func to every item in list
++ * @head: list_head of the sorted list
++ * @args: args to pass to func
++ * @func: function to use
++ *
++ * function must be of type
++ * int (*func)(void *data, void *args, unsigned long sortkey)
++ * List iteration will stop once func has been applied to every item
++ * or when func returns true
++ */
++int mipv6_slist_for_each(struct list_head *head, void *args,
++ int (*func)(void *data, void *args,
++ unsigned long sortkey));
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/stats.c linux-2.4.25/net/ipv6/mobile_ip6/stats.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/stats.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/stats.c 2004-06-26 11:29:32.000000000 +0100
+@@ -0,0 +1,90 @@
++/*
++ * Statistics module
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Changes:
++ * Krishna Kumar,
++ * Venkata Jagana : SMP locking fix
++ */
++
++#include <linux/config.h>
++#include <linux/proc_fs.h>
++#include "stats.h"
++
++struct mipv6_statistics mipv6_stats;
++
++static int proc_info_dump(
++ char *buffer, char **start,
++ off_t offset, int length)
++{
++ struct inf {
++ char *name;
++ int *value;
++ } int_stats[] = {
++ {"NEncapsulations", &mipv6_stats.n_encapsulations},
++ {"NDecapsulations", &mipv6_stats.n_decapsulations},
++ {"NBindRefreshRqsRcvd", &mipv6_stats.n_brr_rcvd},
++ {"NHomeTestInitsRcvd", &mipv6_stats.n_hoti_rcvd},
++ {"NCareofTestInitsRcvd", &mipv6_stats.n_coti_rcvd},
++ {"NHomeTestRcvd", &mipv6_stats.n_hot_rcvd},
++ {"NCareofTestRcvd", &mipv6_stats.n_cot_rcvd},
++ {"NBindUpdatesRcvd", &mipv6_stats.n_bu_rcvd},
++ {"NBindAcksRcvd", &mipv6_stats.n_ba_rcvd},
++ {"NBindNAcksRcvd", &mipv6_stats.n_ban_rcvd},
++ {"NBindErrorsRcvd", &mipv6_stats.n_be_rcvd},
++ {"NBindRefreshRqsSent", &mipv6_stats.n_brr_sent},
++ {"NHomeTestInitsSent", &mipv6_stats.n_hoti_sent},
++ {"NCareofTestInitsSent", &mipv6_stats.n_coti_sent},
++ {"NHomeTestSent", &mipv6_stats.n_hot_sent},
++ {"NCareofTestSent", &mipv6_stats.n_cot_sent},
++ {"NBindUpdatesSent", &mipv6_stats.n_bu_sent},
++ {"NBindAcksSent", &mipv6_stats.n_ba_sent},
++ {"NBindNAcksSent", &mipv6_stats.n_ban_sent},
++ {"NBindErrorsSent", &mipv6_stats.n_be_sent},
++ {"NBindUpdatesDropAuth", &mipv6_stats.n_bu_drop.auth},
++ {"NBindUpdatesDropInvalid", &mipv6_stats.n_bu_drop.invalid},
++ {"NBindUpdatesDropMisc", &mipv6_stats.n_bu_drop.misc},
++		{"NBindAcksDropAuth", &mipv6_stats.n_ba_drop.auth},
++		{"NBindAcksDropInvalid", &mipv6_stats.n_ba_drop.invalid},
++		{"NBindAcksDropMisc", &mipv6_stats.n_ba_drop.misc},
++		{"NBindRqsDropAuth", &mipv6_stats.n_brr_drop.auth},
++		{"NBindRqsDropInvalid", &mipv6_stats.n_brr_drop.invalid},
++		{"NBindRqsDropMisc", &mipv6_stats.n_brr_drop.misc}
++ };
++
++ int i;
++ int len = 0;
++ for(i=0; i<sizeof(int_stats) / sizeof(struct inf); i++) {
++ len += sprintf(buffer + len, "%s = %d\n",
++ int_stats[i].name, *int_stats[i].value);
++ }
++
++ *start = buffer + offset;
++
++ len -= offset;
++
++ if(len > length) len = length;
++
++ return len;
++}
++
++int mipv6_stats_init(void)
++{
++ memset(&mipv6_stats, 0, sizeof(struct mipv6_statistics));
++ proc_net_create("mip6_stat", 0, proc_info_dump);
++ return 0;
++}
++
++void mipv6_stats_exit(void)
++{
++ proc_net_remove("mip6_stat");
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/stats.h linux-2.4.25/net/ipv6/mobile_ip6/stats.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/stats.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/stats.h 2004-06-26 11:29:32.000000000 +0100
+@@ -0,0 +1,71 @@
++/*
++ * MIPL Mobile IPv6 Statistics header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _STATS_H
++#define _STATS_H
++
++struct mipv6_drop {
++ __u32 auth;
++ __u32 invalid;
++ __u32 misc;
++};
++
++struct mipv6_statistics {
++ int n_encapsulations;
++ int n_decapsulations;
++ int n_mh_in_msg;
++ int n_mh_in_error;
++ int n_mh_out_msg;
++ int n_mh_out_error;
++
++ int n_brr_rcvd;
++ int n_hoti_rcvd;
++ int n_coti_rcvd;
++ int n_hot_rcvd;
++ int n_cot_rcvd;
++ int n_bu_rcvd;
++ int n_ba_rcvd;
++ int n_ban_rcvd;
++ int n_be_rcvd;
++
++ int n_brr_sent;
++ int n_hoti_sent;
++ int n_coti_sent;
++ int n_hot_sent;
++ int n_cot_sent;
++ int n_bu_sent;
++ int n_ba_sent;
++ int n_ban_sent;
++ int n_be_sent;
++
++ int n_ha_rcvd;
++ int n_ha_sent;
++
++ struct mipv6_drop n_bu_drop;
++ struct mipv6_drop n_ba_drop;
++ struct mipv6_drop n_brr_drop;
++ struct mipv6_drop n_be_drop;
++ struct mipv6_drop n_ha_drop;
++};
++
++extern struct mipv6_statistics mipv6_stats;
++
++#ifdef CONFIG_SMP
++/* atomic_t is max 24 bits long */
++#define MIPV6_INC_STATS(X) atomic_inc((atomic_t *)&mipv6_stats.X);
++#else
++#define MIPV6_INC_STATS(X) mipv6_stats.X++;
++#endif
++
++int mipv6_stats_init(void);
++void mipv6_stats_exit(void);
++
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel.h linux-2.4.25/net/ipv6/mobile_ip6/tunnel.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/tunnel.h 2004-06-26 11:29:32.000000000 +0100
+@@ -0,0 +1,35 @@
++/*
++ * MIPL Mobile IPv6 IP6-IP6 tunneling header file
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _TUNNEL_H
++#define _TUNNEL_H
++
++#include <linux/in6.h>
++#include <linux/if_arp.h>
++#include <net/ipv6_tunnel.h>
++
++static __inline__ int is_mip6_tnl(struct ip6_tnl *t)
++{
++ return (t != NULL &&
++ t->parms.flags & IP6_TNL_F_KERNEL_DEV &&
++ t->parms.flags & IP6_TNL_F_MIP6_DEV);
++
++}
++
++static __inline__ int dev_is_mip6_tnl(struct net_device *dev)
++{
++ struct ip6_tnl *t = (struct ip6_tnl *)dev->priv;
++ return (dev->type == ARPHRD_TUNNEL6 && is_mip6_tnl(t));
++}
++
++
++#endif
++
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel_ha.c linux-2.4.25/net/ipv6/mobile_ip6/tunnel_ha.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel_ha.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/tunnel_ha.c 2004-06-26 11:29:32.000000000 +0100
+@@ -0,0 +1,264 @@
++/*
++ * IPv6-IPv6 tunneling module
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ * Ville Nuorvala <vnuorval@tml.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#include <linux/net.h>
++#include <linux/skbuff.h>
++#include <linux/ipv6.h>
++#include <linux/net.h>
++#include <linux/netdevice.h>
++#include <linux/init.h>
++#include <linux/route.h>
++#include <linux/ipv6_route.h>
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++#endif /* CONFIG_SYSCTL */
++
++#include <net/protocol.h>
++#include <net/ipv6.h>
++#include <net/ip6_route.h>
++#include <net/dst.h>
++#include <net/addrconf.h>
++
++#include "tunnel.h"
++#include "debug.h"
++#include "stats.h"
++#include "config.h"
++
++#define MIPV6_TNL_MAX IP6_TNL_MAX
++#define MIPV6_TNL_MIN 1
++
++int mipv6_max_tnls = 3;
++int mipv6_min_tnls = 1;
++
++DECLARE_MUTEX(tnl_sem);
++
++int mipv6_max_tnls_sysctl(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp)
++{
++ int err;
++
++ DEBUG_FUNC();
++
++ down(&tnl_sem);
++ if (write) {
++ int diff;
++ int old_max_tnls = mipv6_max_tnls;
++ err = proc_dointvec(ctl, write, filp, buffer, lenp);
++ if (err < 0)
++ goto out;
++ if (mipv6_max_tnls < mipv6_min_tnls ||
++ mipv6_max_tnls > MIPV6_TNL_MAX) {
++ mipv6_max_tnls = old_max_tnls;
++ goto out;
++ }
++ if (mipv6_max_tnls < old_max_tnls) {
++ diff = old_max_tnls - mipv6_max_tnls;
++ ip6ip6_tnl_dec_max_kdev_count(diff);
++ } else if (mipv6_max_tnls > old_max_tnls) {
++ diff = mipv6_max_tnls - old_max_tnls;
++ ip6ip6_tnl_inc_max_kdev_count(diff);
++ }
++ } else {
++ err = proc_dointvec(ctl, write, filp, buffer, lenp);
++ }
++out:
++ up(&tnl_sem);
++ return err;
++}
++
++int mipv6_min_tnls_sysctl(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp)
++{
++ int err;
++
++ DEBUG_FUNC();
++
++ down(&tnl_sem);
++ if (write) {
++ int diff;
++ int old_min_tnls = mipv6_min_tnls;
++ err = proc_dointvec(ctl, write, filp, buffer, lenp);
++ if (err < 0)
++ goto out;
++ if (mipv6_min_tnls > mipv6_max_tnls ||
++ mipv6_min_tnls < MIPV6_TNL_MIN) {
++ mipv6_min_tnls = old_min_tnls;
++ goto out;
++ }
++ if (mipv6_min_tnls < old_min_tnls) {
++ diff = old_min_tnls - mipv6_min_tnls;
++ ip6ip6_tnl_dec_min_kdev_count(diff);
++ } else if (mipv6_min_tnls > old_min_tnls) {
++ diff = mipv6_min_tnls - old_min_tnls;
++ ip6ip6_tnl_inc_min_kdev_count(diff);
++ }
++ } else {
++ err = proc_dointvec(ctl, write, filp, buffer, lenp);
++ }
++out:
++ up(&tnl_sem);
++ return err;
++}
++
++static __inline__ int mipv6_tnl_add(struct in6_addr *remote,
++ struct in6_addr *local)
++{
++ struct ip6_tnl_parm p;
++ int ret;
++
++ DEBUG_FUNC();
++
++ memset(&p, 0, sizeof(p));
++ p.proto = IPPROTO_IPV6;
++ ipv6_addr_copy(&p.laddr, local);
++ ipv6_addr_copy(&p.raddr, remote);
++ p.hop_limit = 255;
++ p.flags = (IP6_TNL_F_KERNEL_DEV | IP6_TNL_F_MIP6_DEV |
++ IP6_TNL_F_IGN_ENCAP_LIMIT);
++
++ ret = ip6ip6_kernel_tnl_add(&p);
++ if (ret > 0) {
++ DEBUG(DBG_INFO, "added tunnel from: "
++ "%x:%x:%x:%x:%x:%x:%x:%x to: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(local), NIPV6ADDR(remote));
++ } else {
++ DEBUG(DBG_WARNING, "unable to add tunnel from: "
++ "%x:%x:%x:%x:%x:%x:%x:%x to: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(local), NIPV6ADDR(remote));
++ }
++ return ret;
++}
++
++static __inline__ int mipv6_tnl_del(struct in6_addr *remote,
++ struct in6_addr *local)
++{
++ struct ip6_tnl *t = ip6ip6_tnl_lookup(remote, local);
++
++ DEBUG_FUNC();
++
++ if (t != NULL && (t->parms.flags & IP6_TNL_F_MIP6_DEV)) {
++ DEBUG(DBG_INFO, "deleting tunnel from: "
++ "%x:%x:%x:%x:%x:%x:%x:%x to: %x:%x:%x:%x:%x:%x:%x:%x",
++ NIPV6ADDR(local), NIPV6ADDR(remote));
++
++ return ip6ip6_kernel_tnl_del(t);
++ }
++ return 0;
++}
++
++static int add_route_to_mn(struct in6_addr *coa, struct in6_addr *ha_addr,
++ struct in6_addr *home_addr)
++{
++ struct in6_rtmsg rtmsg;
++ int err;
++ struct ip6_tnl *t = ip6ip6_tnl_lookup(coa, ha_addr);
++
++ if (!is_mip6_tnl(t)) {
++ DEBUG(DBG_CRITICAL,"Tunnel missing");
++ return -ENODEV;
++ }
++
++ DEBUG(DBG_INFO, "adding route to: %x:%x:%x:%x:%x:%x:%x:%x via "
++ "tunnel device", NIPV6ADDR(home_addr));
++
++ memset(&rtmsg, 0, sizeof(rtmsg));
++ ipv6_addr_copy(&rtmsg.rtmsg_dst, home_addr);
++ rtmsg.rtmsg_dst_len = 128;
++ rtmsg.rtmsg_type = RTMSG_NEWROUTE;
++ rtmsg.rtmsg_flags = RTF_UP | RTF_NONEXTHOP | RTF_HOST | RTF_MOBILENODE;
++ rtmsg.rtmsg_ifindex = t->dev->ifindex;
++ rtmsg.rtmsg_metric = IP6_RT_PRIO_MIPV6;
++ if ((err = ip6_route_add(&rtmsg, NULL)) == -EEXIST) {
++ err = 0;
++ }
++ return err;
++}
++
++static void del_route_to_mn(struct in6_addr *coa, struct in6_addr *ha_addr,
++ struct in6_addr *home_addr)
++{
++ struct ip6_tnl *t = ip6ip6_tnl_lookup(coa, ha_addr);
++
++ DEBUG_FUNC();
++
++ if (is_mip6_tnl(t)) {
++ struct in6_rtmsg rtmsg;
++
++ DEBUG(DBG_INFO, "deleting route to: %x:%x:%x:%x:%x:%x:%x:%x "
++ " via tunnel device", NIPV6ADDR(home_addr));
++
++ memset(&rtmsg, 0, sizeof(rtmsg));
++ ipv6_addr_copy(&rtmsg.rtmsg_dst, home_addr);
++ rtmsg.rtmsg_dst_len = 128;
++ rtmsg.rtmsg_ifindex = t->dev->ifindex;
++ rtmsg.rtmsg_metric = IP6_RT_PRIO_MIPV6;
++ ip6_route_del(&rtmsg, NULL);
++ }
++}
++
++
++int mipv6_add_tnl_to_mn(struct in6_addr *coa,
++ struct in6_addr *ha_addr,
++ struct in6_addr *home_addr)
++{
++ int ret;
++
++ DEBUG_FUNC();
++
++ ret = mipv6_tnl_add(coa, ha_addr);
++
++ if (ret > 0) {
++ int err = add_route_to_mn(coa, ha_addr, home_addr);
++ if (err) {
++ if (err != -ENODEV) {
++ mipv6_tnl_del(coa, ha_addr);
++ }
++ return err;
++ }
++ }
++ return ret;
++}
++
++int mipv6_del_tnl_to_mn(struct in6_addr *coa,
++ struct in6_addr *ha_addr,
++ struct in6_addr *home_addr)
++{
++ DEBUG_FUNC();
++ del_route_to_mn(coa, ha_addr, home_addr);
++ return mipv6_tnl_del(coa, ha_addr);
++}
++
++__init void mipv6_initialize_tunnel(void)
++{
++ down(&tnl_sem);
++ ip6ip6_tnl_inc_max_kdev_count(mipv6_max_tnls);
++ ip6ip6_tnl_inc_min_kdev_count(mipv6_min_tnls);
++ up(&tnl_sem);
++ mip6_fn.bce_tnl_rt_add = add_route_to_mn;
++ mip6_fn.bce_tnl_rt_del = del_route_to_mn;
++}
++
++__exit void mipv6_shutdown_tunnel(void)
++{
++ mip6_fn.bce_tnl_rt_del = NULL;
++ mip6_fn.bce_tnl_rt_add = NULL;
++ down(&tnl_sem);
++ ip6ip6_tnl_dec_min_kdev_count(mipv6_min_tnls);
++ ip6ip6_tnl_dec_max_kdev_count(mipv6_max_tnls);
++ up(&tnl_sem);
++}
++
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel_ha.h linux-2.4.25/net/ipv6/mobile_ip6/tunnel_ha.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel_ha.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/tunnel_ha.h 2004-06-26 11:29:32.000000000 +0100
+@@ -0,0 +1,20 @@
++#ifndef _TUNNEL_HA_H
++#define _TUNNEL_HA_H
++
++#include "tunnel.h"
++
++extern int mipv6_max_tnls;
++extern int mipv6_min_tnls;
++
++extern void mipv6_initialize_tunnel(void);
++extern void mipv6_shutdown_tunnel(void);
++
++extern int mipv6_add_tnl_to_mn(struct in6_addr *coa,
++ struct in6_addr *ha_addr,
++ struct in6_addr *home_addr);
++
++extern int mipv6_del_tnl_to_mn(struct in6_addr *coa,
++ struct in6_addr *ha_addr,
++ struct in6_addr *home_addr);
++
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel_mn.c linux-2.4.25/net/ipv6/mobile_ip6/tunnel_mn.c
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel_mn.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/tunnel_mn.c 2004-06-26 11:29:32.000000000 +0100
+@@ -0,0 +1,160 @@
++/*
++ * IPv6-IPv6 tunneling module
++ *
++ * Authors:
++ * Sami Kivisaari <skivisaa@cc.hut.fi>
++ * Ville Nuorvala <vnuorval@tml.hut.fi>
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ */
++
++#include <linux/net.h>
++#include <linux/skbuff.h>
++#include <linux/ipv6.h>
++#include <linux/net.h>
++#include <linux/netdevice.h>
++#include <linux/init.h>
++#include <linux/route.h>
++#include <linux/ipv6_route.h>
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++#endif /* CONFIG_SYSCTL */
++
++#include <net/protocol.h>
++#include <net/ipv6.h>
++#include <net/ip6_route.h>
++#include <net/dst.h>
++#include <net/addrconf.h>
++
++#include "tunnel.h"
++#include "debug.h"
++#include "stats.h"
++
++static struct net_device *mn_ha_tdev;
++
++static spinlock_t mn_ha_lock = SPIN_LOCK_UNLOCKED;
++
++static __inline__ int add_reverse_route(struct in6_addr *ha_addr,
++ struct in6_addr *home_addr,
++ struct net_device *tdev)
++{
++ struct in6_rtmsg rtmsg;
++ int err;
++
++ DEBUG_FUNC();
++
++ memset(&rtmsg, 0, sizeof(rtmsg));
++ rtmsg.rtmsg_type = RTMSG_NEWROUTE;
++ ipv6_addr_copy(&rtmsg.rtmsg_src, home_addr);
++ rtmsg.rtmsg_src_len = 128;
++ rtmsg.rtmsg_flags = RTF_UP | RTF_DEFAULT;
++ rtmsg.rtmsg_ifindex = tdev->ifindex;
++ rtmsg.rtmsg_metric = IP6_RT_PRIO_MIPV6;
++ if ((err = ip6_route_add(&rtmsg, NULL)) == -EEXIST) {
++ return 0;
++ }
++ return err;
++}
++
++static __inline__ void del_reverse_route(struct in6_addr *ha_addr,
++ struct in6_addr *home_addr,
++ struct net_device *tdev)
++{
++ struct in6_rtmsg rtmsg;
++
++ DEBUG(DBG_INFO, "removing reverse route via tunnel device");
++
++ memset(&rtmsg, 0, sizeof(rtmsg));
++ ipv6_addr_copy(&rtmsg.rtmsg_src, home_addr);
++ rtmsg.rtmsg_src_len = 128;
++ rtmsg.rtmsg_ifindex = tdev->ifindex;
++ rtmsg.rtmsg_metric = IP6_RT_PRIO_MIPV6;
++ ip6_route_del(&rtmsg, NULL);
++}
++
++int mipv6_add_tnl_to_ha(void)
++{
++ struct ip6_tnl_parm p;
++ struct ip6_tnl *t;
++ int err;
++
++ DEBUG_FUNC();
++
++ memset(&p, 0, sizeof(p));
++ p.proto = IPPROTO_IPV6;
++ p.hop_limit = 255;
++ p.flags = (IP6_TNL_F_KERNEL_DEV | IP6_TNL_F_MIP6_DEV |
++ IP6_TNL_F_IGN_ENCAP_LIMIT);
++ strcpy(p.name, "mip6mnha1");
++
++ rtnl_lock();
++ if ((err = ip6ip6_tnl_create(&p, &t))) {
++ rtnl_unlock();
++ return err;
++ }
++ spin_lock_bh(&mn_ha_lock);
++
++ if (!mn_ha_tdev) {
++ mn_ha_tdev = t->dev;
++ dev_hold(mn_ha_tdev);
++ }
++ spin_unlock_bh(&mn_ha_lock);
++ dev_open(t->dev);
++ rtnl_unlock();
++ return 0;
++}
++
++int mipv6_mv_tnl_to_ha(struct in6_addr *ha_addr,
++ struct in6_addr *coa,
++ struct in6_addr *home_addr)
++{
++ int err = -ENODEV;
++
++ DEBUG_FUNC();
++
++ spin_lock_bh(&mn_ha_lock);
++ if (mn_ha_tdev) {
++ struct ip6_tnl_parm p;
++ memset(&p, 0, sizeof(p));
++ p.proto = IPPROTO_IPV6;
++ ipv6_addr_copy(&p.laddr, coa);
++ ipv6_addr_copy(&p.raddr, ha_addr);
++ p.hop_limit = 255;
++ p.flags = (IP6_TNL_F_KERNEL_DEV | IP6_TNL_F_MIP6_DEV |
++ IP6_TNL_F_IGN_ENCAP_LIMIT);
++
++ ip6ip6_tnl_change((struct ip6_tnl *) mn_ha_tdev->priv, &p);
++ if (ipv6_addr_cmp(coa, home_addr)) {
++ err = add_reverse_route(ha_addr, home_addr,
++ mn_ha_tdev);
++ } else {
++ del_reverse_route(ha_addr, home_addr, mn_ha_tdev);
++ err = 0;
++ }
++ }
++ spin_unlock_bh(&mn_ha_lock);
++ return err;
++}
++
++void mipv6_del_tnl_to_ha(void)
++{
++ struct net_device *dev;
++
++ DEBUG_FUNC();
++
++ rtnl_lock();
++ spin_lock_bh(&mn_ha_lock);
++ dev = mn_ha_tdev;
++ mn_ha_tdev = NULL;
++ spin_unlock_bh(&mn_ha_lock);
++ dev_put(dev);
++ unregister_netdevice(dev);
++ rtnl_unlock();
++}
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel_mn.h linux-2.4.25/net/ipv6/mobile_ip6/tunnel_mn.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/tunnel_mn.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/tunnel_mn.h 2004-06-26 11:29:32.000000000 +0100
+@@ -0,0 +1,14 @@
++#ifndef _TUNNEL_MN_H
++#define _TUNNEL_MN_H
++
++#include "tunnel.h"
++
++extern int mipv6_add_tnl_to_ha(void);
++
++extern int mipv6_mv_tnl_to_ha(struct in6_addr *ha_addr,
++ struct in6_addr *coa,
++ struct in6_addr *home_addr);
++
++extern void mipv6_del_tnl_to_ha(void);
++
++#endif
+diff -uprN linux-2.4.25.old/net/ipv6/mobile_ip6/util.h linux-2.4.25/net/ipv6/mobile_ip6/util.h
+--- linux-2.4.25.old/net/ipv6/mobile_ip6/util.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.4.25/net/ipv6/mobile_ip6/util.h 2004-06-26 11:29:32.000000000 +0100
+@@ -0,0 +1,91 @@
++/*
++ * MIPL Mobile IPv6 Utility functions
++ *
++ * $Id$
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _UTIL_H
++#define _UTIL_H
++
++#include <linux/in6.h>
++#include <asm/byteorder.h>
++
++/**
++ * mipv6_prefix_compare - Compare two IPv6 prefixes
++ * @addr: IPv6 address
++ * @prefix: IPv6 address
++ * @nprefix: number of bits to compare
++ *
++ * Perform prefix comparison bitwise for the @nprefix first bits
++ * Returns 1, if the prefixes are the same, 0 otherwise
++ **/
++static inline int mipv6_prefix_compare(const struct in6_addr *addr,
++ const struct in6_addr *prefix,
++ const unsigned int pfix_len)
++{
++ int i;
++ unsigned int nprefix = pfix_len;
++
++ if (nprefix > 128)
++ return 0;
++
++ for (i = 0; nprefix > 0; nprefix -= 32, i++) {
++ if (nprefix >= 32) {
++ if (addr->s6_addr32[i] != prefix->s6_addr32[i])
++ return 0;
++ } else {
++ if (((addr->s6_addr32[i] ^ prefix->s6_addr32[i]) &
++ ((~0) << (32 - nprefix))) != 0)
++ return 0;
++ return 1;
++ }
++ }
++
++ return 1;
++}
++
++/**
++ * homeagent_anycast - Compute Home Agent anycast address
++ * @ac_addr: append home agent anycast suffix to passed prefix
++ * @prefix: prefix ha anycast address is generated from
++ * @plen: length of prefix in bits
++ *
++ * Calculate corresponding Home Agent Anycast Address (RFC2526) in a
++ * given subnet.
++ */
++static inline int
++mipv6_ha_anycast(struct in6_addr *ac_addr, struct in6_addr *prefix, int plen)
++{
++ if (plen <= 0 || plen > 120) {
++ /* error, interface id should be minimum 8 bits */
++ return -1;
++ }
++ ipv6_addr_copy(ac_addr, prefix);
++
++ if (plen < 32)
++ ac_addr->s6_addr32[0] |= htonl((u32)(~0) >> plen);
++ if (plen < 64)
++ ac_addr->s6_addr32[1] |= htonl((u32)(~0) >> (plen > 32 ? plen % 32 : 0));
++	if (plen < 96)
++		ac_addr->s6_addr32[2] |= htonl((u32)(~0) >> (plen > 64 ? plen % 32 : 0));
++	if (plen <= 120)
++		ac_addr->s6_addr32[3] |= htonl((u32)(~0) >> (plen > 96 ? plen % 32 : 0));
++
++ /* RFC2526: for interface identifiers in EUI-64
++ * format, the universal/local bit in the interface
++ * identifier MUST be set to 0. */
++ if (plen == 64) {
++ ac_addr->s6_addr32[2] &= (int)htonl(0xfdffffff);
++ }
++ /* Mobile IPv6 Home-Agents anycast id (0x7e) */
++ ac_addr->s6_addr32[3] &= (int)htonl(0xfffffffe);
++
++ return 0;
++}
++
++#endif /* _UTIL_H */
+diff -uprN linux-2.4.25.old/net/ipv6/ndisc.c linux-2.4.25/net/ipv6/ndisc.c
+--- linux-2.4.25.old/net/ipv6/ndisc.c 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/net/ipv6/ndisc.c 2004-06-26 11:29:32.000000000 +0100
+@@ -23,6 +23,7 @@
+ * and moved to net/core.
+ * Pekka Savola : RFC2461 validation
+ * YOSHIFUJI Hideaki @USAGI : Verify ND options properly
++ * Ville Nuorvala : RFC2461 fixes to proxy ND
+ */
+
+ /* Set to 3 to get tracing... */
+@@ -70,6 +71,7 @@
+ #include <net/ip6_route.h>
+ #include <net/addrconf.h>
+ #include <net/icmp.h>
++#include <net/mipglue.h>
+
+ #include <net/checksum.h>
+ #include <linux/proc_fs.h>
+@@ -187,6 +189,8 @@ struct ndisc_options *ndisc_parse_option
+ case ND_OPT_TARGET_LL_ADDR:
+ case ND_OPT_MTU:
+ case ND_OPT_REDIRECT_HDR:
++ case ND_OPT_RTR_ADV_INTERVAL:
++ case ND_OPT_HOME_AGENT_INFO:
+ if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
+ ND_PRINTK2((KERN_WARNING
+ "ndisc_parse_options(): duplicated ND6 option found: type=%d\n",
+@@ -372,8 +376,8 @@ ndisc_build_ll_hdr(struct sk_buff *skb,
+ */
+
+ void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+- struct in6_addr *daddr, struct in6_addr *solicited_addr,
+- int router, int solicited, int override, int inc_opt)
++ struct in6_addr *daddr, struct in6_addr *solicited_addr,
++ int router, int solicited, int override, int inc_opt)
+ {
+ static struct in6_addr tmpaddr;
+ struct inet6_ifaddr *ifp;
+@@ -766,7 +770,8 @@ void ndisc_recv_ns(struct sk_buff *skb)
+ int addr_type = ipv6_addr_type(saddr);
+
+ if (in6_dev && in6_dev->cnf.forwarding &&
+- (addr_type & IPV6_ADDR_UNICAST) &&
++ (addr_type & IPV6_ADDR_UNICAST ||
++ addr_type == IPV6_ADDR_ANY) &&
+ pneigh_lookup(&nd_tbl, &msg->target, dev, 0)) {
+ int inc = ipv6_addr_type(daddr)&IPV6_ADDR_MULTICAST;
+
+@@ -778,13 +783,21 @@ void ndisc_recv_ns(struct sk_buff *skb)
+ nd_tbl.stats.rcv_probes_mcast++;
+ else
+ nd_tbl.stats.rcv_probes_ucast++;
+-
+- neigh = neigh_event_ns(&nd_tbl, lladdr, saddr, dev);
+
+- if (neigh) {
+- ndisc_send_na(dev, neigh, saddr, &msg->target,
+- 0, 1, 0, 1);
+- neigh_release(neigh);
++ if (addr_type & IPV6_ADDR_UNICAST) {
++ neigh = neigh_event_ns(&nd_tbl, lladdr, saddr, dev);
++
++ if (neigh) {
++ ndisc_send_na(dev, neigh, saddr, &msg->target,
++ 0, 1, 0, 1);
++ neigh_release(neigh);
++ }
++ } else {
++ /* the proxy should also protect against DAD */
++ struct in6_addr maddr;
++ ipv6_addr_all_nodes(&maddr);
++ ndisc_send_na(dev, NULL, &maddr, &msg->target,
++ 0, 0, 0, 1);
+ }
+ } else {
+ struct sk_buff *n = skb_clone(skb, GFP_ATOMIC);
+@@ -849,6 +862,9 @@ void ndisc_recv_na(struct sk_buff *skb)
+ if (ifp->flags & IFA_F_TENTATIVE) {
+ addrconf_dad_failure(ifp);
+ return;
++ } else if (ndisc_mip_mn_ha_probe(ifp, lladdr)) {
++ in6_ifa_put(ifp);
++ return;
+ }
+ /* What should we make now? The advertisement
+ is invalid, but ndisc specs say nothing
+@@ -887,6 +903,7 @@ void ndisc_recv_na(struct sk_buff *skb)
+ msg->icmph.icmp6_override, 1);
+ neigh_release(neigh);
+ }
++ ndisc_check_mipv6_dad(&msg->target);
+ }
+
+ static void ndisc_router_discovery(struct sk_buff *skb)
+@@ -894,6 +911,7 @@ static void ndisc_router_discovery(struc
+ struct ra_msg *ra_msg = (struct ra_msg *) skb->h.raw;
+ struct neighbour *neigh;
+ struct inet6_dev *in6_dev;
++ int change_rtr;
+ struct rt6_info *rt;
+ int lifetime;
+ struct ndisc_options ndopts;
+@@ -923,10 +941,6 @@ static void ndisc_router_discovery(struc
+ ND_PRINTK1("RA: can't find in6 device\n");
+ return;
+ }
+- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra) {
+- in6_dev_put(in6_dev);
+- return;
+- }
+
+ if (!ndisc_parse_options(opt, optlen, &ndopts)) {
+ in6_dev_put(in6_dev);
+@@ -935,7 +949,12 @@ static void ndisc_router_discovery(struc
+ "ICMP6 RA: invalid ND option, ignored.\n");
+ return;
+ }
++ change_rtr = ndisc_mipv6_ra_rcv(skb, &ndopts);
+
++ if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra) {
++ in6_dev_put(in6_dev);
++ return;
++ }
+ if (in6_dev->if_flags & IF_RS_SENT) {
+ /*
+ * flag that an RA was received after an RS was sent
+@@ -963,8 +982,7 @@ static void ndisc_router_discovery(struc
+ ip6_del_rt(rt, NULL);
+ rt = NULL;
+ }
+-
+- if (rt == NULL && lifetime) {
++ if (rt == NULL && lifetime && change_rtr) {
+ ND_PRINTK2("ndisc_rdisc: adding default router\n");
+
+ rt = rt6_add_dflt_router(&skb->nh.ipv6h->saddr, skb->dev);
+@@ -1087,6 +1105,8 @@ out:
+ if (rt)
+ dst_release(&rt->u.dst);
+ in6_dev_put(in6_dev);
++
++ ndisc_mipv6_change_router(change_rtr);
+ }
+
+ static void ndisc_redirect_rcv(struct sk_buff *skb)
+diff -uprN linux-2.4.25.old/net/ipv6/raw.c linux-2.4.25/net/ipv6/raw.c
+--- linux-2.4.25.old/net/ipv6/raw.c 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/net/ipv6/raw.c 2004-06-26 11:29:32.000000000 +0100
+@@ -43,6 +43,7 @@
+ #include <net/transp_v6.h>
+ #include <net/udp.h>
+ #include <net/inet_common.h>
++#include <net/mipglue.h>
+
+ #include <net/rawv6.h>
+
+@@ -636,6 +637,7 @@ static int rawv6_sendmsg(struct sock *sk
+ hdr.daddr = daddr;
+ else
+ hdr.daddr = NULL;
++ hdr.daddr = mipv6_get_fake_hdr_daddr(hdr.daddr, daddr);
+
+ err = ip6_build_xmit(sk, rawv6_frag_cksum, &hdr, &fl, len,
+ opt, hlimit, msg->msg_flags);
+diff -uprN linux-2.4.25.old/net/ipv6/route.c linux-2.4.25/net/ipv6/route.c
+--- linux-2.4.25.old/net/ipv6/route.c 2004-02-18 13:36:32.000000000 +0000
++++ linux-2.4.25/net/ipv6/route.c 2004-06-26 11:29:32.000000000 +0100
+@@ -49,6 +49,7 @@
+ #include <net/addrconf.h>
+ #include <net/tcp.h>
+ #include <linux/rtnetlink.h>
++#include <net/mipglue.h>
+
+ #include <asm/uaccess.h>
+
+@@ -363,12 +364,8 @@ static struct rt6_info *rt6_cow(struct r
+ rt->u.dst.flags |= DST_HOST;
+
+ #ifdef CONFIG_IPV6_SUBTREES
+- if (rt->rt6i_src.plen && saddr) {
+- ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
+- rt->rt6i_src.plen = 128;
+- }
++ rt->rt6i_src.plen = ort->rt6i_src.plen;
+ #endif
+-
+ rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
+
+ dst_hold(&rt->u.dst);
+@@ -511,14 +508,19 @@ struct dst_entry * ip6_route_output(stru
+ struct rt6_info *rt;
+ int strict;
+ int attempts = 3;
++ struct in6_addr *saddr;
+
++ if (ipv6_chk_addr(fl->nl_u.ip6_u.daddr, NULL))
++ saddr = NULL;
++ else
++ saddr = fl->nl_u.ip6_u.saddr;
++
+ strict = ipv6_addr_type(fl->nl_u.ip6_u.daddr) & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL);
+
+ relookup:
+ read_lock_bh(&rt6_lock);
+
+- fn = fib6_lookup(&ip6_routing_table, fl->nl_u.ip6_u.daddr,
+- fl->nl_u.ip6_u.saddr);
++ fn = fib6_lookup(&ip6_routing_table, fl->nl_u.ip6_u.daddr, saddr);
+
+ restart:
+ rt = fn->leaf;
+@@ -663,25 +665,6 @@ out:
+ return (atomic_read(&ip6_dst_ops.entries) > ip6_rt_max_size);
+ }
+
+-/* Clean host part of a prefix. Not necessary in radix tree,
+- but results in cleaner routing tables.
+-
+- Remove it only when all the things will work!
+- */
+-
+-static void ipv6_addr_prefix(struct in6_addr *pfx,
+- const struct in6_addr *addr, int plen)
+-{
+- int b = plen&0x7;
+- int o = plen>>3;
+-
+- memcpy(pfx->s6_addr, addr, o);
+- if (o < 16)
+- memset(pfx->s6_addr + o, 0, 16 - o);
+- if (b != 0)
+- pfx->s6_addr[o] = addr->s6_addr[o]&(0xff00 >> b);
+-}
+-
+ static int ipv6_get_mtu(struct net_device *dev)
+ {
+ int mtu = IPV6_MIN_MTU;
+@@ -810,7 +793,7 @@ int ip6_route_add(struct in6_rtmsg *rtms
+ if (!(gwa_type&IPV6_ADDR_UNICAST))
+ goto out;
+
+- grt = rt6_lookup(gw_addr, NULL, rtmsg->rtmsg_ifindex, 1);
++ grt = rt6_lookup(gw_addr, &rtmsg->rtmsg_src, rtmsg->rtmsg_ifindex, 1);
+
+ err = -EHOSTUNREACH;
+ if (grt == NULL)
+@@ -848,7 +831,15 @@ int ip6_route_add(struct in6_rtmsg *rtms
+ goto out;
+ }
+ }
+-
++#ifdef USE_IPV6_MOBILITY
++ /* If destination is mobile node, add special skb->dst->input
++ * function for proxy ND.
++ */
++ if (rtmsg->rtmsg_flags & RTF_MOBILENODE) {
++ rt->u.dst.input = ip6_mipv6_forward;
++ }
++#endif /* USE_IPV6_MOBILITY */
++
+ if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr))
+ rt->rt6i_hoplimit = IPV6_DEFAULT_MCASTHOPS;
+ else
+@@ -936,7 +927,7 @@ void rt6_redirect(struct in6_addr *dest,
+ struct rt6_info *rt, *nrt;
+
+ /* Locate old route to this destination. */
+- rt = rt6_lookup(dest, NULL, neigh->dev->ifindex, 1);
++ rt = rt6_lookup(dest, saddr, neigh->dev->ifindex, 1);
+
+ if (rt == NULL)
+ return;
+@@ -1003,6 +994,9 @@ source_ok:
+ nrt = ip6_rt_copy(rt);
+ if (nrt == NULL)
+ goto out;
++#ifdef CONFIG_IPV6_SUBTREES
++ nrt->rt6i_src.plen = rt->rt6i_src.plen;
++#endif
+
+ nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
+ if (on_link)
+@@ -1104,6 +1098,9 @@ void rt6_pmtu_discovery(struct in6_addr
+ nrt = ip6_rt_copy(rt);
+ if (nrt == NULL)
+ goto out;
++#ifdef CONFIG_IPV6_SUBTREES
++ nrt->rt6i_src.plen = rt->rt6i_src.plen;
++#endif
+ ipv6_addr_copy(&nrt->rt6i_dst.addr, daddr);
+ nrt->rt6i_dst.plen = 128;
+ nrt->u.dst.flags |= DST_HOST;
+diff -uprN linux-2.4.25.old/net/ipv6/tcp_ipv6.c linux-2.4.25/net/ipv6/tcp_ipv6.c
+--- linux-2.4.25.old/net/ipv6/tcp_ipv6.c 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/net/ipv6/tcp_ipv6.c 2004-06-26 11:29:32.000000000 +0100
+@@ -50,6 +50,7 @@
+ #include <net/addrconf.h>
+ #include <net/ip6_route.h>
+ #include <net/inet_ecn.h>
++#include <net/mipglue.h>
+
+ #include <asm/uaccess.h>
+
+@@ -557,6 +558,7 @@ static int tcp_v6_connect(struct sock *s
+ struct flowi fl;
+ struct dst_entry *dst;
+ int addr_type;
++ int reroute = 0;
+ int err;
+
+ if (addr_len < SIN6_LEN_RFC2133)
+@@ -660,7 +662,7 @@ static int tcp_v6_connect(struct sock *s
+
+ fl.proto = IPPROTO_TCP;
+ fl.fl6_dst = &np->daddr;
+- fl.fl6_src = saddr;
++ fl.fl6_src = saddr;
+ fl.oif = sk->bound_dev_if;
+ fl.uli_u.ports.dport = usin->sin6_port;
+ fl.uli_u.ports.sport = sk->sport;
+@@ -669,31 +671,46 @@ static int tcp_v6_connect(struct sock *s
+ struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
+ fl.nl_u.ip6_u.daddr = rt0->addr;
+ }
+-
+ dst = ip6_route_output(sk, &fl);
+-
++#ifdef CONFIG_IPV6_SUBTREES
++ reroute = (saddr == NULL);
++#endif
+ if ((err = dst->error) != 0) {
+ dst_release(dst);
+ goto failure;
+ }
+-
+- ip6_dst_store(sk, dst, NULL);
+- sk->route_caps = dst->dev->features&~NETIF_F_IP_CSUM;
+-
++ if (!reroute) {
++ ip6_dst_store(sk, dst, NULL, NULL);
++ sk->route_caps = dst->dev->features&~NETIF_F_IP_CSUM;
++ }
+ if (saddr == NULL) {
+ err = ipv6_get_saddr(dst, &np->daddr, &saddr_buf);
++
++ if (reroute)
++ dst_release(dst);
+ if (err)
+ goto failure;
+
+ saddr = &saddr_buf;
++ ipv6_addr_copy(&np->rcv_saddr, saddr);
++#ifdef CONFIG_IPV6_SUBTREES
++ fl.fl6_src = saddr;
++ dst = ip6_route_output(sk, &fl);
++
++ if ((err = dst->error) != 0) {
++ dst_release(dst);
++ goto failure;
++ }
++ ip6_dst_store(sk, dst, NULL, NULL);
++ sk->route_caps = dst->dev->features&~NETIF_F_IP_CSUM;
++#endif
+ }
+
+ /* set the source address */
+- ipv6_addr_copy(&np->rcv_saddr, saddr);
+ ipv6_addr_copy(&np->saddr, saddr);
+ sk->rcv_saddr= LOOPBACK4_IPV6;
+
+- tp->ext_header_len = 0;
++ tp->ext_header_len = tcp_v6_get_mipv6_header_len();
+ if (np->opt)
+ tp->ext_header_len = np->opt->opt_flen+np->opt->opt_nflen;
+ tp->mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
+@@ -1338,7 +1355,7 @@ static struct sock * tcp_v6_syn_recv_soc
+ #endif
+ MOD_INC_USE_COUNT;
+
+- ip6_dst_store(newsk, dst, NULL);
++ ip6_dst_store(newsk, dst, NULL, NULL);
+ sk->route_caps = dst->dev->features&~NETIF_F_IP_CSUM;
+
+ newtp = &(newsk->tp_pinfo.af_tcp);
+@@ -1383,7 +1400,7 @@ static struct sock * tcp_v6_syn_recv_soc
+ sock_kfree_s(sk, opt, opt->tot_len);
+ }
+
+- newtp->ext_header_len = 0;
++ newtp->ext_header_len = tcp_v6_get_mipv6_header_len();
+ if (np->opt)
+ newtp->ext_header_len = np->opt->opt_nflen + np->opt->opt_flen;
+
+@@ -1710,7 +1727,7 @@ static int tcp_v6_rebuild_header(struct
+ return err;
+ }
+
+- ip6_dst_store(sk, dst, NULL);
++ ip6_dst_store(sk, dst, NULL, NULL);
+ sk->route_caps = dst->dev->features&~NETIF_F_IP_CSUM;
+ }
+
+@@ -1749,7 +1766,7 @@ static int tcp_v6_xmit(struct sk_buff *s
+ return -sk->err_soft;
+ }
+
+- ip6_dst_store(sk, dst, NULL);
++ ip6_dst_store(sk, dst, NULL, NULL);
+ }
+
+ skb->dst = dst_clone(dst);
+diff -uprN linux-2.4.25.old/net/ipv6/udp.c linux-2.4.25/net/ipv6/udp.c
+--- linux-2.4.25.old/net/ipv6/udp.c 2004-02-18 13:36:32.000000000 +0000
++++ linux-2.4.25/net/ipv6/udp.c 2004-06-26 11:29:32.000000000 +0100
+@@ -48,6 +48,7 @@
+ #include <net/ip.h>
+ #include <net/udp.h>
+ #include <net/inet_common.h>
++#include <net/mipglue.h>
+
+ #include <net/checksum.h>
+
+@@ -232,6 +233,7 @@ int udpv6_connect(struct sock *sk, struc
+ struct ip6_flowlabel *flowlabel = NULL;
+ int addr_type;
+ int err;
++ int reroute = 0;
+
+ if (usin->sin6_family == AF_INET) {
+ if (__ipv6_only_sock(sk))
+@@ -331,7 +333,7 @@ ipv4_connected:
+
+ fl.proto = IPPROTO_UDP;
+ fl.fl6_dst = &np->daddr;
+- fl.fl6_src = &saddr;
++ fl.fl6_src = NULL;
+ fl.oif = sk->bound_dev_if;
+ fl.uli_u.ports.dport = sk->dport;
+ fl.uli_u.ports.sport = sk->sport;
+@@ -348,29 +350,44 @@ ipv4_connected:
+ struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
+ fl.fl6_dst = rt0->addr;
+ }
+-
+ dst = ip6_route_output(sk, &fl);
+-
+ if ((err = dst->error) != 0) {
+ dst_release(dst);
+ fl6_sock_release(flowlabel);
+- return err;
+- }
+-
+- ip6_dst_store(sk, dst, fl.fl6_dst);
+-
++ return err;
++ }
++#ifdef CONFIG_IPV6_SUBTREES
++ reroute = (fl.fl6_src == NULL);
++#endif
+ /* get the source adddress used in the apropriate device */
+
+ err = ipv6_get_saddr(dst, daddr, &saddr);
+
++ if (reroute)
++ dst_release(dst);
++
+ if (err == 0) {
+- if(ipv6_addr_any(&np->saddr))
++#ifdef CONFIG_IPV6_SUBTREES
++ if (reroute) {
++ fl.fl6_src = &saddr;
++ dst = ip6_route_output(sk, &fl);
++ if ((err = dst->error) != 0) {
++ dst_release(dst);
++ fl6_sock_release(flowlabel);
++ return err;
++ }
++ }
++#endif
++ if(ipv6_addr_any(&np->saddr)) {
+ ipv6_addr_copy(&np->saddr, &saddr);
+-
++ fl.fl6_src = &np->saddr;
++ }
+ if(ipv6_addr_any(&np->rcv_saddr)) {
+ ipv6_addr_copy(&np->rcv_saddr, &saddr);
+ sk->rcv_saddr = LOOPBACK4_IPV6;
+ }
++ ip6_dst_store(sk, dst, fl.fl6_dst,
++ fl.fl6_src == &np->saddr ? fl.fl6_src : NULL);
+ sk->state = TCP_ESTABLISHED;
+ }
+ fl6_sock_release(flowlabel);
+@@ -894,6 +911,7 @@ static int udpv6_sendmsg(struct sock *sk
+ opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ if (opt && opt->srcrt)
+ udh.daddr = daddr;
++ udh.daddr = mipv6_get_fake_hdr_daddr(udh.daddr, daddr);
+
+ udh.uh.source = sk->sport;
+ udh.uh.len = len < 0x10000 ? htons(len) : 0;
+diff -uprN linux-2.4.25.old/net/netsyms.c linux-2.4.25/net/netsyms.c
+--- linux-2.4.25.old/net/netsyms.c 2003-11-28 18:26:21.000000000 +0000
++++ linux-2.4.25/net/netsyms.c 2004-06-26 11:29:32.000000000 +0100
+@@ -190,6 +190,7 @@ EXPORT_SYMBOL(neigh_sysctl_register);
+ #endif
+ EXPORT_SYMBOL(pneigh_lookup);
+ EXPORT_SYMBOL(pneigh_enqueue);
++EXPORT_SYMBOL(pneigh_delete);
+ EXPORT_SYMBOL(neigh_destroy);
+ EXPORT_SYMBOL(neigh_parms_alloc);
+ EXPORT_SYMBOL(neigh_parms_release);
diff --git a/packages/linux/files/usb-gadget-ether-compat.patch b/packages/linux/files/usb-gadget-ether-compat.patch
index e69de29bb2..9e336cfa3c 100644
--- a/packages/linux/files/usb-gadget-ether-compat.patch
+++ b/packages/linux/files/usb-gadget-ether-compat.patch
@@ -0,0 +1,30 @@
+--- kernel/drivers/usb/gadget/ether.c 2005-04-24 12:40:08.867411535 +0200
++++ /tmp/ether.c 2005-04-24 12:39:02.119093498 +0200
+@@ -231,6 +231,16 @@
+ MODULE_PARM(host_addr, "s");
+ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
+
++#ifdef CONFIG_USB_ETH_RNDIS
++/* setting it to 1 disables the RNDIS extension,
++ * needed to make g_ether compatible with usbnet from kernel < 2.6.10:
++ * - simple vendor/product
++ * - just the CDC interface
++ */
++static u8 __initdata compat_mode;
++MODULE_PARM(compat_mode, "b");
++MODULE_PARM_DESC(compat_mode, "non-zero value reverts to traditional usbnet compatibility (RNDIS disabled)");
++#endif
+
+ /*-------------------------------------------------------------------------*/
+
+@@ -2336,6 +2346,10 @@
+ #endif
+ #ifndef CONFIG_USB_ETH_RNDIS
+ rndis = 0;
++#else
++ if (compat_mode) {
++ rndis = 0;
++ }
+ #endif
+
+ /* Because most host side USB stacks handle CDC Ethernet, that