--- ./.pc/ixp400-le-be.patch/ixp400_eth.c	2005-04-19 22:58:18.000000000 -0700
+++ ./ixp400_eth.c	2005-10-23 23:29:06.760778566 -0700
@@ -57,6 +57,7 @@
 #include <linux/mii.h>
 #include <linux/socket.h>
 #include <linux/cache.h>
+#include <linux/interrupt.h>
 #include <asm/io.h>
 #include <asm/errno.h>
 #include <net/pkt_sched.h>
@@ -95,7 +96,7 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Intel Corporation");
 #define MODULE_NAME "ixp400_eth"
-#define MODULE_VERSION "1.4"
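+/* renamed: <linux/module.h> in 2.6 already defines a MODULE_VERSION() macro */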
+#define MODULE_VERSION_IXP400_ETH "1.4B"
 
 /* Module parameters */
 static int npe_learning = 1;      /* default : NPE learning & filtering enable */
@@ -358,8 +359,13 @@
     /* Used to stop the kernel thread for link monitoring. */
     volatile BOOL maintenanceCheckStopped;
 
+    struct work_struct mii_job;
+
     /* used for tx timeout */
-    struct tq_struct tq_timeout;
+    struct work_struct tx_timeout_job;
 
     /* used to control the message output */
     UINT32 devFlags;
@@ -557,6 +563,8 @@
 #endif
 };
 
+/* workqueue on which the link-monitor, tx-timeout and EthDB maintenance
+ * jobs are run
+ */
+static struct workqueue_struct *npe_eth_workqueue;
+
 /* Mutex lock used to coordinate access to IxEthAcc functions
  * which manipulate the MII registers on the PHYs
  */
@@ -565,6 +573,11 @@
 /* mutex locked when maintenance is being performed */
 static struct semaphore *maintenance_mutex;
 
+/* Flag, one per port, set when the corresponding NPE is running and
+ * cleared when that NPE is stopped
+ */
+static int npeRunning[IX_ETH_ACC_NUMBER_OF_PORTS];
+
 /* Flags which is set when the corresponding IRQ is running,
  */
 static int irq_pmu_used = 0;
@@ -1070,6 +1083,117 @@
     return 0;
 }
 
+/* 
+ * WORKQUEUE JOBS
+ */
+
+/* This workqueue job checks the PHY for the link duplex mode and
+ * updates the MAC accordingly. It also performs buffer maintenance,
+ * releasing excess mbufs or replenishing the rx pool after a severe
+ * starvation.
+ *
+ * The job requeues itself to run every 3 seconds.
+ */
+static void dev_media_check_work(void *arg)
+{
+    struct net_device *dev = (struct net_device *)arg;
+    priv_data_t *priv = dev->priv;
+
+    TRACE;
+
+    /*
+     * Determine the link status
+     */
+
+    if (default_phy_cfg[priv->port_id].linkMonitor)
+    {
+        int linkUp;
+        int speed100;
+        int fullDuplex = -1; /* unknown duplex mode */
+        int newDuplex;
+        int autonegotiate;
+        unsigned phyNum = phyAddresses[priv->port_id];
+        int res;
+
+        TRACE;
+
+        /* lock the MII register access mutex */
+        down(miiAccessMutex);
+
+        res = ixEthMiiLinkStatus(phyNum,
+                                 &linkUp,
+                                 &speed100,
+                                 &newDuplex, 
+                                 &autonegotiate);
+        /* release the MII register access mutex */
+        up(miiAccessMutex);
+
+        if (res != IX_ETH_ACC_SUCCESS)
+        {
+            P_WARN("ixEthMiiLinkStatus failed on PHY%d.\n"
+                   "\tCan't determine the auto-negotiated parameters. "
+                   "Using default values.\n",
+                   phyNum);
+
+            /* this shouldn't happen; exit the work function if it does */
+            goto out;
+        }
+
+        if (linkUp)
+        {
+            if (! netif_carrier_ok(dev))
+            {
+                /* inform the kernel of a change in link state */
+                netif_carrier_on(dev);
+            }
+
+            /*
+             * Update the MAC mode to match the PHY mode if 
+             * there is a phy mode change.
+             */
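+            /* Note: fullDuplex is reset to -1 on every run of this job, so
+             * the MAC duplex mode is re-applied each time the link is
+             * reported up.
+             */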
+            if (newDuplex != fullDuplex)
+            {
+                fullDuplex = newDuplex;
+                if (fullDuplex)
+                {
+                    ixEthAccPortDuplexModeSet(priv->port_id,
+                                              IX_ETH_ACC_FULL_DUPLEX);
+                }
+                else
+                {
+                    ixEthAccPortDuplexModeSet(priv->port_id,
+                                              IX_ETH_ACC_HALF_DUPLEX);
+                }
+            }
+        }
+        else
+        {
+            fullDuplex = -1;
+            if (netif_carrier_ok(dev))
+            {
+                /* inform the kernel of a change in link state */
+                netif_carrier_off(dev);
+            }
+        }
+    }
+    
+    TRACE;
+    
+    /* This prevents the rx pool from emptying when memory has been
+     * scarce for a long time, and also keeps buffers from being held
+     * for too long.
+     */
+    dev_buff_maintenance(dev);
+    
+    /* reschedule to run in 3 seconds */
+    queue_delayed_work(npe_eth_workqueue, &priv->mii_job, 3*HZ);
+ out:
+    return;
+}
+
+
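+/* 2.4 -> 2.6 compatibility: the per-task signal lock moved to
+ * current->sighand->siglock
+ */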
+#define sigmask_lock sighand->siglock
 
 /* 
  * KERNEL THREADS
@@ -1122,11 +1246,11 @@
     */
     down (priv->maintenanceCheckThreadComplete);
 
-    daemonize();
-    reparent_to_init();
+    daemonize("dev_media");
+    //    reparent_to_init();
     spin_lock_irq(&current->sigmask_lock);
     sigemptyset(&current->blocked);
-    recalc_sigpending(current);
+    recalc_sigpending();
     spin_unlock_irq(&current->sigmask_lock);
     
     snprintf(current->comm, sizeof(current->comm), "ixp400 %s", dev->name);
@@ -1285,7 +1409,7 @@
 static void dev_pmu_timer_restart(void)
 {
     unsigned long flags;
-    save_flags_cli(flags);
+    local_irq_save(flags);
      __asm__(" mcr p14,0,%0,c1,c1,0\n"  /* write current counter */
             : : "r" (timer_countup_ticks));
 
@@ -1294,13 +1418,13 @@
             " mcr p14,0,r1,c5,c1,0; "  /* clear overflow */
             " mcr p14,0,r1,c4,c1,0\n"  /* enable interrupts */
             : : : "r1");
-    restore_flags(flags);
+    local_irq_restore(flags);
 }
 
 /* Internal ISR : run a few thousand times per second and calls 
  * the queue manager dispatcher entry point.
  */
-static void dev_qmgr_os_isr(int irg, void *dev_id, struct pt_regs *regs)
+static irqreturn_t dev_qmgr_os_isr(int irq, void *dev_id, struct pt_regs *regs)
 {
     /* get the time of this interrupt : all buffers received during this
      * interrupt will be assigned the same time */
@@ -1308,22 +1432,32 @@
 
     /* call the queue manager entry point */
     dispatcherFunc(IX_QMGR_QUELOW_GROUP);
+    return IRQ_HANDLED;
 }
 
 /* Internal ISR : run a few thousand times per second and calls 
  * the ethernet entry point.
  */
-static void dev_poll_os_isr(int irg, void *dev_id, struct pt_regs *regs)
+/* debug: trace only the first couple of interrupts */
+static int icount = 2;
+
+static irqreturn_t dev_poll_os_isr(int irq, void *dev_id, struct pt_regs *regs)
 {
-    dev_pmu_timer_restart(); /* set up the timer for the next interrupt */
+    if (icount > 0) {
+        icount--;
+        TRACE;
+    }
 
     /* get the time of this interrupt : all buffers received during this
      * interrupt will be assigned the same time */
     do_gettimeofday(&irq_stamp);
-
+    if (icount) TRACE;
     ixEthRxFrameQMCallback(rx_queue_id,0);
+    if (icount) TRACE;
     ixEthTxFrameDoneQMCallback(0,0);
-   
+    if (icount) TRACE;
+
+    /* set up the timer for the next interrupt, now that the queue
+     * callbacks have run
+     */
+    dev_pmu_timer_restart();
+    return IRQ_HANDLED;
 }
 
 /* initialize the PMU timer */
@@ -1370,17 +1504,18 @@
 static void dev_pmu_timer_disable(void)
 {
     unsigned long flags;
-    save_flags_cli(flags);
+    local_irq_save(flags);
     __asm__(" mrc p14,0,r1,c4,c1,0; "  /* get int enable register */
             " and r1,r1,#0x1e; "
             " mcr p14,0,r1,c4,c1,0\n"  /* disable interrupts */
             : : : "r1");
-    restore_flags(flags);
+    local_irq_restore(flags);
 }
 
 /* This timer will call ixEthDBDatabaseMaintenance every
  * IX_ETH_DB_MAINTENANCE_TIME jiffies
  */
+#if 0
 static void maintenance_timer_cb(unsigned long data);
 
 static struct timer_list maintenance_timer = {
@@ -1418,6 +1553,34 @@
 
     maintenance_timer_set();
 }
+#endif
+static void db_maintenance_code(void *data);
+static DECLARE_WORK(db_maintenance_job, db_maintenance_code, NULL);
+
+static inline
+void schedule_db_maintenance(void)
+{
+    TRACE;
+    queue_delayed_work(npe_eth_workqueue, &db_maintenance_job,
+                       DB_MAINTENANCE_TIME);
+}
+
+static inline
+void cancel_db_maintenance(void)
+{
+    TRACE;
+    cancel_delayed_work(&db_maintenance_job);
+}
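+
+/* Note: cancel_delayed_work() only removes a job whose delay timer has not
+ * fired yet; an instance of db_maintenance_code() that is already queued or
+ * running will still execute and requeue itself.
+ */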
+
+static void db_maintenance_code(void *data)
+{
+    TRACE;
+    down(maintenance_mutex);
+    ixEthDBDatabaseMaintenance();
+    up(maintenance_mutex);
+    schedule_db_maintenance();
+}
+
 
 /*
  *  DATAPLANE
@@ -1531,7 +1694,7 @@
 	 * and its constants are taken from the eth_type_trans()
 	 * function.
 	 */
-	struct ethhdr *eth = skb->mac.ethernet;
+	struct ethhdr *eth = eth_hdr(skb);
 	unsigned short hproto = ntohs(eth->h_proto);
 	
 	if (hproto >= 1536)
@@ -1573,7 +1736,7 @@
 	     * mode is set This costs
 	     * a lookup inside the packet payload.
 	     */
-	    struct ethhdr *eth = skb->mac.ethernet;
+	    struct ethhdr *eth = eth_hdr(skb);
 	    unsigned char *hdest = eth->h_dest;
 	    
 	    if (memcmp(hdest, dev->dev_addr, ETH_ALEN)!=0)
@@ -1632,7 +1795,7 @@
     dev = (struct net_device *)callbackTag;
     priv = dev->priv;
 
-    qlevel = softnet_data[0].input_pkt_queue.qlen;
+    qlevel = __get_cpu_var(softnet_data).input_pkt_queue.qlen;
     /* check if the system accepts more traffic and
      * against chained mbufs 
      */
@@ -1674,10 +1837,21 @@
 	/* set the length of the received skb from the mbuf length  */
 	skb->tail = skb->data + len;
 	skb->len = len;
+
+#ifndef __ARMEB__
+	{
+	    /* Running little-endian: byte swap every 32-bit word covering
+	     * the received data.  p and e are rounded down/up to 4-byte
+	     * boundaries, so a few bytes adjacent to the payload within the
+	     * same words are swapped as well.
+	     */
+	    unsigned long *p = (unsigned long *)((unsigned long)skb->data & ~0x3);
+	    unsigned long *e = (unsigned long *)(((unsigned long)skb->data + skb->len + 3) & ~0x3);
+
+	    while (p < e) {
+		*p = ntohl(*p);
+		p++;
+	    }
+	}
+#endif
 	
 #ifdef DEBUG_DUMP
 	skb_dump("rx", skb);
 #endif
+
 	/* Set the skb protocol and set mcast/bcast flags */
 	dev_eth_type_trans(mcastFlags, skb, dev);
 
@@ -1821,6 +1995,39 @@
 	spin_unlock_irq(&priv->lock);
 }
 
+/* download the firmware image and start the NPE that serves the given port */
+static int npe_start(IxEthAccPortId port_id)
+{
+    int res;
+    UINT32 npeImageId;
+
+    switch (port_id)
+    {
+        case IX_ETH_PORT_1:
+	    npeImageId = IX_ETH_NPE_B_IMAGE_ID;
+	    break;
+        case IX_ETH_PORT_2:
+	    npeImageId = IX_ETH_NPE_C_IMAGE_ID;
+	    break;
+	default:
+	    P_ERROR("Invalid port specified. IXP Ethernet NPE not started\n");
+	    return -ENODEV;
+    }
+
+    /* Initialise and start the NPE */
+    if ((res = ixNpeDlNpeInitAndStart(npeImageId)))
+    {
+	P_ERROR("Error starting NPE for Ethernet port %d!\n", port_id);
+	return -1;
+    }
+
+    /* set this flag to indicate that NPE is running */
+    npeRunning[port_id] = 1;
+
+    return 0;
+}
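+/* Note: the NPEs are also started from init_module(); port_enable() calls
+ * npe_start() again only when npeRunning[port] is clear, i.e. when the NPE
+ * has been stopped in the meantime.
+ */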
+
+
 /* The QMgr dispatch entry point can be called from the 
  * IX_OSAL_IXP400_QM1_IRQ_LVL irq (which will trigger
  * an interrupt for every packet) or a timer (which will
@@ -1906,7 +2113,16 @@
     IxEthAccMacAddr npeMacAddr;
     priv_data_t *priv = dev->priv;
 
-    P_DEBUG("port_enable(%s)\n", dev->name);
+    P_DEBUG("port_enable(%s) %d\n", dev->name, priv->port_id);
+
+    if (!npeRunning[priv->port_id])
+    {
+        if ((res = npe_start(priv->port_id)))
+        {
+            TRACE;
+            return res;
+        }
+    }
 
     /* Set MAC addr in h/w (ethAcc checks for MAC address to be valid) */
     memcpy(&npeMacAddr.macAddress,
@@ -2085,6 +2301,16 @@
 	return 0;
     }
 
+#ifndef __ARMEB__
+    {
+	/* Running little-endian: byte swap every 32-bit word covering the
+	 * frame before it is submitted for transmission (the mirror of the
+	 * swap done on receive).  p and e are rounded to 4-byte boundaries,
+	 * so a few bytes adjacent to the payload may be swapped as well.
+	 */
+	unsigned long *p = (unsigned long *)((unsigned long)skb->data & ~0x3);
+	unsigned long *e = (unsigned long *)(((unsigned long)skb->data + skb->len + 3) & ~0x3);
+
+	while (p < e) {
+	    *p = ntohl(*p);
+	    p++;
+	}
+    }
+#endif
+
 #ifdef DEBUG_DUMP
     skb_dump("tx", skb);
 #endif
@@ -2120,6 +2346,7 @@
 static int do_dev_open(struct net_device *dev)
 {
     int res;
+    TRACE;
 
     /* prevent the maintenance task from running while bringing up port */
     down(maintenance_mutex);
@@ -2151,6 +2378,27 @@
 }
 
 static void
+dev_tx_timeout_work(void *arg)
+{
+    struct net_device *dev = (struct net_device *)arg;
+    priv_data_t *priv = dev->priv;
+
+    P_ERROR("%s: Tx Timeout for port %d\n", dev->name, priv->port_id);
+
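+    /* this runs on npe_eth_workqueue in process context, so it is safe to
+     * sleep on maintenance_mutex while the port is reset
+     */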
+    down(maintenance_mutex);
+    port_disable(dev);
+
+    /* Note to user: Consider performing other reset operations here (such as
+     * PHY reset), if it is known to help the Tx Flow to become "unstuck"
+     */
+
+    port_enable(dev);
+    up(maintenance_mutex);
+}
+
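+/* Note: dev_tx_timeout_task() below is the old task-queue handler; with
+ * tq_timeout removed from priv_data_t it can no longer be scheduled and is
+ * superseded by dev_tx_timeout_work() above.
+ */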
+static void
 dev_tx_timeout_task(void *dev_id)
 {
     struct net_device *dev = (struct net_device *)dev_id;
@@ -2191,7 +2439,7 @@
     priv_data_t *priv = dev->priv;
 
     TRACE;
-    schedule_task(&priv->tq_timeout);
+    queue_work(npe_eth_workqueue, &priv->tx_timeout_job);
     
 }
 
@@ -2352,7 +2600,8 @@
 
     TRACE;
 
-    invalidate_dcache_range((unsigned int)&ethStats, sizeof(ethStats));
+    //    invalidate_dcache_range((unsigned int)&ethStats, sizeof(ethStats));
+    IX_ACC_DATA_CACHE_INVALIDATE((unsigned int)&ethStats, sizeof(ethStats));
     if ((res = ixEthAccMibIIStatsGetClear(priv->port_id, &ethStats)))
     {
 	P_ERROR("%s: ixEthAccMibIIStatsGet failed for port %d, res = %d\n",
@@ -2565,7 +2814,6 @@
     miiAccessMutex = (struct semaphore *) kmalloc(sizeof(struct semaphore), GFP_KERNEL);
     if (!miiAccessMutex)
 	return -ENOMEM;
-
     init_MUTEX(miiAccessMutex);
 
     TRACE;
@@ -2673,12 +2921,12 @@
 }
 
 /* set port MAC addr and update the dev struct if successfull */
-int dev_set_mac_address(struct net_device *dev, void *addr)
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *saddr)
 {
     int res;
     priv_data_t *priv = dev->priv;
     IxEthAccMacAddr npeMacAddr;
-    struct sockaddr *saddr = (struct sockaddr *)addr;
+    //    struct sockaddr *saddr = (struct sockaddr *)addr;
 
     /* Get MAC addr from parameter */
     memcpy(&npeMacAddr.macAddress,
@@ -2751,35 +2999,16 @@
 /* Initialize device structs.
  * Resource allocation is deffered until do_dev_open
  */
-static int __devinit dev_eth_probe(struct net_device *dev)
+static int __devinit dev_eth_probe(struct device *_dev)
 {
-    static int found_devices = 0;
-    priv_data_t *priv;
+    int res = -ENOMEM;
+    struct platform_device *pdev = to_platform_device(_dev);
+    struct net_device *ndev = dev_get_drvdata(_dev);
+    priv_data_t *priv = (priv_data_t*)ndev->priv;
 
     TRACE;
 
-    /* there is a limited number of devices */
-    if (found_devices >= dev_max_count) /* module parameter */
-	return -ENODEV;
-
-    SET_MODULE_OWNER(dev);
-
-    /* set device name */
-    sprintf(dev->name, DEVICE_NAME "%d", found_devices);
-
-    /* allocate and initialize priv struct */
-    priv = dev->priv = kmalloc(sizeof(priv_data_t), GFP_KERNEL);
-    if (dev->priv == NULL)
-	return -ENOMEM;
-
-    memset(dev->priv, 0, sizeof(priv_data_t));
-
-    TRACE;
-
-    /* set the mapping between port ID and devices 
-     * 
-     */
-    priv->port_id  = default_portId[found_devices];
+    priv->port_id = pdev->id;
 
     TRACE;
 
@@ -2789,9 +3018,8 @@
     if(priv->rx_pool == NULL)
     {
 	P_ERROR("%s: Buffer RX Pool init failed on port %d\n",
-		dev->name, priv->port_id);
-	kfree(dev->priv);
-	return -ENOMEM;
+		ndev->name, priv->port_id);
+	goto out;
     }
 
     TRACE;
@@ -2802,45 +3030,45 @@
     if(priv->tx_pool == NULL)
     {
 	P_ERROR("%s: Buffer TX Pool init failed on port %d\n",
-		dev->name, priv->port_id);
-	kfree(dev->priv);
-	return -ENOMEM;
+		ndev->name, priv->port_id);
+	goto out;
     }
 
-     TRACE;
-
    /* initialise the MII register access mutex */
     priv->maintenanceCheckThreadComplete = (struct semaphore *)
 	kmalloc(sizeof(struct semaphore), GFP_KERNEL);
     if (!priv->maintenanceCheckThreadComplete)
     {
-	kfree(dev->priv);
-	return -ENOMEM;
+	P_ERROR("%s: Failed to allocate maintenance semaphore %d\n",
+		ndev->name, priv->port_id);
+	goto out;
     }
     priv->lock = SPIN_LOCK_UNLOCKED;
     init_MUTEX(priv->maintenanceCheckThreadComplete);
     priv->maintenanceCheckStopped = TRUE;
 
     /* initialize ethernet device (default handlers) */
-    ether_setup(dev);
+    ether_setup(ndev);
 
     TRACE;
 
-     /* fill in dev struct callbacks with customized handlers */
-    dev->open = do_dev_open;
-    dev->stop = do_dev_stop;
+    INIT_WORK(&priv->mii_job, dev_media_check_work, ndev);
+    INIT_WORK(&priv->tx_timeout_job, dev_tx_timeout_work, ndev);
 
-    dev->hard_start_xmit = dev_hard_start_xmit;
-
-    dev->watchdog_timeo = DEV_WATCHDOG_TIMEO;
-    dev->tx_timeout = dev_tx_timeout;
-    dev->change_mtu = dev_change_mtu;
-    dev->do_ioctl = do_dev_ioctl;
-    dev->get_stats = dev_get_stats;
-    dev->set_multicast_list = dev_set_multicast_list;
-    dev->flags |= IFF_MULTICAST;
+    TRACE;
 
-    dev->set_mac_address = dev_set_mac_address;
+    /* fill in dev struct callbacks with customized handlers */
+    ndev->open = do_dev_open;
+    ndev->stop = do_dev_stop;
+    ndev->hard_start_xmit = dev_hard_start_xmit;
+    ndev->watchdog_timeo = DEV_WATCHDOG_TIMEO;
+    ndev->tx_timeout = dev_tx_timeout;
+    ndev->change_mtu = dev_change_mtu;
+    ndev->do_ioctl = do_dev_ioctl;
+    ndev->get_stats = dev_get_stats;
+    ndev->set_multicast_list = dev_set_multicast_list;
+    ndev->flags |= IFF_MULTICAST;
+    ndev->set_mac_address = dev_set_mac_address;
 
     TRACE;
 
@@ -2858,22 +3086,22 @@
      *
      */
 
-    memcpy(dev->dev_addr, 
+    memcpy(ndev->dev_addr, 
 	   &default_mac_addr[priv->port_id].macAddress,
 	   IX_IEEE803_MAC_ADDRESS_SIZE);
 
     /* possibly remove this test and the message when a valid MAC address 
      * is not hardcoded in the driver source code. 
      */
-    if (is_valid_ether_addr(dev->dev_addr))
+    if (is_valid_ether_addr(ndev->dev_addr))
     {
 	P_WARN("Use default MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x for port %d\n",
-	       (unsigned)dev->dev_addr[0],
-	       (unsigned)dev->dev_addr[1],
-	       (unsigned)dev->dev_addr[2],
-	       (unsigned)dev->dev_addr[3],
-	       (unsigned)dev->dev_addr[4],
-	       (unsigned)dev->dev_addr[5],
+	       (unsigned)ndev->dev_addr[0],
+	       (unsigned)ndev->dev_addr[1],
+	       (unsigned)ndev->dev_addr[2],
+	       (unsigned)ndev->dev_addr[3],
+	       (unsigned)ndev->dev_addr[4],
+	       (unsigned)ndev->dev_addr[5],
 	       priv->port_id);
     }
     
@@ -2883,31 +3111,30 @@
      */
     TRACE;
 
-    dev_change_msdu(dev, dev->mtu + dev->hard_header_len + VLAN_HDR);
-
+    dev_change_msdu(ndev, ndev->mtu + ndev->hard_header_len + VLAN_HDR);
+#if 0
     priv->tq_timeout.routine = dev_tx_timeout_task;
     priv->tq_timeout.data = (void *)dev;
-
+#endif
 #ifdef CONFIG_IXP400_ETH_QDISC_ENABLED
     /* configure and enable a fast TX queuing discipline */
     TRACE;
 
-    priv->qdisc = qdisc_create_dflt(dev, &dev_qdisc_ops);
-    dev->qdisc_sleeping = priv->qdisc;
-    dev->qdisc = priv->qdisc;
+    priv->qdisc = qdisc_create_dflt(ndev, &dev_qdisc_ops);
+    ndev->qdisc_sleeping = priv->qdisc;
+    ndev->qdisc = priv->qdisc;
     
-    if (!dev->qdisc_sleeping)
+    if (!ndev->qdisc_sleeping)
     {
 	P_ERROR("%s: qdisc_create_dflt failed on port %d\n",
-		dev->name, priv->port_id);
-	kfree(dev->priv);
-	return -ENOMEM;
+		ndev->name, priv->port_id);
+	goto out;
     }
 #endif
 
     /* set the internal maximum queueing capabilities */
-    dev->tx_queue_len = TX_MBUF_POOL_SIZE;
-
+    ndev->tx_queue_len = TX_MBUF_POOL_SIZE;
+#if 0
     if (!netif_queue_stopped(dev))
     {
 	TRACE;
@@ -2917,9 +3144,63 @@
     }
 
     found_devices++;
+#endif
+    if ((res = register_netdev(ndev)))
+        P_ERROR("Failed to register netdev. res = %d\n", res);
 
     TRACE;
 
+ out:
+    return res;
+}
+
+
+static int __devinit npe_eth_init_device(struct device *dev)
+{
+    int res = -ENOMEM;
+    struct platform_device *pdev = to_platform_device(dev);
+    struct net_device *ndev = alloc_etherdev(sizeof(priv_data_t));
+    TRACE;
+    if (ndev == NULL) {
+        P_ERROR("could not allocate device.\n");
+        goto out;
+    }
+    SET_MODULE_OWNER(ndev);
+    SET_NETDEV_DEV(ndev, dev);
+    ixEthAccTxSchedulingDisciplineSet(pdev->id, FIFO_NO_PRIORITY);
+    dev_set_drvdata(dev, ndev);
+    res = dev_eth_probe(dev);
+    if (res == 0) {
+        /* This was added in v0.1.8 of the driver. It seems that we need to
+         * enable the port before the user can set a mac address for the port
+         * using 'ifconfig hw ether ...'. To enable the port we must first
+         * using 'ifconfig <dev> hw ether ...'. To enable the port we must first
+         * ensure that no buffers are passed up to the kernel until the port is
+         * brought up properly (ifconfig up)
+         */
+        ixEthAccPortTxDoneCallbackRegister(pdev->id,
+                                           tx_done_disable_cb,
+                                           (UINT32)ndev);
+        ixEthAccPortRxCallbackRegister(pdev->id, 
+                                       rx_disable_cb, 
+                                       (UINT32)ndev);
+        port_enable(ndev);
+    } else {
+        dev_set_drvdata(dev, NULL);
+        free_netdev(ndev);
+    }
+out:
+    TRACE;
+    return res;
+}
+ 
+static int __devexit npe_eth_fini_device(struct device *dev)
+{
+    struct net_device *ndev = dev_get_drvdata(dev);
+    TRACE;
+    dev_set_drvdata(dev, NULL);
+    unregister_netdev(ndev);
+    free_netdev(ndev);
     return 0;
 }
 
@@ -2928,6 +3209,28 @@
 
 #ifdef MODULE
 
+#define MODULE_NAME "ixp400_eth"
+
+static struct device_driver npe_eth_driver = {
+    .name       = MODULE_NAME,
+    .bus        = &platform_bus_type,
+    .probe      = npe_eth_init_device,
+    .remove     = npe_eth_fini_device,
+};
+ 
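+/* one platform device per Ethernet NPE port; the .id field doubles as the
+ * IxEthAccPortId which dev_eth_probe() copies into priv->port_id
+ */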
+static struct platform_device npe_eth_devs[] = {
+    {
+        .name   = MODULE_NAME,
+        .id     = IX_ETH_PORT_1,
+    },
+    {
+        .name   = MODULE_NAME,
+        .id     = IX_ETH_PORT_2,
+    }
+};
+
 static struct net_device ixp400_devices[IX_ETH_ACC_NUMBER_OF_PORTS];
 
 int init_module(void)
@@ -2935,11 +3238,11 @@
     int res, dev_count;
     IxEthAccPortId portId;
     struct net_device *dev;
-
+    int i;
     TRACE;
 
-    P_INFO("Initializing IXP400 NPE Ethernet driver software v. " MODULE_VERSION " \n");
-
+    P_INFO("Initializing IXP400 NPE Ethernet driver software v. " MODULE_VERSION_IXP400_ETH "\n");
+    ixOsalLogLevelSet(IX_OSAL_LOG_LVL_ALL);
     TRACE;
 
     /* check module parameter range */
@@ -2951,6 +3254,16 @@
 
     TRACE;
 
+    /* Initialise the DB Maintenance task mutex very early: the platform
+     * driver probe and the workqueue jobs set up below may take it before
+     * the point where it was originally created.
+     */
+    maintenance_mutex = (struct semaphore *) kmalloc(sizeof(struct semaphore), GFP_KERNEL);
+    if (!maintenance_mutex)
+	return -ENOMEM;
+
+    init_MUTEX(maintenance_mutex);
+
+    TRACE;
+
 #ifndef DEBUG
     /* check module parameter range */
     if (log_level >= 2)  /* module parameter */
@@ -3015,6 +3328,13 @@
     /* Initialise the NPEs and access layer */
     TRACE;
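+    /* download the NPE firmware images and start the NPEs before
+     * ethacc_init() initialises the access layer
+     */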
 
+    for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++)
+    {
+	if ((res = npe_start(i)))
+	    return res;
+	TRACE;
+    }
+
     if ((res = ethacc_init()))
 	return res;
 
@@ -3026,6 +3346,13 @@
 
     TRACE;
 
+    npe_eth_workqueue = create_workqueue(MODULE_NAME);
+    if (npe_eth_workqueue == NULL)
+	return -ENOMEM;
+
+    TRACE;
+
+#if 0
     /* Initialise the driver structure */
     for (dev_count = 0; 
 	 dev_count < dev_max_count;  /* module parameter */
@@ -3038,7 +3365,7 @@
 	dev->init = dev_eth_probe;
 
         TRACE;
-
+    }
 	if ((res = register_netdev(dev)))
 	{
 	    TRACE;
@@ -3068,6 +3395,35 @@
 	    return convert_error_ethAcc(res);
 	}
     }
+#endif
+    /* set the softirq rx queue thresholds 
+     * (These numbers are based on tuning experiments)
+     * maxbacklog =  (netdev_max_backlog * 10) / 63;
+    */
+    if (netdev_max_backlog == 0)
+    {
+	netdev_max_backlog = 290; /* system default */
+    }
+    netdev_max_backlog /= BACKLOG_TUNE;
+
+    TRACE;
+
+    res = driver_register(&npe_eth_driver);
+    if (res != 0) {
+        P_ERROR("Failed to register NPE Ethernet driver (res = %d)\n", res);
+        return res;
+    }
+ 
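+    /* registering the platform devices makes the driver core call
+     * npe_eth_init_device() for each port
+     */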
+    res = platform_device_register(&npe_eth_devs[0]);
+    if (res != 0) {
+        P_ERROR("Failed to register NPE platform device 0 (res = %d)\n", res);
+        driver_unregister(&npe_eth_driver);
+        return res;
+    }
+    res = platform_device_register(&npe_eth_devs[1]);
+    if (res != 0) {
+        P_ERROR("Failed to register NPE platform device 1 (res = %d)\n", res);
+        platform_device_unregister(&npe_eth_devs[0]);
+        driver_unregister(&npe_eth_driver);
+        return res;
+    }
 
     TRACE;
 
@@ -3104,33 +3460,13 @@
     }
 
     TRACE;
-
-    /* initialise the DB Maintenance task mutex */
-    maintenance_mutex = (struct semaphore *) kmalloc(sizeof(struct semaphore), GFP_KERNEL);
-    if (!maintenance_mutex)
-	return -ENOMEM;
-
-    init_MUTEX(maintenance_mutex);
-
-    TRACE;
-
+
     /* Do not start the EthDB maintenance thread if learning & filtering feature is disabled */
     if (npe_learning) /* module parameter */
     {
-        maintenance_timer_set();
-    }
-
-    TRACE;
-
-    /* set the softirq rx queue thresholds 
-     * (These numbers are based on tuning experiments)
-     * maxbacklog =  (netdev_max_backlog * 10) / 63;
-    */
-    if (netdev_max_backlog == 0)
-    {
-	netdev_max_backlog = 290; /* system default */
+        schedule_db_maintenance();
+        //      maintenance_timer_set();
     }
-    netdev_max_backlog /= BACKLOG_TUNE;
 
     TRACE;
 
@@ -3175,7 +3511,8 @@
     TRACE;
 
     /* stop the maintenance timer */
-    maintenance_timer_clear();
+    //    maintenance_timer_clear();
+    cancel_db_maintenance();
 
     TRACE;