Makefile | 2
arch/mips/Kconfig | 28
arch/mips/Makefile | 4
arch/mips/au1000/common/clocks.c | 1
arch/mips/au1000/common/cputable.c | 36
arch/mips/au1000/common/dbdma.c | 34
arch/mips/au1000/common/gpio.c | 6
arch/mips/au1000/common/platform.c | 14
arch/mips/au1000/common/setup.c | 13
arch/mips/au1000/common/time.c | 24
arch/mips/fw/arc/identify.c | 5
arch/mips/kernel/Makefile | 3
arch/mips/kernel/asm-offsets.c | 108 +
arch/mips/kernel/cevt-r4k.c | 173 -
arch/mips/kernel/cevt-smtc.c | 321 +++
arch/mips/kernel/cevt-txx9.c | 3
arch/mips/kernel/cpu-bugs64.c | 2
arch/mips/kernel/cpu-probe.c | 36
arch/mips/kernel/csrc-r4k.c | 9
arch/mips/kernel/entry.S | 10
arch/mips/kernel/gdb-low.S | 24
arch/mips/kernel/gdb-stub.c | 3
arch/mips/kernel/genex.S | 41
arch/mips/kernel/head.S | 4
arch/mips/kernel/i8253.c | 1
arch/mips/kernel/irixelf.c | 11
arch/mips/kernel/irq-rm7000.c | 1
arch/mips/kernel/irq-rm9000.c | 1
arch/mips/kernel/kspd.c | 5
arch/mips/kernel/mips-mt-fpaff.c | 4
arch/mips/kernel/proc.c | 2
arch/mips/kernel/process.c | 19
arch/mips/kernel/ptrace.c | 2
arch/mips/kernel/rtlx.c | 65
arch/mips/kernel/scall32-o32.S | 15
arch/mips/kernel/scall64-64.S | 2
arch/mips/kernel/scall64-n32.S | 2
arch/mips/kernel/scall64-o32.S | 16
arch/mips/kernel/setup.c | 5
arch/mips/kernel/smp.c | 6
arch/mips/kernel/smtc.c | 257 +-
arch/mips/kernel/sysirix.c | 2
arch/mips/kernel/time.c | 2
arch/mips/kernel/traps.c | 124 -
arch/mips/kernel/unaligned.c | 8
arch/mips/kernel/vpe.c | 46
arch/mips/lasat/interrupt.c | 2
arch/mips/lasat/lasat_board.c | 13
arch/mips/lasat/sysctl.c | 172 -
arch/mips/lasat/sysctl.h | 24
arch/mips/lib/csum_partial.S | 21
arch/mips/lib/ucmpdi2.c | 2
arch/mips/lib/uncached.c | 2
arch/mips/math-emu/cp1emu.c | 8
arch/mips/mips-boards/generic/time.c | 2
arch/mips/mips-boards/malta/Makefile | 3
arch/mips/mips-boards/malta/malta_smtc.c | 12
arch/mips/mipssim/sim_time.c | 2
arch/mips/mm/c-r3k.c | 8
arch/mips/mm/c-r4k.c | 60
arch/mips/mm/c-tx39.c | 17
arch/mips/mm/cache.c | 23
arch/mips/mm/cex-sb1.S | 8
arch/mips/mm/dma-default.c | 5
arch/mips/mm/init.c | 9
arch/mips/mm/pg-r4k.c | 22
arch/mips/mm/pg-sb1.c | 18
arch/mips/mm/sc-ip22.c | 2
arch/mips/mm/sc-mips.c | 3
arch/mips/mm/sc-r5k.c | 2
arch/mips/mm/sc-rm7k.c | 6
arch/mips/mm/tlb-r3k.c | 6
arch/mips/mm/tlb-r4k.c | 15
arch/mips/mm/tlb-r8k.c | 4
arch/mips/mm/tlbex.c | 160 -
arch/mips/pci/pci-bcm1480.c | 11
arch/mips/pci/pci-bcm1480ht.c | 21
arch/mips/pci/pci-ip27.c | 42
arch/mips/pci/pci.c | 9
arch/mips/pmc-sierra/yosemite/smp.c | 6
arch/mips/sgi-ip22/ip22-int.c | 8
arch/mips/sgi-ip22/ip22-platform.c | 2
arch/mips/sgi-ip27/ip27-init.c | 2
arch/mips/sgi-ip27/ip27-timer.c | 6
arch/mips/sgi-ip27/ip27-xtalk.c | 6
arch/mips/sgi-ip32/ip32-irq.c | 5
arch/mips/sibyte/bcm1480/irq.c | 1
arch/mips/sibyte/sb1250/irq.c | 1
arch/mips/vr41xx/common/irq.c | 6
drivers/net/Kconfig | 7
drivers/net/Makefile | 2
drivers/net/titan_ge.c | 2069 +++++++++++++++++++++
drivers/net/titan_ge.h | 415 ++++
drivers/net/titan_mdio.c | 217 ++
drivers/net/titan_mdio.h | 56
drivers/scsi/NCR53C9x.h | 7
include/asm-mips/asmmacro.h | 10
include/asm-mips/atomic.h | 4
include/asm-mips/byteorder.h | 5
include/asm-mips/cacheflush.h | 20
include/asm-mips/cevt-r4k.h | 46
include/asm-mips/elf.h | 2
include/asm-mips/gdb-stub.h | 149 -
include/asm-mips/irqflags.h | 26
include/asm-mips/lasat/serial.h | 4
include/asm-mips/mach-au1x00/au1000.h | 1
include/asm-mips/mach-ip27/cpu-feature-overrides.h | 4
include/asm-mips/mach-ip27/dma-coherence.h | 2
include/asm-mips/mach-jmr3927/ioremap.h | 2
include/asm-mips/mach-lasat/irq.h | 2 include/asm-mips/mach-pb1x00/pb1200.h | 2 include/asm-mips/mach-qemu/cpu-feature-overrides.h | 2 include/asm-mips/mipsregs.h | 6 include/asm-mips/pgtable-32.h | 2 include/asm-mips/pgtable.h | 3 include/asm-mips/rtlx.h | 6 include/asm-mips/smtc.h | 8 include/asm-mips/sn/mapped_kernel.h | 8 include/asm-mips/stackframe.h | 72 include/asm-mips/time.h | 7 120 files changed, 4344 insertions(+), 1079 deletions(-) diff -Nurd linux-2.6.24/arch/mips/au1000/common/clocks.c mer-smartq-kernel/arch/mips/au1000/common/clocks.c --- linux-2.6.24/arch/mips/au1000/common/clocks.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/au1000/common/clocks.c 2009-11-17 12:13:28.000000000 +0100 @@ -46,6 +46,7 @@ { return au1x00_clock; } +EXPORT_SYMBOL(get_au1x00_speed); diff -Nurd linux-2.6.24/arch/mips/au1000/common/cputable.c mer-smartq-kernel/arch/mips/au1000/common/cputable.c --- linux-2.6.24/arch/mips/au1000/common/cputable.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/au1000/common/cputable.c 2009-11-17 12:13:28.000000000 +0100 @@ -22,24 +22,24 @@ /* With some thought, we can probably use the mask to reduce the * size of the table. */ -struct cpu_spec cpu_specs[] = { - { 0xffffffff, 0x00030100, "Au1000 DA", 1, 0 }, - { 0xffffffff, 0x00030201, "Au1000 HA", 1, 0 }, - { 0xffffffff, 0x00030202, "Au1000 HB", 1, 0 }, - { 0xffffffff, 0x00030203, "Au1000 HC", 1, 1 }, - { 0xffffffff, 0x00030204, "Au1000 HD", 1, 1 }, - { 0xffffffff, 0x01030200, "Au1500 AB", 1, 1 }, - { 0xffffffff, 0x01030201, "Au1500 AC", 0, 1 }, - { 0xffffffff, 0x01030202, "Au1500 AD", 0, 1 }, - { 0xffffffff, 0x02030200, "Au1100 AB", 1, 1 }, - { 0xffffffff, 0x02030201, "Au1100 BA", 1, 1 }, - { 0xffffffff, 0x02030202, "Au1100 BC", 1, 1 }, - { 0xffffffff, 0x02030203, "Au1100 BD", 0, 1 }, - { 0xffffffff, 0x02030204, "Au1100 BE", 0, 1 }, - { 0xffffffff, 0x03030200, "Au1550 AA", 0, 1 }, - { 0xffffffff, 0x04030200, "Au1200 AB", 0, 0 }, - { 0xffffffff, 0x04030201, "Au1200 AC", 1, 0 }, - { 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0 }, +struct cpu_spec cpu_specs[] = { + { 0xffffffff, 0x00030100, "Au1000 DA", 1, 0, 1 }, + { 0xffffffff, 0x00030201, "Au1000 HA", 1, 0, 1 }, + { 0xffffffff, 0x00030202, "Au1000 HB", 1, 0, 1 }, + { 0xffffffff, 0x00030203, "Au1000 HC", 1, 1, 0 }, + { 0xffffffff, 0x00030204, "Au1000 HD", 1, 1, 0 }, + { 0xffffffff, 0x01030200, "Au1500 AB", 1, 1, 0 }, + { 0xffffffff, 0x01030201, "Au1500 AC", 0, 1, 0 }, + { 0xffffffff, 0x01030202, "Au1500 AD", 0, 1, 0 }, + { 0xffffffff, 0x02030200, "Au1100 AB", 1, 1, 0 }, + { 0xffffffff, 0x02030201, "Au1100 BA", 1, 1, 0 }, + { 0xffffffff, 0x02030202, "Au1100 BC", 1, 1, 0 }, + { 0xffffffff, 0x02030203, "Au1100 BD", 0, 1, 0 }, + { 0xffffffff, 0x02030204, "Au1100 BE", 0, 1, 0 }, + { 0xffffffff, 0x03030200, "Au1550 AA", 0, 1, 0 }, + { 0xffffffff, 0x04030200, "Au1200 AB", 0, 0, 0 }, + { 0xffffffff, 0x04030201, "Au1200 AC", 1, 0, 0 }, + { 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0, 0 } }; void diff -Nurd linux-2.6.24/arch/mips/au1000/common/dbdma.c mer-smartq-kernel/arch/mips/au1000/common/dbdma.c --- linux-2.6.24/arch/mips/au1000/common/dbdma.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/au1000/common/dbdma.c 2009-11-17 12:13:28.000000000 +0100 @@ -161,22 +161,22 @@ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, /* Provide 16 user definable device types */ - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 
0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, + { ~0, 0, 0, 0, 0, 0, 0 }, }; #define DBDEV_TAB_SIZE (sizeof(dbdev_tab) / sizeof(dbdev_tab_t)) @@ -209,7 +209,7 @@ dbdev_tab_t *p=NULL; static u16 new_id=0x1000; - p = find_dbdev_id(0); + p = find_dbdev_id(~0); if ( NULL != p ) { memcpy(p, dev, sizeof(dbdev_tab_t)); diff -Nurd linux-2.6.24/arch/mips/au1000/common/gpio.c mer-smartq-kernel/arch/mips/au1000/common/gpio.c --- linux-2.6.24/arch/mips/au1000/common/gpio.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/au1000/common/gpio.c 2009-11-17 12:13:28.000000000 +0100 @@ -54,7 +54,7 @@ { gpio -= AU1XXX_GPIO_BASE; - gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | (value << gpio); + gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio); } static int au1xxx_gpio2_direction_input(unsigned gpio) @@ -67,7 +67,8 @@ static int au1xxx_gpio2_direction_output(unsigned gpio, int value) { gpio -= AU1XXX_GPIO_BASE; - gpio2->dir = (0x01 << gpio) | (value << gpio); + gpio2->dir |= 0x01 << gpio; + gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio); return 0; } @@ -96,6 +97,7 @@ static int au1xxx_gpio1_direction_output(unsigned gpio, int value) { gpio1->trioutclr = (0x01 & gpio); + au1xxx_gpio1_write(gpio, value); return 0; } diff -Nurd linux-2.6.24/arch/mips/au1000/common/platform.c mer-smartq-kernel/arch/mips/au1000/common/platform.c --- linux-2.6.24/arch/mips/au1000/common/platform.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/au1000/common/platform.c 2009-11-17 12:13:28.000000000 +0100 @@ -118,12 +118,12 @@ static struct resource au1xxx_mmc_resources[] = { [0] = { .start = SD0_PHYS_ADDR, - .end = SD0_PHYS_ADDR + 0x40, + .end = SD0_PHYS_ADDR + 0x7ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = SD1_PHYS_ADDR, - .end = SD1_PHYS_ADDR + 0x40, + .end = SD1_PHYS_ADDR + 0x7ffff, .flags = IORESOURCE_MEM, }, [2] = { @@ -245,13 +245,12 @@ .id = 0, }; -#ifdef CONFIG_MIPS_DB1200 - +#if defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200) static struct resource smc91x_resources[] = { [0] = { .name = "smc91x-regs", .start = AU1XXX_SMC91111_PHYS_ADDR, - .end = AU1XXX_SMC91111_PHYS_ADDR + 0xfffff, + .end = AU1XXX_SMC91111_PHYS_ADDR + 0xf, .flags = IORESOURCE_MEM, }, [1] = { @@ -267,8 +266,7 @@ .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; - -#endif +#endif /* defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200) */ static struct platform_device *au1xxx_platform_devices[] __initdata = { &au1xxx_usb_ohci_device, @@ -284,7 +282,7 @@ &au1200_ide0_device, &au1xxx_mmc_device, #endif -#ifdef CONFIG_MIPS_DB1200 +#if defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200) &smc91x_device, #endif }; diff -Nurd linux-2.6.24/arch/mips/au1000/common/setup.c mer-smartq-kernel/arch/mips/au1000/common/setup.c --- 
linux-2.6.24/arch/mips/au1000/common/setup.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/au1000/common/setup.c 2009-11-17 12:13:28.000000000 +0100 @@ -57,7 +57,7 @@ { struct cpu_spec *sp; char *argptr; - unsigned long prid, cpupll, bclk = 1; + unsigned long prid, cpufreq, bclk = 1; set_cpuspec(); sp = cur_cpu_spec[0]; @@ -65,8 +65,15 @@ board_setup(); /* board specific setup */ prid = read_c0_prid(); - cpupll = (au_readl(0xB1900060) & 0x3F) * 12; - printk("(PRId %08lx) @ %ldMHZ\n", prid, cpupll); + if (sp->cpu_pll_wo) +#ifdef CONFIG_SOC_AU1000_FREQUENCY + cpufreq = CONFIG_SOC_AU1000_FREQUENCY / 1000000; +#else + cpufreq = 396; +#endif + else + cpufreq = (au_readl(SYS_CPUPLL) & 0x3F) * 12; + printk(KERN_INFO "(PRID %08lx) @ %ld MHz\n", prid, cpufreq); bclk = sp->cpu_bclk; if (bclk) diff -Nurd linux-2.6.24/arch/mips/au1000/common/time.c mer-smartq-kernel/arch/mips/au1000/common/time.c --- linux-2.6.24/arch/mips/au1000/common/time.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/au1000/common/time.c 2009-11-17 12:13:28.000000000 +0100 @@ -209,18 +209,22 @@ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S); au_writel(0, SYS_TOYWRITE); while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S); + } else + no_au1xxx_32khz = 1; - cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * - AU1000_SRC_CLK; - } - else { - /* The 32KHz oscillator isn't running, so assume there - * isn't one and grab the processor speed from the PLL. - * NOTE: some old silicon doesn't allow reading the PLL. - */ + /* + * On early Au1000, sys_cpupll was write-only. Since these + * silicon versions of Au1000 are not sold by AMD, we don't bend + * over backwards trying to determine the frequency. + */ + if (cur_cpu_spec[0]->cpu_pll_wo) +#ifdef CONFIG_SOC_AU1000_FREQUENCY + cpu_speed = CONFIG_SOC_AU1000_FREQUENCY; +#else + cpu_speed = 396000000; +#endif + else cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK; - no_au1xxx_32khz = 1; - } mips_hpt_frequency = cpu_speed; // Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)&0x03) + 2) * 16)); diff -Nurd linux-2.6.24/arch/mips/fw/arc/identify.c mer-smartq-kernel/arch/mips/fw/arc/identify.c --- linux-2.6.24/arch/mips/fw/arc/identify.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/fw/arc/identify.c 2009-11-17 12:13:29.000000000 +0100 @@ -67,6 +67,11 @@ .liname = "SNI RM200_PCI", .type = MACH_SNI_RM200_PCI, .flags = PROM_FLAG_DONT_FREE_TEMP, + }, { + .arcname = "RM200PCI-R5K", + .liname = "SNI RM200_PCI-R5K", + .type = MACH_SNI_RM200_PCI, + .flags = PROM_FLAG_DONT_FREE_TEMP, } }; diff -Nurd linux-2.6.24/arch/mips/Kconfig mer-smartq-kernel/arch/mips/Kconfig --- linux-2.6.24/arch/mips/Kconfig 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/Kconfig 2009-11-17 12:13:28.000000000 +0100 @@ -1453,7 +1453,6 @@ depends on CPU_MIPS32_R2 #depends on CPU_MIPS64_R2 # once there is hardware ... depends on SYS_SUPPORTS_MULTITHREADING - select GENERIC_CLOCKEVENTS_BROADCAST select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select MIPS_MT @@ -1487,32 +1486,17 @@ Includes a loader for loading an elf relocatable object onto another VPE and running it. -config MIPS_MT_SMTC_INSTANT_REPLAY - bool "Low-latency Dispatch of Deferred SMTC IPIs" - depends on MIPS_MT_SMTC && !PREEMPT - default y - help - SMTC pseudo-interrupts between TCs are deferred and queued - if the target TC is interrupt-inhibited (IXMT). 
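(Editorial aside, not part of the patch: the Au1000 setup.c/time.c hunks above derive the CPU clock from the low 6 bits of SYS_CPUPLL and the UART baud base from SYS_POWERCTRL. A rough userspace model of that arithmetic follows; the register values are made up and AU1000_SRC_CLK is assumed to be the 12 MHz reference implied by the "* 12" MHz calculation in the hunk.)

/* Standalone sketch of the Au1000 clock derivation; not kernel code. */
#include <stdio.h>

#define AU1000_SRC_CLK	12000000	/* assumed 12 MHz source clock */

int main(void)
{
	unsigned long sys_cpupll = 0x21;	/* hypothetical SYS_CPUPLL value */
	unsigned long sys_powerctrl = 0x00;	/* hypothetical SYS_POWERCTRL value */

	unsigned long cpu_hz = (sys_cpupll & 0x3f) * AU1000_SRC_CLK;
	/* Baudrate = CPU / (SD * 2 * CLKDIV * 16), as in the time.c comment */
	unsigned long uart_baud_base =
		cpu_hz / (2 * ((sys_powerctrl & 0x03) + 2) * 16);

	printf("cpu: %lu MHz, uart baud base: %lu\n",
	       cpu_hz / 1000000, uart_baud_base);
	return 0;
}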
In the first - SMTC prototypes, these queued IPIs were serviced on return - to user mode, or on entry into the kernel idle loop. The - INSTANT_REPLAY option dispatches them as part of local_irq_restore() - processing, which adds runtime overhead (hence the option to turn - it off), but ensures that IPIs are handled promptly even under - heavy I/O interrupt load. - config MIPS_MT_SMTC_IM_BACKSTOP bool "Use per-TC register bits as backstop for inhibited IM bits" depends on MIPS_MT_SMTC - default y + default n help To support multiple TC microthreads acting as "CPUs" within a VPE, VPE-wide interrupt mask bits must be specially manipulated during interrupt handling. To support legacy drivers and interrupt controller management code, SMTC has a "backstop" to track and if necessary restore the interrupt mask. This has some performance - impact on interrupt service overhead. Disable it only if you know - what you are doing. + impact on interrupt service overhead. config MIPS_MT_SMTC_IRQAFF bool "Support IRQ affinity API" @@ -1522,10 +1506,8 @@ Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) for SMTC Linux kernel. Requires platform support, of which an example can be found in the MIPS kernel i8259 and Malta - platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY - be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to - interrupt dispatch, and should be used only if you know what - you are doing. + platform code. Adds some overhead to interrupt dispatch, and + should be used only if you know what you are doing. config MIPS_VPE_LOADER_TOM bool "Load VPE program into memory hidden from linux" @@ -1781,7 +1763,7 @@ Allows the configuration of the timer frequency. config HZ_48 - bool "48 HZ" if SYS_SUPPORTS_48HZ + bool "48 HZ" if SYS_SUPPORTS_48HZ || SYS_SUPPORTS_ARBIT_HZ config HZ_100 bool "100 HZ" if SYS_SUPPORTS_100HZ || SYS_SUPPORTS_ARBIT_HZ diff -Nurd linux-2.6.24/arch/mips/kernel/asm-offsets.c mer-smartq-kernel/arch/mips/kernel/asm-offsets.c --- linux-2.6.24/arch/mips/kernel/asm-offsets.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/asm-offsets.c 2009-11-17 12:13:29.000000000 +0100 @@ -13,7 +13,7 @@ #include #include #include - +#include #include #include @@ -337,3 +337,109 @@ size("#define IC_IRQ_CPUSTAT_T ", irq_cpustat_t); linefeed; } + +void output_gdbreg_defines(void) +{ + text("/* MIPS struct gdb_regs offsets. 
*/"); + offset("#define GDB_FR_REG0 ", struct gdb_regs, reg0); + offset("#define GDB_FR_REG1 ", struct gdb_regs, reg1); + offset("#define GDB_FR_REG2 ", struct gdb_regs, reg2); + offset("#define GDB_FR_REG3 ", struct gdb_regs, reg3); + offset("#define GDB_FR_REG4 ", struct gdb_regs, reg4); + offset("#define GDB_FR_REG5 ", struct gdb_regs, reg5); + offset("#define GDB_FR_REG6 ", struct gdb_regs, reg6); + offset("#define GDB_FR_REG7 ", struct gdb_regs, reg7); + offset("#define GDB_FR_REG8 ", struct gdb_regs, reg8); + offset("#define GDB_FR_REG9 ", struct gdb_regs, reg9); + offset("#define GDB_FR_REG10 ", struct gdb_regs, reg10); + offset("#define GDB_FR_REG11 ", struct gdb_regs, reg11); + offset("#define GDB_FR_REG12 ", struct gdb_regs, reg12); + offset("#define GDB_FR_REG13 ", struct gdb_regs, reg13); + offset("#define GDB_FR_REG14 ", struct gdb_regs, reg14); + offset("#define GDB_FR_REG15 ", struct gdb_regs, reg15); + offset("#define GDB_FR_REG16 ", struct gdb_regs, reg16); + offset("#define GDB_FR_REG17 ", struct gdb_regs, reg17); + offset("#define GDB_FR_REG18 ", struct gdb_regs, reg18); + offset("#define GDB_FR_REG19 ", struct gdb_regs, reg19); + offset("#define GDB_FR_REG20 ", struct gdb_regs, reg20); + offset("#define GDB_FR_REG21 ", struct gdb_regs, reg21); + offset("#define GDB_FR_REG22 ", struct gdb_regs, reg22); + offset("#define GDB_FR_REG23 ", struct gdb_regs, reg23); + offset("#define GDB_FR_REG24 ", struct gdb_regs, reg24); + offset("#define GDB_FR_REG25 ", struct gdb_regs, reg25); + offset("#define GDB_FR_REG26 ", struct gdb_regs, reg26); + offset("#define GDB_FR_REG27 ", struct gdb_regs, reg27); + offset("#define GDB_FR_REG28 ", struct gdb_regs, reg28); + offset("#define GDB_FR_REG29 ", struct gdb_regs, reg29); + offset("#define GDB_FR_REG30 ", struct gdb_regs, reg30); + offset("#define GDB_FR_REG31 ", struct gdb_regs, reg31); + linefeed; + + offset("#define GDB_FR_STATUS ", struct gdb_regs, cp0_status); + offset("#define GDB_FR_HI ", struct gdb_regs, hi); + offset("#define GDB_FR_LO ", struct gdb_regs, lo); +#ifdef CONFIG_CPU_HAS_SMARTMIPS + offset("#define GDB_FR_ACX ", struct gdb_regs, acx); +#endif + offset("#define GDB_FR_BADVADDR ", struct gdb_regs, cp0_badvaddr); + offset("#define GDB_FR_CAUSE ", struct gdb_regs, cp0_cause); + offset("#define GDB_FR_EPC ", struct gdb_regs, cp0_epc); + linefeed; + + offset("#define GDB_FR_FPR0 ", struct gdb_regs, fpr0); + offset("#define GDB_FR_FPR1 ", struct gdb_regs, fpr1); + offset("#define GDB_FR_FPR2 ", struct gdb_regs, fpr2); + offset("#define GDB_FR_FPR3 ", struct gdb_regs, fpr3); + offset("#define GDB_FR_FPR4 ", struct gdb_regs, fpr4); + offset("#define GDB_FR_FPR5 ", struct gdb_regs, fpr5); + offset("#define GDB_FR_FPR6 ", struct gdb_regs, fpr6); + offset("#define GDB_FR_FPR7 ", struct gdb_regs, fpr7); + offset("#define GDB_FR_FPR8 ", struct gdb_regs, fpr8); + offset("#define GDB_FR_FPR9 ", struct gdb_regs, fpr9); + offset("#define GDB_FR_FPR10 ", struct gdb_regs, fpr10); + offset("#define GDB_FR_FPR11 ", struct gdb_regs, fpr11); + offset("#define GDB_FR_FPR12 ", struct gdb_regs, fpr12); + offset("#define GDB_FR_FPR13 ", struct gdb_regs, fpr13); + offset("#define GDB_FR_FPR14 ", struct gdb_regs, fpr14); + offset("#define GDB_FR_FPR15 ", struct gdb_regs, fpr15); + offset("#define GDB_FR_FPR16 ", struct gdb_regs, fpr16); + offset("#define GDB_FR_FPR17 ", struct gdb_regs, fpr17); + offset("#define GDB_FR_FPR18 ", struct gdb_regs, fpr18); + offset("#define GDB_FR_FPR19 ", struct gdb_regs, fpr19); + offset("#define GDB_FR_FPR20 ", 
struct gdb_regs, fpr20); + offset("#define GDB_FR_FPR21 ", struct gdb_regs, fpr21); + offset("#define GDB_FR_FPR22 ", struct gdb_regs, fpr22); + offset("#define GDB_FR_FPR23 ", struct gdb_regs, fpr23); + offset("#define GDB_FR_FPR24 ", struct gdb_regs, fpr24); + offset("#define GDB_FR_FPR25 ", struct gdb_regs, fpr25); + offset("#define GDB_FR_FPR26 ", struct gdb_regs, fpr26); + offset("#define GDB_FR_FPR27 ", struct gdb_regs, fpr27); + offset("#define GDB_FR_FPR28 ", struct gdb_regs, fpr28); + offset("#define GDB_FR_FPR29 ", struct gdb_regs, fpr29); + offset("#define GDB_FR_FPR30 ", struct gdb_regs, fpr30); + offset("#define GDB_FR_FPR31 ", struct gdb_regs, fpr31); + linefeed; + + offset("#define GDB_FR_FSR ", struct gdb_regs, cp1_fsr); + offset("#define GDB_FR_FIR ", struct gdb_regs, cp1_fir); + offset("#define GDB_FR_FRP ", struct gdb_regs, frame_ptr); + offset("#define GDB_FR_DUMMY ", struct gdb_regs, dummy); + + offset("#define GDB_FR_CP0_INDEX ", struct gdb_regs, cp0_index); + offset("#define GDB_FR_CP0_RANDOM ", struct gdb_regs, cp0_random); + offset("#define GDB_FR_CP0_ENTRYLO0 ", struct gdb_regs, cp0_entrylo0); + offset("#define GDB_FR_CP0_ENTRYLO1 ", struct gdb_regs, cp0_entrylo1); + offset("#define GDB_FR_CP0_CONTEXT ", struct gdb_regs, cp0_context); + offset("#define GDB_FR_CP0_PAGEMASK ", struct gdb_regs, cp0_pagemask); + offset("#define GDB_FR_CP0_WIRED ", struct gdb_regs, cp0_wired); + offset("#define GDB_FR_CP0_REG7 ", struct gdb_regs, cp0_reg7); + offset("#define GDB_FR_CP0_REG8 ", struct gdb_regs, cp0_reg8); + offset("#define GDB_FR_CP0_REG9 ", struct gdb_regs, cp0_reg9); + offset("#define GDB_FR_CP0_ENTRYHI ", struct gdb_regs, cp0_entryhi); + offset("#define GDB_FR_CP0_REG11 ", struct gdb_regs, cp0_reg11); + offset("#define GDB_FR_CP0_REG12 ", struct gdb_regs, cp0_reg12); + offset("#define GDB_FR_CP0_REG13 ", struct gdb_regs, cp0_reg13); + offset("#define GDB_FR_CP0_REG14 ", struct gdb_regs, cp0_reg14); + offset("#define GDB_FR_CP0_PRID ", struct gdb_regs, cp0_prid); + size("#define GDB_FR_SIZE ", struct gdb_regs); +} diff -Nurd linux-2.6.24/arch/mips/kernel/cevt-r4k.c mer-smartq-kernel/arch/mips/kernel/cevt-r4k.c --- linux-2.6.24/arch/mips/kernel/cevt-r4k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/cevt-r4k.c 2009-11-17 12:13:29.000000000 +0100 @@ -12,6 +12,14 @@ #include #include +#include + +/* + * The SMTC Kernel for the 34K, 1004K, et. al. replaces several + * of these routines with SMTC-specific variants. + */ + +#ifndef CONFIG_MIPS_MT_SMTC static int mips_next_event(unsigned long delta, struct clock_event_device *evt) @@ -19,60 +27,27 @@ unsigned int cnt; int res; -#ifdef CONFIG_MIPS_MT_SMTC - { - unsigned long flags, vpflags; - local_irq_save(flags); - vpflags = dvpe(); -#endif cnt = read_c0_count(); cnt += delta; write_c0_compare(cnt); res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; -#ifdef CONFIG_MIPS_MT_SMTC - evpe(vpflags); - local_irq_restore(flags); - } -#endif return res; } -static void mips_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +#endif /* CONFIG_MIPS_MT_SMTC */ + +void mips_set_clock_mode(enum clock_event_mode mode, + struct clock_event_device *evt) { /* Nothing to do ... */ } -static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); -static int cp0_timer_irq_installed; - -/* - * Timer ack for an R4k-compatible timer of a known frequency. 
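(Editorial aside, not part of the patch: the asm-offsets.c additions above emit "#define GDB_FR_*" constants so that gdb-low.S can address struct gdb_regs fields from assembly. A minimal userspace stand-in for that idea, using printf/offsetof instead of the kernel's offset()/size() helpers and a reduced, hypothetical structure:)

/* Standalone sketch of the asm-offsets technique; not kernel code. */
#include <stddef.h>
#include <stdio.h>

struct gdb_regs_model {		/* hypothetical subset of struct gdb_regs */
	long reg0;
	long cp0_status;
	long cp0_epc;
};

int main(void)
{
	printf("#define GDB_FR_REG0   %zu\n", offsetof(struct gdb_regs_model, reg0));
	printf("#define GDB_FR_STATUS %zu\n", offsetof(struct gdb_regs_model, cp0_status));
	printf("#define GDB_FR_EPC    %zu\n", offsetof(struct gdb_regs_model, cp0_epc));
	printf("#define GDB_FR_SIZE   %zu\n", sizeof(struct gdb_regs_model));
	return 0;
}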
- */ -static void c0_timer_ack(void) -{ - write_c0_compare(read_c0_compare()); -} +DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); +int cp0_timer_irq_installed; -/* - * Possibly handle a performance counter interrupt. - * Return true if the timer interrupt should not be checked - */ -static inline int handle_perf_irq(int r2) -{ - /* - * The performance counter overflow interrupt may be shared with the - * timer interrupt (cp0_perfcount_irq < 0). If it is and a - * performance counter has overflowed (perf_irq() == IRQ_HANDLED) - * and we can't reliably determine if a counter interrupt has also - * happened (!r2) then don't check for a timer interrupt. - */ - return (cp0_perfcount_irq < 0) && - perf_irq() == IRQ_HANDLED && - !r2; -} +#ifndef CONFIG_MIPS_MT_SMTC -static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) +irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { const int r2 = cpu_has_mips_r2; struct clock_event_device *cd; @@ -93,12 +68,8 @@ * interrupt. Being the paranoiacs we are we check anyway. */ if (!r2 || (read_c0_cause() & (1 << 30))) { - c0_timer_ack(); -#ifdef CONFIG_MIPS_MT_SMTC - if (cpu_data[cpu].vpe_id) - goto out; - cpu = 0; -#endif + /* Clear Count/Compare Interrupt */ + write_c0_compare(read_c0_compare()); cd = &per_cpu(mips_clockevent_device, cpu); cd->event_handler(cd); } @@ -107,65 +78,16 @@ return IRQ_HANDLED; } -static struct irqaction c0_compare_irqaction = { +#endif /* Not CONFIG_MIPS_MT_SMTC */ + +struct irqaction c0_compare_irqaction = { .handler = c0_compare_interrupt, -#ifdef CONFIG_MIPS_MT_SMTC - .flags = IRQF_DISABLED, -#else .flags = IRQF_DISABLED | IRQF_PERCPU, -#endif .name = "timer", }; -#ifdef CONFIG_MIPS_MT_SMTC -DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); -static void smtc_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ -} - -static void mips_broadcast(cpumask_t mask) -{ - unsigned int cpu; - - for_each_cpu_mask(cpu, mask) - smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); -} - -static void setup_smtc_dummy_clockevent_device(void) -{ - //uint64_t mips_freq = mips_hpt_^frequency; - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - - cd = &per_cpu(smtc_dummy_clockevent_device, cpu); - - cd->name = "SMTC"; - cd->features = CLOCK_EVT_FEAT_DUMMY; - - /* Calculate the min / max delta */ - cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); - cd->shift = 0; //32; - cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd); - cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd); - - cd->rating = 200; - cd->irq = 17; //-1; -// if (cpu) -// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu); -// else - cd->cpumask = cpumask_of_cpu(cpu); - - cd->set_mode = smtc_set_mode; - - cd->broadcast = mips_broadcast; - - clockevents_register_device(cd); -} -#endif - -static void mips_event_handler(struct clock_event_device *dev) +void mips_event_handler(struct clock_event_device *dev) { } @@ -177,7 +99,23 @@ return (read_c0_cause() >> cp0_compare_irq) & 0x100; } -static int c0_compare_int_usable(void) +/* + * Compare interrupt can be routed and latched outside the core, + * so a single execution hazard barrier may not be enough to give + * it time to clear as seen in the Cause register. 4 time the + * pipeline depth seems reasonably conservative, and empirically + * works better in configurations with high CPU/bus clock ratios. 
+ */ + +#define compare_change_hazard() \ + do { \ + irq_disable_hazard(); \ + irq_disable_hazard(); \ + irq_disable_hazard(); \ + irq_disable_hazard(); \ + } while (0) + +int c0_compare_int_usable(void) { unsigned int delta; unsigned int cnt; @@ -187,7 +125,7 @@ */ if (c0_compare_int_pending()) { write_c0_compare(read_c0_count()); - irq_disable_hazard(); + compare_change_hazard(); if (c0_compare_int_pending()) return 0; } @@ -196,7 +134,7 @@ cnt = read_c0_count(); cnt += delta; write_c0_compare(cnt); - irq_disable_hazard(); + compare_change_hazard(); if ((int)(read_c0_count() - cnt) < 0) break; /* increase delta if the timer was already expired */ @@ -205,11 +143,12 @@ while ((int)(read_c0_count() - cnt) <= 0) ; /* Wait for expiry */ + compare_change_hazard(); if (!c0_compare_int_pending()) return 0; write_c0_compare(read_c0_count()); - irq_disable_hazard(); + compare_change_hazard(); if (c0_compare_int_pending()) return 0; @@ -219,6 +158,8 @@ return 1; } +#ifndef CONFIG_MIPS_MT_SMTC + int __cpuinit mips_clockevent_init(void) { uint64_t mips_freq = mips_hpt_frequency; @@ -229,17 +170,6 @@ if (!cpu_has_counter || !mips_hpt_frequency) return -ENXIO; -#ifdef CONFIG_MIPS_MT_SMTC - setup_smtc_dummy_clockevent_device(); - - /* - * On SMTC we only register VPE0's compare interrupt as clockevent - * device. - */ - if (cpu) - return 0; -#endif - if (!c0_compare_int_usable()) return -ENXIO; @@ -265,13 +195,9 @@ cd->rating = 300; cd->irq = irq; -#ifdef CONFIG_MIPS_MT_SMTC - cd->cpumask = CPU_MASK_ALL; -#else cd->cpumask = cpumask_of_cpu(cpu); -#endif cd->set_next_event = mips_next_event; - cd->set_mode = mips_set_mode; + cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; clockevents_register_device(cd); @@ -281,12 +207,9 @@ cp0_timer_irq_installed = 1; -#ifdef CONFIG_MIPS_MT_SMTC -#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq) - setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT); -#else setup_irq(irq, &c0_compare_irqaction); -#endif return 0; } + +#endif /* Not CONFIG_MIPS_MT_SMTC */ diff -Nurd linux-2.6.24/arch/mips/kernel/cevt-smtc.c mer-smartq-kernel/arch/mips/kernel/cevt-smtc.c --- linux-2.6.24/arch/mips/kernel/cevt-smtc.c 1970-01-01 01:00:00.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/cevt-smtc.c 2009-11-17 12:13:29.000000000 +0100 @@ -0,0 +1,321 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 MIPS Technologies, Inc. + * Copyright (C) 2007 Ralf Baechle + * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl + */ +#include +#include +#include + +#include +#include +#include + +/* + * Variant clock event timer support for SMTC on MIPS 34K, 1004K + * or other MIPS MT cores. + * + * Notes on SMTC Support: + * + * SMTC has multiple microthread TCs pretending to be Linux CPUs. + * But there's only one Count/Compare pair per VPE, and Compare + * interrupts are taken opportunisitically by available TCs + * bound to the VPE with the Count register. The new timer + * framework provides for global broadcasts, but we really + * want VPE-level multicasts for best behavior. So instead + * of invoking the high-level clock-event broadcast code, + * this version of SMTC support uses the historical SMTC + * multicast mechanisms "under the hood", appearing to the + * generic clock layer as if the interrupts are per-CPU. 
+ * + * The approach taken here is to maintain a set of NR_CPUS + * virtual timers, and track which "CPU" needs to be alerted + * at each event. + * + * It's unlikely that we'll see a MIPS MT core with more than + * 2 VPEs, but we *know* that we won't need to handle more + * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements + * is always going to be overkill, but always going to be enough. + */ + +unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; +static int smtc_nextinvpe[NR_CPUS]; + +/* + * Timestamps stored are absolute values to be programmed + * into Count register. Valid timestamps will never be zero. + * If a Zero Count value is actually calculated, it is converted + * to be a 1, which will introduce 1 or two CPU cycles of error + * roughly once every four billion events, which at 1000 HZ means + * about once every 50 days. If that's actually a problem, one + * could alternate squashing 0 to 1 and to -1. + */ + +#define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) +#define ISVALID(x) ((x) != 0L) + +/* + * Time comparison is subtle, as it's really truncated + * modular arithmetic. + */ + +#define IS_SOONER(a, b, reference) \ + (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) + +/* + * CATCHUP_INCREMENT, used when the function falls behind the counter. + * Could be an increasing function instead of a constant; + */ + +#define CATCHUP_INCREMENT 64 + +static int mips_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + unsigned long flags; + unsigned int mtflags; + unsigned long timestamp, reference, previous; + unsigned long nextcomp = 0L; + int vpe = current_cpu_data.vpe_id; + int cpu = smp_processor_id(); + local_irq_save(flags); + mtflags = dmt(); + + /* + * Maintain the per-TC virtual timer + * and program the per-VPE shared Count register + * as appropriate here... + */ + reference = (unsigned long)read_c0_count(); + timestamp = MAKEVALID(reference + delta); + /* + * To really model the clock, we have to catch the case + * where the current next-in-VPE timestamp is the old + * timestamp for the calling CPE, but the new value is + * in fact later. In that case, we have to do a full + * scan and discover the new next-in-VPE CPU id and + * timestamp. + */ + previous = smtc_nexttime[vpe][cpu]; + if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) + && IS_SOONER(previous, timestamp, reference)) { + int i; + int soonest = cpu; + + /* + * Update timestamp array here, so that new + * value gets considered along with those of + * other virtual CPUs on the VPE. + */ + smtc_nexttime[vpe][cpu] = timestamp; + for_each_online_cpu(i) { + if (ISVALID(smtc_nexttime[vpe][i]) + && IS_SOONER(smtc_nexttime[vpe][i], + smtc_nexttime[vpe][soonest], reference)) { + soonest = i; + } + } + smtc_nextinvpe[vpe] = soonest; + nextcomp = smtc_nexttime[vpe][soonest]; + /* + * Otherwise, we don't have to process the whole array rank, + * we just have to see if the event horizon has gotten closer. + */ + } else { + if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || + IS_SOONER(timestamp, + smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { + smtc_nextinvpe[vpe] = cpu; + nextcomp = timestamp; + } + /* + * Since next-in-VPE may me the same as the executing + * virtual CPU, we update the array *after* checking + * its value. + */ + smtc_nexttime[vpe][cpu] = timestamp; + } + + /* + * It may be that, in fact, we don't need to update Compare, + * but if we do, we want to make sure we didn't fall into + * a crack just behind Count. 
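(Editorial aside, not part of the patch: a small userspace model of the truncated modular arithmetic these comparisons rely on. Subtracting the current Count value before comparing makes IS_SOONER() immune to counter wrap-around, and MAKEVALID() keeps 0 free as the "no pending event" marker; uint32_t stands in here for the 32-bit Count register.)

/* Standalone sketch of the wrap-safe "which event is sooner" test. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAKEVALID(x)	(((x) == 0) ? 1 : (x))
#define ISVALID(x)	((x) != 0)
#define IS_SOONER(a, b, ref) \
	(((uint32_t)((a) - (ref))) < ((uint32_t)((b) - (ref))))

int main(void)
{
	uint32_t reference = 0xfffffff0u;		/* Count about to wrap */
	uint32_t a = MAKEVALID(reference + 0x20);	/* 0x00000010 after wrap */
	uint32_t b = reference + 0x08;			/* 0xfffffff8, no wrap */

	/* b is due first, even though a naive "a < b" check would pick a */
	printf("a=0x%08" PRIx32 " b=0x%08" PRIx32 " sooner=%s\n",
	       a, b, IS_SOONER(a, b, reference) ? "a" : "b");
	return 0;
}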
+ */ + if (ISVALID(nextcomp)) { + write_c0_compare(nextcomp); + ehb(); + /* + * We never return an error, we just make sure + * that we trigger the handlers as quickly as + * we can if we fell behind. + */ + while ((nextcomp - (unsigned long)read_c0_count()) + > (unsigned long)LONG_MAX) { + nextcomp += CATCHUP_INCREMENT; + write_c0_compare(nextcomp); + ehb(); + } + } + emt(mtflags); + local_irq_restore(flags); + return 0; +} + + +void smtc_distribute_timer(int vpe) +{ + unsigned long flags; + unsigned int mtflags; + int cpu; + struct clock_event_device *cd; + unsigned long nextstamp = 0L; + unsigned long reference; + + +repeat: + for_each_online_cpu(cpu) { + /* + * Find virtual CPUs within the current VPE who have + * unserviced timer requests whose time is now past. + */ + local_irq_save(flags); + mtflags = dmt(); + if (cpu_data[cpu].vpe_id == vpe && + ISVALID(smtc_nexttime[vpe][cpu])) { + reference = (unsigned long)read_c0_count(); + if ((smtc_nexttime[vpe][cpu] - reference) + > (unsigned long)LONG_MAX) { + smtc_nexttime[vpe][cpu] = 0L; + emt(mtflags); + local_irq_restore(flags); + /* + * We don't send IPIs to ourself. + */ + if (cpu != smp_processor_id()) { + smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); + } else { + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); + } + } else { + /* Local to VPE but Valid Time not yet reached. */ + if (!ISVALID(nextstamp) || + IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, + reference)) { + smtc_nextinvpe[vpe] = cpu; + nextstamp = smtc_nexttime[vpe][cpu]; + } + emt(mtflags); + local_irq_restore(flags); + } + } else { + emt(mtflags); + local_irq_restore(flags); + + } + } + /* Reprogram for interrupt at next soonest timestamp for VPE */ + if (ISVALID(nextstamp)) { + write_c0_compare(nextstamp); + ehb(); + if ((nextstamp - (unsigned long)read_c0_count()) + > (unsigned long)LONG_MAX) + goto repeat; + } +} + + +irqreturn_t c0_compare_interrupt(int irq, void *dev_id) +{ + int cpu = smp_processor_id(); + + /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ + handle_perf_irq(1); + + if (read_c0_cause() & (1 << 30)) { + /* Clear Count/Compare Interrupt */ + write_c0_compare(read_c0_compare()); + smtc_distribute_timer(cpu_data[cpu].vpe_id); + } + return IRQ_HANDLED; +} + + +int __cpuinit mips_clockevent_init(void) +{ + uint64_t mips_freq = mips_hpt_frequency; + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + unsigned int irq; + int i; + int j; + + if (!cpu_has_counter || !mips_hpt_frequency) + return -ENXIO; + if (cpu == 0) { + for (i = 0; i < num_possible_cpus(); i++) { + smtc_nextinvpe[i] = 0; + for (j = 0; j < num_possible_cpus(); j++) + smtc_nexttime[i][j] = 0L; + } + /* + * SMTC also can't have the usablility test + * run by secondary TCs once Compare is in use. + */ + if (!c0_compare_int_usable()) + return -ENXIO; + } + + /* + * With vectored interrupts things are getting platform specific. + * get_c0_compare_int is a hook to allow a platform to return the + * interrupt number of it's liking. 
+ */ + irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; + if (get_c0_compare_int) + irq = get_c0_compare_int(); + + cd = &per_cpu(mips_clockevent_device, cpu); + + cd->name = "MIPS"; + cd->features = CLOCK_EVT_FEAT_ONESHOT; + + /* Calculate the min / max delta */ + cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); + cd->shift = 32; + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + + cd->rating = 300; + cd->irq = irq; + cd->cpumask = cpumask_of_cpu(cpu); + cd->set_next_event = mips_next_event; + cd->set_mode = mips_set_clock_mode; + cd->event_handler = mips_event_handler; + + clockevents_register_device(cd); + + /* + * On SMTC we only want to do the data structure + * initialization and IRQ setup once. + */ + if (cpu) + return 0; + /* + * And we need the hwmask associated with the c0_compare + * vector to be initialized. + */ + irq_hwmask[irq] = (0x100 << cp0_compare_irq); + if (cp0_timer_irq_installed) + return 0; + + cp0_timer_irq_installed = 1; + + setup_irq(irq, &c0_compare_irqaction); + + return 0; +} diff -Nurd linux-2.6.24/arch/mips/kernel/cevt-txx9.c mer-smartq-kernel/arch/mips/kernel/cevt-txx9.c --- linux-2.6.24/arch/mips/kernel/cevt-txx9.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/cevt-txx9.c 2009-11-17 12:13:29.000000000 +0100 @@ -161,6 +161,9 @@ struct txx9_tmr_reg __iomem *tmrptr; tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); + /* Start once to make CounterResetEnable effective */ + __raw_writel(TXx9_TMTCR_CRE | TXx9_TMTCR_TCE, &tmrptr->tcr); + /* Stop and reset the counter */ __raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr); __raw_writel(0, &tmrptr->tisr); __raw_writel(0xffffffff, &tmrptr->cpra); diff -Nurd linux-2.6.24/arch/mips/kernel/cpu-bugs64.c mer-smartq-kernel/arch/mips/kernel/cpu-bugs64.c --- linux-2.6.24/arch/mips/kernel/cpu-bugs64.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/cpu-bugs64.c 2009-11-17 12:13:29.000000000 +0100 @@ -164,7 +164,7 @@ ); } -static volatile int daddi_ov __initdata = 0; +static volatile int daddi_ov __cpuinitdata = 0; asmlinkage void __init do_daddi_ov(struct pt_regs *regs) { diff -Nurd linux-2.6.24/arch/mips/kernel/cpu-probe.c mer-smartq-kernel/arch/mips/kernel/cpu-probe.c --- linux-2.6.24/arch/mips/kernel/cpu-probe.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/cpu-probe.c 2009-11-17 12:13:29.000000000 +0100 @@ -45,18 +45,7 @@ local_irq_enable(); } -/* - * There is a race when WAIT instruction executed with interrupt - * enabled. - * But it is implementation-dependent wheter the pipelie restarts when - * a non-enabled interrupt is requested. - */ -static void r4k_wait(void) -{ - __asm__(" .set mips3 \n" - " wait \n" - " .set mips0 \n"); -} +extern void r4k_wait(void); /* * This variant is preferable as it allows testing need_resched and going to @@ -65,14 +54,18 @@ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes * using this version a gamble. 
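(Editorial aside, not part of the patch: the r4k_wait_irqoff() comment above is about the classic check-then-sleep race. A userspace analogue with pthreads, where holding the mutex across both the need_resched test and the sleep plays the role of keeping interrupts disabled, so a wakeup cannot slip into the window between them:)

/* Standalone sketch of the lost-wakeup race avoidance; not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static int need_resched_flag;		/* stands in for TIF_NEED_RESCHED */

static void *waker(void *arg)
{
	pthread_mutex_lock(&lock);
	need_resched_flag = 1;		/* "interrupt" requests a reschedule */
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);

	pthread_mutex_lock(&lock);	/* analogue of local_irq_disable() */
	while (!need_resched_flag)	/* test and sleep are now atomic */
		pthread_cond_wait(&wake, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("woken up, need_resched seen\n");
	return 0;
}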
*/ -static void r4k_wait_irqoff(void) +void r4k_wait_irqoff(void) { local_irq_disable(); if (!need_resched()) - __asm__(" .set mips3 \n" + __asm__(" .set push \n" + " .set mips3 \n" " wait \n" - " .set mips0 \n"); + " .set pop \n"); local_irq_enable(); + __asm__(" .globl __pastwait \n" + "__pastwait: \n"); + return; } /* @@ -128,7 +121,7 @@ __setup("nowait", wait_disable); -static inline void check_wait(void) +void __init check_wait(void) { struct cpuinfo_mips *c = ¤t_cpu_data; @@ -239,7 +232,6 @@ void __init check_bugs32(void) { - check_wait(); check_errata(); } @@ -548,7 +540,7 @@ } } -static char unknown_isa[] __initdata = KERN_ERR \ +static char unknown_isa[] __cpuinitdata = KERN_ERR \ "Unsupported ISA type, c0.config0: %d."; static inline unsigned int decode_config0(struct cpuinfo_mips *c) @@ -654,7 +646,7 @@ return config3 & MIPS_CONF_M; } -static void __init decode_configs(struct cpuinfo_mips *c) +static void __cpuinit decode_configs(struct cpuinfo_mips *c) { /* MIPS32 or MIPS64 compliant CPU. */ c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER | @@ -807,7 +799,7 @@ /* * Name a CPU */ -static __init const char *cpu_to_name(struct cpuinfo_mips *c) +static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c) { const char *name = NULL; @@ -887,7 +879,7 @@ return name; } -__init void cpu_probe(void) +__cpuinit void cpu_probe(void) { struct cpuinfo_mips *c = ¤t_cpu_data; unsigned int cpu = smp_processor_id(); @@ -950,7 +942,7 @@ c->srsets = 1; } -__init void cpu_report(void) +__cpuinit void cpu_report(void) { struct cpuinfo_mips *c = ¤t_cpu_data; diff -Nurd linux-2.6.24/arch/mips/kernel/csrc-r4k.c mer-smartq-kernel/arch/mips/kernel/csrc-r4k.c --- linux-2.6.24/arch/mips/kernel/csrc-r4k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/csrc-r4k.c 2009-11-17 12:13:29.000000000 +0100 @@ -22,12 +22,17 @@ .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -void __init init_mips_clocksource(void) +int __init init_mips_clocksource(void) { - /* Calclate a somewhat reasonable rating value */ + if (!cpu_has_counter || !mips_hpt_frequency) + return -ENXIO; + + /* Calculate a somewhat reasonable rating value */ clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); clocksource_register(&clocksource_mips); + + return 0; } diff -Nurd linux-2.6.24/arch/mips/kernel/entry.S mer-smartq-kernel/arch/mips/kernel/entry.S --- linux-2.6.24/arch/mips/kernel/entry.S 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/entry.S 2009-11-17 12:13:29.000000000 +0100 @@ -79,11 +79,6 @@ FEXPORT(restore_all) # restore full frame #ifdef CONFIG_MIPS_MT_SMTC -/* Detect and execute deferred IPI "interrupts" */ - LONG_L s0, TI_REGS($28) - LONG_S sp, TI_REGS($28) - jal deferred_smtc_ipi - LONG_S s0, TI_REGS($28) #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP /* Re-arm any temporarily masked interrupts not explicitly "acked" */ mfc0 v0, CP0_TCSTATUS @@ -112,6 +107,11 @@ xor t0, t0, t3 mtc0 t0, CP0_TCCONTEXT #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ +/* Detect and execute deferred IPI "interrupts" */ + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) + jal deferred_smtc_ipi + LONG_S s0, TI_REGS($28) #endif /* CONFIG_MIPS_MT_SMTC */ .set noat RESTORE_TEMP diff -Nurd linux-2.6.24/arch/mips/kernel/gdb-low.S mer-smartq-kernel/arch/mips/kernel/gdb-low.S --- linux-2.6.24/arch/mips/kernel/gdb-low.S 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/gdb-low.S 2009-11-17 12:13:29.000000000 
+0100 @@ -9,21 +9,21 @@ #include #include #include +#include #include #include -#include #ifdef CONFIG_32BIT #define DMFC0 mfc0 #define DMTC0 mtc0 #define LDC1 lwc1 -#define SDC1 lwc1 +#define SDC1 swc1 #endif #ifdef CONFIG_64BIT #define DMFC0 dmfc0 #define DMTC0 dmtc0 #define LDC1 ldc1 -#define SDC1 ldc1 +#define SDC1 sdc1 #endif /* @@ -78,10 +78,19 @@ LONG_S v0, GDB_FR_EPC(sp) DMFC0 v0, CP0_BADVADDR LONG_S v0, GDB_FR_BADVADDR(sp) +#ifdef CONFIG_CPU_HAS_SMARTMIPS + mflhxu v0 + LONG_S v0, GDB_FR_LO(sp) + mflhxu v0 + LONG_S v0, GDB_FR_HI(sp) + mflhxu v0 + LONG_S v0, GDB_FR_ACX(sp) +#else mfhi v0 LONG_S v0, GDB_FR_HI(sp) mflo v0 LONG_S v0, GDB_FR_LO(sp) +#endif /* * Now the integer registers @@ -317,10 +326,19 @@ LONG_L v1, GDB_FR_EPC(sp) mtc0 v0, CP0_STATUS DMTC0 v1, CP0_EPC +#ifdef CONFIG_CPU_HAS_SMARTMIPS + LONG_L v0, GDB_FR_ACX(sp) + mtlhx v0 + LONG_L v0, GDB_FR_HI(sp) + mtlhx v0 + LONG_L v0, GDB_FR_LO(sp) + mtlhx v0 +#else LONG_L v0, GDB_FR_HI(sp) LONG_L v1, GDB_FR_LO(sp) mthi v0 mtlo v1 +#endif LONG_L $31, GDB_FR_REG31(sp) LONG_L $30, GDB_FR_REG30(sp) LONG_L $28, GDB_FR_REG28(sp) diff -Nurd linux-2.6.24/arch/mips/kernel/gdb-stub.c mer-smartq-kernel/arch/mips/kernel/gdb-stub.c --- linux-2.6.24/arch/mips/kernel/gdb-stub.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/gdb-stub.c 2009-11-17 12:13:29.000000000 +0100 @@ -139,7 +139,6 @@ #include #include #include -#include /* * external low-level support routines @@ -656,6 +655,7 @@ *epc = (unsigned long)async_breakpoint; } +#ifdef CONFIG_SMP static void kgdb_wait(void *arg) { unsigned flags; @@ -668,6 +668,7 @@ local_irq_restore(flags); } +#endif /* * GDB stub needs to call kgdb_wait on all processor with interrupts diff -Nurd linux-2.6.24/arch/mips/kernel/genex.S mer-smartq-kernel/arch/mips/kernel/genex.S --- linux-2.6.24/arch/mips/kernel/genex.S 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/genex.S 2009-11-17 12:13:29.000000000 +0100 @@ -20,6 +20,7 @@ #include #include #include +#include #define PANIC_PIC(msg) \ .set push; \ @@ -126,7 +127,42 @@ __FINIT + .align 5 /* 32 byte rollback region */ +LEAF(r4k_wait) + .set push + .set noreorder + /* start of rollback region */ + LONG_L t0, TI_FLAGS($28) + nop + andi t0, _TIF_NEED_RESCHED + bnez t0, 1f + nop + nop + nop + .set mips3 + wait + /* end of rollback region (the region size must be power of two) */ + .set pop +1: + jr ra + END(r4k_wait) + + .macro BUILD_ROLLBACK_PROLOGUE handler + FEXPORT(rollback_\handler) + .set push + .set noat + MFC0 k0, CP0_EPC + PTR_LA k1, r4k_wait + ori k0, 0x1f /* 32 byte rollback region */ + xori k0, 0x1f + bne k0, k1, 9f + MTC0 k0, CP0_EPC +9: + .set pop + .endm + .align 5 +BUILD_ROLLBACK_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) #ifdef CONFIG_TRACE_IRQFLAGS /* @@ -201,6 +237,7 @@ * This prototype is copied to ebase + n*IntCtl.VS and patched * to invoke the handler */ +BUILD_ROLLBACK_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) SAVE_SOME SAVE_AT @@ -245,8 +282,8 @@ and t0, a0, t1 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP mfc0 t2, CP0_TCCONTEXT - or t0, t0, t2 - mtc0 t0, CP0_TCCONTEXT + or t2, t0, t2 + mtc0 t2, CP0_TCCONTEXT #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ xor t1, t1, t0 mtc0 t1, CP0_STATUS diff -Nurd linux-2.6.24/arch/mips/kernel/head.S mer-smartq-kernel/arch/mips/kernel/head.S --- linux-2.6.24/arch/mips/kernel/head.S 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/head.S 2009-11-17 12:13:29.000000000 +0100 @@ -22,6 +22,7 @@ #include #include 
#include +#include #include #include @@ -194,8 +195,11 @@ j start_kernel END(kernel_entry) + __FINIT +#ifndef CONFIG_HOTPLUG_CPU __INIT +#endif #ifdef CONFIG_SMP /* diff -Nurd linux-2.6.24/arch/mips/kernel/i8253.c mer-smartq-kernel/arch/mips/kernel/i8253.c --- linux-2.6.24/arch/mips/kernel/i8253.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/i8253.c 2009-11-17 12:13:29.000000000 +0100 @@ -15,6 +15,7 @@ #include DEFINE_SPINLOCK(i8253_lock); +EXPORT_SYMBOL(i8253_lock); /* * Initialize the PIT timer. diff -Nurd linux-2.6.24/arch/mips/kernel/irixelf.c mer-smartq-kernel/arch/mips/kernel/irixelf.c --- linux-2.6.24/arch/mips/kernel/irixelf.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/irixelf.c 2009-11-17 12:13:29.000000000 +0100 @@ -578,7 +578,7 @@ * process and the system, here we map the page and fill the * structure */ -static void irix_map_prda_page(void) +static int irix_map_prda_page(void) { unsigned long v; struct prda *pp; @@ -587,8 +587,8 @@ v = do_brk(PRDA_ADDRESS, PAGE_SIZE); up_write(¤t->mm->mmap_sem); - if (v < 0) - return; + if (v != PRDA_ADDRESS) + return v; /* v must be an error code */ pp = (struct prda *) v; pp->prda_sys.t_pid = current->pid; @@ -596,6 +596,8 @@ pp->prda_sys.t_rpid = current->pid; /* We leave the rest set to zero */ + + return 0; } @@ -781,7 +783,8 @@ * IRIX maps a page at 0x200000 which holds some system * information. Programs depend on this. */ - irix_map_prda_page(); + if (irix_map_prda_page()) + goto out_free_dentry; padzero(elf_bss); diff -Nurd linux-2.6.24/arch/mips/kernel/irq-rm7000.c mer-smartq-kernel/arch/mips/kernel/irq-rm7000.c --- linux-2.6.24/arch/mips/kernel/irq-rm7000.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/irq-rm7000.c 2009-11-17 12:13:29.000000000 +0100 @@ -33,6 +33,7 @@ .mask = mask_rm7k_irq, .mask_ack = mask_rm7k_irq, .unmask = unmask_rm7k_irq, + .eoi = unmask_rm7k_irq }; void __init rm7k_cpu_irq_init(void) diff -Nurd linux-2.6.24/arch/mips/kernel/irq-rm9000.c mer-smartq-kernel/arch/mips/kernel/irq-rm9000.c --- linux-2.6.24/arch/mips/kernel/irq-rm9000.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/irq-rm9000.c 2009-11-17 12:13:29.000000000 +0100 @@ -75,6 +75,7 @@ .mask = mask_rm9k_irq, .mask_ack = mask_rm9k_irq, .unmask = unmask_rm9k_irq, + .eoi = unmask_rm9k_irq }; static struct irq_chip rm9k_perfcounter_irq = { diff -Nurd linux-2.6.24/arch/mips/kernel/kspd.c mer-smartq-kernel/arch/mips/kernel/kspd.c --- linux-2.6.24/arch/mips/kernel/kspd.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/kspd.c 2009-11-17 12:13:29.000000000 +0100 @@ -257,7 +257,7 @@ vcwd = vpe_getcwd(tclimit); - /* change to the cwd of the process that loaded the SP program */ + /* change to cwd of the process that loaded the SP program */ old_fs = get_fs(); set_fs(KERNEL_DS); sys_chdir(vcwd); @@ -323,6 +323,9 @@ set >>= 1; } } + + /* Put daemon cwd back to root to avoid umount problems */ + sys_chdir("/"); } static int channel_open = 0; diff -Nurd linux-2.6.24/arch/mips/kernel/Makefile mer-smartq-kernel/arch/mips/kernel/Makefile --- linux-2.6.24/arch/mips/kernel/Makefile 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/Makefile 2009-11-17 12:13:29.000000000 +0100 @@ -10,6 +10,7 @@ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o +obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o 
obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o @@ -51,9 +52,9 @@ obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o -obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o +obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o obj-$(CONFIG_I8259) += i8259.o obj-$(CONFIG_IRQ_CPU) += irq_cpu.o diff -Nurd linux-2.6.24/arch/mips/kernel/mips-mt-fpaff.c mer-smartq-kernel/arch/mips/kernel/mips-mt-fpaff.c --- linux-2.6.24/arch/mips/kernel/mips-mt-fpaff.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/mips-mt-fpaff.c 2009-11-17 12:13:29.000000000 +0100 @@ -36,7 +36,7 @@ */ static inline struct task_struct *find_process_by_pid(pid_t pid) { - return pid ? find_task_by_pid(pid) : current; + return pid ? find_task_by_vpid(pid) : current; } @@ -159,7 +159,7 @@ /* * FPU Use Factor empirically derived from experiments on 34K */ -#define FPUSEFACTOR 333 +#define FPUSEFACTOR 2000 static __init int mt_fp_affinity_init(void) { diff -Nurd linux-2.6.24/arch/mips/kernel/proc.c mer-smartq-kernel/arch/mips/kernel/proc.c --- linux-2.6.24/arch/mips/kernel/proc.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/proc.c 2009-11-17 12:13:29.000000000 +0100 @@ -38,7 +38,7 @@ seq_printf(m, "processor\t\t: %ld\n", n); sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n", cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : ""); - seq_printf(m, fmt, __cpu_name[smp_processor_id()], + seq_printf(m, fmt, __cpu_name[n], (version >> 4) & 0x0f, version & 0x0f, (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n", diff -Nurd linux-2.6.24/arch/mips/kernel/process.c mer-smartq-kernel/arch/mips/kernel/process.c --- linux-2.6.24/arch/mips/kernel/process.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/process.c 2009-11-17 12:13:29.000000000 +0100 @@ -55,7 +55,7 @@ while (1) { tick_nohz_stop_sched_tick(); while (!need_resched()) { -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG +#ifdef CONFIG_MIPS_MT_SMTC extern void smtc_idle_loop_hook(void); smtc_idle_loop_hook(); @@ -152,17 +152,18 @@ */ p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); - clear_tsk_thread_flag(p, TIF_USEDFPU); -#ifdef CONFIG_MIPS_MT_FPAFF +#ifdef CONFIG_MIPS_MT_SMTC /* - * FPU affinity support is cleaner if we track the - * user-visible CPU affinity from the very beginning. - * The generic cpus_allowed mask will already have - * been copied from the parent before copy_thread - * is invoked. + * SMTC restores TCStatus after Status, and the CU bits + * are aliased there. 
*/ - p->thread.user_cpus_allowed = p->cpus_allowed; + childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); +#endif + clear_tsk_thread_flag(p, TIF_USEDFPU); + +#ifdef CONFIG_MIPS_MT_FPAFF + clear_tsk_thread_flag(p, TIF_FPUBOUND); #endif /* CONFIG_MIPS_MT_FPAFF */ if (clone_flags & CLONE_SETTLS) diff -Nurd linux-2.6.24/arch/mips/kernel/ptrace.c mer-smartq-kernel/arch/mips/kernel/ptrace.c --- linux-2.6.24/arch/mips/kernel/ptrace.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/ptrace.c 2009-11-17 12:13:29.000000000 +0100 @@ -238,7 +238,7 @@ case FPC_EIR: { /* implementation / version register */ unsigned int flags; #ifdef CONFIG_MIPS_MT_SMTC - unsigned int irqflags; + unsigned long irqflags; unsigned int mtflags; #endif /* CONFIG_MIPS_MT_SMTC */ diff -Nurd linux-2.6.24/arch/mips/kernel/rtlx.c mer-smartq-kernel/arch/mips/kernel/rtlx.c --- linux-2.6.24/arch/mips/kernel/rtlx.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/rtlx.c 2009-11-17 12:13:29.000000000 +0100 @@ -73,6 +73,15 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id) { int i; + unsigned int flags, vpeflags; + + /* Ought not to be strictly necessary for SMTC builds */ + local_irq_save(flags); + vpeflags = dvpe(); + set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); + irq_enable_hazard(); + evpe(vpeflags); + local_irq_restore(flags); for (i = 0; i < RTLX_CHANNELS; i++) { wake_up(&channel_wqs[i].lx_queue); @@ -109,7 +118,8 @@ static int rtlx_init(struct rtlx_info *rtlxi) { if (rtlxi->id != RTLX_ID) { - printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id); + printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", + rtlxi, rtlxi->id); return -ENOEXEC; } @@ -163,18 +173,17 @@ if (rtlx == NULL) { if( (p = vpe_get_shared(tclimit)) == NULL) { - if (can_sleep) { - __wait_event_interruptible(channel_wqs[index].lx_queue, - (p = vpe_get_shared(tclimit)), - ret); - if (ret) - goto out_fail; - } else { - printk(KERN_DEBUG "No SP program loaded, and device " - "opened with O_NONBLOCK\n"); - ret = -ENOSYS; + if (can_sleep) { + __wait_event_interruptible(channel_wqs[index].lx_queue, + (p = vpe_get_shared(tclimit)), ret); + if (ret) goto out_fail; - } + } else { + printk(KERN_DEBUG "No SP program loaded, and device " + "opened with O_NONBLOCK\n"); + ret = -ENOSYS; + goto out_fail; + } } smp_rmb(); @@ -183,7 +192,9 @@ DEFINE_WAIT(wait); for (;;) { - prepare_to_wait(&channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE); + prepare_to_wait( + &channel_wqs[index].lx_queue, + &wait, TASK_INTERRUPTIBLE); smp_rmb(); if (*p != NULL) break; @@ -196,7 +207,7 @@ } finish_wait(&channel_wqs[index].lx_queue, &wait); } else { - printk(" *vpe_get_shared is NULL. " + pr_err(" *vpe_get_shared is NULL. 
" "Has an SP program been loaded?\n"); ret = -ENOSYS; goto out_fail; @@ -204,8 +215,9 @@ } if ((unsigned int)*p < KSEG0) { - printk(KERN_WARNING "vpe_get_shared returned an invalid pointer " - "maybe an error code %d\n", (int)*p); + printk(KERN_WARNING "vpe_get_shared returned an " + "invalid pointer maybe an error code %d\n", + (int)*p); ret = -ENOSYS; goto out_fail; } @@ -233,6 +245,10 @@ int rtlx_release(int index) { + if (rtlx == NULL) { + pr_err("rtlx_release() with null rtlx\n"); + return 0; + } rtlx->channel[index].lx_state = RTLX_STATE_UNUSED; return 0; } @@ -252,8 +268,8 @@ int ret = 0; __wait_event_interruptible(channel_wqs[index].lx_queue, - chan->lx_read != chan->lx_write || sp_stopping, - ret); + (chan->lx_read != chan->lx_write) || + sp_stopping, ret); if (ret) return ret; @@ -283,7 +299,9 @@ unsigned int rtlx_write_poll(int index) { struct rtlx_channel *chan = &rtlx->channel[index]; - return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size); + + return write_spacefree(chan->rt_read, chan->rt_write, + chan->buffer_size); } ssize_t rtlx_read(int index, void __user *buff, size_t count) @@ -345,8 +363,8 @@ rt_read = rt->rt_read; /* total number of bytes to copy */ - count = min(count, - (size_t)write_spacefree(rt_read, rt->rt_write, rt->buffer_size)); + count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write, + rt->buffer_size)); /* first bit from write pointer to the end of the buffer, or count */ fl = min(count, (size_t) rt->buffer_size - rt->rt_write); @@ -515,6 +533,11 @@ if (cpu_has_vint) set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); + else { + pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); + err = -ENODEV; + goto out_chrdev; + } rtlx_irq.dev_id = rtlx; setup_irq(rtlx_irq_num, &rtlx_irq); diff -Nurd linux-2.6.24/arch/mips/kernel/scall32-o32.S mer-smartq-kernel/arch/mips/kernel/scall32-o32.S --- linux-2.6.24/arch/mips/kernel/scall32-o32.S 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/scall32-o32.S 2009-11-17 12:13:29.000000000 +0100 @@ -184,7 +184,7 @@ * The system call does not exist in this kernel */ illegal_syscall: - li v0, -ENOSYS # error + li v0, ENOSYS # error sw v0, PT_R2(sp) li t0, 1 # set error flag sw t0, PT_R7(sp) @@ -270,18 +270,11 @@ subu t0, a0, __NR_O32_Linux # check syscall number sltiu v0, t0, __NR_O32_Linux_syscalls + 1 #endif + beqz t0, einval # do not recurse sll t1, t0, 3 beqz v0, einval - lw t2, sys_call_table(t1) # syscall routine -#if defined(CONFIG_BINFMT_IRIX) - li v1, 4000 # nr of sys_syscall -#else - li v1, 4000 - __NR_O32_Linux # index of sys_syscall -#endif - beq t0, v1, einval # do not recurse - /* Some syscalls like execve get their arguments from struct pt_regs and claim zero arguments in the syscall table. Thus we have to assume the worst case and shuffle around all potential arguments. @@ -305,7 +298,7 @@ jr t2 /* Unreached */ -einval: li v0, -EINVAL +einval: li v0, -ENOSYS jr ra END(sys_syscall) @@ -646,7 +639,7 @@ sys sys_pselect6 6 sys sys_ppoll 5 sys sys_unshare 1 - sys sys_splice 4 + sys sys_splice 6 sys sys_sync_file_range 7 /* 4305 */ sys sys_tee 4 sys sys_vmsplice 4 diff -Nurd linux-2.6.24/arch/mips/kernel/scall64-64.S mer-smartq-kernel/arch/mips/kernel/scall64-64.S --- linux-2.6.24/arch/mips/kernel/scall64-64.S 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/scall64-64.S 2009-11-17 12:13:29.000000000 +0100 @@ -117,7 +117,7 @@ illegal_syscall: /* This also isn't a 64-bit syscall, throw an error. 
*/ - li v0, -ENOSYS # error + li v0, ENOSYS # error sd v0, PT_R2(sp) li t0, 1 # set error flag sd t0, PT_R7(sp) diff -Nurd linux-2.6.24/arch/mips/kernel/scall64-n32.S mer-smartq-kernel/arch/mips/kernel/scall64-n32.S --- linux-2.6.24/arch/mips/kernel/scall64-n32.S 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/scall64-n32.S 2009-11-17 12:13:29.000000000 +0100 @@ -390,7 +390,7 @@ PTR sys_splice PTR sys_sync_file_range PTR sys_tee - PTR sys_vmsplice /* 6270 */ + PTR compat_sys_vmsplice /* 6270 */ PTR sys_move_pages PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list diff -Nurd linux-2.6.24/arch/mips/kernel/scall64-o32.S mer-smartq-kernel/arch/mips/kernel/scall64-o32.S --- linux-2.6.24/arch/mips/kernel/scall64-o32.S 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/scall64-o32.S 2009-11-17 12:13:29.000000000 +0100 @@ -174,14 +174,12 @@ END(handle_sys) LEAF(sys32_syscall) - sltu v0, a0, __NR_O32_Linux + __NR_O32_Linux_syscalls + 1 + subu t0, a0, __NR_O32_Linux # check syscall number + sltiu v0, t0, __NR_O32_Linux_syscalls + 1 + beqz t0, einval # do not recurse + dsll t1, t0, 3 beqz v0, einval - - dsll v0, a0, 3 - ld t2, (sys_call_table - (__NR_O32_Linux * 8))(v0) - - li v1, 4000 # indirect syscall number - beq a0, v1, einval # do not recurse + ld t2, sys_call_table(t1) # syscall routine move a0, a1 # shift argument registers move a1, a2 @@ -198,7 +196,7 @@ jr t2 /* Unreached */ -einval: li v0, -EINVAL +einval: li v0, -ENOSYS jr ra END(sys32_syscall) @@ -512,7 +510,7 @@ PTR sys_splice PTR sys32_sync_file_range /* 4305 */ PTR sys_tee - PTR sys_vmsplice + PTR compat_sys_vmsplice PTR compat_sys_move_pages PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list /* 4310 */ diff -Nurd linux-2.6.24/arch/mips/kernel/setup.c mer-smartq-kernel/arch/mips/kernel/setup.c --- linux-2.6.24/arch/mips/kernel/setup.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/setup.c 2009-11-17 12:13:29.000000000 +0100 @@ -329,6 +329,7 @@ /* * Determine low and high memory ranges */ + max_pfn = max_low_pfn; if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) { #ifdef CONFIG_HIGHMEM highstart_pfn = PFN_DOWN(HIGHMEM_START); @@ -609,8 +610,8 @@ struct dentry *d; d = debugfs_create_dir("mips", NULL); - if (IS_ERR(d)) - return PTR_ERR(d); + if (!d) + return -ENOMEM; mips_debugfs_dir = d; return 0; } diff -Nurd linux-2.6.24/arch/mips/kernel/smp.c mer-smartq-kernel/arch/mips/kernel/smp.c --- linux-2.6.24/arch/mips/kernel/smp.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/smp.c 2009-11-17 12:13:29.000000000 +0100 @@ -238,8 +238,10 @@ * Remove this CPU: */ cpu_clear(smp_processor_id(), cpu_online_map); - local_irq_enable(); /* May need to service _machine_restart IPI */ - for (;;); /* Wait if available. */ + for (;;) { + if (cpu_wait) + (*cpu_wait)(); /* Wait if available. */ + } } void smp_send_stop(void) diff -Nurd linux-2.6.24/arch/mips/kernel/smtc.c mer-smartq-kernel/arch/mips/kernel/smtc.c --- linux-2.6.24/arch/mips/kernel/smtc.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/smtc.c 2009-11-17 12:13:29.000000000 +0100 @@ -1,4 +1,21 @@ -/* Copyright (C) 2004 Mips Technologies, Inc */ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
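The syscall-stub hunks above (scall32-o32.S, scall64-64.S, scall64-o32.S) all follow from the MIPS convention for reporting syscall failures: the kernel leaves a positive errno in v0 and sets register a3 (saved as PT_R7) as an error flag, and userspace folds that into the familiar -1/errno pair. Loading -ENOSYS while also setting the flag therefore produced a bogus errno, and a bad indirect syscall should report ENOSYS rather than EINVAL. A small userspace model of the libc side of the convention; the struct and function names are invented for illustration only.

#include <errno.h>
#include <stdio.h>

struct raw_syscall_result {
    long v0;   /* return value, or a positive errno   */
    long a3;   /* non-zero means v0 holds an errno     */
};

/* Model of what the C library does with the raw register pair. */
static long libc_wrap(struct raw_syscall_result r)
{
    if (r.a3) {
        errno = (int)r.v0;    /* e.g. ENOSYS for an unknown syscall */
        return -1;
    }
    return r.v0;
}

int main(void)
{
    struct raw_syscall_result bad = { .v0 = ENOSYS, .a3 = 1 };
    struct raw_syscall_result ok  = { .v0 = 42,     .a3 = 0 };

    printf("bad -> %ld (errno %d)\n", libc_wrap(bad), errno);
    printf("ok  -> %ld\n", libc_wrap(ok));
    return 0;
}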
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2004 Mips Technologies, Inc + * Copyright (C) 2008 Kevin D. Kissell + */ #include #include @@ -22,7 +39,6 @@ #include #include #include -#include #include /* @@ -59,11 +75,6 @@ asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; -/* - * Clock interrupt "latch" buffers, per "CPU" - */ - -static atomic_t ipi_timer_latch[NR_CPUS]; /* * Number of InterProcessor Interupt (IPI) message buffers to allocate @@ -71,7 +82,7 @@ #define IPIBUF_PER_CPU 4 -static struct smtc_ipi_q IPIQ[NR_CPUS]; +struct smtc_ipi_q IPIQ[NR_CPUS]; static struct smtc_ipi_q freeIPIq; @@ -291,7 +302,7 @@ * phys_cpu_present_map and the logical/physical mappings. */ -int __init mipsmt_build_cpu_map(int start_cpu_slot) +int __init smtc_build_cpu_map(int start_cpu_slot) { int i, ntcs; @@ -334,7 +345,12 @@ write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A); - write_tc_c0_tccontext(0); + /* + * TCContext gets an offset from the base of the IPIQ array + * to be used in low-level code to detect the presence of + * an active IPI queue + */ + write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); /* Bind tc to vpe */ write_tc_c0_tcbind(vpe); /* In general, all TCs should have the same cpu_data indications */ @@ -346,8 +362,14 @@ cpu_data[cpu].tc_id = tc; } +/* + * Tweak to get Count registes in as close a sync as possible. + * Value seems good for 34K-class cores. + */ -void mipsmt_prepare_cpus(void) +#define CP0_SKEW 8 + +void smtc_prepare_cpus(int cpus) { int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; unsigned long flags; @@ -371,7 +393,6 @@ IPIQ[i].head = IPIQ[i].tail = NULL; spin_lock_init(&IPIQ[i].lock); IPIQ[i].depth = 0; - atomic_set(&ipi_timer_latch[i], 0); } /* cpu_data index starts at zero */ @@ -492,7 +513,8 @@ write_vpe_c0_compare(0); /* Propagate Config7 */ write_vpe_c0_config7(read_c0_config7()); - write_vpe_c0_count(read_c0_count()); + write_vpe_c0_count(read_c0_count() + CP0_SKEW); + ehb(); } /* enable multi-threading within VPE */ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); @@ -564,7 +586,7 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) { extern u32 kernelsp[NR_CPUS]; - long flags; + unsigned long flags; int mtflags; LOCK_MT_PRA(); @@ -593,24 +615,22 @@ void smtc_init_secondary(void) { - /* - * Start timer on secondary VPEs if necessary. - * plat_timer_setup has already have been invoked by init/main - * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that - * SMTC init code assigns TCs consdecutively and in ascending order - * to across available VPEs. - */ - if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && - ((read_c0_tcbind() & TCBIND_CURVPE) - != cpu_data[smp_processor_id() - 1].vpe_id)){ - write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); - } - local_irq_enable(); } void smtc_smp_finish(void) { + int cpu = smp_processor_id(); + + /* + * Lowest-numbered CPU per VPE starts a clock tick. 
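Several hunks in smtc.c here (and the ptrace.c hunk earlier) change interrupt-flag variables from int or long to unsigned long: the value saved by local_irq_save() is a full-width register image, and a narrower or signed type can truncate or misinterpret it on 64-bit kernels. A compilable sketch of the idiom; save_flags()/restore_flags() are userspace stand-ins for the kernel's local_irq_save()/local_irq_restore().

#include <stdio.h>

static unsigned long pretend_status = 0xff01UL;   /* pretend CP0 Status */

#define save_flags(f)    do { (f) = pretend_status; pretend_status &= ~1UL; } while (0)
#define restore_flags(f) do { pretend_status = (f); } while (0)

int main(void)
{
    unsigned long flags;            /* not int/long: keep the full word */

    save_flags(flags);
    /* ... pseudo critical section with "interrupts" masked ... */
    restore_flags(flags);

    printf("status restored to %#lx\n", pretend_status);
    return 0;
}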
+ * Like per_cpu_trap_init() hack, this assumes that + * SMTC init code assigns TCs consdecutively and + * in ascending order across available VPEs. + */ + if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) + write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); + printk("TC %d going on-line as CPU %d\n", cpu_data[smp_processor_id()].tc_id, smp_processor_id()); } @@ -761,8 +781,10 @@ { int tcstatus; struct smtc_ipi *pipi; - long flags; + unsigned long flags; int mtflags; + unsigned long tcrestart; + extern void r4k_wait_irqoff(void), __pastwait(void); if (cpu == smp_processor_id()) { printk("Cannot Send IPI to self!\n"); @@ -779,9 +801,7 @@ pipi->arg = (void *)action; pipi->dest = cpu; if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { - if (type == SMTC_CLOCK_TICK) - atomic_inc(&ipi_timer_latch[cpu]); - /* If not on same VPE, enqueue and send cross-VPE interupt */ + /* If not on same VPE, enqueue and send cross-VPE interrupt */ smtc_ipi_nq(&IPIQ[cpu], pipi); LOCK_CORE_PRA(); settc(cpu_data[cpu].tc_id); @@ -808,22 +828,29 @@ if ((tcstatus & TCSTATUS_IXMT) != 0) { /* - * Spin-waiting here can deadlock, - * so we queue the message for the target TC. + * If we're in the the irq-off version of the wait + * loop, we need to force exit from the wait and + * do a direct post of the IPI. + */ + if (cpu_wait == r4k_wait_irqoff) { + tcrestart = read_tc_c0_tcrestart(); + if (tcrestart >= (unsigned long)r4k_wait_irqoff + && tcrestart < (unsigned long)__pastwait) { + write_tc_c0_tcrestart(__pastwait); + tcstatus &= ~TCSTATUS_IXMT; + write_tc_c0_tcstatus(tcstatus); + goto postdirect; + } + } + /* + * Otherwise we queue the message for the target TC + * to pick up when he does a local_irq_restore() */ write_tc_c0_tchalt(0); UNLOCK_CORE_PRA(); - /* Try to reduce redundant timer interrupt messages */ - if (type == SMTC_CLOCK_TICK) { - if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){ - smtc_ipi_nq(&freeIPIq, pipi); - return; - } - } smtc_ipi_nq(&IPIQ[cpu], pipi); } else { - if (type == SMTC_CLOCK_TICK) - atomic_inc(&ipi_timer_latch[cpu]); +postdirect: post_direct_ipi(cpu, pipi); write_tc_c0_tchalt(0); UNLOCK_CORE_PRA(); @@ -892,7 +919,7 @@ smp_call_function_interrupt(); } -DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); +DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); void ipi_decode(struct smtc_ipi *pipi) { @@ -900,20 +927,13 @@ struct clock_event_device *cd; void *arg_copy = pipi->arg; int type_copy = pipi->type; - int ticks; - smtc_ipi_nq(&freeIPIq, pipi); switch (type_copy) { case SMTC_CLOCK_TICK: irq_enter(); kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; - cd = &per_cpu(smtc_dummy_clockevent_device, cpu); - ticks = atomic_read(&ipi_timer_latch[cpu]); - atomic_sub(ticks, &ipi_timer_latch[cpu]); - while (ticks) { - cd->event_handler(cd); - ticks--; - } + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); irq_exit(); break; @@ -946,24 +966,48 @@ } } +/* + * Similar to smtc_ipi_replay(), but invoked from context restore, + * so it reuses the current exception frame rather than set up a + * new one with self_ipi. + */ + void deferred_smtc_ipi(void) { - struct smtc_ipi *pipi; - unsigned long flags; -/* DEBUG */ - int q = smp_processor_id(); + int cpu = smp_processor_id(); /* * Test is not atomic, but much faster than a dequeue, * and the vast majority of invocations will have a null queue. 
+ * If irq_disabled when this was called, then any IPIs queued + * after we test last will be taken on the next irq_enable/restore. + * If interrupts were enabled, then any IPIs added after the + * last test will be taken directly. */ - if (IPIQ[q].head != NULL) { - while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { - /* ipi_decode() should be called with interrupts off */ - local_irq_save(flags); + + while (IPIQ[cpu].head != NULL) { + struct smtc_ipi_q *q = &IPIQ[cpu]; + struct smtc_ipi *pipi; + unsigned long flags; + + /* + * It may be possible we'll come in with interrupts + * already enabled. + */ + local_irq_save(flags); + + spin_lock(&q->lock); + pipi = __smtc_ipi_dq(q); + spin_unlock(&q->lock); + if (pipi != NULL) ipi_decode(pipi); - local_irq_restore(flags); - } + /* + * The use of the __raw_local restore isn't + * as obviously necessary here as in smtc_ipi_replay(), + * but it's more efficient, given that we're already + * running down the IPI queue. + */ + __raw_local_irq_restore(flags); } } @@ -984,7 +1028,7 @@ struct smtc_ipi *pipi; unsigned long tcstatus; int sent; - long flags; + unsigned long flags; unsigned int mtflags; unsigned int vpflags; @@ -1075,55 +1119,53 @@ /* * SMTC-specific hacks invoked from elsewhere in the kernel. - * - * smtc_ipi_replay is called from raw_local_irq_restore which is only ever - * called with interrupts disabled. We do rely on interrupts being disabled - * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would - * result in a recursive call to raw_local_irq_restore(). */ -static void __smtc_ipi_replay(void) + /* + * smtc_ipi_replay is called from raw_local_irq_restore + */ + +void smtc_ipi_replay(void) { unsigned int cpu = smp_processor_id(); /* * To the extent that we've ever turned interrupts off, * we may have accumulated deferred IPIs. This is subtle. - * If we use the smtc_ipi_qdepth() macro, we'll get an - * exact number - but we'll also disable interrupts - * and create a window of failure where a new IPI gets - * queued after we test the depth but before we re-enable - * interrupts. So long as IXMT never gets set, however, * we should be OK: If we pick up something and dispatch * it here, that's great. If we see nothing, but concurrent * with this operation, another TC sends us an IPI, IXMT * is clear, and we'll handle it as a real pseudo-interrupt - * and not a pseudo-pseudo interrupt. + * and not a pseudo-pseudo interrupt. The important thing + * is to do the last check for queued message *after* the + * re-enabling of interrupts. */ - if (IPIQ[cpu].depth > 0) { - while (1) { - struct smtc_ipi_q *q = &IPIQ[cpu]; - struct smtc_ipi *pipi; - extern void self_ipi(struct smtc_ipi *); + while (IPIQ[cpu].head != NULL) { + struct smtc_ipi_q *q = &IPIQ[cpu]; + struct smtc_ipi *pipi; + unsigned long flags; - spin_lock(&q->lock); - pipi = __smtc_ipi_dq(q); - spin_unlock(&q->lock); - if (!pipi) - break; + /* + * It's just possible we'll come in with interrupts + * already enabled. + */ + local_irq_save(flags); + spin_lock(&q->lock); + pipi = __smtc_ipi_dq(q); + spin_unlock(&q->lock); + /* + ** But use a raw restore here to avoid recursion. 
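deferred_smtc_ipi() and the rewritten smtc_ipi_replay() above share one drain pattern: cheaply re-test the queue head on every pass, take the spinlock only around the dequeue, and decode the message with the lock dropped, so anything queued after the final test is simply picked up on the next interrupt enable. A pthread sketch of that shape; the queue layout and names are invented and are not the kernel's struct smtc_ipi_q.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct msg { struct msg *next; int payload; };

struct msg_q {
    pthread_mutex_t lock;
    struct msg *head;
};

/* Dequeue one message under the lock; NULL if the queue is empty. */
static struct msg *dq(struct msg_q *q)
{
    struct msg *m;

    pthread_mutex_lock(&q->lock);
    m = q->head;
    if (m)
        q->head = m->next;
    pthread_mutex_unlock(&q->lock);
    return m;
}

/* Drain loop: racy head test first, lock held only for the pop,
 * handling done outside the lock. */
static void drain(struct msg_q *q)
{
    while (q->head != NULL) {
        struct msg *m = dq(q);

        if (m) {
            printf("handling %d\n", m->payload);
            free(m);
        }
    }
}

int main(void)
{
    struct msg_q q = { PTHREAD_MUTEX_INITIALIZER, NULL };

    for (int i = 0; i < 3; i++) {
        struct msg *m = malloc(sizeof(*m));
        m->payload = i;
        m->next = q.head;
        q.head = m;
    }
    drain(&q);
    return 0;
}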
+ */ + __raw_local_irq_restore(flags); + + if (pipi) { self_ipi(pipi); smtc_cpu_stats[cpu].selfipis++; } } } -void smtc_ipi_replay(void) -{ - raw_local_irq_disable(); - __smtc_ipi_replay(); -} - EXPORT_SYMBOL(smtc_ipi_replay); void smtc_idle_loop_hook(void) @@ -1202,40 +1244,13 @@ } } - /* - * Now that we limit outstanding timer IPIs, check for hung TC - */ - for (tc = 0; tc < NR_CPUS; tc++) { - /* Don't check ourself - we'll dequeue IPIs just below */ - if ((tc != smp_processor_id()) && - atomic_read(&ipi_timer_latch[tc]) > timerq_limit) { - if (clock_hang_reported[tc] == 0) { - pdb_msg += sprintf(pdb_msg, - "TC %d looks hung with timer latch at %d\n", - tc, atomic_read(&ipi_timer_latch[tc])); - clock_hang_reported[tc]++; - } - } - } emt(mtflags); local_irq_restore(flags); if (pdb_msg != &id_ho_db_msg[0]) printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ - /* - * Replay any accumulated deferred IPIs. If "Instant Replay" - * is in use, there should never be any. - */ -#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY - { - unsigned long flags; - - local_irq_save(flags); - __smtc_ipi_replay(); - local_irq_restore(flags); - } -#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ + smtc_ipi_replay(); } void smtc_soft_dump(void) @@ -1251,10 +1266,6 @@ printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); } smtc_ipi_qdump(); - printk("Timer IPI Backlogs:\n"); - for (i=0; i < NR_CPUS; i++) { - printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i])); - } printk("%d Recoveries of \"stolen\" FPU\n", atomic_read(&smtc_fpu_recoveries)); } diff -Nurd linux-2.6.24/arch/mips/kernel/sysirix.c mer-smartq-kernel/arch/mips/kernel/sysirix.c --- linux-2.6.24/arch/mips/kernel/sysirix.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/sysirix.c 2009-11-17 12:13:29.000000000 +0100 @@ -111,7 +111,7 @@ printk("irix_prctl[%s:%d]: Wants PR_ISBLOCKED\n", current->comm, current->pid); read_lock(&tasklist_lock); - task = find_task_by_pid(va_arg(args, pid_t)); + task = find_task_by_vpid(va_arg(args, pid_t)); error = -ESRCH; if (error) error = (task->run_list.next != NULL); diff -Nurd linux-2.6.24/arch/mips/kernel/time.c mer-smartq-kernel/arch/mips/kernel/time.c --- linux-2.6.24/arch/mips/kernel/time.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/time.c 2009-11-17 12:13:29.000000000 +0100 @@ -159,6 +159,6 @@ { plat_time_init(); - if (mips_clockevent_init() || !cpu_has_mfc0_count_bug()) + if (!mips_clockevent_init() || !cpu_has_mfc0_count_bug()) init_mips_clocksource(); } diff -Nurd linux-2.6.24/arch/mips/kernel/traps.c mer-smartq-kernel/arch/mips/kernel/traps.c --- linux-2.6.24/arch/mips/kernel/traps.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/traps.c 2009-11-17 12:13:29.000000000 +0100 @@ -43,6 +43,9 @@ #include #include +extern void check_wait(void); +extern asmlinkage void r4k_wait(void); +extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); extern asmlinkage void handle_tlbm(void); extern asmlinkage void handle_tlbl(void); @@ -638,35 +641,24 @@ force_sig_info(SIGFPE, &info, current); } -asmlinkage void do_bp(struct pt_regs *regs) +static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + const char *str) { - unsigned int opcode, bcode; siginfo_t info; - - if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) - goto out_sigsegv; - - /* - * There is the ancient bug in the MIPS assemblers that the break - * code starts left to bit 16 
instead to bit 6 in the opcode. - * Gas is bug-compatible, but not always, grrr... - * We handle both cases with a simple heuristics. --macro - */ - bcode = ((opcode >> 6) & ((1 << 20) - 1)); - if (bcode < (1 << 10)) - bcode <<= 10; + char b[40]; /* - * (A short test says that IRIX 5.3 sends SIGTRAP for all break - * insns, even for break codes that indicate arithmetic failures. - * Weird ...) + * A short test says that IRIX 5.3 sends SIGTRAP for all trap + * insns, even for trap and break codes that indicate arithmetic + * failures. Weird ... * But should we continue the brokenness??? --macro */ - switch (bcode) { - case BRK_OVERFLOW << 10: - case BRK_DIVZERO << 10: - die_if_kernel("Break instruction in kernel code", regs); - if (bcode == (BRK_DIVZERO << 10)) + switch (code) { + case BRK_OVERFLOW: + case BRK_DIVZERO: + scnprintf(b, sizeof(b), "%s instruction in kernel code", str); + die_if_kernel(b, regs); + if (code == BRK_DIVZERO) info.si_code = FPE_INTDIV; else info.si_code = FPE_INTOVF; @@ -676,12 +668,34 @@ force_sig_info(SIGFPE, &info, current); break; case BRK_BUG: - die("Kernel bug detected", regs); + die_if_kernel("Kernel bug detected", regs); + force_sig(SIGTRAP, current); break; default: - die_if_kernel("Break instruction in kernel code", regs); + scnprintf(b, sizeof(b), "%s instruction in kernel code", str); + die_if_kernel(b, regs); force_sig(SIGTRAP, current); } +} + +asmlinkage void do_bp(struct pt_regs *regs) +{ + unsigned int opcode, bcode; + + if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) + goto out_sigsegv; + + /* + * There is the ancient bug in the MIPS assemblers that the break + * code starts left to bit 16 instead to bit 6 in the opcode. + * Gas is bug-compatible, but not always, grrr... + * We handle both cases with a simple heuristics. --macro + */ + bcode = ((opcode >> 6) & ((1 << 20) - 1)); + if (bcode >= (1 << 10)) + bcode >>= 10; + + do_trap_or_bp(regs, bcode, "Break"); return; out_sigsegv: @@ -691,7 +705,6 @@ asmlinkage void do_tr(struct pt_regs *regs) { unsigned int opcode, tcode = 0; - siginfo_t info; if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) goto out_sigsegv; @@ -700,32 +713,7 @@ if (!(opcode & OPCODE)) tcode = ((opcode >> 6) & ((1 << 10) - 1)); - /* - * (A short test says that IRIX 5.3 sends SIGTRAP for all trap - * insns, even for trap codes that indicate arithmetic failures. - * Weird ...) - * But should we continue the brokenness??? 
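do_bp() above keeps the old assembler-compatibility heuristic for the break code field but now normalizes downwards (bcode >>= 10 when the value is large) instead of upwards, so the shared do_trap_or_bp() can compare the result directly against plain BRK_* constants for both break and trap instructions. A standalone model of that normalization; the BRK_DIVZERO value is hard-coded here purely for illustration.

#include <assert.h>
#include <stdio.h>

#define BRK_DIVZERO 7   /* illustrative value */

/* The break code is a 20-bit field at bit 6; old assemblers placed it
 * 10 bits higher, so large values are shifted back down. */
static unsigned int break_code(unsigned int opcode)
{
    unsigned int bcode = (opcode >> 6) & ((1u << 20) - 1);

    if (bcode >= (1u << 10))
        bcode >>= 10;
    return bcode;
}

int main(void)
{
    unsigned int correct = BRK_DIVZERO << 6;    /* code at bit 6  */
    unsigned int legacy  = BRK_DIVZERO << 16;   /* code at bit 16 */

    assert(break_code(correct) == BRK_DIVZERO);
    assert(break_code(legacy)  == BRK_DIVZERO);
    printf("both encodings map to code %u\n", break_code(legacy));
    return 0;
}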
--macro - */ - switch (tcode) { - case BRK_OVERFLOW: - case BRK_DIVZERO: - die_if_kernel("Trap instruction in kernel code", regs); - if (tcode == BRK_DIVZERO) - info.si_code = FPE_INTDIV; - else - info.si_code = FPE_INTOVF; - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_addr = (void __user *) regs->cp0_epc; - force_sig_info(SIGFPE, &info, current); - break; - case BRK_BUG: - die("Kernel bug detected", regs); - break; - default: - die_if_kernel("Trap instruction in kernel code", regs); - force_sig(SIGTRAP, current); - } + do_trap_or_bp(regs, tcode, "Trap"); return; out_sigsegv: @@ -783,8 +771,10 @@ if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { cpumask_t tmask; - cpus_and(tmask, current->thread.user_cpus_allowed, - mt_fpu_cpumask); + current->thread.user_cpus_allowed + = current->cpus_allowed; + cpus_and(tmask, current->cpus_allowed, + mt_fpu_cpumask); set_cpus_allowed(current, tmask); set_thread_flag(TIF_FPUBOUND); } @@ -1146,6 +1136,9 @@ extern char except_vec_vi, except_vec_vi_lui; extern char except_vec_vi_ori, except_vec_vi_end; + extern char rollback_except_vec_vi; + char *vec_start = (cpu_wait == r4k_wait) ? + &rollback_except_vec_vi : &except_vec_vi; #ifdef CONFIG_MIPS_MT_SMTC /* * We need to provide the SMTC vectored interrupt handler @@ -1153,11 +1146,11 @@ * Status.IM bit to be masked before going there. */ extern char except_vec_vi_mori; - const int mori_offset = &except_vec_vi_mori - &except_vec_vi; + const int mori_offset = &except_vec_vi_mori - vec_start; #endif /* CONFIG_MIPS_MT_SMTC */ - const int handler_len = &except_vec_vi_end - &except_vec_vi; - const int lui_offset = &except_vec_vi_lui - &except_vec_vi; - const int ori_offset = &except_vec_vi_ori - &except_vec_vi; + const int handler_len = &except_vec_vi_end - vec_start; + const int lui_offset = &except_vec_vi_lui - vec_start; + const int ori_offset = &except_vec_vi_ori - vec_start; if (handler_len > VECTORSPACING) { /* @@ -1167,7 +1160,7 @@ panic("VECTORSPACING too small"); } - memcpy(b, &except_vec_vi, handler_len); + memcpy(b, vec_start, handler_len); #ifdef CONFIG_MIPS_MT_SMTC BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ @@ -1287,7 +1280,7 @@ int cp0_perfcount_irq; EXPORT_SYMBOL_GPL(cp0_perfcount_irq); -void __init per_cpu_trap_init(void) +void __cpuinit per_cpu_trap_init(void) { unsigned int cpu = smp_processor_id(); unsigned int status_set = ST0_CU0; @@ -1404,11 +1397,12 @@ flush_icache_range(ebase + offset, ebase + offset + size); } -static char panic_null_cerr[] __initdata = +static char panic_null_cerr[] __cpuinitdata = "Trying to set NULL cache error exception handler"; /* Install uncached CPU exception handler */ -void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size) +void __cpuinit set_uncached_handler(unsigned long offset, void *addr, + unsigned long size) { #ifdef CONFIG_32BIT unsigned long uncached_ebase = KSEG1ADDR(ebase); @@ -1437,6 +1431,10 @@ extern char except_vec3_generic, except_vec3_r4000; extern char except_vec4; unsigned long i; + int rollback; + + check_wait(); + rollback = (cpu_wait == r4k_wait); if (cpu_has_veic || cpu_has_vint) ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64); @@ -1496,7 +1494,7 @@ if (board_be_init) board_be_init(); - set_except_vector(0, handle_int); + set_except_vector(0, rollback ? 
rollback_handle_int : handle_int); set_except_vector(1, handle_tlbm); set_except_vector(2, handle_tlbl); set_except_vector(3, handle_tlbs); diff -Nurd linux-2.6.24/arch/mips/kernel/unaligned.c mer-smartq-kernel/arch/mips/kernel/unaligned.c --- linux-2.6.24/arch/mips/kernel/unaligned.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/unaligned.c 2009-11-17 12:13:29.000000000 +0100 @@ -560,12 +560,12 @@ return -ENODEV; d = debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir, &unaligned_instructions); - if (IS_ERR(d)) - return PTR_ERR(d); + if (!d) + return -ENOMEM; d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, mips_debugfs_dir, &unaligned_action); - if (IS_ERR(d)) - return PTR_ERR(d); + if (!d) + return -ENOMEM; return 0; } __initcall(debugfs_unaligned); diff -Nurd linux-2.6.24/arch/mips/kernel/vpe.c mer-smartq-kernel/arch/mips/kernel/vpe.c --- linux-2.6.24/arch/mips/kernel/vpe.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/kernel/vpe.c 2009-11-17 12:13:29.000000000 +0100 @@ -263,13 +263,21 @@ /* Find some VPE program space */ static void *alloc_progmem(unsigned long len) { + void *addr; + #ifdef CONFIG_MIPS_VPE_LOADER_TOM - /* this means you must tell linux to use less memory than you physically have */ - return pfn_to_kaddr(max_pfn); + /* + * This means you must tell Linux to use less memory than you + * physically have, for example by passing a mem= boot argument. + */ + addr = pfn_to_kaddr(max_pfn); + memset(addr, 0, len); #else - // simple grab some mem for now - return kmalloc(len, GFP_KERNEL); + /* simple grab some mem for now */ + addr = kzalloc(len, GFP_KERNEL); #endif + + return addr; } static void release_progmem(void *ptr) @@ -774,10 +782,15 @@ /* take system out of configuration state */ clear_c0_mvpcontrol(MVPCONTROL_VPC); + /* + * SMTC/SMVP kernels manage VPE enable independently, + * but uniprocessor kernels need to turn it on, even + * if that wasn't the pre-dvpe() state. + */ #ifdef CONFIG_SMP - evpe(EVPE_ENABLE); -#else evpe(vpeflags); +#else + evpe(EVPE_ENABLE); #endif emt(dmt_flag); local_irq_restore(flags); @@ -885,9 +898,10 @@ } v->load_addr = alloc_progmem(mod.core_size); - memset(v->load_addr, 0, mod.core_size); + if (!v->load_addr) + return -ENOMEM; - printk("VPE loader: loading to %p\n", v->load_addr); + pr_info("VPE loader: loading to %p\n", v->load_addr); if (relocate) { for (i = 0; i < hdr->e_shnum; i++) { @@ -939,12 +953,14 @@ struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff); for (i = 0; i < hdr->e_phnum; i++) { - if (phdr->p_type != PT_LOAD) - continue; - - memcpy((void *)phdr->p_paddr, (char *)hdr + phdr->p_offset, phdr->p_filesz); - memset((void *)phdr->p_paddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); - phdr++; + if (phdr->p_type == PT_LOAD) { + memcpy((void *)phdr->p_paddr, + (char *)hdr + phdr->p_offset, + phdr->p_filesz); + memset((void *)phdr->p_paddr + phdr->p_filesz, + 0, phdr->p_memsz - phdr->p_filesz); + } + phdr++; } for (i = 0; i < hdr->e_shnum; i++) { @@ -1113,7 +1129,7 @@ /* It's good to be able to run the SP and if it chokes have a look at the /dev/rt?. But if we reset the pointer to the shared struct we - loose what has happened. So perhaps if garbage is sent to the vpe + lose what has happened. So perhaps if garbage is sent to the vpe device, use it as a trigger for the reset. Hopefully a nice executable will be along shortly. 
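The VPE loader hunk above restructures the program-header loop but keeps the standard ELF loading rule: for each PT_LOAD segment, copy p_filesz bytes from the file image and zero the remaining p_memsz - p_filesz bytes, which is where .bss lives. A minimal sketch of that rule using the host's elf.h, with a fabricated one-segment image standing in for a real SP program.

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Copy the file-backed part of a loadable segment, then clear the tail. */
static void load_segment(const Elf32_Phdr *phdr, const char *image, char *dest)
{
    if (phdr->p_type != PT_LOAD)
        return;

    memcpy(dest, image + phdr->p_offset, phdr->p_filesz);
    memset(dest + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
}

int main(void)
{
    char image[16] = "\x7f" "ELF....payload";   /* fake file image */
    char dest[12];
    Elf32_Phdr ph = {
        .p_type   = PT_LOAD,
        .p_offset = 8,
        .p_filesz = 7,       /* "payload"                */
        .p_memsz  = 12,      /* 5 zero-filled bss bytes  */
    };

    load_segment(&ph, image, dest);
    printf("loaded: %.7s + %u zero bytes\n", dest,
           (unsigned)(ph.p_memsz - ph.p_filesz));
    return 0;
}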
*/ if (ret < 0) diff -Nurd linux-2.6.24/arch/mips/lasat/interrupt.c mer-smartq-kernel/arch/mips/lasat/interrupt.c --- linux-2.6.24/arch/mips/lasat/interrupt.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/lasat/interrupt.c 2009-11-17 12:13:29.000000000 +0100 @@ -34,11 +34,13 @@ void disable_lasat_irq(unsigned int irq_nr) { + irq_nr -= LASAT_IRQ_BASE; *lasat_int_mask &= ~(1 << irq_nr) << lasat_int_mask_shift; } void enable_lasat_irq(unsigned int irq_nr) { + irq_nr -= LASAT_IRQ_BASE; *lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift; } diff -Nurd linux-2.6.24/arch/mips/lasat/lasat_board.c mer-smartq-kernel/arch/mips/lasat/lasat_board.c --- linux-2.6.24/arch/mips/lasat/lasat_board.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/lasat/lasat_board.c 2009-11-17 12:13:29.000000000 +0100 @@ -23,18 +23,19 @@ #include #include #include +#include #include #include #include "at93c.h" /* New model description table */ #include "lasat_models.h" +static DEFINE_MUTEX(lasat_eeprom_mutex); + #define EEPROM_CRC(data, len) (~crc32(~0, data, len)) struct lasat_info lasat_board_info; -void update_bcastaddr(void); - int EEPROMRead(unsigned int pos, unsigned char *data, int len) { int i; @@ -258,10 +259,6 @@ sprintf(lasat_board_info.li_typestr, "%d", 10 * c); } -#if defined(CONFIG_INET) && defined(CONFIG_SYSCTL) - update_bcastaddr(); -#endif - return 0; } @@ -269,6 +266,8 @@ { unsigned long crc; + mutex_lock(&lasat_eeprom_mutex); + /* Generate the CRC */ crc = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info), sizeof(struct lasat_eeprom_struct) - 4); @@ -277,4 +276,6 @@ /* Write the EEPROM info */ EEPROMWrite(0, (unsigned char *)&lasat_board_info.li_eeprom_info, sizeof(struct lasat_eeprom_struct)); + + mutex_unlock(&lasat_eeprom_mutex); } diff -Nurd linux-2.6.24/arch/mips/lasat/sysctl.c mer-smartq-kernel/arch/mips/lasat/sysctl.c --- linux-2.6.24/arch/mips/lasat/sysctl.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/lasat/sysctl.c 2009-11-17 12:13:29.000000000 +0100 @@ -29,15 +29,13 @@ #include #include #include -#include #include #include -#include "sysctl.h" +#ifdef CONFIG_DS1603 #include "ds1603.h" - -static DEFINE_MUTEX(lasat_info_mutex); +#endif /* Strategy function to write EEPROM after changing string entry */ int sysctl_lasatstring(ctl_table *table, int *name, int nlen, @@ -46,18 +44,15 @@ { int r; - mutex_lock(&lasat_info_mutex); r = sysctl_string(table, name, nlen, oldval, oldlenp, newval, newlen); - if (r < 0) { - mutex_unlock(&lasat_info_mutex); + if (r < 0) return r; - } + if (newval && newlen) lasat_write_eeprom_info(); - mutex_unlock(&lasat_info_mutex); - return 1; + return 0; } @@ -67,14 +62,11 @@ { int r; - mutex_lock(&lasat_info_mutex); r = proc_dostring(table, write, filp, buffer, lenp, ppos); - if ((!write) || r) { - mutex_unlock(&lasat_info_mutex); + if ((!write) || r) return r; - } + lasat_write_eeprom_info(); - mutex_unlock(&lasat_info_mutex); return 0; } @@ -85,28 +77,24 @@ { int r; - mutex_lock(&lasat_info_mutex); r = proc_dointvec(table, write, filp, buffer, lenp, ppos); - if ((!write) || r) { - mutex_unlock(&lasat_info_mutex); + if ((!write) || r) return r; - } + lasat_write_eeprom_info(); - mutex_unlock(&lasat_info_mutex); return 0; } +#ifdef CONFIG_DS1603 static int rtctmp; -#ifdef CONFIG_DS1603 /* proc function to read/write RealTime Clock */ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos) { int r; - mutex_lock(&lasat_info_mutex); if 
(!write) { rtctmp = read_persistent_clock(); /* check for time < 0 and set to 0 */ @@ -114,12 +102,11 @@ rtctmp = 0; } r = proc_dointvec(table, write, filp, buffer, lenp, ppos); - if ((!write) || r) { - mutex_unlock(&lasat_info_mutex); + if (r) return r; - } - rtc_mips_set_mmss(rtctmp); - mutex_unlock(&lasat_info_mutex); + + if (write) + rtc_mips_set_mmss(rtctmp); return 0; } @@ -132,17 +119,14 @@ { int r; - mutex_lock(&lasat_info_mutex); r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen); - if (r < 0) { - mutex_unlock(&lasat_info_mutex); + if (r < 0) return r; - } + if (newval && newlen) lasat_write_eeprom_info(); - mutex_unlock(&lasat_info_mutex); - return 1; + return 0; } #ifdef CONFIG_DS1603 @@ -153,50 +137,27 @@ { int r; - mutex_lock(&lasat_info_mutex); rtctmp = read_persistent_clock(); if (rtctmp < 0) rtctmp = 0; r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen); - if (r < 0) { - mutex_unlock(&lasat_info_mutex); + if (r < 0) return r; - } if (newval && newlen) rtc_mips_set_mmss(rtctmp); - mutex_unlock(&lasat_info_mutex); - return 1; + return r; } #endif #ifdef CONFIG_INET -static char lasat_bcastaddr[16]; - -void update_bcastaddr(void) -{ - unsigned int ip; - - ip = (lasat_board_info.li_eeprom_info.ipaddr & - lasat_board_info.li_eeprom_info.netmask) | - ~lasat_board_info.li_eeprom_info.netmask; - - sprintf(lasat_bcastaddr, "%d.%d.%d.%d", - (ip) & 0xff, - (ip >> 8) & 0xff, - (ip >> 16) & 0xff, - (ip >> 24) & 0xff); -} - -static char proc_lasat_ipbuf[32]; - -/* Parsing of IP address */ int proc_lasat_ip(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos) { unsigned int ip; char *p, c; int len; + char ipbuf[32]; if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) { @@ -204,117 +165,88 @@ return 0; } - mutex_lock(&lasat_info_mutex); if (write) { len = 0; p = buffer; while (len < *lenp) { - if (get_user(c, p++)) { - mutex_unlock(&lasat_info_mutex); + if (get_user(c, p++)) return -EFAULT; - } if (c == 0 || c == '\n') break; len++; } - if (len >= sizeof(proc_lasat_ipbuf)-1) - len = sizeof(proc_lasat_ipbuf) - 1; - if (copy_from_user(proc_lasat_ipbuf, buffer, len)) { - mutex_unlock(&lasat_info_mutex); + if (len >= sizeof(ipbuf)-1) + len = sizeof(ipbuf) - 1; + if (copy_from_user(ipbuf, buffer, len)) return -EFAULT; - } - proc_lasat_ipbuf[len] = 0; + ipbuf[len] = 0; *ppos += *lenp; /* Now see if we can convert it to a valid IP */ - ip = in_aton(proc_lasat_ipbuf); + ip = in_aton(ipbuf); *(unsigned int *)(table->data) = ip; lasat_write_eeprom_info(); } else { ip = *(unsigned int *)(table->data); - sprintf(proc_lasat_ipbuf, "%d.%d.%d.%d", + sprintf(ipbuf, "%d.%d.%d.%d", (ip) & 0xff, (ip >> 8) & 0xff, (ip >> 16) & 0xff, (ip >> 24) & 0xff); - len = strlen(proc_lasat_ipbuf); + len = strlen(ipbuf); if (len > *lenp) len = *lenp; if (len) - if (copy_to_user(buffer, proc_lasat_ipbuf, len)) { - mutex_unlock(&lasat_info_mutex); + if (copy_to_user(buffer, ipbuf, len)) return -EFAULT; - } if (len < *lenp) { - if (put_user('\n', ((char *) buffer) + len)) { - mutex_unlock(&lasat_info_mutex); + if (put_user('\n', ((char *) buffer) + len)) return -EFAULT; - } len++; } *lenp = len; *ppos += len; } - update_bcastaddr(); - mutex_unlock(&lasat_info_mutex); return 0; } -#endif /* defined(CONFIG_INET) */ +#endif -static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen, +static int sysctl_lasat_prid(ctl_table *table, int *name, int nlen, void *oldval, size_t *oldlenp, void *newval, size_t newlen) { int r; - 
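proc_lasat_ip() above now works in a stack buffer and converts between the stored 32-bit word and dotted-quad text, printing the lowest byte of the word as the first octet. A self-contained round trip in the same style follows; parse_ip() is only a simplified stand-in for the kernel's in_aton(), whose exact result additionally depends on byte order.

#include <stdio.h>

/* Format a stored word the same way the hunk above does: low byte first. */
static void format_ip(unsigned int ip, char *buf, size_t len)
{
    snprintf(buf, len, "%u.%u.%u.%u",
             ip & 0xff, (ip >> 8) & 0xff, (ip >> 16) & 0xff, (ip >> 24) & 0xff);
}

/* Simplified parser matching the formatter above (not real in_aton()). */
static unsigned int parse_ip(const char *s)
{
    unsigned int a, b, c, d;

    if (sscanf(s, "%u.%u.%u.%u", &a, &b, &c, &d) != 4)
        return 0;
    return a | (b << 8) | (c << 16) | (d << 24);
}

int main(void)
{
    char buf[32];

    format_ip(parse_ip("192.168.1.10"), buf, sizeof(buf));
    printf("%s\n", buf);    /* prints 192.168.1.10 again */
    return 0;
}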
mutex_lock(&lasat_info_mutex); r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen); - if (r < 0) { - mutex_unlock(&lasat_info_mutex); + if (r < 0) return r; - } - if (newval && newlen) { - if (name && *name == LASAT_PRID) - lasat_board_info.li_eeprom_info.prid = *(int *)newval; - + lasat_board_info.li_eeprom_info.prid = *(int *)newval; lasat_write_eeprom_info(); lasat_init_board_info(); } - mutex_unlock(&lasat_info_mutex); - return 0; } -int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp, +int proc_lasat_prid(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos) { int r; - mutex_lock(&lasat_info_mutex); r = proc_dointvec(table, write, filp, buffer, lenp, ppos); - if ((!write) || r) { - mutex_unlock(&lasat_info_mutex); + if (r < 0) return r; + if (write) { + lasat_board_info.li_eeprom_info.prid = + lasat_board_info.li_prid; + lasat_write_eeprom_info(); + lasat_init_board_info(); } - if (filp && filp->f_path.dentry) { - if (!strcmp(filp->f_path.dentry->d_name.name, "prid")) - lasat_board_info.li_eeprom_info.prid = - lasat_board_info.li_prid; - if (!strcmp(filp->f_path.dentry->d_name.name, "debugaccess")) - lasat_board_info.li_eeprom_info.debugaccess = - lasat_board_info.li_debugaccess; - } - lasat_write_eeprom_info(); - mutex_unlock(&lasat_info_mutex); - return 0; } extern int lasat_boot_to_service; -#ifdef CONFIG_SYSCTL - static ctl_table lasat_table[] = { { .ctl_name = CTL_UNNUMBERED, @@ -349,8 +281,8 @@ .data = &lasat_board_info.li_prid, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_lasat_eeprom_value, - .strategy = &sysctl_lasat_eeprom_value + .proc_handler = &proc_lasat_prid, + .strategy = &sysctl_lasat_prid }, #ifdef CONFIG_INET { @@ -363,7 +295,7 @@ .strategy = &sysctl_lasat_intvec }, { - .ctl_name = LASAT_NETMASK, + .ctl_name = CTL_UNNUMBERED, .procname = "netmask", .data = &lasat_board_info.li_eeprom_info.netmask, .maxlen = sizeof(int), @@ -371,15 +303,6 @@ .proc_handler = &proc_lasat_ip, .strategy = &sysctl_lasat_intvec }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "bcastaddr", - .data = &lasat_bcastaddr, - .maxlen = sizeof(lasat_bcastaddr), - .mode = 0600, - .proc_handler = &proc_dostring, - .strategy = &sysctl_string - }, #endif { .ctl_name = CTL_UNNUMBERED, @@ -417,7 +340,7 @@ .data = &lasat_board_info.li_namestr, .maxlen = sizeof(lasat_board_info.li_namestr), .mode = 0444, - .proc_handler = &proc_dostring, + .proc_handler = &proc_dostring, .strategy = &sysctl_string }, { @@ -448,9 +371,12 @@ lasat_table_header = register_sysctl_table(lasat_root_table); + if (!lasat_table_header) { + printk(KERN_ERR "Unable to register LASAT sysctl\n"); + return -ENOMEM; + } return 0; } __initcall(lasat_register_sysctl); -#endif /* CONFIG_SYSCTL */ diff -Nurd linux-2.6.24/arch/mips/lasat/sysctl.h mer-smartq-kernel/arch/mips/lasat/sysctl.h --- linux-2.6.24/arch/mips/lasat/sysctl.h 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/lasat/sysctl.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -/* - * LASAT sysctl values - */ - -#ifndef _LASAT_SYSCTL_H -#define _LASAT_SYSCTL_H - -/* /proc/sys/lasat */ -enum { - LASAT_CPU_HZ = 1, - LASAT_BUS_HZ, - LASAT_MODEL, - LASAT_PRID, - LASAT_IPADDR, - LASAT_NETMASK, - LASAT_BCAST, - LASAT_PASSWORD, - LASAT_SBOOT, - LASAT_RTC, - LASAT_NAMESTR, - LASAT_TYPESTR, -}; - -#endif /* _LASAT_SYSCTL_H */ diff -Nurd linux-2.6.24/arch/mips/lib/csum_partial.S mer-smartq-kernel/arch/mips/lib/csum_partial.S --- linux-2.6.24/arch/mips/lib/csum_partial.S 
2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/lib/csum_partial.S 2009-11-17 12:13:29.000000000 +0100 @@ -38,12 +38,14 @@ #ifdef USE_DOUBLE #define LOAD ld +#define LOAD32 lwu #define ADD daddu #define NBYTES 8 #else #define LOAD lw +#define LOAD32 lw #define ADD addu #define NBYTES 4 @@ -56,6 +58,14 @@ sltu v1, sum, reg; \ ADD sum, v1 +#define ADDC32(sum,reg) \ + .set push; \ + .set noat; \ + addu sum, reg; \ + sltu v1, sum, reg; \ + addu sum, v1; \ + .set pop + #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ LOAD _t0, (offset + UNIT(0))(src); \ LOAD _t1, (offset + UNIT(1))(src); \ @@ -128,7 +138,7 @@ beqz t8, qword_align andi t8, src, 0x8 - lw t0, 0x00(src) + LOAD32 t0, 0x00(src) LONG_SUBU a1, a1, 0x4 ADDC(sum, t0) PTR_ADDU src, src, 0x4 @@ -205,7 +215,7 @@ LONG_SRL t8, t8, 0x2 end_words: - lw t0, (src) + LOAD32 t0, (src) LONG_SUBU t8, t8, 0x1 ADDC(sum, t0) bnez t8, end_words @@ -222,6 +232,9 @@ /* Still a full word to go */ ulw t1, (src) PTR_ADDIU src, 4 +#ifdef USE_DOUBLE + dsll t1, t1, 32 /* clear lower 32bit */ +#endif ADDC(sum, t1) 1: move t1, zero @@ -269,7 +282,7 @@ 1: .set reorder /* Add the passed partial csum. */ - ADDC(sum, a2) + ADDC32(sum, a2) jr ra .set noreorder END(csum_partial) @@ -653,7 +666,7 @@ andi sum, 0xffff 1: .set reorder - ADDC(sum, psum) + ADDC32(sum, psum) jr ra .set noreorder diff -Nurd linux-2.6.24/arch/mips/lib/ucmpdi2.c mer-smartq-kernel/arch/mips/lib/ucmpdi2.c --- linux-2.6.24/arch/mips/lib/ucmpdi2.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/lib/ucmpdi2.c 2009-11-17 12:13:29.000000000 +0100 @@ -17,3 +17,5 @@ return 2; return 1; } + +EXPORT_SYMBOL(__ucmpdi2); diff -Nurd linux-2.6.24/arch/mips/lib/uncached.c mer-smartq-kernel/arch/mips/lib/uncached.c --- linux-2.6.24/arch/mips/lib/uncached.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/lib/uncached.c 2009-11-17 12:13:29.000000000 +0100 @@ -36,7 +36,7 @@ * values, so we can avoid sharing the same stack area between a cached * and the uncached mode. 
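The csum_partial.S changes above are about 64-bit correctness of the ones'-complement sum: stray 32-bit words must not be sign-extended into the 64-bit accumulator (hence LOAD32/lwu and the dsll fix), and the caller-supplied partial sum is added with a 32-bit add-with-carry (ADDC32) so the carry out of bit 31, not bit 63, is folded back in. A small C model of that end-around-carry accumulation and the final fold to 16 bits, with illustrative input values.

#include <stdint.h>
#include <stdio.h>

/* End-around-carry add, as the ADDC/ADDC32 macros above implement it. */
static uint32_t addc32(uint32_t sum, uint32_t val)
{
    sum += val;
    if (sum < val)      /* carry out of bit 31 */
        sum += 1;       /* wrap it back in     */
    return sum;
}

/* Fold the 32-bit running sum down to the final 16-bit checksum. */
static uint16_t fold16(uint32_t sum)
{
    sum = (sum & 0xffff) + (sum >> 16);
    sum += sum >> 16;   /* one more pass for a possible carry */
    return (uint16_t)sum;
}

int main(void)
{
    uint32_t sum = 0;

    sum = addc32(sum, 0xffff0001);   /* forces a carry on the next add */
    sum = addc32(sum, 0x00020000);
    printf("sum = %#x, folded = %#x\n", sum, fold16(sum));
    return 0;
}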
*/ -unsigned long __init run_uncached(void *func) +unsigned long __cpuinit run_uncached(void *func) { register long sp __asm__("$sp"); register long ret __asm__("$2"); diff -Nurd linux-2.6.24/arch/mips/Makefile mer-smartq-kernel/arch/mips/Makefile --- linux-2.6.24/arch/mips/Makefile 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/Makefile 2009-11-17 12:13:28.000000000 +0100 @@ -548,7 +548,11 @@ # core-$(CONFIG_SNI_RM) += arch/mips/sni/ cflags-$(CONFIG_SNI_RM) += -Iinclude/asm-mips/mach-rm +ifdef CONFIG_CPU_LITTLE_ENDIAN load-$(CONFIG_SNI_RM) += 0xffffffff80600000 +else +load-$(CONFIG_SNI_RM) += 0xffffffff80030000 +endif all-$(CONFIG_SNI_RM) := vmlinux.ecoff # diff -Nurd linux-2.6.24/arch/mips/math-emu/cp1emu.c mer-smartq-kernel/arch/mips/math-emu/cp1emu.c --- linux-2.6.24/arch/mips/math-emu/cp1emu.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/math-emu/cp1emu.c 2009-11-17 12:13:29.000000000 +0100 @@ -1299,12 +1299,12 @@ if (!mips_debugfs_dir) return -ENODEV; dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir); - if (IS_ERR(dir)) - return PTR_ERR(dir); + if (!dir) + return -ENOMEM; for (i = 0; i < ARRAY_SIZE(vars); i++) { d = debugfs_create_u32(vars[i].name, S_IRUGO, dir, vars[i].v); - if (IS_ERR(d)) - return PTR_ERR(d); + if (!d) + return -ENOMEM; } return 0; } diff -Nurd linux-2.6.24/arch/mips/mips-boards/generic/time.c mer-smartq-kernel/arch/mips/mips-boards/generic/time.c --- linux-2.6.24/arch/mips/mips-boards/generic/time.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mips-boards/generic/time.c 2009-11-17 12:13:29.000000000 +0100 @@ -146,7 +146,7 @@ } } -unsigned int __init get_c0_compare_int(void) +unsigned int __cpuinit get_c0_compare_int(void) { #ifdef MSC01E_INT_BASE if (cpu_has_veic) { diff -Nurd linux-2.6.24/arch/mips/mips-boards/malta/Makefile mer-smartq-kernel/arch/mips/mips-boards/malta/Makefile --- linux-2.6.24/arch/mips/mips-boards/malta/Makefile 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mips-boards/malta/Makefile 2009-11-17 12:13:29.000000000 +0100 @@ -19,9 +19,8 @@ # under Linux. # -obj-y := malta_int.o malta_platform.o malta_setup.o +obj-y := malta_int.o malta_mtd.o malta_platform.o malta_setup.o -obj-$(CONFIG_MTD) += malta_mtd.o obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o EXTRA_CFLAGS += -Werror diff -Nurd linux-2.6.24/arch/mips/mips-boards/malta/malta_smtc.c mer-smartq-kernel/arch/mips/mips-boards/malta/malta_smtc.c --- linux-2.6.24/arch/mips/mips-boards/malta/malta_smtc.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mips-boards/malta/malta_smtc.c 2009-11-17 12:13:29.000000000 +0100 @@ -62,14 +62,20 @@ void __cpuinit plat_smp_setup(void) { - if (read_c0_config3() & (1<<2)) - mipsmt_build_cpu_map(0); + if (read_c0_config3() & (1<<2)) { + /* + * we won't get the definitive value until + * we've run smtc_prepare_cpus later, but + * we would appear to need an upper bound now. 
+ */ + smtc_build_cpu_map(0); + } } void __init plat_prepare_cpus(unsigned int max_cpus) { if (read_c0_config3() & (1<<2)) - mipsmt_prepare_cpus(); + smtc_prepare_cpus(max_cpus); } /* diff -Nurd linux-2.6.24/arch/mips/mipssim/sim_time.c mer-smartq-kernel/arch/mips/mipssim/sim_time.c --- linux-2.6.24/arch/mips/mipssim/sim_time.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mipssim/sim_time.c 2009-11-17 12:13:29.000000000 +0100 @@ -83,7 +83,7 @@ } -unsigned __init get_c0_compare_int(void) +unsigned __cpuinit get_c0_compare_int(void) { #ifdef MSC01E_INT_BASE if (cpu_has_veic) { diff -Nurd linux-2.6.24/arch/mips/mm/cache.c mer-smartq-kernel/arch/mips/mm/cache.c --- linux-2.6.24/arch/mips/mm/cache.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/cache.c 2009-11-17 12:13:29.000000000 +0100 @@ -30,6 +30,9 @@ unsigned long pfn); void (*flush_icache_range)(unsigned long start, unsigned long end); +void (*__flush_cache_vmap)(void); +void (*__flush_cache_vunmap)(void); + /* MIPS specific cache operations */ void (*flush_cache_sigtramp)(unsigned long addr); void (*local_flush_data_cache_page)(void * addr); @@ -92,12 +95,17 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr) { - if (pages_do_alias((unsigned long)page_address(page), vmaddr)) { - void *kaddr; + unsigned long addr = (unsigned long) page_address(page); - kaddr = kmap_coherent(page, vmaddr); - flush_data_cache_page((unsigned long)kaddr); - kunmap_coherent(); + if (pages_do_alias(addr, vmaddr)) { + if (page_mapped(page) && !Page_dcache_dirty(page)) { + void *kaddr; + + kaddr = kmap_coherent(page, vmaddr); + flush_data_cache_page((unsigned long)kaddr); + kunmap_coherent(); + } else + flush_data_cache_page(addr); } } @@ -122,9 +130,10 @@ } } -static char cache_panic[] __initdata = "Yeee, unsupported cache architecture."; +static char cache_panic[] __cpuinitdata = + "Yeee, unsupported cache architecture."; -void __init cpu_cache_init(void) +void __cpuinit cpu_cache_init(void) { if (cpu_has_3k_cache) { extern void __weak r3k_cache_init(void); diff -Nurd linux-2.6.24/arch/mips/mm/cex-sb1.S mer-smartq-kernel/arch/mips/mm/cex-sb1.S --- linux-2.6.24/arch/mips/mm/cex-sb1.S 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/cex-sb1.S 2009-11-17 12:13:29.000000000 +0100 @@ -34,8 +34,6 @@ * is changed. */ - __INIT - .set mips64 .set noreorder .set noat @@ -51,6 +49,10 @@ * (0x170-0x17f) are used to preserve k0, k1, and ra. 
*/ +#ifndef CONFIG_HOTPLUG_CPU + __INIT /* no __CPUINIT; it's a 2.6.25 thing */ +#endif + LEAF(except_vec2_sb1) /* * If this error is recoverable, we need to exit the handler @@ -142,7 +144,9 @@ END(except_vec2_sb1) +#ifndef CONFIG_HOTPLUG_CPU __FINIT +#endif LEAF(handle_vec2_sb1) mfc0 k0,CP0_CONFIG diff -Nurd linux-2.6.24/arch/mips/mm/c-r3k.c mer-smartq-kernel/arch/mips/mm/c-r3k.c --- linux-2.6.24/arch/mips/mm/c-r3k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/c-r3k.c 2009-11-17 12:13:29.000000000 +0100 @@ -26,7 +26,7 @@ static unsigned long icache_size, dcache_size; /* Size in bytes */ static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */ -unsigned long __init r3k_cache_size(unsigned long ca_flags) +unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags) { unsigned long flags, status, dummy, size; volatile unsigned long *p; @@ -61,7 +61,7 @@ return size * sizeof(*p); } -unsigned long __init r3k_cache_lsize(unsigned long ca_flags) +unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags) { unsigned long flags, status, lsize, i; volatile unsigned long *p; @@ -90,7 +90,7 @@ return lsize * sizeof(*p); } -static void __init r3k_probe_cache(void) +static void __cpuinit r3k_probe_cache(void) { dcache_size = r3k_cache_size(ST0_ISC); if (dcache_size) @@ -307,7 +307,7 @@ r3k_flush_dcache_range(start, start + size); } -void __init r3k_cache_init(void) +void __cpuinit r3k_cache_init(void) { extern void build_clear_page(void); extern void build_copy_page(void); diff -Nurd linux-2.6.24/arch/mips/mm/c-r4k.c mer-smartq-kernel/arch/mips/mm/c-r4k.c --- linux-2.6.24/arch/mips/mm/c-r4k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/c-r4k.c 2009-11-17 12:13:29.000000000 +0100 @@ -93,7 +93,7 @@ blast_dcache32_page(addr); } -static void __init r4k_blast_dcache_page_setup(void) +static void __cpuinit r4k_blast_dcache_page_setup(void) { unsigned long dc_lsize = cpu_dcache_line_size(); @@ -107,7 +107,7 @@ static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); -static void __init r4k_blast_dcache_page_indexed_setup(void) +static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) { unsigned long dc_lsize = cpu_dcache_line_size(); @@ -121,7 +121,7 @@ static void (* r4k_blast_dcache)(void); -static void __init r4k_blast_dcache_setup(void) +static void __cpuinit r4k_blast_dcache_setup(void) { unsigned long dc_lsize = cpu_dcache_line_size(); @@ -206,7 +206,7 @@ static void (* r4k_blast_icache_page)(unsigned long addr); -static void __init r4k_blast_icache_page_setup(void) +static void __cpuinit r4k_blast_icache_page_setup(void) { unsigned long ic_lsize = cpu_icache_line_size(); @@ -223,7 +223,7 @@ static void (* r4k_blast_icache_page_indexed)(unsigned long addr); -static void __init r4k_blast_icache_page_indexed_setup(void) +static void __cpuinit r4k_blast_icache_page_indexed_setup(void) { unsigned long ic_lsize = cpu_icache_line_size(); @@ -247,7 +247,7 @@ static void (* r4k_blast_icache)(void); -static void __init r4k_blast_icache_setup(void) +static void __cpuinit r4k_blast_icache_setup(void) { unsigned long ic_lsize = cpu_icache_line_size(); @@ -268,7 +268,7 @@ static void (* r4k_blast_scache_page)(unsigned long addr); -static void __init r4k_blast_scache_page_setup(void) +static void __cpuinit r4k_blast_scache_page_setup(void) { unsigned long sc_lsize = cpu_scache_line_size(); @@ -286,7 +286,7 @@ static void (* r4k_blast_scache_page_indexed)(unsigned long addr); -static void __init 
r4k_blast_scache_page_indexed_setup(void) +static void __cpuinit r4k_blast_scache_page_indexed_setup(void) { unsigned long sc_lsize = cpu_scache_line_size(); @@ -304,7 +304,7 @@ static void (* r4k_blast_scache)(void); -static void __init r4k_blast_scache_setup(void) +static void __cpuinit r4k_blast_scache_setup(void) { unsigned long sc_lsize = cpu_scache_line_size(); @@ -361,23 +361,36 @@ #endif } +static void r4k__flush_cache_vmap(void) +{ + r4k_blast_dcache(); +} + +static void r4k__flush_cache_vunmap(void) +{ + r4k_blast_dcache(); +} + static inline void local_r4k_flush_cache_range(void * args) { struct vm_area_struct *vma = args; + int exec = vma->vm_flags & VM_EXEC; if (!(has_valid_asid(vma->vm_mm))) return; r4k_blast_dcache(); + if (exec) + r4k_blast_icache(); } static void r4k_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - if (!cpu_has_dc_aliases) - return; + int exec = vma->vm_flags & VM_EXEC; - r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); + if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) + r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); } static inline void local_r4k_flush_cache_mm(void * args) @@ -426,6 +439,7 @@ struct page *page = pfn_to_page(fcp_args->pfn); int exec = vma->vm_flags & VM_EXEC; struct mm_struct *mm = vma->vm_mm; + int map_coherent = 0; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; @@ -459,7 +473,9 @@ * Use kmap_coherent or kmap_atomic to do flushes for * another ASID than the current one. */ - if (cpu_has_dc_aliases) + map_coherent = (cpu_has_dc_aliases && + page_mapped(page) && !Page_dcache_dirty(page)); + if (map_coherent) vaddr = kmap_coherent(page, addr); else vaddr = kmap_atomic(page, KM_USER0); @@ -482,7 +498,7 @@ } if (vaddr) { - if (cpu_has_dc_aliases) + if (map_coherent) kunmap_coherent(); else kunmap_atomic(vaddr, KM_USER0); @@ -697,11 +713,11 @@ } } -static char *way_string[] __initdata = { NULL, "direct mapped", "2-way", +static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way", "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" }; -static void __init probe_pcache(void) +static void __cpuinit probe_pcache(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned int config = read_c0_config(); @@ -1020,7 +1036,7 @@ * executes in KSEG1 space or else you will crash and burn badly. You have * been warned. 
*/ -static int __init probe_scache(void) +static int __cpuinit probe_scache(void) { unsigned long flags, addr, begin, end, pow2; unsigned int config = read_c0_config(); @@ -1099,7 +1115,7 @@ extern int rm7k_sc_init(void); extern int mips_sc_init(void); -static void __init setup_scache(void) +static void __cpuinit setup_scache(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned int config = read_c0_config(); @@ -1210,7 +1226,7 @@ } } -static void __init coherency_setup(void) +static void __cpuinit coherency_setup(void) { change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT); @@ -1242,7 +1258,7 @@ } } -void __init r4k_cache_init(void) +void __cpuinit r4k_cache_init(void) { extern void build_clear_page(void); extern void build_copy_page(void); @@ -1285,6 +1301,10 @@ PAGE_SIZE - 1); else shm_align_mask = PAGE_SIZE-1; + + __flush_cache_vmap = r4k__flush_cache_vmap; + __flush_cache_vunmap = r4k__flush_cache_vunmap; + flush_cache_all = cache_noop; __flush_cache_all = r4k___flush_cache_all; flush_cache_mm = r4k_flush_cache_mm; diff -Nurd linux-2.6.24/arch/mips/mm/c-tx39.c mer-smartq-kernel/arch/mips/mm/c-tx39.c --- linux-2.6.24/arch/mips/mm/c-tx39.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/c-tx39.c 2009-11-17 12:13:29.000000000 +0100 @@ -122,6 +122,16 @@ local_irq_restore(flags); } +static void tx39__flush_cache_vmap(void) +{ + tx39_blast_dcache(); +} + +static void tx39__flush_cache_vunmap(void) +{ + tx39_blast_dcache(); +} + static inline void tx39_flush_cache_all(void) { if (!cpu_has_dc_aliases) @@ -329,7 +339,7 @@ } } -void __init tx39_cache_init(void) +void __cpuinit tx39_cache_init(void) { extern void build_clear_page(void); extern void build_copy_page(void); @@ -344,6 +354,8 @@ switch (current_cpu_type()) { case CPU_TX3912: /* TX39/H core (writethru direct-map cache) */ + __flush_cache_vmap = tx39__flush_cache_vmap; + __flush_cache_vunmap = tx39__flush_cache_vunmap; flush_cache_all = tx39h_flush_icache_all; __flush_cache_all = tx39h_flush_icache_all; flush_cache_mm = (void *) tx39h_flush_icache_all; @@ -369,6 +381,9 @@ write_c0_wired(0); /* set 8 on reset... 
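r4k_cache_init() and tx39_cache_init() above hook the new __flush_cache_vmap/__flush_cache_vunmap operations into the existing MIPS scheme of global function pointers that each cache variant's init routine fills in at boot, so generic code never needs to know which cache it is running on. A toy version of that dispatch pattern, with all names invented:

#include <stdio.h>

/* Generic code calls through this global; variant init fills it in. */
static void (*flush_vmap_hook)(void);

static void variant_a_flush_vmap(void) { puts("variant A: blast dcache"); }
static void variant_b_flush_vmap(void) { puts("variant B: no-op"); }

static void variant_cache_init(int is_variant_a)
{
    flush_vmap_hook = is_variant_a ? variant_a_flush_vmap
                                   : variant_b_flush_vmap;
}

int main(void)
{
    variant_cache_init(1);
    flush_vmap_hook();      /* generic code just calls the hook */
    return 0;
}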
*/ /* board-dependent init code may set WBON */ + __flush_cache_vmap = tx39__flush_cache_vmap; + __flush_cache_vunmap = tx39__flush_cache_vunmap; + flush_cache_all = tx39_flush_cache_all; __flush_cache_all = tx39___flush_cache_all; flush_cache_mm = tx39_flush_cache_mm; diff -Nurd linux-2.6.24/arch/mips/mm/dma-default.c mer-smartq-kernel/arch/mips/mm/dma-default.c --- linux-2.6.24/arch/mips/mm/dma-default.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/dma-default.c 2009-11-17 12:13:29.000000000 +0100 @@ -111,6 +111,7 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { + plat_unmap_dma_mem(dma_handle); free_pages((unsigned long) vaddr, get_order(size)); } @@ -121,6 +122,8 @@ { unsigned long addr = (unsigned long) vaddr; + plat_unmap_dma_mem(dma_handle); + if (!plat_device_is_coherent(dev)) addr = CAC_ADDR(addr); @@ -324,7 +327,6 @@ if (cpu_is_noncoherent_r10000(dev)) __dma_sync((unsigned long)page_address(sg_page(sg)), sg->length, direction); - plat_unmap_dma_mem(sg->dma_address); } } @@ -342,7 +344,6 @@ if (!plat_device_is_coherent(dev)) __dma_sync((unsigned long)page_address(sg_page(sg)), sg->length, direction); - plat_unmap_dma_mem(sg->dma_address); } } diff -Nurd linux-2.6.24/arch/mips/mm/init.c mer-smartq-kernel/arch/mips/mm/init.c --- linux-2.6.24/arch/mips/mm/init.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/init.c 2009-11-17 12:13:29.000000000 +0100 @@ -211,7 +211,8 @@ void *vfrom, *vto; vto = kmap_atomic(to, KM_USER1); - if (cpu_has_dc_aliases && page_mapped(from)) { + if (cpu_has_dc_aliases && + page_mapped(from) && !Page_dcache_dirty(from)) { vfrom = kmap_coherent(from, vaddr); copy_page(vto, vfrom); kunmap_coherent(); @@ -234,7 +235,8 @@ struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { - if (cpu_has_dc_aliases && page_mapped(page)) { + if (cpu_has_dc_aliases && + page_mapped(page) && !Page_dcache_dirty(page)) { void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(vto, src, len); kunmap_coherent(); @@ -253,7 +255,8 @@ struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { - if (cpu_has_dc_aliases && page_mapped(page)) { + if (cpu_has_dc_aliases && + page_mapped(page) && !Page_dcache_dirty(page)) { void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(dst, vfrom, len); kunmap_coherent(); diff -Nurd linux-2.6.24/arch/mips/mm/pg-r4k.c mer-smartq-kernel/arch/mips/mm/pg-r4k.c --- linux-2.6.24/arch/mips/mm/pg-r4k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/pg-r4k.c 2009-11-17 12:13:29.000000000 +0100 @@ -64,21 +64,21 @@ * with 64-bit kernels. The prefetch offsets have been experimentally tuned * an Origin 200. 
*/ -static int pref_offset_clear __initdata = 512; -static int pref_offset_copy __initdata = 256; +static int pref_offset_clear __cpuinitdata = 512; +static int pref_offset_copy __cpuinitdata = 256; -static unsigned int pref_src_mode __initdata; -static unsigned int pref_dst_mode __initdata; +static unsigned int pref_src_mode __cpuinitdata; +static unsigned int pref_dst_mode __cpuinitdata; -static int load_offset __initdata; -static int store_offset __initdata; +static int load_offset __cpuinitdata; +static int store_offset __cpuinitdata; -static unsigned int __initdata *dest, *epc; +static unsigned int __cpuinitdata *dest, *epc; static unsigned int instruction_pending; static union mips_instruction delayed_mi; -static void __init emit_instruction(union mips_instruction mi) +static void __cpuinit emit_instruction(union mips_instruction mi) { if (instruction_pending) *epc++ = delayed_mi.word; @@ -220,7 +220,7 @@ emit_instruction(mi); } -static void __init __build_store_reg(int reg) +static void __cpuinit __build_store_reg(int reg) { union mips_instruction mi; unsigned int width; @@ -343,7 +343,7 @@ flush_delay_slot_or_nop(); } -void __init build_clear_page(void) +void __cpuinit build_clear_page(void) { unsigned int loop_start; unsigned long off; @@ -446,7 +446,7 @@ pr_debug("\t.set pop\n"); } -void __init build_copy_page(void) +void __cpuinit build_copy_page(void) { unsigned int loop_start; unsigned long off; diff -Nurd linux-2.6.24/arch/mips/mm/pg-sb1.c mer-smartq-kernel/arch/mips/mm/pg-sb1.c --- linux-2.6.24/arch/mips/mm/pg-sb1.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/pg-sb1.c 2009-11-17 12:13:29.000000000 +0100 @@ -216,7 +216,7 @@ int i; for (i = 0; i < DM_NUM_CHANNELS; i++) { - const u64 base_val = CPHYSADDR(&page_descr[i]) | + const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) | V_DM_DSCR_BASE_RINGSZ(1); void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE)); @@ -228,11 +228,11 @@ void clear_page(void *page) { - u64 to_phys = CPHYSADDR(page); + u64 to_phys = CPHYSADDR((unsigned long)page); unsigned int cpu = smp_processor_id(); /* if the page is not in KSEG0, use old way */ - if ((long)KSEGX(page) != (long)CKSEG0) + if ((long)KSEGX((unsigned long)page) != (long)CKSEG0) return clear_page_cpu(page); page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM | @@ -252,13 +252,13 @@ void copy_page(void *to, void *from) { - u64 from_phys = CPHYSADDR(from); - u64 to_phys = CPHYSADDR(to); + u64 from_phys = CPHYSADDR((unsigned long)from); + u64 to_phys = CPHYSADDR((unsigned long)to); unsigned int cpu = smp_processor_id(); /* if any page is not in KSEG0, use old way */ - if ((long)KSEGX(to) != (long)CKSEG0 - || (long)KSEGX(from) != (long)CKSEG0) + if ((long)KSEGX((unsigned long)to) != (long)CKSEG0 + || (long)KSEGX((unsigned long)from) != (long)CKSEG0) return copy_page_cpu(to, from); page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST | @@ -293,10 +293,10 @@ EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); -void __init build_clear_page(void) +void __cpuinit build_clear_page(void) { } -void __init build_copy_page(void) +void __cpuinit build_copy_page(void) { } diff -Nurd linux-2.6.24/arch/mips/mm/sc-ip22.c mer-smartq-kernel/arch/mips/mm/sc-ip22.c --- linux-2.6.24/arch/mips/mm/sc-ip22.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/sc-ip22.c 2009-11-17 12:13:29.000000000 +0100 @@ -168,7 +168,7 @@ .bc_inv = indy_sc_wback_invalidate }; -void __init indy_sc_init(void) +void __cpuinit indy_sc_init(void) { if (indy_sc_probe()) { 
indy_sc_enable(); diff -Nurd linux-2.6.24/arch/mips/mm/sc-mips.c mer-smartq-kernel/arch/mips/mm/sc-mips.c --- linux-2.6.24/arch/mips/mm/sc-mips.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/sc-mips.c 2009-11-17 12:13:29.000000000 +0100 @@ -100,7 +100,7 @@ return 1; } -int __init mips_sc_init(void) +int __cpuinit mips_sc_init(void) { int found = mips_sc_probe(); if (found) { @@ -109,4 +109,3 @@ } return found; } - diff -Nurd linux-2.6.24/arch/mips/mm/sc-r5k.c mer-smartq-kernel/arch/mips/mm/sc-r5k.c --- linux-2.6.24/arch/mips/mm/sc-r5k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/sc-r5k.c 2009-11-17 12:13:29.000000000 +0100 @@ -99,7 +99,7 @@ .bc_inv = r5k_dma_cache_inv_sc }; -void __init r5k_sc_init(void) +void __cpuinit r5k_sc_init(void) { if (r5k_sc_probe()) { r5k_sc_enable(); diff -Nurd linux-2.6.24/arch/mips/mm/sc-rm7k.c mer-smartq-kernel/arch/mips/mm/sc-rm7k.c --- linux-2.6.24/arch/mips/mm/sc-rm7k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/sc-rm7k.c 2009-11-17 12:13:29.000000000 +0100 @@ -86,7 +86,7 @@ /* * This function is executed in uncached address space. */ -static __init void __rm7k_sc_enable(void) +static __cpuinit void __rm7k_sc_enable(void) { int i; @@ -107,7 +107,7 @@ } } -static __init void rm7k_sc_enable(void) +static __cpuinit void rm7k_sc_enable(void) { if (read_c0_config() & RM7K_CONF_SE) return; @@ -128,7 +128,7 @@ .bc_inv = rm7k_sc_inv }; -void __init rm7k_sc_init(void) +void __cpuinit rm7k_sc_init(void) { struct cpuinfo_mips *c = ¤t_cpu_data; unsigned int config = read_c0_config(); diff -Nurd linux-2.6.24/arch/mips/mm/tlbex.c mer-smartq-kernel/arch/mips/mm/tlbex.c --- linux-2.6.24/arch/mips/mm/tlbex.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/tlbex.c 2009-11-17 12:13:29.000000000 +0100 @@ -66,7 +66,7 @@ * why; it's not an issue caused by the core RTL. 
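Most hunks in this part of the patch re-annotate per-CPU setup code from __init/__initdata to __cpuinit/__cpuinitdata so that functions such as indy_sc_init(), mips_sc_init() or build_clear_page() stay resident after boot and can run again when a secondary or hot-plugged CPU is brought up. A minimal sketch of how these annotations are meant to be used on this kernel generation (the function and variable names below are illustrative, not part of the patch):

#include <linux/init.h>

/* Data consumed on every CPU bring-up: must survive the freeing of the
 * .init sections, hence __cpuinitdata. */
static int cache_ways __cpuinitdata = 4;

/* Runs for the boot CPU and again for each later CPU; __cpuinit keeps it
 * resident when CPU hotplug is configured (otherwise it degrades to __init). */
static void __cpuinit per_cpu_cache_setup(void)
{
        /* ... program per-CPU cache/TLB state using cache_ways ... */
}

/* Boot-only code may remain __init; calling into __cpuinit from here is fine. */
static int __init boot_setup(void)
{
        per_cpu_cache_setup();
        return 0;
}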
* */ -static __init int __attribute__((unused)) m4kc_tlbp_war(void) +static __cpuinit int __attribute__((unused)) m4kc_tlbp_war(void) { return (current_cpu_data.processor_id & 0xffff00) == (PRID_COMP_MIPS | PRID_IMP_4KC); @@ -140,7 +140,7 @@ | (e) << RE_SH \ | (f) << FUNC_SH) -static __initdata struct insn insn_table[] = { +static __cpuinitdata struct insn insn_table[] = { { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, @@ -193,7 +193,7 @@ #undef M -static __init u32 build_rs(u32 arg) +static __cpuinit u32 build_rs(u32 arg) { if (arg & ~RS_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -201,7 +201,7 @@ return (arg & RS_MASK) << RS_SH; } -static __init u32 build_rt(u32 arg) +static __cpuinit u32 build_rt(u32 arg) { if (arg & ~RT_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -209,7 +209,7 @@ return (arg & RT_MASK) << RT_SH; } -static __init u32 build_rd(u32 arg) +static __cpuinit u32 build_rd(u32 arg) { if (arg & ~RD_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -217,7 +217,7 @@ return (arg & RD_MASK) << RD_SH; } -static __init u32 build_re(u32 arg) +static __cpuinit u32 build_re(u32 arg) { if (arg & ~RE_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -225,7 +225,7 @@ return (arg & RE_MASK) << RE_SH; } -static __init u32 build_simm(s32 arg) +static __cpuinit u32 build_simm(s32 arg) { if (arg > 0x7fff || arg < -0x8000) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -233,7 +233,7 @@ return arg & 0xffff; } -static __init u32 build_uimm(u32 arg) +static __cpuinit u32 build_uimm(u32 arg) { if (arg & ~IMM_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -241,7 +241,7 @@ return arg & IMM_MASK; } -static __init u32 build_bimm(s32 arg) +static __cpuinit u32 build_bimm(s32 arg) { if (arg > 0x1ffff || arg < -0x20000) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -252,7 +252,7 @@ return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); } -static __init u32 build_jimm(u32 arg) +static __cpuinit u32 build_jimm(u32 arg) { if (arg & ~((JIMM_MASK) << 2)) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -260,7 +260,7 @@ return (arg >> 2) & JIMM_MASK; } -static __init u32 build_func(u32 arg) +static __cpuinit u32 build_func(u32 arg) { if (arg & ~FUNC_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -268,7 +268,7 @@ return arg & FUNC_MASK; } -static __init u32 build_set(u32 arg) +static __cpuinit u32 build_set(u32 arg) { if (arg & ~SET_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -280,7 +280,7 @@ * The order of opcode arguments is implicitly left to right, * starting with RS and ending with FUNC or IMM. */ -static void __init build_insn(u32 **buf, enum opcode opc, ...) +static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...) 
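The insn_table entries and the build_rs()/build_rt()/build_simm() helpers above, together with build_insn() whose body follows, form a small run-time assembler: an opcode template from the table is OR-ed with each operand shifted into its field. A hypothetical user-space sketch of that packing; the shift/mask values are assumed here from the standard MIPS I-type layout and are not quoted from the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RS_MASK 0x1f
#define RS_SH   21
#define RT_MASK 0x1f
#define RT_SH   16

static uint32_t build_rs(uint32_t arg)  { return (arg & RS_MASK) << RS_SH; }
static uint32_t build_rt(uint32_t arg)  { return (arg & RT_MASK) << RT_SH; }
static uint32_t build_simm(int32_t arg) { return (uint32_t)arg & 0xffff; }

int main(void)
{
        uint32_t addiu_template = 0x09u << 26;  /* addiu major opcode (MIPS32) */

        /* addiu $k1, $k0, 4  ->  rt = 27, rs = 26, imm = 4 */
        uint32_t word = addiu_template | build_rs(26) | build_rt(27) | build_simm(4);

        printf("%08x\n", word);                 /* prints 275b0004 */
        assert(word == 0x275b0004);
        return 0;
}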
{ struct insn *ip = NULL; unsigned int i; @@ -315,69 +315,69 @@ } #define I_u1u2u3(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void __cpuinit i##op(u32 **buf, unsigned int a, \ unsigned int b, unsigned int c) \ { \ build_insn(buf, insn##op, a, b, c); \ } #define I_u2u1u3(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void __cpuinit i##op(u32 **buf, unsigned int a, \ unsigned int b, unsigned int c) \ { \ build_insn(buf, insn##op, b, a, c); \ } #define I_u3u1u2(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void __cpuinit i##op(u32 **buf, unsigned int a, \ unsigned int b, unsigned int c) \ { \ build_insn(buf, insn##op, b, c, a); \ } #define I_u1u2s3(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void __cpuinit i##op(u32 **buf, unsigned int a, \ unsigned int b, signed int c) \ { \ build_insn(buf, insn##op, a, b, c); \ } #define I_u2s3u1(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void __cpuinit i##op(u32 **buf, unsigned int a, \ signed int b, unsigned int c) \ { \ build_insn(buf, insn##op, c, a, b); \ } #define I_u2u1s3(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void __cpuinit i##op(u32 **buf, unsigned int a, \ unsigned int b, signed int c) \ { \ build_insn(buf, insn##op, b, a, c); \ } #define I_u1u2(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void __cpuinit i##op(u32 **buf, unsigned int a, \ unsigned int b) \ { \ build_insn(buf, insn##op, a, b); \ } #define I_u1s2(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void __cpuinit i##op(u32 **buf, unsigned int a, \ signed int b) \ { \ build_insn(buf, insn##op, a, b); \ } #define I_u1(op) \ - static inline void __init i##op(u32 **buf, unsigned int a) \ + static inline void __cpuinit i##op(u32 **buf, unsigned int a) \ { \ build_insn(buf, insn##op, a); \ } #define I_0(op) \ - static inline void __init i##op(u32 **buf) \ + static inline void __cpuinit i##op(u32 **buf) \ { \ build_insn(buf, insn##op); \ } @@ -457,7 +457,7 @@ enum label_id lab; }; -static __init void build_label(struct label **lab, u32 *addr, +static __cpuinit void build_label(struct label **lab, u32 *addr, enum label_id l) { (*lab)->addr = addr; @@ -526,34 +526,34 @@ #define i_ehb(buf) i_sll(buf, 0, 0, 3) #ifdef CONFIG_64BIT -static __init int __maybe_unused in_compat_space_p(long addr) +static __cpuinit int __maybe_unused in_compat_space_p(long addr) { /* Is this address in 32bit compat space? 
*/ return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); } -static __init int __maybe_unused rel_highest(long val) +static __cpuinit int __maybe_unused rel_highest(long val) { return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; } -static __init int __maybe_unused rel_higher(long val) +static __cpuinit int __maybe_unused rel_higher(long val) { return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; } #endif -static __init int rel_hi(long val) +static __cpuinit int rel_hi(long val) { return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; } -static __init int rel_lo(long val) +static __cpuinit int rel_lo(long val) { return ((val & 0xffff) ^ 0x8000) - 0x8000; } -static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr) +static __cpuinit void i_LA_mostly(u32 **buf, unsigned int rs, long addr) { #ifdef CONFIG_64BIT if (!in_compat_space_p(addr)) { @@ -571,7 +571,7 @@ i_lui(buf, rs, rel_hi(addr)); } -static __init void __maybe_unused i_LA(u32 **buf, unsigned int rs, +static __cpuinit void __maybe_unused i_LA(u32 **buf, unsigned int rs, long addr) { i_LA_mostly(buf, rs, addr); @@ -589,7 +589,7 @@ enum label_id lab; }; -static __init void r_mips_pc16(struct reloc **rel, u32 *addr, +static __cpuinit void r_mips_pc16(struct reloc **rel, u32 *addr, enum label_id l) { (*rel)->addr = addr; @@ -614,7 +614,7 @@ } } -static __init void resolve_relocs(struct reloc *rel, struct label *lab) +static __cpuinit void resolve_relocs(struct reloc *rel, struct label *lab) { struct label *l; @@ -624,7 +624,7 @@ __resolve_relocs(rel, l); } -static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end, +static __cpuinit void move_relocs(struct reloc *rel, u32 *first, u32 *end, long off) { for (; rel->lab != label_invalid; rel++) @@ -632,7 +632,7 @@ rel->addr += off; } -static __init void move_labels(struct label *lab, u32 *first, u32 *end, +static __cpuinit void move_labels(struct label *lab, u32 *first, u32 *end, long off) { for (; lab->lab != label_invalid; lab++) @@ -640,7 +640,7 @@ lab->addr += off; } -static __init void copy_handler(struct reloc *rel, struct label *lab, +static __cpuinit void copy_handler(struct reloc *rel, struct label *lab, u32 *first, u32 *end, u32 *target) { long off = (long)(target - first); @@ -651,7 +651,7 @@ move_labels(lab, first, end, off); } -static __init int __maybe_unused insn_has_bdelay(struct reloc *rel, +static __cpuinit int __maybe_unused insn_has_bdelay(struct reloc *rel, u32 *addr) { for (; rel->lab != label_invalid; rel++) { @@ -665,49 +665,49 @@ } /* convenience functions for labeled branches */ -static void __init __maybe_unused +static void __cpuinit __maybe_unused il_bltz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) { r_mips_pc16(r, *p, l); i_bltz(p, reg, 0); } -static void __init __maybe_unused il_b(u32 **p, struct reloc **r, +static void __cpuinit __maybe_unused il_b(u32 **p, struct reloc **r, enum label_id l) { r_mips_pc16(r, *p, l); i_b(p, 0); } -static void __init il_beqz(u32 **p, struct reloc **r, unsigned int reg, +static void __cpuinit il_beqz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) { r_mips_pc16(r, *p, l); i_beqz(p, reg, 0); } -static void __init __maybe_unused +static void __cpuinit __maybe_unused il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) { r_mips_pc16(r, *p, l); i_beqzl(p, reg, 0); } -static void __init il_bnez(u32 **p, struct reloc **r, unsigned int reg, +static void __cpuinit il_bnez(u32 **p, struct reloc **r, unsigned int 
reg, enum label_id l) { r_mips_pc16(r, *p, l); i_bnez(p, reg, 0); } -static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg, +static void __cpuinit il_bgezl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) { r_mips_pc16(r, *p, l); i_bgezl(p, reg, 0); } -static void __init __maybe_unused +static void __cpuinit __maybe_unused il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) { r_mips_pc16(r, *p, l); @@ -743,16 +743,16 @@ * We deliberately chose a buffer size of 128, so we won't scribble * over anything important on overflow before we panic. */ -static __initdata u32 tlb_handler[128]; +static __cpuinitdata u32 tlb_handler[128]; /* simply assume worst case size for labels and relocs */ -static __initdata struct label labels[128]; -static __initdata struct reloc relocs[128]; +static __cpuinitdata struct label labels[128]; +static __cpuinitdata struct reloc relocs[128]; /* * The R3000 TLB handler is simple. */ -static void __init build_r3000_tlb_refill_handler(void) +static void __cpuinit build_r3000_tlb_refill_handler(void) { long pgdc = (long)pgd_current; u32 *p; @@ -801,7 +801,7 @@ * other one.To keep things simple, we first assume linear space, * then we relocate it to the final handler layout as needed. */ -static __initdata u32 final_handler[64]; +static __cpuinitdata u32 final_handler[64]; /* * Hazards @@ -825,11 +825,12 @@ * * As if we MIPS hackers wouldn't know how to nop pipelines happy ... */ -static __init void __maybe_unused build_tlb_probe_entry(u32 **p) +static __cpuinit void __maybe_unused build_tlb_probe_entry(u32 **p) { switch (current_cpu_type()) { - /* Found by experiment: R4600 v2.0 needs this, too. */ + /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ case CPU_R4600: + case CPU_R4700: case CPU_R5000: case CPU_R5000A: case CPU_NEVADA: @@ -849,7 +850,7 @@ */ enum tlb_write_entry { tlb_random, tlb_indexed }; -static __init void build_tlb_write_entry(u32 **p, struct label **l, +static __cpuinit void build_tlb_write_entry(u32 **p, struct label **l, struct reloc **r, enum tlb_write_entry wmode) { @@ -993,7 +994,7 @@ * TMP and PTR are scratch. * TMP will be clobbered, PTR will hold the pmd entry. */ -static __init void +static __cpuinit void build_get_pmde64(u32 **p, struct label **l, struct reloc **r, unsigned int tmp, unsigned int ptr) { @@ -1054,7 +1055,7 @@ * BVADDR is the faulting address, PTR is scratch. * PTR will hold the pgd for vmalloc. */ -static __init void +static __cpuinit void build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r, unsigned int bvaddr, unsigned int ptr) { @@ -1118,7 +1119,7 @@ * TMP and PTR are scratch. * TMP will be clobbered, PTR will hold the pgd entry. */ -static __init void __maybe_unused +static __cpuinit void __maybe_unused build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) { long pgdc = (long)pgd_current; @@ -1153,7 +1154,7 @@ #endif /* !CONFIG_64BIT */ -static __init void build_adjust_context(u32 **p, unsigned int ctx) +static __cpuinit void build_adjust_context(u32 **p, unsigned int ctx) { unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); @@ -1179,7 +1180,8 @@ i_andi(p, ctx, ctx, mask); } -static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) +static __cpuinit void build_get_ptep(u32 **p, unsigned int tmp, + unsigned int ptr) { /* * Bug workaround for the Nevada. 
It seems as if under certain @@ -1204,7 +1206,7 @@ i_ADDU(p, ptr, ptr, tmp); /* add in offset */ } -static __init void build_update_entries(u32 **p, unsigned int tmp, +static __cpuinit void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) { /* @@ -1247,7 +1249,7 @@ #endif } -static void __init build_r4000_tlb_refill_handler(void) +static void __cpuinit build_r4000_tlb_refill_handler(void) { u32 *p = tlb_handler; struct label *l = labels; @@ -1394,7 +1396,7 @@ u32 __tlb_handler_align handle_tlbs[FASTPATH_SIZE]; u32 __tlb_handler_align handle_tlbm[FASTPATH_SIZE]; -static void __init +static void __cpuinit iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr) { #ifdef CONFIG_SMP @@ -1414,7 +1416,7 @@ #endif } -static void __init +static void __cpuinit iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr, unsigned int mode) { @@ -1474,7 +1476,7 @@ * the page table where this PTE is located, PTE will be re-loaded * with it's original value. */ -static void __init +static void __cpuinit build_pte_present(u32 **p, struct label **l, struct reloc **r, unsigned int pte, unsigned int ptr, enum label_id lid) { @@ -1485,7 +1487,7 @@ } /* Make PTE valid, store result in PTR. */ -static void __init +static void __cpuinit build_make_valid(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr) { @@ -1498,7 +1500,7 @@ * Check if PTE can be written to, if not branch to LABEL. Regardless * restore PTE with value from PTR when done. */ -static void __init +static void __cpuinit build_pte_writable(u32 **p, struct label **l, struct reloc **r, unsigned int pte, unsigned int ptr, enum label_id lid) { @@ -1511,7 +1513,7 @@ /* Make PTE writable, update software status bits as well, then store * at PTR. */ -static void __init +static void __cpuinit build_make_write(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr) { @@ -1525,7 +1527,7 @@ * Check if PTE can be modified, if not branch to LABEL. Regardless * restore PTE with value from PTR when done. */ -static void __init +static void __cpuinit build_pte_modifiable(u32 **p, struct label **l, struct reloc **r, unsigned int pte, unsigned int ptr, enum label_id lid) { @@ -1542,7 +1544,7 @@ * This places the pte into ENTRYLO0 and writes it with tlbwi. * Then it returns. */ -static void __init +static void __cpuinit build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) { i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ @@ -1558,7 +1560,7 @@ * may have the probe fail bit set as a result of a trap on a * kseg2 access, i.e. without refill. Then it returns. 
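The rel_hi()/rel_lo() helpers re-annotated earlier in this file split an address into the immediates of a lui/addiu pair, with the ^0x8000 - 0x8000 trick compensating for addiu's sign extension. A quick user-space check of that arithmetic, with the expressions copied verbatim from the patch:

#include <assert.h>

static int rel_hi(long val)
{
        return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static int rel_lo(long val)
{
        return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

int main(void)
{
        long addr = (int)0x8000b234;    /* a KSEG0 address, sign-extended */

        /* lui loads rel_hi into the upper half, addiu then adds rel_lo;
         * together they reproduce the original address. */
        assert(rel_hi(addr) * 65536L + rel_lo(addr) == addr);
        return 0;
}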
*/ -static void __init +static void __cpuinit build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r, unsigned int pte, unsigned int tmp) { @@ -1575,7 +1577,7 @@ i_rfe(p); /* branch delay */ } -static void __init +static void __cpuinit build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, unsigned int ptr) { @@ -1595,7 +1597,7 @@ i_tlbp(p); /* load delay */ } -static void __init build_r3000_tlb_load_handler(void) +static void __cpuinit build_r3000_tlb_load_handler(void) { u32 *p = handle_tlbl; struct label *l = labels; @@ -1630,7 +1632,7 @@ pr_debug("\t.set pop\n"); } -static void __init build_r3000_tlb_store_handler(void) +static void __cpuinit build_r3000_tlb_store_handler(void) { u32 *p = handle_tlbs; struct label *l = labels; @@ -1665,7 +1667,7 @@ pr_debug("\t.set pop\n"); } -static void __init build_r3000_tlb_modify_handler(void) +static void __cpuinit build_r3000_tlb_modify_handler(void) { u32 *p = handle_tlbm; struct label *l = labels; @@ -1703,7 +1705,7 @@ /* * R4000 style TLB load/store/modify handlers. */ -static void __init +static void __cpuinit build_r4000_tlbchange_handler_head(u32 **p, struct label **l, struct reloc **r, unsigned int pte, unsigned int ptr) @@ -1728,7 +1730,7 @@ build_tlb_probe_entry(p); } -static void __init +static void __cpuinit build_r4000_tlbchange_handler_tail(u32 **p, struct label **l, struct reloc **r, unsigned int tmp, unsigned int ptr) @@ -1745,7 +1747,7 @@ #endif } -static void __init build_r4000_tlb_load_handler(void) +static void __cpuinit build_r4000_tlb_load_handler(void) { u32 *p = handle_tlbl; struct label *l = labels; @@ -1790,7 +1792,7 @@ pr_debug("\t.set pop\n"); } -static void __init build_r4000_tlb_store_handler(void) +static void __cpuinit build_r4000_tlb_store_handler(void) { u32 *p = handle_tlbs; struct label *l = labels; @@ -1826,7 +1828,7 @@ pr_debug("\t.set pop\n"); } -static void __init build_r4000_tlb_modify_handler(void) +static void __cpuinit build_r4000_tlb_modify_handler(void) { u32 *p = handle_tlbm; struct label *l = labels; @@ -1863,7 +1865,7 @@ pr_debug("\t.set pop\n"); } -void __init build_tlb_refill_handler(void) +void __cpuinit build_tlb_refill_handler(void) { /* * The refill handler is generated per-CPU, multi-node systems @@ -1909,7 +1911,7 @@ } } -void __init flush_tlb_handlers(void) +void __cpuinit flush_tlb_handlers(void) { flush_icache_range((unsigned long)handle_tlbl, (unsigned long)handle_tlbl + sizeof(handle_tlbl)); diff -Nurd linux-2.6.24/arch/mips/mm/tlb-r3k.c mer-smartq-kernel/arch/mips/mm/tlb-r3k.c --- linux-2.6.24/arch/mips/mm/tlb-r3k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/tlb-r3k.c 2009-11-17 12:13:29.000000000 +0100 @@ -246,10 +246,6 @@ old_pagemask = read_c0_pagemask(); w = read_c0_wired(); write_c0_wired(w + 1); - if (read_c0_wired() != w + 1) { - printk("[tlbwired] No WIRED reg?\n"); - return; - } write_c0_index(w << 8); write_c0_pagemask(pagemask); write_c0_entryhi(entryhi); @@ -281,7 +277,7 @@ } } -void __init tlb_init(void) +void __cpuinit tlb_init(void) { local_flush_tlb_all(); diff -Nurd linux-2.6.24/arch/mips/mm/tlb-r4k.c mer-smartq-kernel/arch/mips/mm/tlb-r4k.c --- linux-2.6.24/arch/mips/mm/tlb-r4k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/tlb-r4k.c 2009-11-17 12:13:29.000000000 +0100 @@ -388,7 +388,7 @@ * lifetime of the system */ -static int temp_tlb_entry __initdata; +static int temp_tlb_entry __cpuinitdata; __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, unsigned long 
entryhi, unsigned long pagemask) @@ -427,7 +427,7 @@ return ret; } -static void __init probe_tlb(unsigned long config) +static void __cpuinit probe_tlb(unsigned long config) { struct cpuinfo_mips *c = ¤t_cpu_data; unsigned int reg; @@ -455,7 +455,7 @@ c->tlbsize = ((reg >> 25) & 0x3f) + 1; } -static int __initdata ntlb = 0; +static int __cpuinitdata ntlb = 0; static int __init set_ntlb(char *str) { get_option(&str, &ntlb); @@ -464,7 +464,7 @@ __setup("ntlb=", set_ntlb); -void __init tlb_init(void) +void __cpuinit tlb_init(void) { unsigned int config = read_c0_config(); @@ -473,12 +473,15 @@ * - On R4600 1.7 the tlbp never hits for pages smaller than * the value in the c0_pagemask register. * - The entire mm handling assumes the c0_pagemask register to - * be set for 4kb pages. + * be set to fixed-size pages. */ probe_tlb(config); write_c0_pagemask(PM_DEFAULT_MASK); write_c0_wired(0); - write_c0_framemask(0); + if (current_cpu_type() == CPU_R10000 || + current_cpu_type() == CPU_R12000 || + current_cpu_type() == CPU_R14000) + write_c0_framemask(0); temp_tlb_entry = current_cpu_data.tlbsize - 1; /* From this point on the ARC firmware is dead. */ diff -Nurd linux-2.6.24/arch/mips/mm/tlb-r8k.c mer-smartq-kernel/arch/mips/mm/tlb-r8k.c --- linux-2.6.24/arch/mips/mm/tlb-r8k.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/mm/tlb-r8k.c 2009-11-17 12:13:29.000000000 +0100 @@ -214,14 +214,14 @@ local_irq_restore(flags); } -static void __init probe_tlb(unsigned long config) +static void __cpuinit probe_tlb(unsigned long config) { struct cpuinfo_mips *c = ¤t_cpu_data; c->tlbsize = 3 * 128; /* 3 sets each 128 entries */ } -void __init tlb_init(void) +void __cpuinit tlb_init(void) { unsigned int config = read_c0_config(); unsigned long status; diff -Nurd linux-2.6.24/arch/mips/pci/pci-bcm1480.c mer-smartq-kernel/arch/mips/pci/pci-bcm1480.c --- linux-2.6.24/arch/mips/pci/pci-bcm1480.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/pci/pci-bcm1480.c 2009-11-17 12:13:29.000000000 +0100 @@ -185,8 +185,8 @@ static struct resource bcm1480_io_resource = { .name = "BCM1480 PCI I/O", - .start = 0x2c000000UL, - .end = 0x2dffffffUL, + .start = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, + .end = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES + 0x1ffffffUL, .flags = IORESOURCE_IO, }; @@ -194,6 +194,7 @@ .pci_ops = &bcm1480_pci_ops, .mem_resource = &bcm1480_mem_resource, .io_resource = &bcm1480_io_resource, + .io_offset = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, }; @@ -249,8 +250,10 @@ * XXX ehs: Should this happen in PCI Device mode? 
*/ - set_io_port_base((unsigned long) - ioremap(A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, 65536)); + bcm1480_controller.io_map_base = (unsigned long) + ioremap(A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, 65536); + bcm1480_controller.io_map_base -= bcm1480_controller.io_offset; + set_io_port_base(bcm1480_controller.io_map_base); isa_slot_offset = (unsigned long) ioremap(A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES, 1024*1024); diff -Nurd linux-2.6.24/arch/mips/pci/pci-bcm1480ht.c mer-smartq-kernel/arch/mips/pci/pci-bcm1480ht.c --- linux-2.6.24/arch/mips/pci/pci-bcm1480ht.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/pci/pci-bcm1480ht.c 2009-11-17 12:13:29.000000000 +0100 @@ -180,8 +180,8 @@ static struct resource bcm1480ht_io_resource = { .name = "BCM1480 HT I/O", - .start = 0x00000000UL, - .end = 0x01ffffffUL, + .start = A_BCM1480_PHYS_HT_IO_MATCH_BYTES, + .end = A_BCM1480_PHYS_HT_IO_MATCH_BYTES + 0x01ffffffUL, .flags = IORESOURCE_IO, }; @@ -191,29 +191,22 @@ .io_resource = &bcm1480ht_io_resource, .index = 1, .get_busno = bcm1480ht_pcibios_get_busno, + .io_offset = A_BCM1480_PHYS_HT_IO_MATCH_BYTES, }; static int __init bcm1480ht_pcibios_init(void) { - uint32_t cmdreg; - ht_cfg_space = ioremap(A_BCM1480_PHYS_HT_CFG_MATCH_BITS, 16*1024*1024); - /* - * See if the PCI bus has been configured by the firmware. - */ - cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), - PCI_COMMAND)); - if (!(cmdreg & PCI_COMMAND_MASTER)) { - printk("HT: Skipping HT probe. Bus is not initialized.\n"); - iounmap(ht_cfg_space); - return 1; /* XXX */ - } + /* CFE doesn't always init all HT paths, so we always scan */ bcm1480ht_bus_status |= PCI_BUS_ENABLED; ht_eoi_space = (unsigned long) ioremap(A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES, 4 * 1024 * 1024); + bcm1480ht_controller.io_map_base = (unsigned long) + ioremap(A_BCM1480_PHYS_HT_IO_MATCH_BYTES, 65536); + bcm1480ht_controller.io_map_base -= bcm1480ht_controller.io_offset; register_pci_controller(&bcm1480ht_controller); diff -Nurd linux-2.6.24/arch/mips/pci/pci.c mer-smartq-kernel/arch/mips/pci/pci.c --- linux-2.6.24/arch/mips/pci/pci.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/pci/pci.c 2009-11-17 12:13:29.000000000 +0100 @@ -177,6 +177,11 @@ continue; r = &dev->resource[idx]; + if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) + continue; + if ((idx == PCI_ROM_RESOURCE) && + (!(r->flags & IORESOURCE_ROM_ENABLE))) + continue; if (!r->start && r->end) { printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev)); return -EINVAL; @@ -186,8 +191,6 @@ if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } - if (dev->resource[PCI_ROM_RESOURCE].start) - cmd |= PCI_COMMAND_MEMORY; if (cmd != old_cmd) { printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); @@ -254,7 +257,7 @@ } } -void pcibios_fixup_bus(struct pci_bus *bus) +void __devinit pcibios_fixup_bus(struct pci_bus *bus) { /* Propagate hose info into the subordinate devices. 
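In the bcm1480 hunks above, the I/O resource now starts at the physical match-bytes base, io_offset carries that same base, and set_io_port_base() is given the ioremap()ed window minus io_offset. Since the MIPS port accessors add the port number to the base installed by set_io_port_base(), a port number taken from that resource lands back inside the 64 KB window. A purely illustrative arithmetic sketch (both constants are made up, not the real A_BCM1480_PHYS_PCI_IO_MATCH_BYTES value):

#include <assert.h>

int main(void)
{
        unsigned long io_offset    = 0x30000000UL;      /* hypothetical match-bytes base */
        unsigned long ioremap_base = 0xc0001000UL;      /* hypothetical ioremap() result */
        unsigned long io_map_base  = ioremap_base - io_offset;
        unsigned long port         = io_offset + 0x3f8; /* a port inside the resource */

        /* inb(port) resolves to io_map_base + port, i.e. offset 0x3f8
         * within the remapped window. */
        assert(io_map_base + port == ioremap_base + 0x3f8);
        return 0;
}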
*/ diff -Nurd linux-2.6.24/arch/mips/pci/pci-ip27.c mer-smartq-kernel/arch/mips/pci/pci-ip27.c --- linux-2.6.24/arch/mips/pci/pci-ip27.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/pci/pci-ip27.c 2009-11-17 12:13:29.000000000 +0100 @@ -40,7 +40,7 @@ extern struct pci_ops bridge_pci_ops; -int __init bridge_probe(nasid_t nasid, int widget_id, int masterwid) +int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid) { unsigned long offset = NODE_OFFSET(nasid); struct bridge_controller *bc; @@ -136,25 +136,47 @@ */ int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { + return 0; +} + +/* Most MIPS systems have straight-forward swizzling needs. */ +static inline u8 bridge_swizzle(u8 pin, u8 slot) +{ + return (((pin - 1) + slot) % 4) + 1; +} + +static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev) +{ + while (dev->bus->parent) { + /* Move up the chain of bridges. */ + dev = dev->bus->self; + } + + return dev; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); - int irq = bc->pci_int[slot]; + struct pci_dev *rdev = bridge_root_dev(dev); + int slot = PCI_SLOT(rdev->devfn); + int irq; + irq = bc->pci_int[slot]; if (irq == -1) { - irq = bc->pci_int[slot] = request_bridge_irq(bc); + irq = request_bridge_irq(bc); if (irq < 0) - panic("Can't allocate interrupt for PCI device %s\n", - pci_name(dev)); + return irq; + + bc->pci_int[slot] = irq; } irq_to_bridge[irq] = bc; irq_to_slot[irq] = slot; - return irq; -} + dev->irq = irq; -/* Do platform specific device initialization at pci_enable_device() time */ -int pcibios_plat_dev_init(struct pci_dev *dev) -{ return 0; } diff -Nurd linux-2.6.24/arch/mips/pmc-sierra/yosemite/smp.c mer-smartq-kernel/arch/mips/pmc-sierra/yosemite/smp.c --- linux-2.6.24/arch/mips/pmc-sierra/yosemite/smp.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/pmc-sierra/yosemite/smp.c 2009-11-17 12:13:29.000000000 +0100 @@ -7,10 +7,10 @@ #define LAUNCHSTACK_SIZE 256 -static __initdata DEFINE_SPINLOCK(launch_lock); +static __cpuinitdata DEFINE_SPINLOCK(launch_lock); -static unsigned long secondary_sp __initdata; -static unsigned long secondary_gp __initdata; +static unsigned long secondary_sp __cpuinitdata; +static unsigned long secondary_gp __cpuinitdata; static unsigned char launchstack[LAUNCHSTACK_SIZE] __initdata __attribute__((aligned(2 * sizeof(long)))); diff -Nurd linux-2.6.24/arch/mips/sgi-ip22/ip22-int.c mer-smartq-kernel/arch/mips/sgi-ip22/ip22-int.c --- linux-2.6.24/arch/mips/sgi-ip22/ip22-int.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/sgi-ip22/ip22-int.c 2009-11-17 12:13:29.000000000 +0100 @@ -68,7 +68,7 @@ sgint->imask1 |= (1 << (irq - SGINT_LOCAL1)); } -void disable_local1_irq(unsigned int irq) +static void disable_local1_irq(unsigned int irq) { sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1)); } @@ -87,7 +87,7 @@ sgint->cmeimask0 |= (1 << (irq - SGINT_LOCAL2)); } -void disable_local2_irq(unsigned int irq) +static void disable_local2_irq(unsigned int irq) { sgint->cmeimask0 &= ~(1 << (irq - SGINT_LOCAL2)); if (!sgint->cmeimask0) @@ -108,7 +108,7 @@ sgint->cmeimask1 |= (1 << (irq - SGINT_LOCAL3)); } -void disable_local3_irq(unsigned int irq) +static void disable_local3_irq(unsigned int irq) { sgint->cmeimask1 &= ~(1 << (irq - SGINT_LOCAL3)); if (!sgint->cmeimask1) @@ -344,6 +344,6 @@ #ifdef CONFIG_EISA if 
(ip22_is_fullhouse()) /* Only Indigo-2 has EISA stuff */ - ip22_eisa_init(); + ip22_eisa_init(); #endif } diff -Nurd linux-2.6.24/arch/mips/sgi-ip22/ip22-platform.c mer-smartq-kernel/arch/mips/sgi-ip22/ip22-platform.c --- linux-2.6.24/arch/mips/sgi-ip22/ip22-platform.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/sgi-ip22/ip22-platform.c 2009-11-17 12:13:29.000000000 +0100 @@ -150,7 +150,7 @@ return res; /* Second HPC is missing? */ - if (!ip22_is_fullhouse() || + if (ip22_is_fullhouse() || get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) return 0; diff -Nurd linux-2.6.24/arch/mips/sgi-ip27/ip27-init.c mer-smartq-kernel/arch/mips/sgi-ip27/ip27-init.c --- linux-2.6.24/arch/mips/sgi-ip27/ip27-init.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/sgi-ip27/ip27-init.c 2009-11-17 12:13:29.000000000 +0100 @@ -54,7 +54,7 @@ extern void xtalk_probe_node(cnodeid_t nid); -static void __init per_hub_init(cnodeid_t cnode) +static void __cpuinit per_hub_init(cnodeid_t cnode) { struct hub_data *hub = hub_data(cnode); nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); diff -Nurd linux-2.6.24/arch/mips/sgi-ip27/ip27-timer.c mer-smartq-kernel/arch/mips/sgi-ip27/ip27-timer.c --- linux-2.6.24/arch/mips/sgi-ip27/ip27-timer.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/sgi-ip27/ip27-timer.c 2009-11-17 12:13:29.000000000 +0100 @@ -158,7 +158,7 @@ } } -unsigned int rt_timer_irq; +int rt_timer_irq; static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id) { @@ -219,7 +219,7 @@ static void __init hub_rt_clock_event_global_init(void) { - unsigned int irq; + int irq; do { smp_wmb(); @@ -285,7 +285,7 @@ set_c0_status(SRB_TIMOCLK); } -void __init hub_rtc_init(cnodeid_t cnode) +void __cpuinit hub_rtc_init(cnodeid_t cnode) { /* * We only need to initialize the current node. 
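The pci-ip27.c hunk above moves bridge interrupt allocation out of pcibios_map_irq() and into pcibios_plat_dev_init(), so the IRQ is requested at pci_enable_device() time and a failure can be returned instead of panicking; it also adds the usual interrupt-pin swizzle. A small user-space check of that swizzle formula, copied from the patch:

#include <assert.h>

/* bridge_swizzle() as added in pci-ip27.c: routes INTA..INTD (pin 1..4)
 * of a device in a given slot through a PCI-PCI bridge. */
static unsigned char bridge_swizzle(unsigned char pin, unsigned char slot)
{
        return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
        /* INTB (pin 2) of a device in slot 3 appears as INTA upstream. */
        assert(bridge_swizzle(2, 3) == 1);
        /* A device in slot 0 keeps its pin unchanged. */
        assert(bridge_swizzle(4, 0) == 4);
        return 0;
}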
diff -Nurd linux-2.6.24/arch/mips/sgi-ip27/ip27-xtalk.c mer-smartq-kernel/arch/mips/sgi-ip27/ip27-xtalk.c --- linux-2.6.24/arch/mips/sgi-ip27/ip27-xtalk.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/sgi-ip27/ip27-xtalk.c 2009-11-17 12:13:29.000000000 +0100 @@ -22,7 +22,7 @@ extern int bridge_probe(nasid_t nasid, int widget, int masterwid); -static int __init probe_one_port(nasid_t nasid, int widget, int masterwid) +static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid) { widgetreg_t widget_id; xwidget_part_num_t partnum; @@ -46,7 +46,7 @@ return 0; } -static int __init xbow_probe(nasid_t nasid) +static int __cpuinit xbow_probe(nasid_t nasid) { lboard_t *brd; klxbow_t *xbow_p; @@ -99,7 +99,7 @@ return 0; } -void __init xtalk_probe_node(cnodeid_t nid) +void __cpuinit xtalk_probe_node(cnodeid_t nid) { volatile u64 hubreg; nasid_t nasid; diff -Nurd linux-2.6.24/arch/mips/sgi-ip32/ip32-irq.c mer-smartq-kernel/arch/mips/sgi-ip32/ip32-irq.c --- linux-2.6.24/arch/mips/sgi-ip32/ip32-irq.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/sgi-ip32/ip32-irq.c 2009-11-17 12:13:29.000000000 +0100 @@ -425,6 +425,11 @@ BUILD_BUG_ON(MACEISA_SERIAL2_RDMAOR_IRQ - MACEISA_AUDIO_SW_IRQ != 31); crime_int = crime->istat & crime_mask; + + /* crime sometime delivers spurious interrupts, ignore them */ + if (unlikely(crime_int == 0)) + return; + irq = MACE_VID_IN1_IRQ + __ffs(crime_int); if (crime_int & CRIME_MACEISA_INT_MASK) { diff -Nurd linux-2.6.24/arch/mips/sibyte/bcm1480/irq.c mer-smartq-kernel/arch/mips/sibyte/bcm1480/irq.c --- linux-2.6.24/arch/mips/sibyte/bcm1480/irq.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/sibyte/bcm1480/irq.c 2009-11-17 12:13:29.000000000 +0100 @@ -25,6 +25,7 @@ #include #include +#include #include #include #include diff -Nurd linux-2.6.24/arch/mips/sibyte/sb1250/irq.c mer-smartq-kernel/arch/mips/sibyte/sb1250/irq.c --- linux-2.6.24/arch/mips/sibyte/sb1250/irq.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/sibyte/sb1250/irq.c 2009-11-17 12:13:29.000000000 +0100 @@ -26,6 +26,7 @@ #include #include +#include #include #include #include diff -Nurd linux-2.6.24/arch/mips/vr41xx/common/irq.c mer-smartq-kernel/arch/mips/vr41xx/common/irq.c --- linux-2.6.24/arch/mips/vr41xx/common/irq.c 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/arch/mips/vr41xx/common/irq.c 2009-11-17 12:13:29.000000000 +0100 @@ -72,6 +72,7 @@ cascade = irq_cascade + irq; if (cascade->get_irq != NULL) { unsigned int source_irq = irq; + int ret; desc = irq_desc + source_irq; if (desc->chip->mask_ack) desc->chip->mask_ack(source_irq); @@ -79,8 +80,9 @@ desc->chip->mask(source_irq); desc->chip->ack(source_irq); } - irq = cascade->get_irq(irq); - if (irq < 0) + ret = cascade->get_irq(irq); + irq = ret; + if (ret < 0) atomic_inc(&irq_err_count); else irq_dispatch(irq); diff -Nurd linux-2.6.24/drivers/net/Kconfig mer-smartq-kernel/drivers/net/Kconfig --- linux-2.6.24/drivers/net/Kconfig 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/drivers/net/Kconfig 2009-11-17 18:05:55.000000000 +0100 @@ -2348,6 +2348,13 @@ Some boards that use the Discovery chipset are the Momenco Ocelot C and Jaguar ATX and Pegasos II. +config TITAN_GE + bool "PMC-Sierra TITAN Gigabit Ethernet Support" + depends on PMC_YOSEMITE + help + This enables support for the the integrated ethernet of + PMC-Sierra's Titan SoC. 
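The vr41xx irq-cascade hunk and the ip27-timer rt_timer_irq change above fix the same class of bug: a negative error code stored in an unsigned variable can never test as less than zero. A user-space sketch of the pitfall and of the patched pattern:

#include <assert.h>

static int get_irq_stub(void)
{
        return -1;                      /* pretend the cascade lookup failed */
}

int main(void)
{
        unsigned int irq;
        int ret;

        irq = get_irq_stub();           /* -1 wraps to 0xffffffff */
        assert(!(irq < 0));             /* the old error check can never fire */

        ret = get_irq_stub();           /* the patched code keeps the signed value */
        if (ret < 0)
                return 0;               /* error path now reachable */
        irq = ret;
        return (int)irq;
}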
+ config QLA3XXX tristate "QLogic QLA3XXX Network Driver Support" depends on PCI diff -Nurd linux-2.6.24/drivers/net/Makefile mer-smartq-kernel/drivers/net/Makefile --- linux-2.6.24/drivers/net/Makefile 2008-01-24 23:58:37.000000000 +0100 +++ mer-smartq-kernel/drivers/net/Makefile 2009-11-17 12:13:31.000000000 +0100 @@ -124,6 +124,8 @@ obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o obj-$(CONFIG_QLA3XXX) += qla3xxx.o +obj-$(CONFIG_TITAN_GE) += titan_mdio.o titan_ge.o + obj-$(CONFIG_PPP) += ppp_generic.o obj-$(CONFIG_PPP_ASYNC) += ppp_async.o obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o diff -Nurd linux-2.6.24/drivers/net/titan_ge.c mer-smartq-kernel/drivers/net/titan_ge.c --- linux-2.6.24/drivers/net/titan_ge.c 1970-01-01 01:00:00.000000000 +0100 +++ mer-smartq-kernel/drivers/net/titan_ge.c 2009-11-17 12:13:31.000000000 +0100 @@ -0,0 +1,2069 @@ +/* + * drivers/net/titan_ge.c - Driver for Titan ethernet ports + * + * Copyright (C) 2003 PMC-Sierra Inc. + * Author : Manish Lachwani (lachwani@pmc-sierra.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + * The MAC unit of the Titan consists of the following: + * + * -> XDMA Engine to move data to from the memory to the MAC packet FIFO + * -> FIFO is where the incoming and outgoing data is placed + * -> TRTG is the unit that pulls the data from the FIFO for Tx and pushes + * the data into the FIFO for Rx + * -> TMAC is the outgoing MAC interface and RMAC is the incoming. 
+ * -> AFX is the address filtering block + * -> GMII block to communicate with the PHY + * + * Rx will look like the following: + * GMII --> RMAC --> AFX --> TRTG --> Rx FIFO --> XDMA --> CPU memory + * + * Tx will look like the following: + * CPU memory --> XDMA --> Tx FIFO --> TRTG --> TMAC --> GMII + * + * The Titan driver has support for the following performance features: + * -> Rx side checksumming + * -> Jumbo Frames + * -> Interrupt Coalscing + * -> Rx NAPI + * -> SKB Recycling + * -> Transmit/Receive descriptors in SRAM + * -> Fast routing for IP forwarding + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* For MII specifc registers, titan_mdio.h should be included */ +#include + +#include +#include +#include +#include +#include +#include + +#include "titan_ge.h" +#include "titan_mdio.h" + +/* Static Function Declarations */ +static int titan_ge_eth_open(struct net_device *); +static void titan_ge_eth_stop(struct net_device *); +static struct net_device_stats *titan_ge_get_stats(struct net_device *); +static int titan_ge_init_rx_desc_ring(titan_ge_port_info *, int, int, + unsigned long, unsigned long, + unsigned long); +static int titan_ge_init_tx_desc_ring(titan_ge_port_info *, int, + unsigned long, unsigned long); + +static int titan_ge_open(struct net_device *); +static int titan_ge_start_xmit(struct sk_buff *, struct net_device *); +static int titan_ge_stop(struct net_device *); + +static unsigned long titan_ge_tx_coal(unsigned long, int); + +static void titan_ge_port_reset(unsigned int); +static int titan_ge_free_tx_queue(titan_ge_port_info *); +static int titan_ge_rx_task(struct net_device *, titan_ge_port_info *); +static int titan_ge_port_start(struct net_device *, titan_ge_port_info *); + +static int titan_ge_return_tx_desc(titan_ge_port_info *, int); + +/* + * Some configuration for the FIFO and the XDMA channel needs + * to be done only once for all the ports. This flag controls + * that + */ +static unsigned long config_done; + +/* + * One time out of memory flag + */ +static unsigned int oom_flag; + +static int titan_ge_poll(struct net_device *netdev, int *budget); + +static int titan_ge_receive_queue(struct net_device *, unsigned int); + +static struct platform_device *titan_ge_device[3]; + +/* MAC Address */ +extern unsigned char titan_ge_mac_addr_base[6]; + +unsigned long titan_ge_base; +static unsigned long titan_ge_sram; + +static char titan_string[] = "titan"; + +/* + * The Titan GE has two alignment requirements: + * -> skb->data to be cacheline aligned (32 byte) + * -> IP header alignment to 16 bytes + * + * The latter is not implemented. So, that results in an extra copy on + * the Rx. This is a big performance hog. For the former case, the + * dev_alloc_skb() has been replaced with titan_ge_alloc_skb(). The size + * requested is calculated: + * + * Ethernet Frame Size : 1518 + * Ethernet Header : 14 + * Future Titan change for IP header alignment : 2 + * + * Hence, we allocate (1518 + 14 + 2+ 64) = 1580 bytes. For IP header + * alignment, we use skb_reserve(). 
+ */ + +#define ALIGNED_RX_SKB_ADDR(addr) \ + ((((unsigned long)(addr) + (64UL - 1UL)) \ + & ~(64UL - 1UL)) - (unsigned long)(addr)) + +#define titan_ge_alloc_skb(__length, __gfp_flags) \ +({ struct sk_buff *__skb; \ + __skb = alloc_skb((__length) + 64, (__gfp_flags)); \ + if(__skb) { \ + int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \ + if(__offset) \ + skb_reserve(__skb, __offset); \ + } \ + __skb; \ +}) + +/* + * Configure the GMII block of the Titan based on what the PHY tells us + */ +static void titan_ge_gmii_config(int port_num) +{ + unsigned int reg_data = 0, phy_reg; + int err; + + err = titan_ge_mdio_read(port_num, TITAN_GE_MDIO_PHY_STATUS, &phy_reg); + + if (err == TITAN_GE_MDIO_ERROR) { + printk(KERN_ERR + "Could not read PHY control register 0x11 \n"); + printk(KERN_ERR + "Setting speed to 1000 Mbps and Duplex to Full \n"); + + return; + } + + err = titan_ge_mdio_write(port_num, TITAN_GE_MDIO_PHY_IE, 0); + + if (phy_reg & 0x8000) { + if (phy_reg & 0x2000) { + /* Full Duplex and 1000 Mbps */ + TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE + + (port_num << 12)), 0x201); + } else { + /* Half Duplex and 1000 Mbps */ + TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE + + (port_num << 12)), 0x2201); + } + } + if (phy_reg & 0x4000) { + if (phy_reg & 0x2000) { + /* Full Duplex and 100 Mbps */ + TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE + + (port_num << 12)), 0x100); + } else { + /* Half Duplex and 100 Mbps */ + TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE + + (port_num << 12)), 0x2100); + } + } + reg_data = TITAN_GE_READ(TITAN_GE_GMII_CONFIG_GENERAL + + (port_num << 12)); + reg_data |= 0x3; + TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_GENERAL + + (port_num << 12)), reg_data); +} + +/* + * Enable the TMAC if it is not + */ +static void titan_ge_enable_tx(unsigned int port_num) +{ + unsigned long reg_data; + + reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + (port_num << 12)); + if (!(reg_data & 0x8000)) { + printk("TMAC disabled for port %d!! 
\n", port_num); + + reg_data |= 0x0001; /* Enable TMAC */ + reg_data |= 0x4000; /* CRC Check Enable */ + reg_data |= 0x2000; /* Padding enable */ + reg_data |= 0x0800; /* CRC Add enable */ + reg_data |= 0x0080; /* PAUSE frame */ + + TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 + + (port_num << 12)), reg_data); + } +} + +/* + * Tx Timeout function + */ +static void titan_ge_tx_timeout(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + + printk(KERN_INFO "%s: TX timeout ", netdev->name); + printk(KERN_INFO "Resetting card \n"); + + /* Do the reset outside of interrupt context */ + schedule_work(&titan_ge_eth->tx_timeout_task); +} + +/* + * Update the AFX tables for UC and MC for slice 0 only + */ +static void titan_ge_update_afx(titan_ge_port_info * titan_ge_eth) +{ + int port = titan_ge_eth->port_num; + unsigned int i; + volatile unsigned long reg_data = 0; + u8 p_addr[6]; + + memcpy(p_addr, titan_ge_eth->port_mac_addr, 6); + + /* Set the MAC address here for TMAC and RMAC */ + TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port << 12)), + ((p_addr[5] << 8) | p_addr[4])); + TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port << 12)), + ((p_addr[3] << 8) | p_addr[2])); + TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port << 12)), + ((p_addr[1] << 8) | p_addr[0])); + + TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port << 12)), + ((p_addr[5] << 8) | p_addr[4])); + TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port << 12)), + ((p_addr[3] << 8) | p_addr[2])); + TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port << 12)), + ((p_addr[1] << 8) | p_addr[0])); + + TITAN_GE_WRITE((0x112c | (port << 12)), 0x1); + /* Configure the eight address filters */ + for (i = 0; i < 8; i++) { + /* Select each of the eight filters */ + TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_2 + + (port << 12)), i); + + /* Configure the match */ + reg_data = 0x9; /* Forward Enable Bit */ + TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_0 + + (port << 12)), reg_data); + + /* Finally, AFX Exact Match Address Registers */ + TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_LOW + (port << 12)), + ((p_addr[1] << 8) | p_addr[0])); + TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_MID + (port << 12)), + ((p_addr[3] << 8) | p_addr[2])); + TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_HIGH + (port << 12)), + ((p_addr[5] << 8) | p_addr[4])); + + /* VLAN id set to 0 */ + TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_VID + + (port << 12)), 0); + } +} + +/* + * Actual Routine to reset the adapter when the timeout occurred + */ +static void titan_ge_tx_timeout_task(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + int port = titan_ge_eth->port_num; + + printk("Titan GE: Transmit timed out. Resetting ... 
\n"); + + /* Dump debug info */ + printk(KERN_ERR "TRTG cause : %x \n", + TITAN_GE_READ(0x100c + (port << 12))); + + /* Fix this for the other ports */ + printk(KERN_ERR "FIFO cause : %x \n", TITAN_GE_READ(0x482c)); + printk(KERN_ERR "IE cause : %x \n", TITAN_GE_READ(0x0040)); + printk(KERN_ERR "XDMA GDI ERROR : %x \n", + TITAN_GE_READ(0x5008 + (port << 8))); + printk(KERN_ERR "CHANNEL ERROR: %x \n", + TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT + + (port << 8))); + + netif_device_detach(netdev); + titan_ge_port_reset(titan_ge_eth->port_num); + titan_ge_port_start(netdev, titan_ge_eth); + netif_device_attach(netdev); +} + +/* + * Change the MTU of the Ethernet Device + */ +static int titan_ge_change_mtu(struct net_device *netdev, int new_mtu) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned long flags; + + if ((new_mtu > 9500) || (new_mtu < 64)) + return -EINVAL; + + spin_lock_irqsave(&titan_ge_eth->lock, flags); + + netdev->mtu = new_mtu; + + /* Now we have to reopen the interface so that SKBs with the new + * size will be allocated */ + + if (netif_running(netdev)) { + titan_ge_eth_stop(netdev); + + if (titan_ge_eth_open(netdev) != TITAN_OK) { + printk(KERN_ERR + "%s: Fatal error on opening device\n", + netdev->name); + spin_unlock_irqrestore(&titan_ge_eth->lock, flags); + return -1; + } + } + + spin_unlock_irqrestore(&titan_ge_eth->lock, flags); + return 0; +} + +/* + * Titan Gbe Interrupt Handler. All the three ports send interrupt to one line + * only. Once an interrupt is triggered, figure out the port and then check + * the channel. + */ +static irqreturn_t titan_ge_int_handler(int irq, void *dev_id) +{ + struct net_device *netdev = (struct net_device *) dev_id; + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned int port_num = titan_ge_eth->port_num; + unsigned int reg_data; + unsigned int eth_int_cause_error = 0, is; + unsigned long eth_int_cause1; + int err = 0; +#ifdef CONFIG_SMP + unsigned long eth_int_cause2; +#endif + + /* Ack the CPU interrupt */ + switch (port_num) { + case 0: + is = OCD_READ(RM9000x2_OCD_INTP0STATUS1); + OCD_WRITE(RM9000x2_OCD_INTP0CLEAR1, is); + +#ifdef CONFIG_SMP + is = OCD_READ(RM9000x2_OCD_INTP1STATUS1); + OCD_WRITE(RM9000x2_OCD_INTP1CLEAR1, is); +#endif + break; + + case 1: + is = OCD_READ(RM9000x2_OCD_INTP0STATUS0); + OCD_WRITE(RM9000x2_OCD_INTP0CLEAR0, is); + +#ifdef CONFIG_SMP + is = OCD_READ(RM9000x2_OCD_INTP1STATUS0); + OCD_WRITE(RM9000x2_OCD_INTP1CLEAR0, is); +#endif + break; + + case 2: + is = OCD_READ(RM9000x2_OCD_INTP0STATUS4); + OCD_WRITE(RM9000x2_OCD_INTP0CLEAR4, is); + +#ifdef CONFIG_SMP + is = OCD_READ(RM9000x2_OCD_INTP1STATUS4); + OCD_WRITE(RM9000x2_OCD_INTP1CLEAR4, is); +#endif + } + + eth_int_cause1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A); +#ifdef CONFIG_SMP + eth_int_cause2 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_B); +#endif + + /* Spurious interrupt */ +#ifdef CONFIG_SMP + if ( (eth_int_cause1 == 0) && (eth_int_cause2 == 0)) { +#else + if (eth_int_cause1 == 0) { +#endif + eth_int_cause_error = TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT + + (port_num << 8)); + + if (eth_int_cause_error == 0) + return IRQ_NONE; + } + + /* Handle Tx first. 
No need to ack interrupts */ +#ifdef CONFIG_SMP + if ( (eth_int_cause1 & 0x20202) || + (eth_int_cause2 & 0x20202) ) +#else + if (eth_int_cause1 & 0x20202) +#endif + titan_ge_free_tx_queue(titan_ge_eth); + + /* Handle the Rx next */ +#ifdef CONFIG_SMP + if ( (eth_int_cause1 & 0x10101) || + (eth_int_cause2 & 0x10101)) { +#else + if (eth_int_cause1 & 0x10101) { +#endif + if (netif_rx_schedule_prep(netdev)) { + unsigned int ack; + + ack = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE); + /* Disable Tx and Rx both */ + if (port_num == 0) + ack &= ~(0x3); + if (port_num == 1) + ack &= ~(0x300); + + if (port_num == 2) + ack &= ~(0x30000); + + /* Interrupts have been disabled */ + TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, ack); + + __netif_rx_schedule(netdev); + } + } + + /* Handle error interrupts */ + if (eth_int_cause_error && (eth_int_cause_error != 0x2)) { + printk(KERN_ERR + "XDMA Channel Error : %x on port %d\n", + eth_int_cause_error, port_num); + + printk(KERN_ERR + "XDMA GDI Hardware error : %x on port %d\n", + TITAN_GE_READ(0x5008 + (port_num << 8)), port_num); + + printk(KERN_ERR + "XDMA currently has %d Rx descriptors \n", + TITAN_GE_READ(0x5048 + (port_num << 8))); + + printk(KERN_ERR + "XDMA currently has prefetcted %d Rx descriptors \n", + TITAN_GE_READ(0x505c + (port_num << 8))); + + TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT + + (port_num << 8)), eth_int_cause_error); + } + + /* + * PHY interrupt to inform abt the changes. Reading the + * PHY Status register will clear the interrupt + */ + if ((!(eth_int_cause1 & 0x30303)) && + (eth_int_cause_error == 0)) { + err = + titan_ge_mdio_read(port_num, + TITAN_GE_MDIO_PHY_IS, ®_data); + + if (reg_data & 0x0400) { + /* Link status change */ + titan_ge_mdio_read(port_num, + TITAN_GE_MDIO_PHY_STATUS, ®_data); + if (!(reg_data & 0x0400)) { + /* Link is down */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } else { + /* Link is up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + + /* Enable the queue */ + titan_ge_enable_tx(port_num); + } + } + } + + return IRQ_HANDLED; +} + +/* + * Multicast and Promiscuous mode set. The + * set_multi entry point is called whenever the + * multicast address list or the network interface + * flags are updated. 
+ */ +static void titan_ge_set_multi(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned int port_num = titan_ge_eth->port_num; + unsigned long reg_data; + + reg_data = TITAN_GE_READ(TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 + + (port_num << 12)); + + if (netdev->flags & IFF_PROMISC) { + reg_data |= 0x2; + } + else if (netdev->flags & IFF_ALLMULTI) { + reg_data |= 0x01; + reg_data |= 0x400; /* Use the 64-bit Multicast Hash bin */ + } + else { + reg_data = 0x2; + } + + TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 + + (port_num << 12)), reg_data); + if (reg_data & 0x01) { + TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_LOW + + (port_num << 12)), 0xffff); + TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDLOW + + (port_num << 12)), 0xffff); + TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDHI + + (port_num << 12)), 0xffff); + TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_HI + + (port_num << 12)), 0xffff); + } +} + +/* + * Open the network device + */ +static int titan_ge_open(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned int port_num = titan_ge_eth->port_num; + unsigned int irq = TITAN_ETH_PORT_IRQ - port_num; + int retval; + + retval = request_irq(irq, titan_ge_int_handler, + SA_INTERRUPT | SA_SAMPLE_RANDOM , netdev->name, netdev); + + if (retval != 0) { + printk(KERN_ERR "Cannot assign IRQ number to TITAN GE \n"); + return -1; + } + + netdev->irq = irq; + printk(KERN_INFO "Assigned IRQ %d to port %d\n", irq, port_num); + + spin_lock_irq(&(titan_ge_eth->lock)); + + if (titan_ge_eth_open(netdev) != TITAN_OK) { + spin_unlock_irq(&(titan_ge_eth->lock)); + printk("%s: Error opening interface \n", netdev->name); + free_irq(netdev->irq, netdev); + return -EBUSY; + } + + spin_unlock_irq(&(titan_ge_eth->lock)); + + return 0; +} + +/* + * Allocate the SKBs for the Rx ring. Also used + * for refilling the queue + */ +static int titan_ge_rx_task(struct net_device *netdev, + titan_ge_port_info *titan_ge_port) +{ + struct device *device = &titan_ge_device[titan_ge_port->port_num]->dev; + volatile titan_ge_rx_desc *rx_desc; + struct sk_buff *skb; + int rx_used_desc; + int count = 0; + + while (titan_ge_port->rx_ring_skbs < titan_ge_port->rx_ring_size) { + + /* First try to get the skb from the recycler */ +#ifdef TITAN_GE_JUMBO_FRAMES + skb = titan_ge_alloc_skb(TITAN_GE_JUMBO_BUFSIZE, GFP_ATOMIC); +#else + skb = titan_ge_alloc_skb(TITAN_GE_STD_BUFSIZE, GFP_ATOMIC); +#endif + if (unlikely(!skb)) { + /* OOM, set the flag */ + printk("OOM \n"); + oom_flag = 1; + break; + } + count++; + skb->dev = netdev; + + titan_ge_port->rx_ring_skbs++; + + rx_used_desc = titan_ge_port->rx_used_desc_q; + rx_desc = &(titan_ge_port->rx_desc_area[rx_used_desc]); + +#ifdef TITAN_GE_JUMBO_FRAMES + rx_desc->buffer_addr = dma_map_single(device, skb->data, + TITAN_GE_JUMBO_BUFSIZE - 2, DMA_FROM_DEVICE); +#else + rx_desc->buffer_addr = dma_map_single(device, skb->data, + TITAN_GE_STD_BUFSIZE - 2, DMA_FROM_DEVICE); +#endif + + titan_ge_port->rx_skb[rx_used_desc] = skb; + rx_desc->cmd_sts = TITAN_GE_RX_BUFFER_OWNED; + + titan_ge_port->rx_used_desc_q = + (rx_used_desc + 1) % TITAN_GE_RX_QUEUE; + } + + return count; +} + +/* + * Actual init of the Tital GE port. 
There is one register for + * the channel configuration + */ +static void titan_port_init(struct net_device *netdev, + titan_ge_port_info * titan_ge_eth) +{ + unsigned long reg_data; + + titan_ge_port_reset(titan_ge_eth->port_num); + + /* First reset the TMAC */ + reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG); + reg_data |= 0x80000000; + TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data); + + udelay(30); + + reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG); + reg_data &= ~(0xc0000000); + TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data); + + /* Now reset the RMAC */ + reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG); + reg_data |= 0x00080000; + TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data); + + udelay(30); + + reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG); + reg_data &= ~(0x000c0000); + TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data); +} + +/* + * Start the port. All the hardware specific configuration + * for the XDMA, Tx FIFO, Rx FIFO, TMAC, RMAC, TRTG and AFX + * go here + */ +static int titan_ge_port_start(struct net_device *netdev, + titan_ge_port_info * titan_port) +{ + volatile unsigned long reg_data, reg_data1; + int port_num = titan_port->port_num; + int count = 0; + unsigned long reg_data_1; + + if (config_done == 0) { + reg_data = TITAN_GE_READ(0x0004); + reg_data |= 0x100; + TITAN_GE_WRITE(0x0004, reg_data); + + reg_data &= ~(0x100); + TITAN_GE_WRITE(0x0004, reg_data); + + /* Turn on GMII/MII mode and turn off TBI mode */ + reg_data = TITAN_GE_READ(TITAN_GE_TSB_CTRL_1); + reg_data |= 0x00000700; + reg_data &= ~(0x00800000); /* Fencing */ + + TITAN_GE_WRITE(0x000c, 0x00001100); + + TITAN_GE_WRITE(TITAN_GE_TSB_CTRL_1, reg_data); + + /* Set the CPU Resource Limit register */ + TITAN_GE_WRITE(0x00f8, 0x8); + + /* Be conservative when using the BIU buffers */ + TITAN_GE_WRITE(0x0068, 0x4); + } + + titan_port->tx_threshold = 0; + titan_port->rx_threshold = 0; + + /* We need to write the descriptors for Tx and Rx */ + TITAN_GE_WRITE((TITAN_GE_CHANNEL0_TX_DESC + (port_num << 8)), + (unsigned long) titan_port->tx_dma); + TITAN_GE_WRITE((TITAN_GE_CHANNEL0_RX_DESC + (port_num << 8)), + (unsigned long) titan_port->rx_dma); + + if (config_done == 0) { + /* Step 1: XDMA config */ + reg_data = TITAN_GE_READ(TITAN_GE_XDMA_CONFIG); + reg_data &= ~(0x80000000); /* clear reset */ + reg_data |= 0x1 << 29; /* sparse tx descriptor spacing */ + reg_data |= 0x1 << 28; /* sparse rx descriptor spacing */ + reg_data |= (0x1 << 23) | (0x1 << 24); /* Descriptor Coherency */ + reg_data |= (0x1 << 21) | (0x1 << 22); /* Data Coherency */ + TITAN_GE_WRITE(TITAN_GE_XDMA_CONFIG, reg_data); + } + + /* IR register for the XDMA */ + reg_data = TITAN_GE_READ(TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8)); + reg_data |= 0x80068000; /* No Rx_OOD */ + TITAN_GE_WRITE((TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8)), reg_data); + + /* Start the Tx and Rx XDMA controller */ + reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG + (port_num << 8)); + reg_data &= 0x4fffffff; /* Clear tx reset */ + reg_data &= 0xfff4ffff; /* Clear rx reset */ + +#ifdef TITAN_GE_JUMBO_FRAMES + reg_data |= 0xa0 | 0x30030000; +#else + reg_data |= 0x40 | 0x20030000; +#endif + +#ifndef CONFIG_SMP + reg_data &= ~(0x10); + reg_data |= 0x0f; /* All of the packet */ +#endif + + TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG + (port_num << 8)), reg_data); + + /* Rx desc count */ + count = titan_ge_rx_task(netdev, titan_port); + TITAN_GE_WRITE((0x5048 + (port_num << 8)), count); + count = TITAN_GE_READ(0x5048 + (port_num << 8)); + + 
udelay(30); + + /* + * Step 2: Configure the SDQPF, i.e. FIFO + */ + if (config_done == 0) { + reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL); + reg_data = 0x1; + TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data); + reg_data &= ~(0x1); + TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data); + reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL); + TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data); + + reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL); + reg_data = 0x1; + TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data); + reg_data &= ~(0x1); + TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data); + reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL); + TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data); + } + /* + * Enable RX FIFO 0, 4 and 8 + */ + if (port_num == 0) { + reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_0); + + reg_data |= 0x100000; + reg_data |= (0xff << 10); + + TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data); + /* + * BAV2,BAV and DAV settings for the Rx FIFO + */ + reg_data1 = TITAN_GE_READ(0x4844); + reg_data1 |= ( (0x10 << 20) | (0x10 << 10) | 0x1); + TITAN_GE_WRITE(0x4844, reg_data1); + + reg_data &= ~(0x00100000); + reg_data |= 0x200000; + + TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data); + + reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_0); + reg_data |= 0x100000; + + TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data); + + reg_data |= (0xff << 10); + + TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data); + + /* + * BAV2, BAV and DAV settings for the Tx FIFO + */ + reg_data1 = TITAN_GE_READ(0x4944); + reg_data1 = ( (0x1 << 20) | (0x1 << 10) | 0x10); + + TITAN_GE_WRITE(0x4944, reg_data1); + + reg_data &= ~(0x00100000); + reg_data |= 0x200000; + + TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data); + + } + + if (port_num == 1) { + reg_data = TITAN_GE_READ(0x4870); + + reg_data |= 0x100000; + reg_data |= (0xff << 10) | (0xff + 1); + + TITAN_GE_WRITE(0x4870, reg_data); + /* + * BAV2,BAV and DAV settings for the Rx FIFO + */ + reg_data1 = TITAN_GE_READ(0x4874); + reg_data1 |= ( (0x10 << 20) | (0x10 << 10) | 0x1); + TITAN_GE_WRITE(0x4874, reg_data1); + + reg_data &= ~(0x00100000); + reg_data |= 0x200000; + + TITAN_GE_WRITE(0x4870, reg_data); + + reg_data = TITAN_GE_READ(0x494c); + reg_data |= 0x100000; + + TITAN_GE_WRITE(0x494c, reg_data); + reg_data |= (0xff << 10) | (0xff + 1); + TITAN_GE_WRITE(0x494c, reg_data); + + /* + * BAV2, BAV and DAV settings for the Tx FIFO + */ + reg_data1 = TITAN_GE_READ(0x4950); + reg_data1 = ( (0x1 << 20) | (0x1 << 10) | 0x10); + + TITAN_GE_WRITE(0x4950, reg_data1); + + reg_data &= ~(0x00100000); + reg_data |= 0x200000; + + TITAN_GE_WRITE(0x494c, reg_data); + } + + /* + * Titan 1.2 revision does support port #2 + */ + if (port_num == 2) { + /* + * Put the descriptors in the SRAM + */ + reg_data = TITAN_GE_READ(0x48a0); + + reg_data |= 0x100000; + reg_data |= (0xff << 10) | (2*(0xff + 1)); + + TITAN_GE_WRITE(0x48a0, reg_data); + /* + * BAV2,BAV and DAV settings for the Rx FIFO + */ + reg_data1 = TITAN_GE_READ(0x48a4); + reg_data1 |= ( (0x10 << 20) | (0x10 << 10) | 0x1); + TITAN_GE_WRITE(0x48a4, reg_data1); + + reg_data &= ~(0x00100000); + reg_data |= 0x200000; + + TITAN_GE_WRITE(0x48a0, reg_data); + + reg_data = TITAN_GE_READ(0x4958); + reg_data |= 0x100000; + + TITAN_GE_WRITE(0x4958, reg_data); + reg_data |= (0xff << 10) | (2*(0xff + 1)); + TITAN_GE_WRITE(0x4958, reg_data); + + /* + * BAV2, BAV and DAV settings for the Tx FIFO + */ + reg_data1 = TITAN_GE_READ(0x495c); + reg_data1 = ( (0x1 << 20) | (0x1 << 10) | 0x10); + + 
TITAN_GE_WRITE(0x495c, reg_data1); + + reg_data &= ~(0x00100000); + reg_data |= 0x200000; + + TITAN_GE_WRITE(0x4958, reg_data); + } + + if (port_num == 2) { + reg_data = TITAN_GE_READ(0x48a0); + + reg_data |= 0x100000; + reg_data |= (0xff << 10) | (2*(0xff + 1)); + + TITAN_GE_WRITE(0x48a0, reg_data); + /* + * BAV2,BAV and DAV settings for the Rx FIFO + */ + reg_data1 = TITAN_GE_READ(0x48a4); + reg_data1 |= ( (0x10 << 20) | (0x10 << 10) | 0x1); + TITAN_GE_WRITE(0x48a4, reg_data1); + + reg_data &= ~(0x00100000); + reg_data |= 0x200000; + + TITAN_GE_WRITE(0x48a0, reg_data); + + reg_data = TITAN_GE_READ(0x4958); + reg_data |= 0x100000; + + TITAN_GE_WRITE(0x4958, reg_data); + reg_data |= (0xff << 10) | (2*(0xff + 1)); + TITAN_GE_WRITE(0x4958, reg_data); + + /* + * BAV2, BAV and DAV settings for the Tx FIFO + */ + reg_data1 = TITAN_GE_READ(0x495c); + reg_data1 = ( (0x1 << 20) | (0x1 << 10) | 0x10); + + TITAN_GE_WRITE(0x495c, reg_data1); + + reg_data &= ~(0x00100000); + reg_data |= 0x200000; + + TITAN_GE_WRITE(0x4958, reg_data); + } + + /* + * Step 3: TRTG block enable + */ + reg_data = TITAN_GE_READ(TITAN_GE_TRTG_CONFIG + (port_num << 12)); + + /* + * This is the 1.2 revision of the chip. It has fix for the + * IP header alignment. Now, the IP header begins at an + * aligned address and this wont need an extra copy in the + * driver. This performance drawback existed in the previous + * versions of the silicon + */ + reg_data_1 = TITAN_GE_READ(0x103c + (port_num << 12)); + reg_data_1 |= 0x40000000; + TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1); + + reg_data_1 |= 0x04000000; + TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1); + + mdelay(5); + + reg_data_1 &= ~(0x04000000); + TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1); + + mdelay(5); + + reg_data |= 0x0001; + TITAN_GE_WRITE((TITAN_GE_TRTG_CONFIG + (port_num << 12)), reg_data); + + /* + * Step 4: Start the Tx activity + */ + TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_2 + (port_num << 12)), 0xe197); +#ifdef TITAN_GE_JUMBO_FRAMES + TITAN_GE_WRITE((0x1258 + (port_num << 12)), 0x4000); +#endif + reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + (port_num << 12)); + reg_data |= 0x0001; /* Enable TMAC */ + reg_data |= 0x6c70; /* PAUSE also set */ + + TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 + (port_num << 12)), reg_data); + + udelay(30); + + /* Destination Address drop bit */ + reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_2 + (port_num << 12)); + reg_data |= 0x218; /* DA_DROP bit and pause */ + TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_2 + (port_num << 12)), reg_data); + + TITAN_GE_WRITE((0x1218 + (port_num << 12)), 0x3); + +#ifdef TITAN_GE_JUMBO_FRAMES + TITAN_GE_WRITE((0x1208 + (port_num << 12)), 0x4000); +#endif + /* Start the Rx activity */ + reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)); + reg_data |= 0x0001; /* RMAC Enable */ + reg_data |= 0x0010; /* CRC Check enable */ + reg_data |= 0x0040; /* Min Frame check enable */ + reg_data |= 0x4400; /* Max Frame check enable */ + + TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)), reg_data); + + udelay(30); + + /* + * Enable the Interrupts for Tx and Rx + */ + reg_data1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE); + + if (port_num == 0) { + reg_data1 |= 0x3; +#ifdef CONFIG_SMP + TITAN_GE_WRITE(0x0038, 0x003); +#else + TITAN_GE_WRITE(0x0038, 0x303); +#endif + } + + if (port_num == 1) { + reg_data1 |= 0x300; + } + + if (port_num == 2) + reg_data1 |= 0x30000; + + TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data1); + TITAN_GE_WRITE(0x003c, 0x300); + + if 
(config_done == 0) { + TITAN_GE_WRITE(0x0024, 0x04000024); /* IRQ vector */ + TITAN_GE_WRITE(0x0020, 0x000fb000); /* INTMSG base */ + } + + /* Priority */ + reg_data = TITAN_GE_READ(0x1038 + (port_num << 12)); + reg_data &= ~(0x00f00000); + TITAN_GE_WRITE((0x1038 + (port_num << 12)), reg_data); + + /* Step 5: GMII config */ + titan_ge_gmii_config(port_num); + + if (config_done == 0) { + TITAN_GE_WRITE(0x1a80, 0); + config_done = 1; + } + + return TITAN_OK; +} + +/* + * Function to queue the packet for the Ethernet device + */ +static void titan_ge_tx_queue(titan_ge_port_info * titan_ge_eth, + struct sk_buff * skb) +{ + struct device *device = &titan_ge_device[titan_ge_eth->port_num]->dev; + unsigned int curr_desc = titan_ge_eth->tx_curr_desc_q; + volatile titan_ge_tx_desc *tx_curr; + int port_num = titan_ge_eth->port_num; + + tx_curr = &(titan_ge_eth->tx_desc_area[curr_desc]); + tx_curr->buffer_addr = + dma_map_single(device, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + + titan_ge_eth->tx_skb[curr_desc] = (struct sk_buff *) skb; + tx_curr->buffer_len = skb_headlen(skb); + + /* Last descriptor enables interrupt and changes ownership */ + tx_curr->cmd_sts = 0x1 | (1 << 15) | (1 << 5); + + /* Kick the XDMA to start the transfer from memory to the FIFO */ + TITAN_GE_WRITE((0x5044 + (port_num << 8)), 0x1); + + /* Current descriptor updated */ + titan_ge_eth->tx_curr_desc_q = (curr_desc + 1) % TITAN_GE_TX_QUEUE; + + /* Prefetch the next descriptor */ + prefetch((const void *) + &titan_ge_eth->tx_desc_area[titan_ge_eth->tx_curr_desc_q]); +} + +/* + * Actually does the open of the Ethernet device + */ +static int titan_ge_eth_open(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned int port_num = titan_ge_eth->port_num; + struct device *device = &titan_ge_device[port_num]->dev; + unsigned long reg_data; + unsigned int phy_reg; + int err = 0; + + /* Stop the Rx activity */ + reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)); + reg_data &= ~(0x00000001); + TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)), reg_data); + + /* Clear the port interrupts */ + TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT + (port_num << 8)), 0x0); + + if (config_done == 0) { + TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0); + TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_B, 0); + } + + /* Set the MAC Address */ + memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6); + + if (config_done == 0) + titan_port_init(netdev, titan_ge_eth); + + titan_ge_update_afx(titan_ge_eth); + + /* Allocate the Tx ring now */ + titan_ge_eth->tx_ring_skbs = 0; + titan_ge_eth->tx_ring_size = TITAN_GE_TX_QUEUE; + + /* Allocate space in the SRAM for the descriptors */ + titan_ge_eth->tx_desc_area = (titan_ge_tx_desc *) + (titan_ge_sram + TITAN_TX_RING_BYTES * port_num); + titan_ge_eth->tx_dma = TITAN_SRAM_BASE + TITAN_TX_RING_BYTES * port_num; + + if (!titan_ge_eth->tx_desc_area) { + printk(KERN_ERR + "%s: Cannot allocate Tx Ring (size %d bytes) for port %d\n", + netdev->name, TITAN_TX_RING_BYTES, port_num); + return -ENOMEM; + } + + memset(titan_ge_eth->tx_desc_area, 0, titan_ge_eth->tx_desc_area_size); + + /* Now initialize the Tx descriptor ring */ + titan_ge_init_tx_desc_ring(titan_ge_eth, + titan_ge_eth->tx_ring_size, + (unsigned long) titan_ge_eth->tx_desc_area, + (unsigned long) titan_ge_eth->tx_dma); + + /* Allocate the Rx ring now */ + titan_ge_eth->rx_ring_size = TITAN_GE_RX_QUEUE; + titan_ge_eth->rx_ring_skbs = 0; + + titan_ge_eth->rx_desc_area = + (titan_ge_rx_desc 
*)(titan_ge_sram + 0x1000 + TITAN_RX_RING_BYTES * port_num); + + titan_ge_eth->rx_dma = TITAN_SRAM_BASE + 0x1000 + TITAN_RX_RING_BYTES * port_num; + + if (!titan_ge_eth->rx_desc_area) { + printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n", + netdev->name, TITAN_RX_RING_BYTES); + + printk(KERN_ERR "%s: Freeing previously allocated TX queues...", + netdev->name); + + dma_free_coherent(device, titan_ge_eth->tx_desc_area_size, + (void *) titan_ge_eth->tx_desc_area, + titan_ge_eth->tx_dma); + + return -ENOMEM; + } + + memset(titan_ge_eth->rx_desc_area, 0, titan_ge_eth->rx_desc_area_size); + + /* Now initialize the Rx ring */ +#ifdef TITAN_GE_JUMBO_FRAMES + if ((titan_ge_init_rx_desc_ring + (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_JUMBO_BUFSIZE, + (unsigned long) titan_ge_eth->rx_desc_area, 0, + (unsigned long) titan_ge_eth->rx_dma)) == 0) +#else + if ((titan_ge_init_rx_desc_ring + (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_STD_BUFSIZE, + (unsigned long) titan_ge_eth->rx_desc_area, 0, + (unsigned long) titan_ge_eth->rx_dma)) == 0) +#endif + panic("%s: Error initializing RX Ring\n", netdev->name); + + /* Fill the Rx ring with the SKBs */ + titan_ge_port_start(netdev, titan_ge_eth); + + /* + * Check if Interrupt Coalscing needs to be turned on. The + * values specified in the register is multiplied by + * (8 x 64 nanoseconds) to determine when an interrupt should + * be sent to the CPU. + */ + + if (TITAN_GE_TX_COAL) { + titan_ge_eth->tx_int_coal = + titan_ge_tx_coal(TITAN_GE_TX_COAL, port_num); + } + + err = titan_ge_mdio_read(port_num, TITAN_GE_MDIO_PHY_STATUS, &phy_reg); + if (err == TITAN_GE_MDIO_ERROR) { + printk(KERN_ERR + "Could not read PHY control register 0x11 \n"); + return TITAN_ERROR; + } + if (!(phy_reg & 0x0400)) { + netif_carrier_off(netdev); + netif_stop_queue(netdev); + return TITAN_ERROR; + } else { + netif_carrier_on(netdev); + netif_start_queue(netdev); + } + + return TITAN_OK; +} + +/* + * Queue the packet for Tx. Currently no support for zero copy, + * checksum offload and Scatter Gather. The chip does support + * Scatter Gather only. But, that wont help here since zero copy + * requires support for Tx checksumming also. + */ +int titan_ge_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned long flags; + struct net_device_stats *stats; +//printk("titan_ge_start_xmit\n"); + + stats = &titan_ge_eth->stats; + spin_lock_irqsave(&titan_ge_eth->lock, flags); + + if ((TITAN_GE_TX_QUEUE - titan_ge_eth->tx_ring_skbs) <= + (skb_shinfo(skb)->nr_frags + 1)) { + netif_stop_queue(netdev); + spin_unlock_irqrestore(&titan_ge_eth->lock, flags); + printk(KERN_ERR "Tx OOD \n"); + return 1; + } + + titan_ge_tx_queue(titan_ge_eth, skb); + titan_ge_eth->tx_ring_skbs++; + + if (TITAN_GE_TX_QUEUE <= (titan_ge_eth->tx_ring_skbs + 4)) { + spin_unlock_irqrestore(&titan_ge_eth->lock, flags); + titan_ge_free_tx_queue(titan_ge_eth); + spin_lock_irqsave(&titan_ge_eth->lock, flags); + } + + stats->tx_bytes += skb->len; + stats->tx_packets++; + + spin_unlock_irqrestore(&titan_ge_eth->lock, flags); + + netdev->trans_start = jiffies; + + return 0; +} + +/* + * Actually does the Rx. Rx side checksumming supported. 
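+ * It returns TITAN_OK and fills in the titan_ge_packet when a completed + * descriptor is available, and TITAN_ERROR when the ring has been exhausted + * or the descriptor is still owned by the XDMA (TITAN_GE_RX_BUFFER_OWNED).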
+ */ +static int titan_ge_rx(struct net_device *netdev, int port_num, + titan_ge_port_info * titan_ge_port, + titan_ge_packet * packet) +{ + int rx_curr_desc, rx_used_desc; + volatile titan_ge_rx_desc *rx_desc; + + rx_curr_desc = titan_ge_port->rx_curr_desc_q; + rx_used_desc = titan_ge_port->rx_used_desc_q; + + if (((rx_curr_desc + 1) % TITAN_GE_RX_QUEUE) == rx_used_desc) + return TITAN_ERROR; + + rx_desc = &(titan_ge_port->rx_desc_area[rx_curr_desc]); + + if (rx_desc->cmd_sts & TITAN_GE_RX_BUFFER_OWNED) + return TITAN_ERROR; + + packet->skb = titan_ge_port->rx_skb[rx_curr_desc]; + packet->len = (rx_desc->cmd_sts & 0x7fff); + + /* + * At this point, we dont know if the checksumming + * actually helps relieve CPU. So, keep it for + * port 0 only + */ + packet->checksum = ntohs((rx_desc->buffer & 0xffff0000) >> 16); + packet->cmd_sts = rx_desc->cmd_sts; + + titan_ge_port->rx_curr_desc_q = (rx_curr_desc + 1) % TITAN_GE_RX_QUEUE; + + /* Prefetch the next descriptor */ + prefetch((const void *) + &titan_ge_port->rx_desc_area[titan_ge_port->rx_curr_desc_q + 1]); + + return TITAN_OK; +} + +/* + * Free the Tx queue of the used SKBs + */ +static int titan_ge_free_tx_queue(titan_ge_port_info *titan_ge_eth) +{ + unsigned long flags; + + /* Take the lock */ + spin_lock_irqsave(&(titan_ge_eth->lock), flags); + + while (titan_ge_return_tx_desc(titan_ge_eth, titan_ge_eth->port_num) == 0) + if (titan_ge_eth->tx_ring_skbs != 1) + titan_ge_eth->tx_ring_skbs--; + + spin_unlock_irqrestore(&titan_ge_eth->lock, flags); + + return TITAN_OK; +} + +/* + * Threshold beyond which we do the cleaning of + * Tx queue and new allocation for the Rx + * queue + */ +#define TX_THRESHOLD 4 +#define RX_THRESHOLD 10 + +/* + * Receive the packets and send it to the kernel. + */ +static int titan_ge_receive_queue(struct net_device *netdev, unsigned int max) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned int port_num = titan_ge_eth->port_num; + titan_ge_packet packet; + struct net_device_stats *stats; + struct sk_buff *skb; + unsigned long received_packets = 0; + unsigned int ack; + + stats = &titan_ge_eth->stats; + + while ((--max) + && (titan_ge_rx(netdev, port_num, titan_ge_eth, &packet) == TITAN_OK)) { + skb = (struct sk_buff *) packet.skb; + + titan_ge_eth->rx_ring_skbs--; + + if (--titan_ge_eth->rx_work_limit < 0) + break; + received_packets++; + + stats->rx_packets++; + stats->rx_bytes += packet.len; + + if ((packet.cmd_sts & TITAN_GE_RX_PERR) || + (packet.cmd_sts & TITAN_GE_RX_OVERFLOW_ERROR) || + (packet.cmd_sts & TITAN_GE_RX_TRUNC) || + (packet.cmd_sts & TITAN_GE_RX_CRC_ERROR)) { + stats->rx_dropped++; + dev_kfree_skb_any(skb); + + continue; + } + /* + * Either support fast path or slow path. Decision + * making can really slow down the performance. The + * idea is to cut down the number of checks and improve + * the fastpath. 
+ */ + + skb_put(skb, packet.len - 2); + + /* + * Increment data pointer by two since thats where + * the MAC starts + */ + skb_reserve(skb, 2); + skb->protocol = eth_type_trans(skb, netdev); + netif_receive_skb(skb); + + if (titan_ge_eth->rx_threshold > RX_THRESHOLD) { + ack = titan_ge_rx_task(netdev, titan_ge_eth); + TITAN_GE_WRITE((0x5048 + (port_num << 8)), ack); + titan_ge_eth->rx_threshold = 0; + } else + titan_ge_eth->rx_threshold++; + + if (titan_ge_eth->tx_threshold > TX_THRESHOLD) { + titan_ge_eth->tx_threshold = 0; + titan_ge_free_tx_queue(titan_ge_eth); + } + else + titan_ge_eth->tx_threshold++; + + } + return received_packets; +} + + +/* + * Enable the Rx side interrupts + */ +static void titan_ge_enable_int(unsigned int port_num, + titan_ge_port_info *titan_ge_eth, + struct net_device *netdev) +{ + unsigned long reg_data = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE); + + if (port_num == 0) + reg_data |= 0x3; + if (port_num == 1) + reg_data |= 0x300; + if (port_num == 2) + reg_data |= 0x30000; + + /* Re-enable interrupts */ + TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data); +} + +/* + * Main function to handle the polling for Rx side NAPI. + * Receive interrupts have been disabled at this point. + * The poll schedules the transmit followed by receive. + */ +static int titan_ge_poll(struct net_device *netdev, int *budget) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + int port_num = titan_ge_eth->port_num; + int work_done = 0; + unsigned long flags, status; + + titan_ge_eth->rx_work_limit = *budget; + if (titan_ge_eth->rx_work_limit > netdev->quota) + titan_ge_eth->rx_work_limit = netdev->quota; + + do { + /* Do the transmit cleaning work here */ + titan_ge_free_tx_queue(titan_ge_eth); + + /* Ack the Rx interrupts */ + if (port_num == 0) + TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x3); + if (port_num == 1) + TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x300); + if (port_num == 2) + TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x30000); + + work_done += titan_ge_receive_queue(netdev, 0); + + /* Out of quota and there is work to be done */ + if (titan_ge_eth->rx_work_limit < 0) + goto not_done; + + /* Receive alloc_skb could lead to OOM */ + if (oom_flag == 1) { + oom_flag = 0; + goto oom; + } + + status = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A); + } while (status & 0x30300); + + /* If we are here, then no more interrupts to process */ + goto done; + +not_done: + *budget -= work_done; + netdev->quota -= work_done; + return 1; + +oom: + printk(KERN_ERR "OOM \n"); + netif_rx_complete(netdev); + return 0; + +done: + /* + * No more packets on the poll list. 
Turn the interrupts + * back on and we should be able to catch the new + * packets in the interrupt handler + */ + if (!work_done) + work_done = 1; + + *budget -= work_done; + netdev->quota -= work_done; + + spin_lock_irqsave(&titan_ge_eth->lock, flags); + + /* Remove us from the poll list */ + netif_rx_complete(netdev); + + /* Re-enable interrupts */ + titan_ge_enable_int(port_num, titan_ge_eth, netdev); + + spin_unlock_irqrestore(&titan_ge_eth->lock, flags); + + return 0; +} + +/* + * Close the network device + */ +int titan_ge_stop(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + + spin_lock_irq(&(titan_ge_eth->lock)); + titan_ge_eth_stop(netdev); + free_irq(netdev->irq, netdev); + spin_unlock_irq(&titan_ge_eth->lock); + + return TITAN_OK; +} + +/* + * Free the Tx ring + */ +static void titan_ge_free_tx_rings(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned int port_num = titan_ge_eth->port_num; + unsigned int curr; + unsigned long reg_data; + + /* Stop the Tx DMA */ + reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG + + (port_num << 8)); + reg_data |= 0xc0000000; + TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG + + (port_num << 8)), reg_data); + + /* Disable the TMAC */ + reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + + (port_num << 12)); + reg_data &= ~(0x00000001); + TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 + + (port_num << 12)), reg_data); + + for (curr = 0; + (titan_ge_eth->tx_ring_skbs) && (curr < TITAN_GE_TX_QUEUE); + curr++) { + if (titan_ge_eth->tx_skb[curr]) { + dev_kfree_skb(titan_ge_eth->tx_skb[curr]); + titan_ge_eth->tx_ring_skbs--; + } + } + + if (titan_ge_eth->tx_ring_skbs != 0) + printk + ("%s: Error on Tx descriptor free - could not free %d" + " descriptors\n", netdev->name, + titan_ge_eth->tx_ring_skbs); + +#ifndef TITAN_RX_RING_IN_SRAM + dma_free_coherent(&titan_ge_device[port_num]->dev, + titan_ge_eth->tx_desc_area_size, + (void *) titan_ge_eth->tx_desc_area, + titan_ge_eth->tx_dma); +#endif +} + +/* + * Free the Rx ring + */ +static void titan_ge_free_rx_rings(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned int port_num = titan_ge_eth->port_num; + unsigned int curr; + unsigned long reg_data; + + /* Stop the Rx DMA */ + reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG + + (port_num << 8)); + reg_data |= 0x000c0000; + TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG + + (port_num << 8)), reg_data); + + /* Disable the RMAC */ + reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + + (port_num << 12)); + reg_data &= ~(0x00000001); + TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + + (port_num << 12)), reg_data); + + for (curr = 0; + titan_ge_eth->rx_ring_skbs && (curr < TITAN_GE_RX_QUEUE); + curr++) { + if (titan_ge_eth->rx_skb[curr]) { + dev_kfree_skb(titan_ge_eth->rx_skb[curr]); + titan_ge_eth->rx_ring_skbs--; + } + } + + if (titan_ge_eth->rx_ring_skbs != 0) + printk(KERN_ERR + "%s: Error in freeing Rx Ring. 
%d skb's still" + " stuck in RX Ring - ignoring them\n", netdev->name, + titan_ge_eth->rx_ring_skbs); + +#ifndef TITAN_RX_RING_IN_SRAM + dma_free_coherent(&titan_ge_device[port_num]->dev, + titan_ge_eth->rx_desc_area_size, + (void *) titan_ge_eth->rx_desc_area, + titan_ge_eth->rx_dma); +#endif +} + +/* + * Actually does the stop of the Ethernet device + */ +static void titan_ge_eth_stop(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + + netif_stop_queue(netdev); + + titan_ge_port_reset(titan_ge_eth->port_num); + + titan_ge_free_tx_rings(netdev); + titan_ge_free_rx_rings(netdev); + + /* Disable the Tx and Rx Interrupts for all channels */ + TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, 0x0); +} + +/* + * Update the MAC address. Note that we have to write the + * address in three station registers, 16 bits each. And this + * has to be done for TMAC and RMAC + */ +static void titan_ge_update_mac_address(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + unsigned int port_num = titan_ge_eth->port_num; + u8 p_addr[6]; + + memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6); + memcpy(p_addr, netdev->dev_addr, 6); + + /* Update the Address Filtering Match tables */ + titan_ge_update_afx(titan_ge_eth); + + printk("Station MAC : %d %d %d %d %d %d \n", + p_addr[5], p_addr[4], p_addr[3], + p_addr[2], p_addr[1], p_addr[0]); + + /* Set the MAC address here for TMAC and RMAC */ + TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port_num << 12)), + ((p_addr[5] << 8) | p_addr[4])); + TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port_num << 12)), + ((p_addr[3] << 8) | p_addr[2])); + TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port_num << 12)), + ((p_addr[1] << 8) | p_addr[0])); + + TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port_num << 12)), + ((p_addr[5] << 8) | p_addr[4])); + TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port_num << 12)), + ((p_addr[3] << 8) | p_addr[2])); + TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port_num << 12)), + ((p_addr[1] << 8) | p_addr[0])); +} + +/* + * Set the MAC address of the Ethernet device + */ +static int titan_ge_set_mac_address(struct net_device *dev, void *addr) +{ + titan_ge_port_info *tp = netdev_priv(dev); + struct sockaddr *sa = addr; + + memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + + spin_lock_irq(&tp->lock); + titan_ge_update_mac_address(dev); + spin_unlock_irq(&tp->lock); + + return 0; +} + +/* + * Get the Ethernet device stats + */ +static struct net_device_stats *titan_ge_get_stats(struct net_device *netdev) +{ + titan_ge_port_info *titan_ge_eth = netdev_priv(netdev); + + return &titan_ge_eth->stats; +} + +/* + * Initialize the Rx descriptor ring for the Titan Ge + */ +static int titan_ge_init_rx_desc_ring(titan_ge_port_info * titan_eth_port, + int rx_desc_num, + int rx_buff_size, + unsigned long rx_desc_base_addr, + unsigned long rx_buff_base_addr, + unsigned long rx_dma) +{ + volatile titan_ge_rx_desc *rx_desc; + unsigned long buffer_addr; + int index; + unsigned long titan_ge_rx_desc_bus = rx_dma; + + buffer_addr = rx_buff_base_addr; + rx_desc = (titan_ge_rx_desc *) rx_desc_base_addr; + + /* Check alignment */ + if (rx_buff_base_addr & 0xF) + return 0; + + /* Check Rx buffer size */ + if ((rx_buff_size < 8) || (rx_buff_size > TITAN_GE_MAX_RX_BUFFER)) + return 0; + + /* 64-bit alignment + if ((rx_buff_base_addr + rx_buff_size) & 0x7) + return 0; */ + + /* Initialize the Rx desc ring */ + for (index = 0; index < rx_desc_num; index++) { + titan_ge_rx_desc_bus += 
sizeof(titan_ge_rx_desc); + rx_desc[index].cmd_sts = 0; + rx_desc[index].buffer_addr = buffer_addr; + titan_eth_port->rx_skb[index] = NULL; + buffer_addr += rx_buff_size; + } + + titan_eth_port->rx_curr_desc_q = 0; + titan_eth_port->rx_used_desc_q = 0; + + titan_eth_port->rx_desc_area = (titan_ge_rx_desc *) rx_desc_base_addr; + titan_eth_port->rx_desc_area_size = + rx_desc_num * sizeof(titan_ge_rx_desc); + + titan_eth_port->rx_dma = rx_dma; + + return TITAN_OK; +} + +/* + * Initialize the Tx descriptor ring. Descriptors in the SRAM + */ +static int titan_ge_init_tx_desc_ring(titan_ge_port_info * titan_ge_port, + int tx_desc_num, + unsigned long tx_desc_base_addr, + unsigned long tx_dma) +{ + titan_ge_tx_desc *tx_desc; + int index; + unsigned long titan_ge_tx_desc_bus = tx_dma; + + if (tx_desc_base_addr & 0xF) + return 0; + + tx_desc = (titan_ge_tx_desc *) tx_desc_base_addr; + + for (index = 0; index < tx_desc_num; index++) { + titan_ge_port->tx_dma_array[index] = + (dma_addr_t) titan_ge_tx_desc_bus; + titan_ge_tx_desc_bus += sizeof(titan_ge_tx_desc); + tx_desc[index].cmd_sts = 0x0000; + tx_desc[index].buffer_len = 0; + tx_desc[index].buffer_addr = 0x00000000; + titan_ge_port->tx_skb[index] = NULL; + } + + titan_ge_port->tx_curr_desc_q = 0; + titan_ge_port->tx_used_desc_q = 0; + + titan_ge_port->tx_desc_area = (titan_ge_tx_desc *) tx_desc_base_addr; + titan_ge_port->tx_desc_area_size = + tx_desc_num * sizeof(titan_ge_tx_desc); + + titan_ge_port->tx_dma = tx_dma; + return TITAN_OK; +} + +/* + * Initialize the device as an Ethernet device + */ +static int __init titan_ge_probe(struct device *device) +{ + titan_ge_port_info *titan_ge_eth; + struct net_device *netdev; + int port = to_platform_device(device)->id; + int err; + + netdev = alloc_etherdev(sizeof(titan_ge_port_info)); + if (!netdev) { + err = -ENODEV; + goto out; + } + + netdev->open = titan_ge_open; + netdev->stop = titan_ge_stop; + netdev->hard_start_xmit = titan_ge_start_xmit; + netdev->get_stats = titan_ge_get_stats; + netdev->set_multicast_list = titan_ge_set_multi; + netdev->set_mac_address = titan_ge_set_mac_address; + + /* Tx timeout */ + netdev->tx_timeout = titan_ge_tx_timeout; + netdev->watchdog_timeo = 2 * HZ; + + /* Set these to very high values */ + netdev->poll = titan_ge_poll; + netdev->weight = 64; + + netdev->tx_queue_len = TITAN_GE_TX_QUEUE; + netif_carrier_off(netdev); + netdev->base_addr = 0; + + netdev->change_mtu = titan_ge_change_mtu; + + titan_ge_eth = netdev_priv(netdev); + /* Allocation of memory for the driver structures */ + + titan_ge_eth->port_num = port; + + /* Configure the Tx timeout handler */ + INIT_WORK(&titan_ge_eth->tx_timeout_task, + (void (*)(void *)) titan_ge_tx_timeout_task, netdev); + + spin_lock_init(&titan_ge_eth->lock); + + /* set MAC addresses */ + memcpy(netdev->dev_addr, titan_ge_mac_addr_base, 6); + netdev->dev_addr[5] += port; + + err = register_netdev(netdev); + + if (err) + goto out_free_netdev; + + printk(KERN_NOTICE + "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->name, port, netdev->dev_addr[0], + netdev->dev_addr[1], netdev->dev_addr[2], + netdev->dev_addr[3], netdev->dev_addr[4], + netdev->dev_addr[5]); + + printk(KERN_NOTICE "Rx NAPI supported, Tx Coalescing ON \n"); + + return 0; + +out_free_netdev: + kfree(netdev); + +out: + return err; +} + +static void __devexit titan_device_remove(struct device *device) +{ +} + +/* + * Reset the Ethernet port + */ +static void titan_ge_port_reset(unsigned int port_num) +{ + unsigned int reg_data; + + /* Stop 
the Tx port activity */ + reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + + (port_num << 12)); + reg_data &= ~(0x0001); + TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 + + (port_num << 12)), reg_data); + + /* Stop the Rx port activity */ + reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + + (port_num << 12)); + reg_data &= ~(0x0001); + TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + + (port_num << 12)), reg_data); + + return; +} + +/* + * Return the Tx desc after use by the XDMA + */ +static int titan_ge_return_tx_desc(titan_ge_port_info * titan_ge_eth, int port) +{ + int tx_desc_used; + struct sk_buff *skb; + + tx_desc_used = titan_ge_eth->tx_used_desc_q; + + /* return right away */ + if (tx_desc_used == titan_ge_eth->tx_curr_desc_q) + return TITAN_ERROR; + + /* Now the critical stuff */ + skb = titan_ge_eth->tx_skb[tx_desc_used]; + + dev_kfree_skb_any(skb); + + titan_ge_eth->tx_skb[tx_desc_used] = NULL; + titan_ge_eth->tx_used_desc_q = + (tx_desc_used + 1) % TITAN_GE_TX_QUEUE; + + return 0; +} + +/* + * Coalescing for the Tx path + */ +static unsigned long titan_ge_tx_coal(unsigned long delay, int port) +{ + unsigned long rx_delay; + + rx_delay = TITAN_GE_READ(TITAN_GE_INT_COALESCING); + delay = (delay << 16) | rx_delay; + + TITAN_GE_WRITE(TITAN_GE_INT_COALESCING, delay); + TITAN_GE_WRITE(0x5038, delay); + + return delay; +} + +static struct device_driver titan_soc_driver = { + .name = titan_string, + .bus = &platform_bus_type, + .probe = titan_ge_probe, + .remove = __devexit_p(titan_device_remove), +}; + +static void titan_platform_release (struct device *device) +{ + struct platform_device *pldev; + + /* free device */ + pldev = to_platform_device (device); + kfree (pldev); +} + +/* + * Register the Titan GE with the kernel + */ +static int __init titan_ge_init_module(void) +{ + struct platform_device *pldev; + unsigned int version, device; + int i; + + printk(KERN_NOTICE + "PMC-Sierra TITAN 10/100/1000 Ethernet Driver \n"); + + titan_ge_base = (unsigned long) ioremap(TITAN_GE_BASE, TITAN_GE_SIZE); + if (!titan_ge_base) { + printk("Mapping Titan GE failed\n"); + goto out; + } + + device = TITAN_GE_READ(TITAN_GE_DEVICE_ID); + version = (device & 0x000f0000) >> 16; + device &= 0x0000ffff; + + printk(KERN_NOTICE "Device Id : %x, Version : %x \n", device, version); + +#ifdef TITAN_RX_RING_IN_SRAM + titan_ge_sram = (unsigned long) ioremap(TITAN_SRAM_BASE, + TITAN_SRAM_SIZE); + if (!titan_ge_sram) { + printk("Mapping Titan SRAM failed\n"); + goto out_unmap_ge; + } +#endif + + if (driver_register(&titan_soc_driver)) { + printk(KERN_ERR "Driver registration failed\n"); + goto out_unmap_sram; + } + + for (i = 0; i < 3; i++) { + titan_ge_device[i] = NULL; + + if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) + continue; + + memset (pldev, 0, sizeof (*pldev)); + pldev->name = titan_string; + pldev->id = i; + pldev->dev.release = titan_platform_release; + titan_ge_device[i] = pldev; + + if (platform_device_register (pldev)) { + kfree (pldev); + titan_ge_device[i] = NULL; + continue; + } + + if (!pldev->dev.driver) { + /* + * The driver was not bound to this device, there was + * no hardware at this address. 
Unregister it, as the + * release function will take care of freeing the + * allocated structure + */ + titan_ge_device[i] = NULL; + platform_device_unregister (pldev); + } + } + + return 0; + +out_unmap_sram: + iounmap((void *)titan_ge_sram); + +out_unmap_ge: + iounmap((void *)titan_ge_base); + +out: + return -ENOMEM; +} + +/* + * Unregister the Titan GE from the kernel + */ +static void __exit titan_ge_cleanup_module(void) +{ + int i; + + driver_unregister(&titan_soc_driver); + + for (i = 0; i < 3; i++) { + if (titan_ge_device[i]) { + platform_device_unregister (titan_ge_device[i]); + titan_ge_device[i] = NULL; + } + } + + iounmap((void *)titan_ge_sram); + iounmap((void *)titan_ge_base); +} + +MODULE_AUTHOR("Manish Lachwani <lachwani@pmc-sierra.com>"); +MODULE_DESCRIPTION("Titan GE Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_init(titan_ge_init_module); +module_exit(titan_ge_cleanup_module); diff -Nurd linux-2.6.24/drivers/net/titan_ge.h mer-smartq-kernel/drivers/net/titan_ge.h --- linux-2.6.24/drivers/net/titan_ge.h 1970-01-01 01:00:00.000000000 +0100 +++ mer-smartq-kernel/drivers/net/titan_ge.h 2009-11-17 12:13:31.000000000 +0100 @@ -0,0 +1,415 @@ +#ifndef _TITAN_GE_H_ +#define _TITAN_GE_H_ + +#include +#include +#include +#include + +/* + * These functions should be later moved to a more generic location since there + * will be others accessing it also + */ + +/* + * This is the way it works: LKB5 Base is at 0x0128. TITAN_BASE is defined in + * include/asm/titan_dep.h. TITAN_GE_BASE is the value in the TITAN_GE_LKB5 + * register. + */ + +#define TITAN_GE_BASE 0xfe000000UL +#define TITAN_GE_SIZE 0x10000UL + +extern unsigned long titan_ge_base; + +#define TITAN_GE_WRITE(offset, data) \ + *(volatile u32 *)(titan_ge_base + (offset)) = (data) + +#define TITAN_GE_READ(offset) *(volatile u32 *)(titan_ge_base + (offset)) + +#ifndef msec_delay +#define msec_delay(x) do { if(in_interrupt()) { \ + /* Don't mdelay in interrupt context!
*/ \ + BUG(); \ + } else { \ + set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000); \ + } } while(0) +#endif + +#define TITAN_GE_PORT_0 + +#define TITAN_SRAM_BASE ((OCD_READ(RM9000x2_OCD_LKB13) & ~1) << 4) +#define TITAN_SRAM_SIZE 0x2000UL + +/* + * We may need these constants + */ +#define TITAN_BIT0 0x00000001 +#define TITAN_BIT1 0x00000002 +#define TITAN_BIT2 0x00000004 +#define TITAN_BIT3 0x00000008 +#define TITAN_BIT4 0x00000010 +#define TITAN_BIT5 0x00000020 +#define TITAN_BIT6 0x00000040 +#define TITAN_BIT7 0x00000080 +#define TITAN_BIT8 0x00000100 +#define TITAN_BIT9 0x00000200 +#define TITAN_BIT10 0x00000400 +#define TITAN_BIT11 0x00000800 +#define TITAN_BIT12 0x00001000 +#define TITAN_BIT13 0x00002000 +#define TITAN_BIT14 0x00004000 +#define TITAN_BIT15 0x00008000 +#define TITAN_BIT16 0x00010000 +#define TITAN_BIT17 0x00020000 +#define TITAN_BIT18 0x00040000 +#define TITAN_BIT19 0x00080000 +#define TITAN_BIT20 0x00100000 +#define TITAN_BIT21 0x00200000 +#define TITAN_BIT22 0x00400000 +#define TITAN_BIT23 0x00800000 +#define TITAN_BIT24 0x01000000 +#define TITAN_BIT25 0x02000000 +#define TITAN_BIT26 0x04000000 +#define TITAN_BIT27 0x08000000 +#define TITAN_BIT28 0x10000000 +#define TITAN_BIT29 0x20000000 +#define TITAN_BIT30 0x40000000 +#define TITAN_BIT31 0x80000000 + +/* Flow Control */ +#define TITAN_GE_FC_NONE 0x0 +#define TITAN_GE_FC_FULL 0x1 +#define TITAN_GE_FC_TX_PAUSE 0x2 +#define TITAN_GE_FC_RX_PAUSE 0x3 + +/* Duplex Settings */ +#define TITAN_GE_FULL_DUPLEX 0x1 +#define TITAN_GE_HALF_DUPLEX 0x2 + +/* Speed settings */ +#define TITAN_GE_SPEED_1000 0x1 +#define TITAN_GE_SPEED_100 0x2 +#define TITAN_GE_SPEED_10 0x3 + +/* Debugging info only */ +#undef TITAN_DEBUG + +/* Keep the rings in the Titan's SSRAM */ +#define TITAN_RX_RING_IN_SRAM + +#ifdef CONFIG_64BIT +#define TITAN_GE_IE_MASK 0xfffffffffb001b64 +#define TITAN_GE_IE_STATUS 0xfffffffffb001b60 +#else +#define TITAN_GE_IE_MASK 0xfb001b64 +#define TITAN_GE_IE_STATUS 0xfb001b60 +#endif + +/* Support for Jumbo Frames */ +#undef TITAN_GE_JUMBO_FRAMES + +/* Rx buffer size */ +#ifdef TITAN_GE_JUMBO_FRAMES +#define TITAN_GE_JUMBO_BUFSIZE 9080 +#else +#define TITAN_GE_STD_BUFSIZE 1580 +#endif + +/* + * Tx and Rx Interrupt Coalescing parameter. These values are + * for 1 Ghz processor. Rx coalescing can be taken care of + * by NAPI. NAPI is adaptive and hence useful. Tx coalescing + * is not adaptive. Hence, these values need to be adjusted + * based on load, CPU speed etc. 
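+ * + * As a rough worked example, using the 8 x 64 ns granularity mentioned in + * titan_ge_eth_open: TITAN_GE_TX_COAL = 300 corresponds to about + * 300 * 512 ns = 153.6 us between Tx interrupts.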
+ */ +#define TITAN_GE_RX_COAL 150 +#define TITAN_GE_TX_COAL 300 + +#if defined(__BIG_ENDIAN) + +/* Define the Rx descriptor */ +typedef struct eth_rx_desc { + u32 reserved; /* Unused */ + u32 buffer_addr; /* CPU buffer address */ + u32 cmd_sts; /* Command and Status */ + u32 buffer; /* XDMA buffer address */ +} titan_ge_rx_desc; + +/* Define the Tx descriptor */ +typedef struct eth_tx_desc { + u16 cmd_sts; /* Command, Status and Buffer count */ + u16 buffer_len; /* Length of the buffer */ + u32 buffer_addr; /* Physical address of the buffer */ +} titan_ge_tx_desc; + +#elif defined(__LITTLE_ENDIAN) + +/* Define the Rx descriptor */ +typedef struct eth_rx_desc { + u32 buffer_addr; /* CPU buffer address */ + u32 reserved; /* Unused */ + u32 buffer; /* XDMA buffer address */ + u32 cmd_sts; /* Command and Status */ +} titan_ge_rx_desc; + +/* Define the Tx descriptor */ +typedef struct eth_tx_desc { + u32 buffer_addr; /* Physical address of the buffer */ + u16 buffer_len; /* Length of the buffer */ + u16 cmd_sts; /* Command, Status and Buffer count */ +} titan_ge_tx_desc; +#endif + +/* Default Tx Queue Size */ +#define TITAN_GE_TX_QUEUE 128 +#define TITAN_TX_RING_BYTES (TITAN_GE_TX_QUEUE * sizeof(struct eth_tx_desc)) + +/* Default Rx Queue Size */ +#define TITAN_GE_RX_QUEUE 64 +#define TITAN_RX_RING_BYTES (TITAN_GE_RX_QUEUE * sizeof(struct eth_rx_desc)) + +/* Packet Structure */ +typedef struct _pkt_info { + unsigned int len; + unsigned int cmd_sts; + unsigned int buffer; + struct sk_buff *skb; + unsigned int checksum; +} titan_ge_packet; + + +#define PHYS_CNT 3 + +/* Titan Port specific data structure */ +typedef struct _eth_port_ctrl { + unsigned int port_num; + u8 port_mac_addr[6]; + + /* Rx descriptor pointers */ + int rx_curr_desc_q, rx_used_desc_q; + + /* Tx descriptor pointers */ + int tx_curr_desc_q, tx_used_desc_q; + + /* Rx descriptor area */ + volatile titan_ge_rx_desc *rx_desc_area; + unsigned int rx_desc_area_size; + struct sk_buff* rx_skb[TITAN_GE_RX_QUEUE]; + + /* Tx Descriptor area */ + volatile titan_ge_tx_desc *tx_desc_area; + unsigned int tx_desc_area_size; + struct sk_buff* tx_skb[TITAN_GE_TX_QUEUE]; + + /* Timeout task */ + struct work_struct tx_timeout_task; + + /* DMA structures and handles */ + dma_addr_t tx_dma; + dma_addr_t rx_dma; + dma_addr_t tx_dma_array[TITAN_GE_TX_QUEUE]; + + /* Device lock */ + spinlock_t lock; + + unsigned int tx_ring_skbs; + unsigned int rx_ring_size; + unsigned int tx_ring_size; + unsigned int rx_ring_skbs; + + struct net_device_stats stats; + + /* Tx and Rx coalescing */ + unsigned long rx_int_coal; + unsigned long tx_int_coal; + + /* Threshold for replenishing the Rx and Tx rings */ + unsigned int tx_threshold; + unsigned int rx_threshold; + + /* NAPI work limit */ + unsigned int rx_work_limit; +} titan_ge_port_info; + +/* Titan specific constants */ +#define TITAN_ETH_PORT_IRQ 3 + +/* Max Rx buffer */ +#define TITAN_GE_MAX_RX_BUFFER 65536 + +/* Tx and Rx Error */ +#define TITAN_GE_ERROR + +/* Rx Descriptor Command and Status */ + +#define TITAN_GE_RX_CRC_ERROR TITAN_BIT27 /* crc error */ +#define TITAN_GE_RX_OVERFLOW_ERROR TITAN_BIT15 /* overflow */ +#define TITAN_GE_RX_BUFFER_OWNED TITAN_BIT21 /* buffer ownership */ +#define TITAN_GE_RX_STP TITAN_BIT31 /* start of packet */ +#define TITAN_GE_RX_BAM TITAN_BIT30 /* broadcast address match */ +#define TITAN_GE_RX_PAM TITAN_BIT28 /* physical address match */ +#define TITAN_GE_RX_LAFM TITAN_BIT29 /* logical address filter match */ +#define TITAN_GE_RX_VLAN TITAN_BIT26 /* virtual lans */ 
+#define TITAN_GE_RX_PERR TITAN_BIT19 /* packet error */ +#define TITAN_GE_RX_TRUNC TITAN_BIT20 /* packet size greater than 32 buffers */ + +/* Tx Descriptor Command */ +#define TITAN_GE_TX_BUFFER_OWNED TITAN_BIT5 /* buffer ownership */ +#define TITAN_GE_TX_ENABLE_INTERRUPT TITAN_BIT15 /* Interrupt Enable */ + +/* Return Status */ +#define TITAN_OK 0x1 /* Good Status */ +#define TITAN_ERROR 0x2 /* Error Status */ + +/* MIB specific register offset */ +#define TITAN_GE_MSTATX_STATS_BASE_LOW 0x0800 /* MSTATX COUNTL[15:0] */ +#define TITAN_GE_MSTATX_STATS_BASE_MID 0x0804 /* MSTATX COUNTM[15:0] */ +#define TITAN_GE_MSTATX_STATS_BASE_HI 0x0808 /* MSTATX COUNTH[7:0] */ +#define TITAN_GE_MSTATX_CONTROL 0x0828 /* MSTATX Control */ +#define TITAN_GE_MSTATX_VARIABLE_SELECT 0x082C /* MSTATX Variable Select */ + +/* MIB counter offsets, add to the TITAN_GE_MSTATX_STATS_BASE_XXX */ +#define TITAN_GE_MSTATX_RXFRAMESOK 0x0040 +#define TITAN_GE_MSTATX_RXOCTETSOK 0x0050 +#define TITAN_GE_MSTATX_RXFRAMES 0x0060 +#define TITAN_GE_MSTATX_RXOCTETS 0x0070 +#define TITAN_GE_MSTATX_RXUNICASTFRAMESOK 0x0080 +#define TITAN_GE_MSTATX_RXBROADCASTFRAMESOK 0x0090 +#define TITAN_GE_MSTATX_RXMULTICASTFRAMESOK 0x00A0 +#define TITAN_GE_MSTATX_RXTAGGEDFRAMESOK 0x00B0 +#define TITAN_GE_MSTATX_RXMACPAUSECONTROLFRAMESOK 0x00C0 +#define TITAN_GE_MSTATX_RXMACCONTROLFRAMESOK 0x00D0 +#define TITAN_GE_MSTATX_RXFCSERROR 0x00E0 +#define TITAN_GE_MSTATX_RXALIGNMENTERROR 0x00F0 +#define TITAN_GE_MSTATX_RXSYMBOLERROR 0x0100 +#define TITAN_GE_MSTATX_RXLAYER1ERROR 0x0110 +#define TITAN_GE_MSTATX_RXINRANGELENGTHERROR 0x0120 +#define TITAN_GE_MSTATX_RXLONGLENGTHERROR 0x0130 +#define TITAN_GE_MSTATX_RXLONGLENGTHCRCERROR 0x0140 +#define TITAN_GE_MSTATX_RXSHORTLENGTHERROR 0x0150 +#define TITAN_GE_MSTATX_RXSHORTLLENGTHCRCERROR 0x0160 +#define TITAN_GE_MSTATX_RXFRAMES64OCTETS 0x0170 +#define TITAN_GE_MSTATX_RXFRAMES65TO127OCTETS 0x0180 +#define TITAN_GE_MSTATX_RXFRAMES128TO255OCTETS 0x0190 +#define TITAN_GE_MSTATX_RXFRAMES256TO511OCTETS 0x01A0 +#define TITAN_GE_MSTATX_RXFRAMES512TO1023OCTETS 0x01B0 +#define TITAN_GE_MSTATX_RXFRAMES1024TO1518OCTETS 0x01C0 +#define TITAN_GE_MSTATX_RXFRAMES1519TOMAXSIZE 0x01D0 +#define TITAN_GE_MSTATX_RXSTATIONADDRESSFILTERED 0x01E0 +#define TITAN_GE_MSTATX_RXVARIABLE 0x01F0 +#define TITAN_GE_MSTATX_GENERICADDRESSFILTERED 0x0200 +#define TITAN_GE_MSTATX_UNICASTFILTERED 0x0210 +#define TITAN_GE_MSTATX_MULTICASTFILTERED 0x0220 +#define TITAN_GE_MSTATX_BROADCASTFILTERED 0x0230 +#define TITAN_GE_MSTATX_HASHFILTERED 0x0240 +#define TITAN_GE_MSTATX_TXFRAMESOK 0x0250 +#define TITAN_GE_MSTATX_TXOCTETSOK 0x0260 +#define TITAN_GE_MSTATX_TXOCTETS 0x0270 +#define TITAN_GE_MSTATX_TXTAGGEDFRAMESOK 0x0280 +#define TITAN_GE_MSTATX_TXMACPAUSECONTROLFRAMESOK 0x0290 +#define TITAN_GE_MSTATX_TXFCSERROR 0x02A0 +#define TITAN_GE_MSTATX_TXSHORTLENGTHERROR 0x02B0 +#define TITAN_GE_MSTATX_TXLONGLENGTHERROR 0x02C0 +#define TITAN_GE_MSTATX_TXSYSTEMERROR 0x02D0 +#define TITAN_GE_MSTATX_TXMACERROR 0x02E0 +#define TITAN_GE_MSTATX_TXCARRIERSENSEERROR 0x02F0 +#define TITAN_GE_MSTATX_TXSQETESTERROR 0x0300 +#define TITAN_GE_MSTATX_TXUNICASTFRAMESOK 0x0310 +#define TITAN_GE_MSTATX_TXBROADCASTFRAMESOK 0x0320 +#define TITAN_GE_MSTATX_TXMULTICASTFRAMESOK 0x0330 +#define TITAN_GE_MSTATX_TXUNICASTFRAMESATTEMPTED 0x0340 +#define TITAN_GE_MSTATX_TXBROADCASTFRAMESATTEMPTED 0x0350 +#define TITAN_GE_MSTATX_TXMULTICASTFRAMESATTEMPTED 0x0360 +#define TITAN_GE_MSTATX_TXFRAMES64OCTETS 0x0370 +#define TITAN_GE_MSTATX_TXFRAMES65TO127OCTETS 0x0380 +#define 
TITAN_GE_MSTATX_TXFRAMES128TO255OCTETS 0x0390 +#define TITAN_GE_MSTATX_TXFRAMES256TO511OCTETS 0x03A0 +#define TITAN_GE_MSTATX_TXFRAMES512TO1023OCTETS 0x03B0 +#define TITAN_GE_MSTATX_TXFRAMES1024TO1518OCTETS 0x03C0 +#define TITAN_GE_MSTATX_TXFRAMES1519TOMAXSIZE 0x03D0 +#define TITAN_GE_MSTATX_TXVARIABLE 0x03E0 +#define TITAN_GE_MSTATX_RXSYSTEMERROR 0x03F0 +#define TITAN_GE_MSTATX_SINGLECOLLISION 0x0400 +#define TITAN_GE_MSTATX_MULTIPLECOLLISION 0x0410 +#define TITAN_GE_MSTATX_DEFERREDXMISSIONS 0x0420 +#define TITAN_GE_MSTATX_LATECOLLISIONS 0x0430 +#define TITAN_GE_MSTATX_ABORTEDDUETOXSCOLLS 0x0440 + +/* Interrupt specific defines */ +#define TITAN_GE_DEVICE_ID 0x0000 /* Device ID */ +#define TITAN_GE_RESET 0x0004 /* Reset reg */ +#define TITAN_GE_TSB_CTRL_0 0x000C /* TSB Control reg 0 */ +#define TITAN_GE_TSB_CTRL_1 0x0010 /* TSB Control reg 1 */ +#define TITAN_GE_INTR_GRP0_STATUS 0x0040 /* General Interrupt Group 0 Status */ +#define TITAN_GE_INTR_XDMA_CORE_A 0x0048 /* XDMA Channel Interrupt Status, Core A*/ +#define TITAN_GE_INTR_XDMA_CORE_B 0x004C /* XDMA Channel Interrupt Status, Core B*/ +#define TITAN_GE_INTR_XDMA_IE 0x0058 /* XDMA Channel Interrupt Enable */ +#define TITAN_GE_SDQPF_ECC_INTR 0x480C /* SDQPF ECC Interrupt Status */ +#define TITAN_GE_SDQPF_RXFIFO_CTL 0x4828 /* SDQPF RxFifo Control and Interrupt Enb*/ +#define TITAN_GE_SDQPF_RXFIFO_INTR 0x482C /* SDQPF RxFifo Interrupt Status */ +#define TITAN_GE_SDQPF_TXFIFO_CTL 0x4928 /* SDQPF TxFifo Control and Interrupt Enb*/ +#define TITAN_GE_SDQPF_TXFIFO_INTR 0x492C /* SDQPF TxFifo Interrupt Status */ +#define TITAN_GE_SDQPF_RXFIFO_0 0x4840 /* SDQPF RxFIFO Enable */ +#define TITAN_GE_SDQPF_TXFIFO_0 0x4940 /* SDQPF TxFIFO Enable */ +#define TITAN_GE_XDMA_CONFIG 0x5000 /* XDMA Global Configuration */ +#define TITAN_GE_XDMA_INTR_SUMMARY 0x5010 /* XDMA Interrupt Summary */ +#define TITAN_GE_XDMA_BUFADDRPRE 0x5018 /* XDMA Buffer Address Prefix */ +#define TITAN_GE_XDMA_DESCADDRPRE 0x501C /* XDMA Descriptor Address Prefix */ +#define TITAN_GE_XDMA_PORTWEIGHT 0x502C /* XDMA Port Weight Configuration */ + +/* Rx MAC defines */ +#define TITAN_GE_RMAC_CONFIG_1 0x1200 /* RMAC Configuration 1 */ +#define TITAN_GE_RMAC_CONFIG_2 0x1204 /* RMAC Configuration 2 */ +#define TITAN_GE_RMAC_MAX_FRAME_LEN 0x1208 /* RMAC Max Frame Length */ +#define TITAN_GE_RMAC_STATION_HI 0x120C /* Rx Station Address High */ +#define TITAN_GE_RMAC_STATION_MID 0x1210 /* Rx Station Address Middle */ +#define TITAN_GE_RMAC_STATION_LOW 0x1214 /* Rx Station Address Low */ +#define TITAN_GE_RMAC_LINK_CONFIG 0x1218 /* RMAC Link Configuration */ + +/* Tx MAC defines */ +#define TITAN_GE_TMAC_CONFIG_1 0x1240 /* TMAC Configuration 1 */ +#define TITAN_GE_TMAC_CONFIG_2 0x1244 /* TMAC Configuration 2 */ +#define TITAN_GE_TMAC_IPG 0x1248 /* TMAC Inter-Packet Gap */ +#define TITAN_GE_TMAC_STATION_HI 0x124C /* Tx Station Address High */ +#define TITAN_GE_TMAC_STATION_MID 0x1250 /* Tx Station Address Middle */ +#define TITAN_GE_TMAC_STATION_LOW 0x1254 /* Tx Station Address Low */ +#define TITAN_GE_TMAC_MAX_FRAME_LEN 0x1258 /* TMAC Max Frame Length */ +#define TITAN_GE_TMAC_MIN_FRAME_LEN 0x125C /* TMAC Min Frame Length */ +#define TITAN_GE_TMAC_PAUSE_FRAME_TIME 0x1260 /* TMAC Pause Frame Time */ +#define TITAN_GE_TMAC_PAUSE_FRAME_INTERVAL 0x1264 /* TMAC Pause Frame Interval */ + +/* GMII register */ +#define TITAN_GE_GMII_INTERRUPT_STATUS 0x1348 /* GMII Interrupt Status */ +#define TITAN_GE_GMII_CONFIG_GENERAL 0x134C /* GMII Configuration General */ +#define TITAN_GE_GMII_CONFIG_MODE 
0x1350 /* GMII Configuration Mode */ + +/* Tx and Rx XDMA defines */ +#define TITAN_GE_INT_COALESCING 0x5030 /* Interrupt Coalescing */ +#define TITAN_GE_CHANNEL0_CONFIG 0x5040 /* Channel 0 XDMA config */ +#define TITAN_GE_CHANNEL0_INTERRUPT 0x504c /* Channel 0 Interrupt Status */ +#define TITAN_GE_GDI_INTERRUPT_ENABLE 0x5050 /* IE for the GDI Errors */ +#define TITAN_GE_CHANNEL0_PACKET 0x5060 /* Channel 0 Packet count */ +#define TITAN_GE_CHANNEL0_BYTE 0x5064 /* Channel 0 Byte count */ +#define TITAN_GE_CHANNEL0_TX_DESC 0x5054 /* Channel 0 Tx first desc */ +#define TITAN_GE_CHANNEL0_RX_DESC 0x5058 /* Channel 0 Rx first desc */ + +/* AFX (Address Filter Exact) register offsets for Slice 0 */ +#define TITAN_GE_AFX_EXACT_MATCH_LOW 0x1100 /* AFX Exact Match Address Low*/ +#define TITAN_GE_AFX_EXACT_MATCH_MID 0x1104 /* AFX Exact Match Address Mid*/ +#define TITAN_GE_AFX_EXACT_MATCH_HIGH 0x1108 /* AFX Exact Match Address Hi */ +#define TITAN_GE_AFX_EXACT_MATCH_VID 0x110C /* AFX Exact Match VID */ +#define TITAN_GE_AFX_MULTICAST_HASH_LOW 0x1110 /* AFX Multicast HASH Low */ +#define TITAN_GE_AFX_MULTICAST_HASH_MIDLOW 0x1114 /* AFX Multicast HASH MidLow */ +#define TITAN_GE_AFX_MULTICAST_HASH_MIDHI 0x1118 /* AFX Multicast HASH MidHi */ +#define TITAN_GE_AFX_MULTICAST_HASH_HI 0x111C /* AFX Multicast HASH Hi */ +#define TITAN_GE_AFX_ADDRS_FILTER_CTRL_0 0x1120 /* AFX Address Filter Ctrl 0 */ +#define TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 0x1124 /* AFX Address Filter Ctrl 1 */ +#define TITAN_GE_AFX_ADDRS_FILTER_CTRL_2 0x1128 /* AFX Address Filter Ctrl 2 */ + +/* Traffic Groomer block */ +#define TITAN_GE_TRTG_CONFIG 0x1000 /* TRTG Config */ + +#endif /* _TITAN_GE_H_ */ + diff -Nurd linux-2.6.24/drivers/net/titan_mdio.c mer-smartq-kernel/drivers/net/titan_mdio.c --- linux-2.6.24/drivers/net/titan_mdio.c 1970-01-01 01:00:00.000000000 +0100 +++ mer-smartq-kernel/drivers/net/titan_mdio.c 2009-11-17 12:13:31.000000000 +0100 @@ -0,0 +1,217 @@ +/* + * drivers/net/titan_mdio.c - Driver for Titan ethernet ports + * + * Copyright (C) 2003 PMC-Sierra Inc. + * Author : Manish Lachwani (lachwani@pmc-sierra.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Management Data IO (MDIO) driver for the Titan GMII. Interacts with the Marvel PHY + * on the Titan. No support for the TBI as yet. 
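+ * + * A typical read as issued by the MAC driver (see titan_ge_eth_open), shown + * here purely as an illustration: + * + * unsigned int phy_reg; + * + * err = titan_ge_mdio_read(port_num, TITAN_GE_MDIO_PHY_STATUS, &phy_reg);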
+ * + */ + +#include "titan_mdio.h" + +#define MDIO_DEBUG + +/* + * Local constants + */ +#define MAX_CLKA 1023 +#define MAX_PHY_DEV 31 +#define MAX_PHY_REG 31 +#define WRITEADDRS_OPCODE 0x0 +#define READ_OPCODE 0x2 +#define WRITE_OPCODE 0x1 +#define MAX_MDIO_POLL 100 + +/* + * Titan MDIO and SCMB registers + */ +#define TITAN_GE_SCMB_CONTROL 0x01c0 /* SCMB Control */ +#define TITAN_GE_SCMB_CLKA 0x01c4 /* SCMB Clock A */ +#define TITAN_GE_MDIO_COMMAND 0x01d0 /* MDIO Command */ +#define TITAN_GE_MDIO_DEVICE_PORT_ADDRESS 0x01d4 /* MDIO Device and Port addrs */ +#define TITAN_GE_MDIO_DATA 0x01d8 /* MDIO Data */ +#define TITAN_GE_MDIO_INTERRUPTS 0x01dC /* MDIO Interrupts */ + +/* + * Function to poll the MDIO + */ +static int titan_ge_mdio_poll(void) +{ + int i, val; + + for (i = 0; i < MAX_MDIO_POLL; i++) { + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND); + + if (!(val & 0x8000)) + return TITAN_GE_MDIO_GOOD; + } + + return TITAN_GE_MDIO_ERROR; +} + + +/* + * Initialize and configure the MDIO + */ +int titan_ge_mdio_setup(titan_ge_mdio_config *titan_mdio) +{ + unsigned long val; + + /* Reset the SCMB and program into MDIO mode */ + TITAN_GE_MDIO_WRITE(TITAN_GE_SCMB_CONTROL, 0x9000); + TITAN_GE_MDIO_WRITE(TITAN_GE_SCMB_CONTROL, 0x1000); + + /* CLK A */ + val = TITAN_GE_MDIO_READ(TITAN_GE_SCMB_CLKA); + val = ( (val & ~(0x03ff)) | (titan_mdio->clka & 0x03ff)); + TITAN_GE_MDIO_WRITE(TITAN_GE_SCMB_CLKA, val); + + /* Preamble Suppression */ + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND); + val = ( (val & ~(0x0001)) | (titan_mdio->mdio_spre & 0x0001)); + TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_COMMAND, val); + + /* MDIO mode */ + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS); + val = ( (val & ~(0x4000)) | (titan_mdio->mdio_mode & 0x4000)); + TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS, val); + + return TITAN_GE_MDIO_GOOD; +} + +/* + * Set the PHY address in indirect mode + */ +int titan_ge_mdio_inaddrs(int dev_addr, int reg_addr) +{ + volatile unsigned long val; + + /* Setup the PHY device */ + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS); + val = ( (val & ~(0x1f00)) | ( (dev_addr << 8) & 0x1f00)); + val = ( (val & ~(0x001f)) | ( reg_addr & 0x001f)); + TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS, val); + + /* Write the new address */ + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND); + val = ( (val & ~(0x0300)) | ( (WRITEADDRS_OPCODE << 8) & 0x0300)); + TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_COMMAND, val); + + return TITAN_GE_MDIO_GOOD; +} + +/* + * Read the MDIO register. This is what the individual parameters mean: + * + * dev_addr : PHY ID + * reg_addr : register offset + * + * See the spec for the Titan MAC. We operate in the Direct Mode.
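+ * + * If the MDIO interrupt status flags an error after the read, the access is + * retried up to MAX_RETRIES times before TITAN_GE_MDIO_ERROR is returned.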
+ */ + +#define MAX_RETRIES 2 + +int titan_ge_mdio_read(int dev_addr, int reg_addr, unsigned int *pdata) +{ + volatile unsigned long val; + int retries = 0; + + /* Setup the PHY device */ + +again: + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS); + val = ( (val & ~(0x1f00)) | ( (dev_addr << 8) & 0x1f00)); + val = ( (val & ~(0x001f)) | ( reg_addr & 0x001f)); + val |= 0x4000; + TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS, val); + + udelay(30); + + /* Issue the read command */ + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND); + val = ( (val & ~(0x0300)) | ( (READ_OPCODE << 8) & 0x0300)); + TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_COMMAND, val); + + udelay(30); + + if (titan_ge_mdio_poll() != TITAN_GE_MDIO_GOOD) + return TITAN_GE_MDIO_ERROR; + + *pdata = (unsigned int)TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DATA); + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_INTERRUPTS); + + udelay(30); + + if (val & 0x2) { + if (retries == MAX_RETRIES) + return TITAN_GE_MDIO_ERROR; + else { + retries++; + goto again; + } + } + + return TITAN_GE_MDIO_GOOD; +} + +/* + * Write to the MDIO register + * + * dev_addr : PHY ID + * reg_addr : register that needs to be written to + * + */ +int titan_ge_mdio_write(int dev_addr, int reg_addr, unsigned int data) +{ + volatile unsigned long val; + + if (titan_ge_mdio_poll() != TITAN_GE_MDIO_GOOD) + return TITAN_GE_MDIO_ERROR; + + /* Setup the PHY device */ + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS); + val = ( (val & ~(0x1f00)) | ( (dev_addr << 8) & 0x1f00)); + val = ( (val & ~(0x001f)) | ( reg_addr & 0x001f)); + val |= 0x4000; + TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS, val); + + udelay(30); + + /* Setup the data to write */ + TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DATA, data); + + udelay(30); + + /* Issue the write command */ + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND); + val = ( (val & ~(0x0300)) | ( (WRITE_OPCODE << 8) & 0x0300)); + TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_COMMAND, val); + + udelay(30); + + if (titan_ge_mdio_poll() != TITAN_GE_MDIO_GOOD) + return TITAN_GE_MDIO_ERROR; + + val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_INTERRUPTS); + if (val & 0x2) + return TITAN_GE_MDIO_ERROR; + + return TITAN_GE_MDIO_GOOD; +} + diff -Nurd linux-2.6.24/drivers/net/titan_mdio.h mer-smartq-kernel/drivers/net/titan_mdio.h --- linux-2.6.24/drivers/net/titan_mdio.h 1970-01-01 01:00:00.000000000 +0100 +++ mer-smartq-kernel/drivers/net/titan_mdio.h 2009-11-17 12:13:31.000000000 +0100 @@ -0,0 +1,56 @@ +/* + * MDIO used to interact with the PHY when using GMII/MII + */ +#ifndef _TITAN_MDIO_H +#define _TITAN_MDIO_H + +#include +#include +#include +#include "titan_ge.h" + + +#define TITAN_GE_MDIO_ERROR (-9000) +#define TITAN_GE_MDIO_GOOD 0 + +#define TITAN_GE_MDIO_BASE titan_ge_base + +#define TITAN_GE_MDIO_READ(offset) \ + *(volatile u32 *)(titan_ge_base + (offset)) + +#define TITAN_GE_MDIO_WRITE(offset, data) \ + *(volatile u32 *)(titan_ge_base + (offset)) = (data) + + +/* GMII specific registers */ +#define TITAN_GE_MARVEL_PHY_ID 0x00 +#define TITAN_PHY_AUTONEG_ADV 0x04 +#define TITAN_PHY_LP_ABILITY 0x05 +#define TITAN_GE_MDIO_MII_CTRL 0x09 +#define TITAN_GE_MDIO_MII_EXTENDED 0x0f +#define TITAN_GE_MDIO_PHY_CTRL 0x10 +#define TITAN_GE_MDIO_PHY_STATUS 0x11 +#define TITAN_GE_MDIO_PHY_IE 0x12 +#define TITAN_GE_MDIO_PHY_IS 0x13 +#define TITAN_GE_MDIO_PHY_LED 0x18 +#define TITAN_GE_MDIO_PHY_LED_OVER 0x19 +#define PHY_ANEG_TIME_WAIT 45 /* 45 seconds wait time */ + +/* + * MDIO Config Structure + */ +typedef struct { + unsigned int clka; + int mdio_spre; + int 
diff -Nurd linux-2.6.24/drivers/scsi/NCR53C9x.h mer-smartq-kernel/drivers/scsi/NCR53C9x.h
--- linux-2.6.24/drivers/scsi/NCR53C9x.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/drivers/scsi/NCR53C9x.h	2009-11-17 12:13:32.000000000 +0100
@@ -144,12 +144,7 @@
 
 #ifndef MULTIPLE_PAD_SIZES
 
-#ifdef CONFIG_CPU_HAS_WB
-#include
-#define esp_write(__reg, __val) do{(__reg) = (__val); wbflush();} while(0)
-#else
-#define esp_write(__reg, __val) ((__reg) = (__val))
-#endif
+#define esp_write(__reg, __val) do{(__reg) = (__val); iob();} while(0)
 #define esp_read(__reg) (__reg)
 
 struct ESP_regs {
diff -Nurd linux-2.6.24/include/asm-mips/asmmacro.h mer-smartq-kernel/include/asm-mips/asmmacro.h
--- linux-2.6.24/include/asm-mips/asmmacro.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/asmmacro.h	2009-11-17 12:13:38.000000000 +0100
@@ -35,6 +35,16 @@
 	mtc0	\reg, CP0_TCSTATUS
 	_ehb
 	.endm
+#elif defined(CONFIG_CPU_MIPSR2)
+	.macro	local_irq_enable reg=t0
+	ei
+	irq_enable_hazard
+	.endm
+
+	.macro	local_irq_disable reg=t0
+	di
+	irq_disable_hazard
+	.endm
 #else
 	.macro	local_irq_enable reg=t0
 	mfc0	\reg, CP0_STATUS
diff -Nurd linux-2.6.24/include/asm-mips/atomic.h mer-smartq-kernel/include/asm-mips/atomic.h
--- linux-2.6.24/include/asm-mips/atomic.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/atomic.h	2009-11-17 12:13:38.000000000 +0100
@@ -283,10 +283,10 @@
 		"	beqz	%0, 2f		\n"
 		"	subu	%0, %1, %3	\n"
 		"	.set	reorder		\n"
-		"1:				\n"
 		"	.subsection 2		\n"
 		"2:	b	1b		\n"
 		"	.previous		\n"
+		"1:				\n"
 		"	.set	mips0		\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -664,10 +664,10 @@
 		"	beqz	%0, 2f		\n"
 		"	dsubu	%0, %1, %3	\n"
 		"	.set	reorder		\n"
-		"1:				\n"
 		"	.subsection 2		\n"
 		"2:	b	1b		\n"
 		"	.previous		\n"
+		"1:				\n"
 		"	.set	mips0		\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
diff -Nurd linux-2.6.24/include/asm-mips/byteorder.h mer-smartq-kernel/include/asm-mips/byteorder.h
--- linux-2.6.24/include/asm-mips/byteorder.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/byteorder.h	2009-11-17 12:13:38.000000000 +0100
@@ -43,9 +43,8 @@
 static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
 {
 	__asm__(
-	"	dsbh	%0, %1		\n"
-	"	dshd	%0, %0		\n"
-	"	drotr	%0, %0, 32	\n"
+	"	dsbh	%0, %1\n"
+	"	dshd	%0, %0"
 	: "=r" (x)
 	: "r" (x));
diff -Nurd linux-2.6.24/include/asm-mips/cacheflush.h mer-smartq-kernel/include/asm-mips/cacheflush.h
--- linux-2.6.24/include/asm-mips/cacheflush.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/cacheflush.h	2009-11-17 12:13:38.000000000 +0100
@@ -63,8 +63,22 @@
 }
 
 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
-#define flush_cache_vmap(start, end)	flush_cache_all()
-#define flush_cache_vunmap(start, end)	flush_cache_all()
+
+extern void (*__flush_cache_vmap)(void);
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	if (cpu_has_dc_aliases)
+		__flush_cache_vmap();
+}
+
+extern void (*__flush_cache_vunmap)(void);
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+	if (cpu_has_dc_aliases)
+		__flush_cache_vunmap();
+}
 
 extern void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
@@ -93,7 +107,7 @@
 	clear_bit(PG_dcache_dirty, &(page)->flags)
 
 /* Run kernel code uncached, useful for cache probing functions. */
-unsigned long __init run_uncached(void *func);
+unsigned long run_uncached(void *func);
 
 extern void *kmap_coherent(struct page *page, unsigned long addr);
 extern void kunmap_coherent(void);
diff -Nurd linux-2.6.24/include/asm-mips/cevt-r4k.h mer-smartq-kernel/include/asm-mips/cevt-r4k.h
--- linux-2.6.24/include/asm-mips/cevt-r4k.h	1970-01-01 01:00:00.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/cevt-r4k.h	2009-11-17 12:13:38.000000000 +0100
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
+
+/*
+ * Definitions used for common event timer implementation
+ * for MIPS 4K-type processors and their MIPS MT variants.
+ * Avoids unsightly extern declarations in C files.
+ */
+#ifndef __ASM_CEVT_R4K_H
+#define __ASM_CEVT_R4K_H
+
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+
+void mips_event_handler(struct clock_event_device *dev);
+int c0_compare_int_usable(void);
+void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
+irqreturn_t c0_compare_interrupt(int, void *);
+
+extern struct irqaction c0_compare_irqaction;
+extern int cp0_timer_irq_installed;
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+
+static inline int handle_perf_irq(int r2)
+{
+	/*
+	 * The performance counter overflow interrupt may be shared with the
+	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+	 * and we can't reliably determine if a counter interrupt has also
+	 * happened (!r2) then don't check for a timer interrupt.
+	 */
+	return (cp0_perfcount_irq < 0) &&
+		perf_irq() == IRQ_HANDLED &&
+		!r2;
+}
+
+#endif /* __ASM_CEVT_R4K_H */
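handle_perf_irq() above is meant to be called at the top of the CP0 compare interrupt handler, before the timer itself is serviced. The following is only a rough, hypothetical sketch of such a caller; the actual cevt-r4k.c/cevt-smtc.c handlers in this patch differ in detail:

	/* Illustrative c0 compare handler built around handle_perf_irq() */
	static irqreturn_t example_c0_compare_interrupt(int irq, void *dev_id)
	{
		const int r2 = cpu_has_mips_r2;
		struct clock_event_device *cd;

		/* If a shared perf-counter overflow owns this interrupt, stop here. */
		if (handle_perf_irq(r2))
			return IRQ_HANDLED;

		/* On MIPS R2, Cause.TI (bit 30) says whether Count/Compare fired. */
		if (!r2 || (read_c0_cause() & (1 << 30))) {
			write_c0_compare(read_c0_compare());	/* ack the timer */
			cd = &per_cpu(mips_clockevent_device, smp_processor_id());
			cd->event_handler(cd);
		}

		return IRQ_HANDLED;
	}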
diff -Nurd linux-2.6.24/include/asm-mips/elf.h mer-smartq-kernel/include/asm-mips/elf.h
--- linux-2.6.24/include/asm-mips/elf.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/elf.h	2009-11-17 12:13:38.000000000 +0100
@@ -232,7 +232,7 @@
  */
 #ifdef __MIPSEB__
 #define ELF_DATA	ELFDATA2MSB
-#elif __MIPSEL__
+#elif defined(__MIPSEL__)
 #define ELF_DATA	ELFDATA2LSB
 #endif
 #define ELF_ARCH	EM_MIPS
diff -Nurd linux-2.6.24/include/asm-mips/gdb-stub.h mer-smartq-kernel/include/asm-mips/gdb-stub.h
--- linux-2.6.24/include/asm-mips/gdb-stub.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/gdb-stub.h	2009-11-17 12:13:38.000000000 +0100
@@ -4,148 +4,33 @@
  * for more details.
  *
  * Copyright (C) 1995 Andreas Busse
- * Copyright (C) 2003 Ralf Baechle
+ * Copyright (C) 2003, 2008 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2008 Wind River Systems, Inc.
+ *   written by Ralf Baechle
 */
 #ifndef _ASM_GDB_STUB_H
 #define _ASM_GDB_STUB_H
-
 /*
- * important register numbers
+ * GDB interface constants.
 */
-
-#define REG_EPC	37
-#define REG_FP	72
-#define REG_SP	29
+#define REG_EPC	37
+#define REG_FP	72
+#define REG_SP	29
 
 /*
 * Stack layout for the GDB exception handler
 * Derived from the stack layout described in asm-mips/stackframe.h
- *
- * The first PTRSIZE*6 bytes are argument save space for C subroutines.
- */
-#define NUMREGS	90
-
-#define GDB_FR_REG0	(PTRSIZE*6)	/* 0 */
-#define GDB_FR_REG1	((GDB_FR_REG0) + LONGSIZE)	/* 1 */
-#define GDB_FR_REG2	((GDB_FR_REG1) + LONGSIZE)	/* 2 */
-#define GDB_FR_REG3	((GDB_FR_REG2) + LONGSIZE)	/* 3 */
-#define GDB_FR_REG4	((GDB_FR_REG3) + LONGSIZE)	/* 4 */
-#define GDB_FR_REG5	((GDB_FR_REG4) + LONGSIZE)	/* 5 */
-#define GDB_FR_REG6	((GDB_FR_REG5) + LONGSIZE)	/* 6 */
-#define GDB_FR_REG7	((GDB_FR_REG6) + LONGSIZE)	/* 7 */
-#define GDB_FR_REG8	((GDB_FR_REG7) + LONGSIZE)	/* 8 */
-#define GDB_FR_REG9	((GDB_FR_REG8) + LONGSIZE)	/* 9 */
-#define GDB_FR_REG10	((GDB_FR_REG9) + LONGSIZE)	/* 10 */
-#define GDB_FR_REG11	((GDB_FR_REG10) + LONGSIZE)	/* 11 */
-#define GDB_FR_REG12	((GDB_FR_REG11) + LONGSIZE)	/* 12 */
-#define GDB_FR_REG13	((GDB_FR_REG12) + LONGSIZE)	/* 13 */
-#define GDB_FR_REG14	((GDB_FR_REG13) + LONGSIZE)	/* 14 */
-#define GDB_FR_REG15	((GDB_FR_REG14) + LONGSIZE)	/* 15 */
-#define GDB_FR_REG16	((GDB_FR_REG15) + LONGSIZE)	/* 16 */
-#define GDB_FR_REG17	((GDB_FR_REG16) + LONGSIZE)	/* 17 */
-#define GDB_FR_REG18	((GDB_FR_REG17) + LONGSIZE)	/* 18 */
-#define GDB_FR_REG19	((GDB_FR_REG18) + LONGSIZE)	/* 19 */
-#define GDB_FR_REG20	((GDB_FR_REG19) + LONGSIZE)	/* 20 */
-#define GDB_FR_REG21	((GDB_FR_REG20) + LONGSIZE)	/* 21 */
-#define GDB_FR_REG22	((GDB_FR_REG21) + LONGSIZE)	/* 22 */
-#define GDB_FR_REG23	((GDB_FR_REG22) + LONGSIZE)	/* 23 */
-#define GDB_FR_REG24	((GDB_FR_REG23) + LONGSIZE)	/* 24 */
-#define GDB_FR_REG25	((GDB_FR_REG24) + LONGSIZE)	/* 25 */
-#define GDB_FR_REG26	((GDB_FR_REG25) + LONGSIZE)	/* 26 */
-#define GDB_FR_REG27	((GDB_FR_REG26) + LONGSIZE)	/* 27 */
-#define GDB_FR_REG28	((GDB_FR_REG27) + LONGSIZE)	/* 28 */
-#define GDB_FR_REG29	((GDB_FR_REG28) + LONGSIZE)	/* 29 */
-#define GDB_FR_REG30	((GDB_FR_REG29) + LONGSIZE)	/* 30 */
-#define GDB_FR_REG31	((GDB_FR_REG30) + LONGSIZE)	/* 31 */
-
-/*
- * Saved special registers
- */
-#define GDB_FR_STATUS	((GDB_FR_REG31) + LONGSIZE)	/* 32 */
-#define GDB_FR_LO	((GDB_FR_STATUS) + LONGSIZE)	/* 33 */
-#define GDB_FR_HI	((GDB_FR_LO) + LONGSIZE)	/* 34 */
-#define GDB_FR_BADVADDR	((GDB_FR_HI) + LONGSIZE)	/* 35 */
-#define GDB_FR_CAUSE	((GDB_FR_BADVADDR) + LONGSIZE)	/* 36 */
-#define GDB_FR_EPC	((GDB_FR_CAUSE) + LONGSIZE)	/* 37 */
-
-/*
- * Saved floating point registers
- */
-#define GDB_FR_FPR0	((GDB_FR_EPC) + LONGSIZE)	/* 38 */
-#define GDB_FR_FPR1	((GDB_FR_FPR0) + LONGSIZE)	/* 39 */
-#define GDB_FR_FPR2	((GDB_FR_FPR1) + LONGSIZE)	/* 40 */
-#define GDB_FR_FPR3	((GDB_FR_FPR2) + LONGSIZE)	/* 41 */
-#define GDB_FR_FPR4	((GDB_FR_FPR3) + LONGSIZE)	/* 42 */
-#define GDB_FR_FPR5	((GDB_FR_FPR4) + LONGSIZE)	/* 43 */
-#define GDB_FR_FPR6	((GDB_FR_FPR5) + LONGSIZE)	/* 44 */
-#define GDB_FR_FPR7	((GDB_FR_FPR6) + LONGSIZE)	/* 45 */
-#define GDB_FR_FPR8	((GDB_FR_FPR7) + LONGSIZE)	/* 46 */
-#define GDB_FR_FPR9	((GDB_FR_FPR8) + LONGSIZE)	/* 47 */
-#define GDB_FR_FPR10	((GDB_FR_FPR9) + LONGSIZE)	/* 48 */
-#define GDB_FR_FPR11	((GDB_FR_FPR10) + LONGSIZE)	/* 49 */
-#define GDB_FR_FPR12	((GDB_FR_FPR11) + LONGSIZE)	/* 50 */
-#define GDB_FR_FPR13	((GDB_FR_FPR12) + LONGSIZE)	/* 51 */
-#define GDB_FR_FPR14	((GDB_FR_FPR13) + LONGSIZE)	/* 52 */
-#define GDB_FR_FPR15	((GDB_FR_FPR14) + LONGSIZE)	/* 53 */
-#define GDB_FR_FPR16	((GDB_FR_FPR15) + LONGSIZE)	/* 54 */
-#define GDB_FR_FPR17	((GDB_FR_FPR16) + LONGSIZE)	/* 55 */
-#define GDB_FR_FPR18	((GDB_FR_FPR17) + LONGSIZE)	/* 56 */
-#define GDB_FR_FPR19	((GDB_FR_FPR18) + LONGSIZE)	/* 57 */
-#define GDB_FR_FPR20	((GDB_FR_FPR19) + LONGSIZE)	/* 58 */
-#define GDB_FR_FPR21	((GDB_FR_FPR20) + LONGSIZE)	/* 59 */
-#define GDB_FR_FPR22	((GDB_FR_FPR21) + LONGSIZE)	/* 60 */
-#define GDB_FR_FPR23	((GDB_FR_FPR22) + LONGSIZE)	/* 61 */
-#define GDB_FR_FPR24	((GDB_FR_FPR23) + LONGSIZE)	/* 62 */
-#define GDB_FR_FPR25	((GDB_FR_FPR24) + LONGSIZE)	/* 63 */
-#define GDB_FR_FPR26	((GDB_FR_FPR25) + LONGSIZE)	/* 64 */
-#define GDB_FR_FPR27	((GDB_FR_FPR26) + LONGSIZE)	/* 65 */
-#define GDB_FR_FPR28	((GDB_FR_FPR27) + LONGSIZE)	/* 66 */
-#define GDB_FR_FPR29	((GDB_FR_FPR28) + LONGSIZE)	/* 67 */
-#define GDB_FR_FPR30	((GDB_FR_FPR29) + LONGSIZE)	/* 68 */
-#define GDB_FR_FPR31	((GDB_FR_FPR30) + LONGSIZE)	/* 69 */
-
-#define GDB_FR_FSR	((GDB_FR_FPR31) + LONGSIZE)	/* 70 */
-#define GDB_FR_FIR	((GDB_FR_FSR) + LONGSIZE)	/* 71 */
-#define GDB_FR_FRP	((GDB_FR_FIR) + LONGSIZE)	/* 72 */
-
-#define GDB_FR_DUMMY	((GDB_FR_FRP) + LONGSIZE)	/* 73, unused ??? */
-
-/*
- * Again, CP0 registers
- */
-#define GDB_FR_CP0_INDEX	((GDB_FR_DUMMY) + LONGSIZE)	/* 74 */
-#define GDB_FR_CP0_RANDOM	((GDB_FR_CP0_INDEX) + LONGSIZE)	/* 75 */
-#define GDB_FR_CP0_ENTRYLO0	((GDB_FR_CP0_RANDOM) + LONGSIZE)/* 76 */
-#define GDB_FR_CP0_ENTRYLO1	((GDB_FR_CP0_ENTRYLO0) + LONGSIZE)/* 77 */
-#define GDB_FR_CP0_CONTEXT	((GDB_FR_CP0_ENTRYLO1) + LONGSIZE)/* 78 */
-#define GDB_FR_CP0_PAGEMASK	((GDB_FR_CP0_CONTEXT) + LONGSIZE)/* 79 */
-#define GDB_FR_CP0_WIRED	((GDB_FR_CP0_PAGEMASK) + LONGSIZE)/* 80 */
-#define GDB_FR_CP0_REG7		((GDB_FR_CP0_WIRED) + LONGSIZE)	/* 81 */
-#define GDB_FR_CP0_REG8		((GDB_FR_CP0_REG7) + LONGSIZE)	/* 82 */
-#define GDB_FR_CP0_REG9		((GDB_FR_CP0_REG8) + LONGSIZE)	/* 83 */
-#define GDB_FR_CP0_ENTRYHI	((GDB_FR_CP0_REG9) + LONGSIZE)	/* 84 */
-#define GDB_FR_CP0_REG11	((GDB_FR_CP0_ENTRYHI) + LONGSIZE)/* 85 */
-#define GDB_FR_CP0_REG12	((GDB_FR_CP0_REG11) + LONGSIZE)	/* 86 */
-#define GDB_FR_CP0_REG13	((GDB_FR_CP0_REG12) + LONGSIZE)	/* 87 */
-#define GDB_FR_CP0_REG14	((GDB_FR_CP0_REG13) + LONGSIZE)	/* 88 */
-#define GDB_FR_CP0_PRID		((GDB_FR_CP0_REG14) + LONGSIZE)	/* 89 */
-
-#define GDB_FR_SIZE	((((GDB_FR_CP0_PRID) + LONGSIZE) + (PTRSIZE-1)) & ~(PTRSIZE-1))
-
-#ifndef __ASSEMBLY__
-
-/*
- * This is the same as above, but for the high-level
- * part of the GDB stub.
  */
 struct gdb_regs {
+#ifdef CONFIG_32BIT
 	/*
 	 * Pad bytes for argument save space on the stack
 	 * 24/48 Bytes for 32/64 bit code
 	 */
 	unsigned long pad0[6];
+#endif
 
 	/*
 	 * saved main processor registers
@@ -159,8 +44,11 @@
 	 * Saved special registers
 	 */
 	long	cp0_status;
-	long	lo;
 	long	hi;
+	long	lo;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+	long	acx;
+#endif
 	long	cp0_badvaddr;
 	long	cp0_cause;
 	long	cp0_epc;
@@ -183,7 +71,7 @@
 	long	dummy;		/* unused */
 
 	/*
-	 * saved cp0 registers
+	 * Saved cp0 registers
 	 */
 	long	cp0_index;
 	long	cp0_random;
@@ -203,13 +91,8 @@
 	long	cp0_prid;
 };
 
-/*
- * Prototypes
- */
-
 extern int kgdb_enabled;
-void set_debug_traps(void);
-void set_async_breakpoint(unsigned long *epc);
+extern void set_debug_traps(void);
+extern void set_async_breakpoint(unsigned long *epc);
 
-#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_GDB_STUB_H */
diff -Nurd linux-2.6.24/include/asm-mips/irqflags.h mer-smartq-kernel/include/asm-mips/irqflags.h
--- linux-2.6.24/include/asm-mips/irqflags.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/irqflags.h	2009-11-17 12:13:38.000000000 +0100
@@ -38,8 +38,17 @@
 	"	.set	pop						\n"
 	"	.endm");
 
+extern void smtc_ipi_replay(void);
+
 static inline void raw_local_irq_enable(void)
 {
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
 	__asm__ __volatile__(
 		"raw_local_irq_enable"
 		: /* no outputs */
@@ -47,6 +56,7 @@
 		: "memory");
 }
 
+
 /*
  * For cli() we have to insert nops to make sure that the new value
  * has actually arrived in the status register before the end of this
@@ -185,15 +195,14 @@
 	"	.set	pop						\n"
 	"	.endm							\n");
 
-extern void smtc_ipi_replay(void);
 
 static inline void raw_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+#ifdef CONFIG_MIPS_MT_SMTC
 	/*
-	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+	 * SMTC kernel needs to do a software replay of queued
 	 * IPIs, at the cost of branch and call overhead on each
 	 * local_irq_restore()
 	 */
@@ -208,6 +217,17 @@
 		: "memory");
 }
 
+static inline void __raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	__asm__ __volatile__(
+		"raw_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+}
+
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
diff -Nurd linux-2.6.24/include/asm-mips/lasat/serial.h mer-smartq-kernel/include/asm-mips/lasat/serial.h
--- linux-2.6.24/include/asm-mips/lasat/serial.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/lasat/serial.h	2009-11-17 12:13:38.000000000 +0100
@@ -4,10 +4,10 @@
 #define LASAT_BASE_BAUD_100		(7372800 / 16)
 #define LASAT_UART_REGS_BASE_100	0x1c8b0000
 #define LASAT_UART_REGS_SHIFT_100	2
-#define LASATINT_UART_100		8
+#define LASATINT_UART_100		16
 
 /*
  * LASAT 200 boards serial configuration
 */
 #define LASAT_BASE_BAUD_200		(100000000 / 16 / 12)
 #define LASAT_UART_REGS_BASE_200	(Vrc5074_PHYS_BASE + 0x0300)
 #define LASAT_UART_REGS_SHIFT_200	3
-#define LASATINT_UART_200		13
+#define LASATINT_UART_200		21
diff -Nurd linux-2.6.24/include/asm-mips/mach-au1x00/au1000.h mer-smartq-kernel/include/asm-mips/mach-au1x00/au1000.h
--- linux-2.6.24/include/asm-mips/mach-au1x00/au1000.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/mach-au1x00/au1000.h	2009-11-17 12:13:38.000000000 +0100
@@ -1786,6 +1786,7 @@
 	char *cpu_name;
 	unsigned char cpu_od;		/* Set Config[OD] */
 	unsigned char cpu_bclk;		/* Enable BCLK switching */
+	unsigned char cpu_pll_wo;	/* sys_cpupll reg. write-only */
 };
 
 extern struct cpu_spec cpu_specs[];
diff -Nurd linux-2.6.24/include/asm-mips/mach-ip27/cpu-feature-overrides.h mer-smartq-kernel/include/asm-mips/mach-ip27/cpu-feature-overrides.h
--- linux-2.6.24/include/asm-mips/mach-ip27/cpu-feature-overrides.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/mach-ip27/cpu-feature-overrides.h	2009-11-17 12:13:38.000000000 +0100
@@ -34,7 +34,11 @@
 #define cpu_has_64bits		1
 
 #define cpu_has_4kex		1
+#define cpu_has_3k_cache	0
+#define cpu_has_6k_cache	0
 #define cpu_has_4k_cache	1
+#define cpu_has_8k_cache	0
+#define cpu_has_tx39_cache	0
 
 #define cpu_has_inclusive_pcaches	1
diff -Nurd linux-2.6.24/include/asm-mips/mach-ip27/dma-coherence.h mer-smartq-kernel/include/asm-mips/mach-ip27/dma-coherence.h
--- linux-2.6.24/include/asm-mips/mach-ip27/dma-coherence.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/mach-ip27/dma-coherence.h	2009-11-17 12:13:38.000000000 +0100
@@ -35,7 +35,7 @@
 
 static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 {
-	return dma_addr & (0xffUL << 56);
+	return dma_addr & ~(0xffUL << 56);
 }
 
 static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
diff -Nurd linux-2.6.24/include/asm-mips/mach-jmr3927/ioremap.h mer-smartq-kernel/include/asm-mips/mach-jmr3927/ioremap.h
--- linux-2.6.24/include/asm-mips/mach-jmr3927/ioremap.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/mach-jmr3927/ioremap.h	2009-11-17 12:13:38.000000000 +0100
@@ -25,7 +25,7 @@
 {
 #define TXX9_DIRECTMAP_BASE	0xff000000ul
 	if (offset >= TXX9_DIRECTMAP_BASE &&
-	    offset < TXX9_DIRECTMAP_BASE + 0xf0000)
+	    offset < TXX9_DIRECTMAP_BASE + 0xff0000)
 		return (void __iomem *)offset;
 	return NULL;
 }
diff -Nurd linux-2.6.24/include/asm-mips/mach-lasat/irq.h mer-smartq-kernel/include/asm-mips/mach-lasat/irq.h
--- linux-2.6.24/include/asm-mips/mach-lasat/irq.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/mach-lasat/irq.h	2009-11-17 12:13:38.000000000 +0100
@@ -1,7 +1,7 @@
 #ifndef _ASM_MACH_LASAT_IRQ_H
 #define _ASM_MACH_LASAT_IRQ_H
 
-#define LASAT_CASCADE_IRQ	(MIPS_CPU_IRQ_BASE + 0)
+#define LASAT_CASCADE_IRQ	(MIPS_CPU_IRQ_BASE + 2)
 
 #define LASAT_IRQ_BASE		8
 #define LASAT_IRQ_END		23
diff -Nurd linux-2.6.24/include/asm-mips/mach-pb1x00/pb1200.h mer-smartq-kernel/include/asm-mips/mach-pb1x00/pb1200.h
--- linux-2.6.24/include/asm-mips/mach-pb1x00/pb1200.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/mach-pb1x00/pb1200.h	2009-11-17 12:13:38.000000000 +0100
@@ -244,7 +244,7 @@
 	PB1200_SD1_INSERT_INT,
 	PB1200_SD1_EJECT_INT,
 
-	PB1200_INT_END (PB1200_INT_BEGIN + 15)
+	PB1200_INT_END = PB1200_INT_BEGIN + 15
 };
 
 /* For drivers/pcmcia/au1000_db1x00.c */
diff -Nurd linux-2.6.24/include/asm-mips/mach-qemu/cpu-feature-overrides.h mer-smartq-kernel/include/asm-mips/mach-qemu/cpu-feature-overrides.h
--- linux-2.6.24/include/asm-mips/mach-qemu/cpu-feature-overrides.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/mach-qemu/cpu-feature-overrides.h	2009-11-17 12:13:38.000000000 +0100
@@ -12,7 +12,7 @@
  * QEMU only comes with a hazard-free MIPS32 processor, so things are easy.
  */
 #define cpu_has_mips16		0
-#define cpu_has_divec		0
+#define cpu_has_divec		1
 #define cpu_has_cache_cdex_p	0
 #define cpu_has_prefetch	0
 #define cpu_has_mcheck		0
diff -Nurd linux-2.6.24/include/asm-mips/mipsregs.h mer-smartq-kernel/include/asm-mips/mipsregs.h
--- linux-2.6.24/include/asm-mips/mipsregs.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/mipsregs.h	2009-11-17 12:13:38.000000000 +0100
@@ -1459,7 +1459,7 @@
 {								\
 	unsigned int res;					\
 	unsigned int omt;					\
-	unsigned int flags;					\
+	unsigned long flags;					\
								\
 	local_irq_save(flags);					\
 	omt = __dmt();						\
@@ -1477,7 +1477,7 @@
 {								\
 	unsigned int res;					\
 	unsigned int omt;					\
-	unsigned int flags;					\
+	unsigned long flags;					\
								\
 	local_irq_save(flags);					\
 	omt = __dmt();						\
@@ -1495,7 +1495,7 @@
 {								\
 	unsigned int res;					\
 	unsigned int omt;					\
-	unsigned int flags;					\
+	unsigned long flags;					\
								\
 	local_irq_save(flags);					\
								\
diff -Nurd linux-2.6.24/include/asm-mips/pgtable-32.h mer-smartq-kernel/include/asm-mips/pgtable-32.h
--- linux-2.6.24/include/asm-mips/pgtable-32.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/pgtable-32.h	2009-11-17 12:13:38.000000000 +0100
@@ -57,7 +57,7 @@
 #define PMD_ORDER	1
 #define PTE_ORDER	0
 
-#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD	(USER_PTRS_PER_PGD * 2)
 #define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
 
 #define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
diff -Nurd linux-2.6.24/include/asm-mips/pgtable.h mer-smartq-kernel/include/asm-mips/pgtable.h
--- linux-2.6.24/include/asm-mips/pgtable.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/pgtable.h	2009-11-17 12:13:38.000000000 +0100
@@ -232,9 +232,10 @@
 static inline pte_t pte_mkyoung(pte_t pte)
 {
 	pte.pte_low |= _PAGE_ACCESSED;
-	if (pte.pte_low & _PAGE_READ)
+	if (pte.pte_low & _PAGE_READ) {
 		pte.pte_low |= _PAGE_SILENT_READ;
 		pte.pte_high |= _PAGE_SILENT_READ;
+	}
 	return pte;
 }
 #else
diff -Nurd linux-2.6.24/include/asm-mips/rtlx.h mer-smartq-kernel/include/asm-mips/rtlx.h
--- linux-2.6.24/include/asm-mips/rtlx.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/rtlx.h	2009-11-17 12:13:38.000000000 +0100
@@ -3,7 +3,7 @@
  *
  */
-#ifndef __ASM_RTLX_H
+#ifndef __ASM_RTLX_H_
 #define __ASM_RTLX_H_
 
 #include
@@ -29,13 +29,13 @@
 extern unsigned int rtlx_write_poll(int index);
 
 enum rtlx_state {
-	RTLX_STATE_UNUSED,
+	RTLX_STATE_UNUSED = 0,
 	RTLX_STATE_INITIALISED,
 	RTLX_STATE_REMOTE_READY,
 	RTLX_STATE_OPENED
 };
 
-#define RTLX_BUFFER_SIZE	1024
+#define RTLX_BUFFER_SIZE	2048
 
 /* each channel supports read and write.
    linux (vpe0) reads lx_buffer and writes rt_buffer
diff -Nurd linux-2.6.24/include/asm-mips/smtc.h mer-smartq-kernel/include/asm-mips/smtc.h
--- linux-2.6.24/include/asm-mips/smtc.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/smtc.h	2009-11-17 12:13:38.000000000 +0100
@@ -6,6 +6,7 @@
 */
 
 #include
+#include
 
 /*
 * System-wide SMTC status information
@@ -38,13 +39,14 @@
 struct task_struct;
 
 void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-
+void self_ipi(struct smtc_ipi *);
 void smtc_flush_tlb_asid(unsigned long asid);
-extern int mipsmt_build_cpu_map(int startslot);
-extern void mipsmt_prepare_cpus(void);
+extern int smtc_build_cpu_map(int startslot);
+extern void smtc_prepare_cpus(int cpus);
 extern void smtc_smp_finish(void);
 extern void smtc_boot_secondary(int cpu, struct task_struct *t);
+
 /*
 * Sharing the TLB between multiple VPEs means that the
 * "random" index selection function is not allowed to
diff -Nurd linux-2.6.24/include/asm-mips/sn/mapped_kernel.h mer-smartq-kernel/include/asm-mips/sn/mapped_kernel.h
--- linux-2.6.24/include/asm-mips/sn/mapped_kernel.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/sn/mapped_kernel.h	2009-11-17 12:13:38.000000000 +0100
@@ -5,6 +5,8 @@
 #ifndef __ASM_SN_MAPPED_KERNEL_H
 #define __ASM_SN_MAPPED_KERNEL_H
 
+#include
+
 /*
 * Note on how mapped kernels work: the text and data section is
 * compiled at cksseg segment (LOADADDR = 0xc001c000), and the
@@ -29,10 +31,8 @@
 #define MAPPED_ADDR_RO_TO_PHYS(x)	(x - REP_BASE)
 #define MAPPED_ADDR_RW_TO_PHYS(x)	(x - REP_BASE - 16777216)
 
-#define MAPPED_KERN_RO_PHYSBASE(n) \
-			(PLAT_NODE_DATA(n)->kern_vars.kv_ro_baseaddr)
-#define MAPPED_KERN_RW_PHYSBASE(n) \
-			(PLAT_NODE_DATA(n)->kern_vars.kv_rw_baseaddr)
+#define MAPPED_KERN_RO_PHYSBASE(n) (hub_data(n)->kern_vars.kv_ro_baseaddr)
+#define MAPPED_KERN_RW_PHYSBASE(n) (hub_data(n)->kern_vars.kv_rw_baseaddr)
 
 #define MAPPED_KERN_RO_TO_PHYS(x) \
 		((unsigned long)MAPPED_ADDR_RO_TO_PHYS(x) | \
diff -Nurd linux-2.6.24/include/asm-mips/stackframe.h mer-smartq-kernel/include/asm-mips/stackframe.h
--- linux-2.6.24/include/asm-mips/stackframe.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/stackframe.h	2009-11-17 12:13:39.000000000 +0100
@@ -288,14 +288,31 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 	.set	mips32r2
 	/*
-	 * This may not really be necessary if ints are already
-	 * inhibited here.
+	 * We need to make sure the read-modify-write
+	 * of Status below isn't perturbed by an interrupt
+	 * or cross-TC access, so we need to do at least a DMT,
+	 * protected by an interrupt-inhibit. But setting IXMT
+	 * also creates a few-cycle window where an IPI could
+	 * be queued and not be detected before potentially
+	 * returning to a WAIT or user-mode loop. It must be
+	 * replayed.
+	 *
+	 * We're in the middle of a context switch, and
+	 * we can't dispatch it directly without trashing
+	 * some registers, so we'll try to detect this unlikely
+	 * case and program a software interrupt in the VPE,
+	 * as would be done for a cross-VPE IPI. To accommodate
+	 * the handling of that case, we're doing a DVPE instead
+	 * of just a DMT here to protect against other threads.
+	 * This is a lot of cruft to cover a tiny window.
+	 * If you can find a better design, implement it!
+	 *
 	 */
 	mfc0	v0, CP0_TCSTATUS
 	ori	v0, TCSTATUS_IXMT
 	mtc0	v0, CP0_TCSTATUS
 	_ehb
-	DMT	5			# dmt	a1
+	DVPE	5			# dvpe	a1
 	jal	mips_ihb
 #endif /* CONFIG_MIPS_MT_SMTC */
 	mfc0	a0, CP0_STATUS
@@ -316,17 +333,50 @@
 	 */
 	LONG_L	v1, PT_TCSTATUS(sp)
 	_ehb
-	mfc0	v0, CP0_TCSTATUS
+	mfc0	a0, CP0_TCSTATUS
 	andi	v1, TCSTATUS_IXMT
-	/* We know that TCStatua.IXMT should be set from above */
-	xori	v0, v0, TCSTATUS_IXMT
-	or	v0, v0, v1
-	mtc0	v0, CP0_TCSTATUS
-	_ehb
-	andi	a1, a1, VPECONTROL_TE
+	bnez	v1, 0f
+
+/*
+ * We'd like to detect any IPIs queued in the tiny window
+ * above and request a software interrupt to service them
+ * when we ERET.
+ *
+ * Computing the offset into the IPIQ array of the executing
+ * TC's IPI queue in-line would be tedious. We use part of
+ * the TCContext register to hold 16 bits of offset that we
+ * can add in-line to find the queue head.
+ */
+	mfc0	v0, CP0_TCCONTEXT
+	la	a2, IPIQ
+	srl	v0, v0, 16
+	addu	a2, a2, v0
+	LONG_L	v0, 0(a2)
+	beqz	v0, 0f
+/*
+ * If we have a queue, provoke dispatch within the VPE by setting C_SW1
+ */
+	mfc0	v0, CP0_CAUSE
+	ori	v0, v0, C_SW1
+	mtc0	v0, CP0_CAUSE
+0:
+	/*
+	 * This test should really never branch but
+	 * let's be prudent here. Having atomized
+	 * the shared register modifications, we can
+	 * now EVPE, and must do so before interrupts
+	 * are potentially re-enabled.
+	 */
+	andi	a1, a1, MVPCONTROL_EVP
 	beqz	a1, 1f
-	emt
+	evpe
 1:
+	/* We know that TCStatus.IXMT should be set from above */
+	xori	a0, a0, TCSTATUS_IXMT
+	or	a0, a0, v1
+	mtc0	a0, CP0_TCSTATUS
+	_ehb
+	.set	mips0
 #endif /* CONFIG_MIPS_MT_SMTC */
 	LONG_L	v1, PT_EPC(sp)
diff -Nurd linux-2.6.24/include/asm-mips/time.h mer-smartq-kernel/include/asm-mips/time.h
--- linux-2.6.24/include/asm-mips/time.h	2008-01-24 23:58:37.000000000 +0100
+++ mer-smartq-kernel/include/asm-mips/time.h	2009-11-17 12:13:39.000000000 +0100
@@ -70,11 +70,12 @@
 /*
 * Initialize the count register as a clocksource
 */
-#ifdef CONFIG_CEVT_R4K
-extern void init_mips_clocksource(void);
+#ifdef CONFIG_CSRC_R4K
+extern int init_mips_clocksource(void);
 #else
-static inline void init_mips_clocksource(void)
+static inline int init_mips_clocksource(void)
 {
+	return 0;
 }
 #endif
 
diff -Nurd linux-2.6.24/Makefile mer-smartq-kernel/Makefile
--- linux-2.6.24/Makefile	2009-11-17 21:04:50.000000000 +0100
+++ mer-smartq-kernel/Makefile	2009-11-17 18:05:54.000000000 +0100
@@ -190,7 +190,7 @@
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
 export KBUILD_BUILDHOST := $(SUBARCH)
-ARCH		?= $(SUBARCH)
+ARCH		?= mips
 CROSS_COMPILE	?=
 
 # Architecture as present in compile.h
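One consequence of the asm-mips/time.h hunk above is that init_mips_clocksource() now reports failure instead of returning void. A hypothetical caller in platform time-init code (illustrative only, not taken from this patch) could check it like so:

	/* Sketch: warn if the R4K count register cannot be registered as a clocksource */
	void __init example_plat_time_init(void)
	{
		/* mips_hpt_frequency is assumed to be set up earlier by board code */
		if (init_mips_clocksource())
			printk(KERN_WARNING "R4K count/compare unusable as clocksource\n");
	}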