Diffstat (limited to 'arch/mips/lantiq/irq.c')
-rw-r--r-- | arch/mips/lantiq/irq.c | 255 |
1 file changed, 174 insertions, 81 deletions
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index d673731c538a..57c1a4e51408 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -9,6 +9,11 @@
 
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 #include <asm/bootinfo.h>
 #include <asm/irq_cpu.h>
@@ -16,7 +21,7 @@
 #include <lantiq_soc.h>
 #include <irq.h>
 
-/* register definitions */
+/* register definitions - internal irqs */
 #define LTQ_ICU_IM0_ISR         0x0000
 #define LTQ_ICU_IM0_IER         0x0008
 #define LTQ_ICU_IM0_IOSR        0x0010
@@ -25,6 +30,7 @@
 #define LTQ_ICU_IM1_ISR         0x0028
 #define LTQ_ICU_OFFSET          (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
 
+/* register definitions - external irqs */
 #define LTQ_EIU_EXIN_C          0x0000
 #define LTQ_EIU_EXIN_INIC       0x0004
 #define LTQ_EIU_EXIN_INEN       0x000C
@@ -37,10 +43,14 @@
 #define LTQ_EIU_IR4             (INT_NUM_IM1_IRL0 + 1)
 #define LTQ_EIU_IR5             (INT_NUM_IM1_IRL0 + 2)
 #define LTQ_EIU_IR6             (INT_NUM_IM2_IRL0 + 30)
-
+#define XWAY_EXIN_COUNT         3
 #define MAX_EIU                 6
 
-/* irqs generated by device attached to the EBU need to be acked in
+/* the performance counter */
+#define LTQ_PERF_IRQ            (INT_NUM_IM4_IRL0 + 31)
+
+/*
+ * irqs generated by devices attached to the EBU need to be acked in
  * a special manner
  */
 #define LTQ_ICU_EBU_IRQ         22
@@ -51,6 +61,17 @@
 #define ltq_eiu_w32(x, y)       ltq_w32((x), ltq_eiu_membase + (y))
 #define ltq_eiu_r32(x)          ltq_r32(ltq_eiu_membase + (x))
 
+/* our 2 ipi interrupts for VSMP */
+#define MIPS_CPU_IPI_RESCHED_IRQ        0
+#define MIPS_CPU_IPI_CALL_IRQ           1
+
+/* we have a cascade of 8 irqs */
+#define MIPS_CPU_IRQ_CASCADE            8
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+int gic_present;
+#endif
+
 static unsigned short ltq_eiu_irq[MAX_EIU] = {
         LTQ_EIU_IR0,
         LTQ_EIU_IR1,
@@ -60,64 +81,51 @@ static unsigned short ltq_eiu_irq[MAX_EIU] = {
         LTQ_EIU_IR5,
 };
 
-static struct resource ltq_icu_resource = {
-        .name   = "icu",
-        .start  = LTQ_ICU_BASE_ADDR,
-        .end    = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
-        .flags  = IORESOURCE_MEM,
-};
-
-static struct resource ltq_eiu_resource = {
-        .name   = "eiu",
-        .start  = LTQ_EIU_BASE_ADDR,
-        .end    = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
-        .flags  = IORESOURCE_MEM,
-};
-
+static int exin_avail;
 static void __iomem *ltq_icu_membase;
 static void __iomem *ltq_eiu_membase;
 
 void ltq_disable_irq(struct irq_data *d)
 {
         u32 ier = LTQ_ICU_IM0_IER;
-        int irq_nr = d->irq - INT_NUM_IRQ0;
+        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-        ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-        irq_nr %= INT_NUM_IM_OFFSET;
-        ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
+        ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+        offset %= INT_NUM_IM_OFFSET;
+        ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
 }
 
 void ltq_mask_and_ack_irq(struct irq_data *d)
 {
         u32 ier = LTQ_ICU_IM0_IER;
         u32 isr = LTQ_ICU_IM0_ISR;
-        int irq_nr = d->irq - INT_NUM_IRQ0;
+        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-        ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-        isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-        irq_nr %= INT_NUM_IM_OFFSET;
-        ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
-        ltq_icu_w32((1 << irq_nr), isr);
+        ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+        isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+        offset %= INT_NUM_IM_OFFSET;
+        ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
+        ltq_icu_w32(BIT(offset), isr);
 }
 
 static void ltq_ack_irq(struct irq_data *d)
 {
         u32 isr = LTQ_ICU_IM0_ISR;
-        int irq_nr = d->irq - INT_NUM_IRQ0;
+        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-        isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-        irq_nr %= INT_NUM_IM_OFFSET;
-        ltq_icu_w32((1 << irq_nr), isr);
+        isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+        offset %= INT_NUM_IM_OFFSET;
+        ltq_icu_w32(BIT(offset), isr);
 }
 
 void ltq_enable_irq(struct irq_data *d)
 {
         u32 ier = LTQ_ICU_IM0_IER;
-        int irq_nr = d->irq - INT_NUM_IRQ0;
+        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
 
-        ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
-        irq_nr %= INT_NUM_IM_OFFSET;
-        ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier);
+        ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+        offset %= INT_NUM_IM_OFFSET;
+        ltq_icu_w32(ltq_icu_r32(ier) | BIT(offset), ier);
 }
 
 static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
@@ -126,15 +134,15 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
 
         ltq_enable_irq(d);
         for (i = 0; i < MAX_EIU; i++) {
-                if (d->irq == ltq_eiu_irq[i]) {
+                if (d->hwirq == ltq_eiu_irq[i]) {
                         /* low level - we should really handle set_type */
                         ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
                                 (0x6 << (i * 4)), LTQ_EIU_EXIN_C);
                         /* clear all pending */
-                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i),
+                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i),
                                 LTQ_EIU_EXIN_INIC);
                         /* enable */
-                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i),
+                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
                                 LTQ_EIU_EXIN_INEN);
                         break;
                 }
@@ -149,9 +157,9 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
 
         ltq_disable_irq(d);
         for (i = 0; i < MAX_EIU; i++) {
-                if (d->irq == ltq_eiu_irq[i]) {
+                if (d->hwirq == ltq_eiu_irq[i]) {
                         /* disable */
-                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
+                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
                                 LTQ_EIU_EXIN_INEN);
                         break;
                 }
@@ -188,14 +196,15 @@ static void ltq_hw_irqdispatch(int module)
         if (irq == 0)
                 return;
 
-        /* silicon bug causes only the msb set to 1 to be valid. all
+        /*
+         * silicon bug causes only the msb set to 1 to be valid. all
          * other bits might be bogus
          */
         irq = __fls(irq);
-        do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));
+        do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
 
         /* if this is a EBU irq, we need to ack it or get a deadlock */
-        if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
+        if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
                 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
                         LTQ_EBU_PCC_ISTAT);
 }
@@ -216,6 +225,47 @@ static void ltq_hw5_irqdispatch(void)
         do_IRQ(MIPS_CPU_TIMER_IRQ);
 }
 
+#ifdef CONFIG_MIPS_MT_SMP
+void __init arch_init_ipiirq(int irq, struct irqaction *action)
+{
+        setup_irq(irq, action);
+        irq_set_handler(irq, handle_percpu_irq);
+}
+
+static void ltq_sw0_irqdispatch(void)
+{
+        do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
+}
+
+static void ltq_sw1_irqdispatch(void)
+{
+        do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
+}
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+        scheduler_ipi();
+        return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+        smp_call_function_interrupt();
+        return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+        .handler        = ipi_resched_interrupt,
+        .flags          = IRQF_PERCPU,
+        .name           = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+        .handler        = ipi_call_interrupt,
+        .flags          = IRQF_PERCPU,
+        .name           = "IPI_call"
+};
+#endif
+
 asmlinkage void plat_irq_dispatch(void)
 {
         unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
@@ -238,45 +288,75 @@ out:
         return;
 }
 
+static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+        struct irq_chip *chip = &ltq_irq_type;
+        int i;
+
+        for (i = 0; i < exin_avail; i++)
+                if (hw == ltq_eiu_irq[i])
+                        chip = &ltq_eiu_type;
+
+        irq_set_chip_and_handler(hw, chip, handle_level_irq);
+
+        return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+        .xlate = irq_domain_xlate_onetwocell,
+        .map = icu_map,
+};
+
 static struct irqaction cascade = {
         .handler = no_action,
         .name = "cascade",
 };
 
-void __init arch_init_irq(void)
+int __init icu_of_init(struct device_node *node, struct device_node *parent)
 {
+        struct device_node *eiu_node;
+        struct resource res;
         int i;
 
-        if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
-                panic("Failed to insert icu memory");
+        if (of_address_to_resource(node, 0, &res))
+                panic("Failed to get icu memory range");
 
-        if (request_mem_region(ltq_icu_resource.start,
-                        resource_size(&ltq_icu_resource), "icu") < 0)
-                panic("Failed to request icu memory");
+        if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+                pr_err("Failed to request icu memory");
 
-        ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
-                                resource_size(&ltq_icu_resource));
+        ltq_icu_membase = ioremap_nocache(res.start, resource_size(&res));
         if (!ltq_icu_membase)
                 panic("Failed to remap icu memory");
 
-        if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
-                panic("Failed to insert eiu memory");
-
-        if (request_mem_region(ltq_eiu_resource.start,
-                        resource_size(&ltq_eiu_resource), "eiu") < 0)
-                panic("Failed to request eiu memory");
-
-        ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
-                                resource_size(&ltq_eiu_resource));
-        if (!ltq_eiu_membase)
-                panic("Failed to remap eiu memory");
+        /* the external interrupts are optional and xway only */
+        eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
+        if (eiu_node && of_address_to_resource(eiu_node, 0, &res)) {
+                /* find out how many external irq sources we have */
+                const __be32 *count = of_get_property(node,
+                                                        "lantiq,count", NULL);
+
+                if (count)
+                        exin_avail = *count;
+                if (exin_avail > MAX_EIU)
+                        exin_avail = MAX_EIU;
+
+                if (request_mem_region(res.start, resource_size(&res),
+                                                        res.name) < 0)
+                        pr_err("Failed to request eiu memory");
+
+                ltq_eiu_membase = ioremap_nocache(res.start,
+                                                        resource_size(&res));
+                if (!ltq_eiu_membase)
+                        panic("Failed to remap eiu memory");
+        }
 
-        /* make sure all irqs are turned off by default */
-        for (i = 0; i < 5; i++)
+        /* turn off all irqs by default */
+        for (i = 0; i < 5; i++) {
+                /* make sure all irqs are turned off by default */
                 ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
-
-        /* clear all possibly pending interrupts */
-        ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+                /* clear all possibly pending interrupts */
+                ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+        }
 
         mips_cpu_irq_init();
 
@@ -293,20 +373,19 @@ void __init arch_init_irq(void)
                 set_vi_handler(7, ltq_hw5_irqdispatch);
         }
 
-        for (i = INT_NUM_IRQ0;
-                i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
-                if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
-                        (i == LTQ_EIU_IR2))
-                        irq_set_chip_and_handler(i, &ltq_eiu_type,
-                                handle_level_irq);
-                /* EIU3-5 only exist on ar9 and vr9 */
-                else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
-                        (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
-                        irq_set_chip_and_handler(i, &ltq_eiu_type,
-                                handle_level_irq);
-                else
-                        irq_set_chip_and_handler(i, &ltq_irq_type,
-                                handle_level_irq);
+        irq_domain_add_linear(node, 6 * INT_NUM_IM_OFFSET,
+                &irq_domain_ops, 0);
+
+#if defined(CONFIG_MIPS_MT_SMP)
+        if (cpu_has_vint) {
+                pr_info("Setting up IPI vectored interrupts\n");
+                set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
+                set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
+        }
+        arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
+                &irq_resched);
+        arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
+#endif
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
         set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
@@ -315,9 +394,23 @@ void __init arch_init_irq(void)
         set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
                 IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
 #endif
+
+        /* tell oprofile which irq to use */
+        cp0_perfcount_irq = LTQ_PERF_IRQ;
+        return 0;
 }
 
 unsigned int __cpuinit get_c0_compare_int(void)
 {
         return CP0_LEGACY_COMPARE_IRQ;
 }
+
+static struct of_device_id __initdata of_irq_ids[] = {
+        { .compatible = "lantiq,icu", .data = icu_of_init },
+        {},
+};
+
+void __init arch_init_irq(void)
+{
+        of_irq_init(of_irq_ids);
+}
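
Note: with this change the controller is probed through of_irq_init() and the "lantiq,icu" compatible string, and the optional external interrupt unit is looked up by the "lantiq,eiu" compatible and its "lantiq,count" property. A minimal device-tree sketch of such nodes is shown below; the labels, unit addresses, reg values and cell count are illustrative placeholders and are not taken from this commit.

        /* hypothetical sketch - labels, addresses and sizes are placeholders */
        icu0: interrupt-controller@80200 {
                compatible = "lantiq,icu";
                interrupt-controller;
                #interrupt-cells = <1>;     /* xlate is onetwocell, so 1 or 2 cells work */
                reg = <0x80200 0x100>;
        };

        eiu0: eiu@101000 {
                compatible = "lantiq,eiu";
                lantiq,count = <3>;         /* number of external irq sources, capped at MAX_EIU */
                reg = <0x101000 0x100>;
        };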