94 files changed, 1966 insertions, 2236 deletions
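Most of the mechanical churn in the diff below follows one pattern: irq_chip callbacks move from the old signatures that took an unsigned int irq (.mask, .unmask, .ack, plus the old .typename field) to the new struct irq_data based signatures (.irq_mask, .irq_unmask, .irq_ack, .name). The sketch that follows is not part of the patch; it is a minimal, hypothetical driver (foo_intc, FOO_MASK_REG and friends are invented placeholders) illustrating what a converted irq_chip looks like against the API of this era.

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

/* Hypothetical per-controller state; not taken from the patch. */
struct foo_intc {
	void __iomem	*base;
	unsigned int	irq_base;
};

#define FOO_MASK_REG	0x00	/* hypothetical register offsets */
#define FOO_UNMASK_REG	0x04

/* New-style callback: the irq number and chip data arrive via irq_data. */
static void foo_irq_mask(struct irq_data *data)
{
	struct foo_intc *intc = data->chip_data;

	writel(BIT(data->irq - intc->irq_base), intc->base + FOO_MASK_REG);
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct foo_intc *intc = data->chip_data;

	writel(BIT(data->irq - intc->irq_base), intc->base + FOO_UNMASK_REG);
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO-INTC",		/* formerly .typename */
	.irq_mask	= foo_irq_mask,		/* formerly .mask(unsigned int irq) */
	.irq_unmask	= foo_irq_unmask,	/* formerly .unmask(unsigned int irq) */
};

/* Wiring one interrupt line up to the chip, as converted drivers do. */
static void foo_setup_irq(struct foo_intc *intc, unsigned int irq)
{
	set_irq_chip_data(irq, intc);
	set_irq_chip_and_handler_name(irq, &foo_irq_chip,
				      handle_level_irq, "level");
}

The same renaming applies to the MSI helpers: mask_msi_irq() and unmask_msi_irq() now take a struct irq_data pointer, which is why the powerpc, ia64, sparc and x86 MSI chips touched in this diff switch to .irq_mask/.irq_unmask and pass irq_get_irq_data(virq) at their remaining call sites.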
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl index 1448b33fd22..fb10fd08c05 100644 --- a/Documentation/DocBook/genericirq.tmpl +++ b/Documentation/DocBook/genericirq.tmpl @@ -28,7 +28,7 @@ </authorgroup> <copyright> - <year>2005-2006</year> + <year>2005-2010</year> <holder>Thomas Gleixner</holder> </copyright> <copyright> @@ -100,6 +100,10 @@ <listitem><para>Edge type</para></listitem> <listitem><para>Simple type</para></listitem> </itemizedlist> + During the implementation we identified another type: + <itemizedlist> + <listitem><para>Fast EOI type</para></listitem> + </itemizedlist> In the SMP world of the __do_IRQ() super-handler another type was identified: <itemizedlist> @@ -153,6 +157,7 @@ is still available. This leads to a kind of duality for the time being. Over time the new model should be used in more and more architectures, as it enables smaller and cleaner IRQ subsystems. + It's deprecated for three years now and about to be removed. </para> </chapter> <chapter id="bugs"> @@ -217,6 +222,7 @@ <itemizedlist> <listitem><para>handle_level_irq</para></listitem> <listitem><para>handle_edge_irq</para></listitem> + <listitem><para>handle_fasteoi_irq</para></listitem> <listitem><para>handle_simple_irq</para></listitem> <listitem><para>handle_percpu_irq</para></listitem> </itemizedlist> @@ -233,33 +239,33 @@ are used by the default flow implementations. The following helper functions are implemented (simplified excerpt): <programlisting> -default_enable(irq) +default_enable(struct irq_data *data) { - desc->chip->unmask(irq); + desc->chip->irq_unmask(data); } -default_disable(irq) +default_disable(struct irq_data *data) { - if (!delay_disable(irq)) - desc->chip->mask(irq); + if (!delay_disable(data)) + desc->chip->irq_mask(data); } -default_ack(irq) +default_ack(struct irq_data *data) { - chip->ack(irq); + chip->irq_ack(data); } -default_mask_ack(irq) +default_mask_ack(struct irq_data *data) { - if (chip->mask_ack) { - chip->mask_ack(irq); + if (chip->irq_mask_ack) { + chip->irq_mask_ack(data); } else { - chip->mask(irq); - chip->ack(irq); + chip->irq_mask(data); + chip->irq_ack(data); } } -noop(irq) +noop(struct irq_data *data)) { } @@ -278,12 +284,27 @@ noop(irq) <para> The following control flow is implemented (simplified excerpt): <programlisting> -desc->chip->start(); +desc->chip->irq_mask(); handle_IRQ_event(desc->action); -desc->chip->end(); +desc->chip->irq_unmask(); </programlisting> </para> - </sect3> + </sect3> + <sect3 id="Default_FASTEOI_IRQ_flow_handler"> + <title>Default Fast EOI IRQ flow handler</title> + <para> + handle_fasteoi_irq provides a generic implementation + for interrupts, which only need an EOI at the end of + the handler + </para> + <para> + The following control flow is implemented (simplified excerpt): + <programlisting> +handle_IRQ_event(desc->action); +desc->chip->irq_eoi(); + </programlisting> + </para> + </sect3> <sect3 id="Default_Edge_IRQ_flow_handler"> <title>Default Edge IRQ flow handler</title> <para> @@ -294,20 +315,19 @@ desc->chip->end(); The following control flow is implemented (simplified excerpt): <programlisting> if (desc->status & running) { - desc->chip->hold(); + desc->chip->irq_mask(); desc->status |= pending | masked; return; } -desc->chip->start(); +desc->chip->irq_ack(); desc->status |= running; do { if (desc->status & masked) - desc->chip->enable(); + desc->chip->irq_unmask(); desc->status &= ~pending; handle_IRQ_event(desc->action); } while (status & pending); desc->status &= ~running; 
-desc->chip->end(); </programlisting> </para> </sect3> @@ -342,9 +362,9 @@ handle_IRQ_event(desc->action); <para> The following control flow is implemented (simplified excerpt): <programlisting> -desc->chip->start(); handle_IRQ_event(desc->action); -desc->chip->end(); +if (desc->chip->irq_eoi) + desc->chip->irq_eoi(); </programlisting> </para> </sect3> @@ -375,8 +395,7 @@ desc->chip->end(); mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when you want to use the delayed interrupt disable feature and your hardware is not capable of retriggering an interrupt.) - The delayed interrupt disable can be runtime enabled, per interrupt, - by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field. + The delayed interrupt disable is not configurable. </para> </sect2> </sect1> @@ -387,13 +406,13 @@ desc->chip->end(); contains all the direct chip relevant functions, which can be utilized by the irq flow implementations. <itemizedlist> - <listitem><para>ack()</para></listitem> - <listitem><para>mask_ack() - Optional, recommended for performance</para></listitem> - <listitem><para>mask()</para></listitem> - <listitem><para>unmask()</para></listitem> - <listitem><para>retrigger() - Optional</para></listitem> - <listitem><para>set_type() - Optional</para></listitem> - <listitem><para>set_wake() - Optional</para></listitem> + <listitem><para>irq_ack()</para></listitem> + <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem> + <listitem><para>irq_mask()</para></listitem> + <listitem><para>irq_unmask()</para></listitem> + <listitem><para>irq_retrigger() - Optional</para></listitem> + <listitem><para>irq_set_type() - Optional</para></listitem> + <listitem><para>irq_set_wake() - Optional</para></listitem> </itemizedlist> These primitives are strictly intended to mean what they say: ack means ACK, masking means masking of an IRQ line, etc. It is up to the flow @@ -458,6 +477,7 @@ desc->chip->end(); <para> This chapter contains the autogenerated documentation of the internal functions. </para> +!Ikernel/irq/irqdesc.c !Ikernel/irq/handle.c !Ikernel/irq/chip.c </chapter> diff --git a/MAINTAINERS b/MAINTAINERS index 7679bf32f7b..5017b6ab84e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3239,6 +3239,12 @@ F: drivers/net/irda/ F: include/net/irda/ F: net/irda/ +IRQ SUBSYSTEM +M: Thomas Gleixner <tglx@linutronix.de> +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core +F: kernel/irq/ + ISAPNP M: Jaroslav Kysela <perex@perex.cz> S: Maintained diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index 90831f6f5f5..5586b7c8ef6 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h @@ -24,4 +24,6 @@ void set_irq_flags(unsigned int irq, unsigned int flags); #define IRQF_PROBE (1 << 1) #define IRQF_NOAUTOEN (1 << 2) +#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) + #endif diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index c0d5c3b3a76..36ad3be4692 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -154,14 +154,6 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) void __init init_IRQ(void) { - struct irq_desc *desc; - int irq; - - for (irq = 0; irq < nr_irqs; irq++) { - desc = irq_to_desc_alloc_node(irq, 0); - desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; - } - init_arch_irq(); } @@ -169,7 +161,7 @@ void __init init_IRQ(void) int __init arch_probe_nr_irqs(void) { nr_irqs = arch_nr_irqs ? 
arch_nr_irqs : NR_IRQS; - return 0; + return nr_irqs; } #endif diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c index dc1c4939b0c..e3152631eb3 100644 --- a/arch/arm/mach-bcmring/irq.c +++ b/arch/arm/mach-bcmring/irq.c @@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq) } static struct irq_chip bcmring_irq0_chip = { - .typename = "ARM-INTC0", + .name = "ARM-INTC0", .ack = bcmring_mask_irq0, .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */ .unmask = bcmring_unmask_irq0, /* unmaks an interrupt */ }; static struct irq_chip bcmring_irq1_chip = { - .typename = "ARM-INTC1", + .name = "ARM-INTC1", .ack = bcmring_mask_irq1, .mask = bcmring_mask_irq1, .unmask = bcmring_unmask_irq1, }; static struct irq_chip bcmring_irq2_chip = { - .typename = "ARM-SINTC", + .name = "ARM-SINTC", .ack = bcmring_mask_irq2, .mask = bcmring_mask_irq2, .unmask = bcmring_unmask_irq2, diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c index f34b0ed8063..7149fcc16c8 100644 --- a/arch/arm/mach-iop13xx/msi.c +++ b/arch/arm/mach-iop13xx/msi.c @@ -164,10 +164,10 @@ static void iop13xx_msi_nop(unsigned int irq) static struct irq_chip iop13xx_msi_chip = { .name = "PCI-MSI", .ack = iop13xx_msi_nop, - .enable = unmask_msi_irq, - .disable = mask_msi_irq, - .mask = mask_msi_irq, - .unmask = unmask_msi_irq, + .irq_enable = unmask_msi_irq, + .irq_disable = mask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, }; int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 4a746ea838f..00b19a416ea 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c @@ -104,8 +104,8 @@ static int ia64_msi_retrigger_irq(unsigned int irq) */ static struct irq_chip ia64_msi_chip = { .name = "PCI-MSI", - .mask = mask_msi_irq, - .unmask = unmask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, .ack = ia64_ack_msi_irq, #ifdef CONFIG_SMP .set_affinity = ia64_set_msi_irq_affinity, @@ -160,8 +160,8 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) static struct irq_chip dmar_msi_type = { .name = "DMAR_MSI", - .unmask = dmar_msi_unmask, - .mask = dmar_msi_mask, + .irq_unmask = dmar_msi_unmask, + .irq_mask = dmar_msi_mask, .ack = ia64_ack_msi_irq, #ifdef CONFIG_SMP .set_affinity = dmar_msi_set_affinity, diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index 0c72dd46383..a5e500f0285 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c @@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(unsigned int irq) static struct irq_chip sn_msi_chip = { .name = "PCI-MSI", - .mask = mask_msi_irq, - .unmask = unmask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, .ack = sn_ack_msi_irq, #ifdef CONFIG_SMP .set_affinity = sn_set_msi_irq_affinity, diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 3c71f776872..7db26f1f082 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %14s", irq_desc[i].chip->typename); + seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c index 
922fdfdadea..402a59d7219 100644 --- a/arch/m32r/platforms/m32104ut/setup.c +++ b/arch/m32r/platforms/m32104ut/setup.c @@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq) static struct irq_chip m32104ut_irq_type = { - .typename = "M32104UT-IRQ", + .name = "M32104UT-IRQ", .startup = startup_m32104ut_irq, .shutdown = shutdown_m32104ut_irq, .enable = enable_m32104ut_irq, diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c index 9c1bc7487c1..80b1a026795 100644 --- a/arch/m32r/platforms/m32700ut/setup.c +++ b/arch/m32r/platforms/m32700ut/setup.c @@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq) static struct irq_chip m32700ut_irq_type = { - .typename = "M32700UT-IRQ", + .name = "M32700UT-IRQ", .startup = startup_m32700ut_irq, .shutdown = shutdown_m32700ut_irq, .enable = enable_m32700ut_irq, @@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) static struct irq_chip m32700ut_pld_irq_type = { - .typename = "M32700UT-PLD-IRQ", + .name = "M32700UT-PLD-IRQ", .startup = startup_m32700ut_pld_irq, .shutdown = shutdown_m32700ut_pld_irq, .enable = enable_m32700ut_pld_irq, @@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq) static struct irq_chip m32700ut_lanpld_irq_type = { - .typename = "M32700UT-PLD-LAN-IRQ", + .name = "M32700UT-PLD-LAN-IRQ", .startup = startup_m32700ut_lanpld_irq, .shutdown = shutdown_m32700ut_lanpld_irq, .enable = enable_m32700ut_lanpld_irq, @@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq) static struct irq_chip m32700ut_lcdpld_irq_type = { - .typename = "M32700UT-PLD-LCD-IRQ", + .name = "M32700UT-PLD-LCD-IRQ", .startup = startup_m32700ut_lcdpld_irq, .shutdown = shutdown_m32700ut_lcdpld_irq, .enable = enable_m32700ut_lcdpld_irq, diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c index fb4b17799b6..ea00c84d6b1 100644 --- a/arch/m32r/platforms/mappi/setup.c +++ b/arch/m32r/platforms/mappi/setup.c @@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq) static struct irq_chip mappi_irq_type = { - .typename = "MAPPI-IRQ", + .name = "MAPPI-IRQ", .startup = startup_mappi_irq, .shutdown = shutdown_mappi_irq, .enable = enable_mappi_irq, diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c index 6a65eda0a05..c049376d027 100644 --- a/arch/m32r/platforms/mappi2/setup.c +++ b/arch/m32r/platforms/mappi2/setup.c @@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int irq) static struct irq_chip mappi2_irq_type = { - .typename = "MAPPI2-IRQ", + .name = "MAPPI2-IRQ", .startup = startup_mappi2_irq, .shutdown = shutdown_mappi2_irq, .enable = enable_mappi2_irq, diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c index 9c337aeac94..882de25c6e8 100644 --- a/arch/m32r/platforms/mappi3/setup.c +++ b/arch/m32r/platforms/mappi3/setup.c @@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq) static struct irq_chip mappi3_irq_type = { - .typename = "MAPPI3-IRQ", + .name = "MAPPI3-IRQ", .startup = startup_mappi3_irq, .shutdown = shutdown_mappi3_irq, .enable = enable_mappi3_irq, diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c index ed865741c38..d11d93bf74f 100644 --- a/arch/m32r/platforms/oaks32r/setup.c +++ b/arch/m32r/platforms/oaks32r/setup.c @@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq) static struct irq_chip oaks32r_irq_type = { - .typename = "OAKS32R-IRQ", + .name = 
"OAKS32R-IRQ", .startup = startup_oaks32r_irq, .shutdown = shutdown_oaks32r_irq, .enable = enable_oaks32r_irq, diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c index 80d68065701..5f3402a2fba 100644 --- a/arch/m32r/platforms/opsput/setup.c +++ b/arch/m32r/platforms/opsput/setup.c @@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq) static struct irq_chip opsput_irq_type = { - .typename = "OPSPUT-IRQ", + .name = "OPSPUT-IRQ", .startup = startup_opsput_irq, .shutdown = shutdown_opsput_irq, .enable = enable_opsput_irq, @@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq) static struct irq_chip opsput_pld_irq_type = { - .typename = "OPSPUT-PLD-IRQ", + .name = "OPSPUT-PLD-IRQ", .startup = startup_opsput_pld_irq, .shutdown = shutdown_opsput_pld_irq, .enable = enable_opsput_pld_irq, @@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq) static struct irq_chip opsput_lanpld_irq_type = { - .typename = "OPSPUT-PLD-LAN-IRQ", + .name = "OPSPUT-PLD-LAN-IRQ", .startup = startup_opsput_lanpld_irq, .shutdown = shutdown_opsput_lanpld_irq, .enable = enable_opsput_lanpld_irq, diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c index 757302660af..1beac7a51ed 100644 --- a/arch/m32r/platforms/usrv/setup.c +++ b/arch/m32r/platforms/usrv/setup.c @@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq) static struct irq_chip mappi_irq_type = { - .typename = "M32700-IRQ", + .name = "M32700-IRQ", .startup = startup_mappi_irq, .shutdown = shutdown_mappi_irq, .enable = enable_mappi_irq, @@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) static struct irq_chip m32700ut_pld_irq_type = { - .typename = "USRV-PLD-IRQ", + .name = "USRV-PLD-IRQ", .startup = startup_m32700ut_pld_irq, .shutdown = shutdown_m32700ut_pld_irq, .enable = enable_m32700ut_pld_irq, diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 97085530aa6..e3e379c6caa 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -310,9 +310,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) } static struct irq_chip msic_irq_chip = { - .mask = mask_msi_irq, - .unmask = unmask_msi_irq, - .shutdown = unmask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, + .irq_shutdown = mask_msi_irq, .name = "AXON-MSI", }; diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 93834b0d827..67e2c4bdac8 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -243,7 +243,7 @@ static unsigned int xics_startup(unsigned int virq) * at that level, so we do it here by hand. 
*/ if (irq_to_desc(virq)->msi_desc) - unmask_msi_irq(virq); + unmask_msi_irq(irq_get_irq_data(virq)); /* unmask it */ xics_unmask_irq(virq); diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 87991d3abba..bdbd896c89d 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -51,8 +51,8 @@ static void fsl_msi_end_irq(unsigned int virq) } static struct irq_chip fsl_msi_chip = { - .mask = mask_msi_irq, - .unmask = unmask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, .ack = fsl_msi_end_irq, .name = "FSL-MSI", }; diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c index 3b6a9a43718..320ad5a9a25 100644 --- a/arch/powerpc/sysdev/mpic_pasemi_msi.c +++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c @@ -39,24 +39,24 @@ static struct mpic *msi_mpic; -static void mpic_pasemi_msi_mask_irq(unsigned int irq) +static void mpic_pasemi_msi_mask_irq(struct irq_data *data) { - pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq); - mask_msi_irq(irq); - mpic_mask_irq(irq); + pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); + mask_msi_irq(data); + mpic_mask_irq(data->irq); } -static void mpic_pasemi_msi_unmask_irq(unsigned int irq) +static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) { - pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq); - mpic_unmask_irq(irq); - unmask_msi_irq(irq); + pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); + mpic_unmask_irq(data->irq); + unmask_msi_irq(data); } static struct irq_chip mpic_pasemi_msi_chip = { - .shutdown = mpic_pasemi_msi_mask_irq, - .mask = mpic_pasemi_msi_mask_irq, - .unmask = mpic_pasemi_msi_unmask_irq, + .irq_shutdown = mpic_pasemi_msi_mask_irq, + .irq_mask = mpic_pasemi_msi_mask_irq, + .irq_unmask = mpic_pasemi_msi_unmask_irq, .eoi = mpic_end_irq, .set_type = mpic_set_irq_type, .set_affinity = mpic_set_affinity, diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index bcbfe79c704..a2b028b4a20 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c @@ -23,22 +23,22 @@ /* A bit ugly, can we get this from the pci_dev somehow? 
*/ static struct mpic *msi_mpic; -static void mpic_u3msi_mask_irq(unsigned int irq) +static void mpic_u3msi_mask_irq(struct irq_data *data) { - mask_msi_irq(irq); - mpic_mask_irq(irq); + mask_msi_irq(data); + mpic_mask_irq(data->irq); } -static void mpic_u3msi_unmask_irq(unsigned int irq) +static void mpic_u3msi_unmask_irq(struct irq_data *data) { - mpic_unmask_irq(irq); - unmask_msi_irq(irq); + mpic_unmask_irq(data->irq); + unmask_msi_irq(data); } static struct irq_chip mpic_u3msi_chip = { - .shutdown = mpic_u3msi_mask_irq, - .mask = mpic_u3msi_mask_irq, - .unmask = mpic_u3msi_unmask_irq, + .irq_shutdown = mpic_u3msi_mask_irq, + .irq_mask = mpic_u3msi_mask_irq, + .irq_unmask = mpic_u3msi_unmask_irq, .eoi = mpic_end_irq, .set_type = mpic_set_irq_type, .set_affinity = mpic_set_affinity, diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 257de1f0692..ae5bac39b89 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -290,7 +290,7 @@ void __init init_IRQ(void) int __init arch_probe_nr_irqs(void) { nr_irqs = sh_mv.mv_nr_irqs; - return 0; + return NR_IRQS_LEGACY; } #endif diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index 548b8ca9c21..b210416ace7 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c @@ -114,10 +114,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) static struct irq_chip msi_irq = { .name = "PCI-MSI", - .mask = mask_msi_irq, - .unmask = unmask_msi_irq, - .enable = unmask_msi_irq, - .disable = mask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, + .irq_enable = unmask_msi_irq, + .irq_disable = mask_msi_irq, /* XXX affinity XXX */ }; diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index 596c6008693..9a27d563fc3 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c @@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq) } static struct irq_chip tile_irq_chip = { - .typename = "tile_irq_chip", + .name = "tile_irq_chip", .ack = tile_irq_chip_ack, .eoi = tile_irq_chip_eoi, .mask = tile_irq_chip_mask, @@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %14s", irq_desc[i].chip->typename); + seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index a3f0b04d710..a746e3037a5 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %14s", irq_desc[i].chip->typename); + seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) @@ -369,7 +369,7 @@ static void dummy(unsigned int irq) /* This is used for everything else than the timer. 
*/ static struct irq_chip normal_irq_type = { - .typename = "SIGIO", + .name = "SIGIO", .release = free_irq_by_irq_and_dev, .disable = dummy, .enable = dummy, @@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = { }; static struct irq_chip SIGVTALRM_irq_type = { - .typename = "SIGVTALRM", + .name = "SIGVTALRM", .release = free_irq_by_irq_and_dev, .shutdown = dummy, /* never called */ .disable = dummy, diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cea0cd9a316..8cc510874e1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -59,6 +59,10 @@ config X86 select ANON_INODES select HAVE_ARCH_KMEMCHECK select HAVE_USER_RETURN_NOTIFIER + select HAVE_GENERIC_HARDIRQS + select HAVE_SPARSE_IRQ + select GENERIC_IRQ_PROBE + select GENERIC_PENDING_IRQ if SMP config INSTRUCTION_DECODER def_bool (KPROBES || PERF_EVENTS) @@ -200,20 +204,6 @@ config HAVE_INTEL_TXT def_bool y depends on EXPERIMENTAL && DMAR && ACPI -# Use the generic interrupt handling code in kernel/irq/: -config GENERIC_HARDIRQS - def_bool y - -config GENERIC_HARDIRQS_NO__DO_IRQ - def_bool y - -config GENERIC_IRQ_PROBE - def_bool y - -config GENERIC_PENDING_IRQ - def_bool y - depends on GENERIC_HARDIRQS && SMP - config USE_GENERIC_SMP_HELPERS def_bool y depends on SMP @@ -296,23 +286,6 @@ config X86_X2APIC If you don't know what to do here, say N. -config SPARSE_IRQ - bool "Support sparse irq numbering" - depends on PCI_MSI || HT_IRQ - ---help--- - This enables support for sparse irqs. This is useful for distro - kernels that want to define a high CONFIG_NR_CPUS value but still - want to have low kernel memory footprint on smaller machines. - - ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread - out the irq_desc[] array in a more NUMA-friendly way. ) - - If you don't know what to do here, say N. 
- -config NUMA_IRQ_DESC - def_bool y - depends on SPARSE_IRQ && NUMA - config X86_MPPARSE bool "Enable MPS table" if ACPI default y diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h index a69b1ac9eaf..2fefa501d3b 100644 --- a/arch/x86/include/asm/apb_timer.h +++ b/arch/x86/include/asm/apb_timer.h @@ -54,7 +54,6 @@ extern struct clock_event_device *global_clock_event; extern unsigned long apbt_quick_calibrate(void); extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); extern void apbt_setup_secondary_clock(void); -extern unsigned int boot_cpu_id; extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index b185091bf19..4fab24de26b 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -32,6 +32,5 @@ extern void arch_unregister_cpu(int); DECLARE_PER_CPU(int, cpu_state); -extern unsigned int boot_cpu_id; #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 1d5c08a1bdf..2c392d663dc 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h @@ -74,10 +74,12 @@ extern void hpet_disable(void); extern unsigned int hpet_readl(unsigned int a); extern void force_hpet_resume(void); -extern void hpet_msi_unmask(unsigned int irq); -extern void hpet_msi_mask(unsigned int irq); -extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg); -extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg); +struct irq_data; +extern void hpet_msi_unmask(struct irq_data *data); +extern void hpet_msi_mask(struct irq_data *data); +struct hpet_dev; +extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg); +extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); #ifdef CONFIG_PCI_MSI extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 46c0fe05f23..d5905fd8ba4 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -78,6 +78,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, irq_attr->polarity = polarity; } +struct irq_2_iommu { + struct intel_iommu *iommu; + u16 irte_index; + u16 sub_handle; + u8 irte_mask; +}; + /* * This is performance-critical, we want to do it O(1) * @@ -89,15 +96,17 @@ struct irq_cfg { cpumask_var_t old_domain; u8 vector; u8 move_in_progress : 1; +#ifdef CONFIG_INTR_REMAP + struct irq_2_iommu irq_2_iommu; +#endif }; -extern struct irq_cfg *irq_cfg(unsigned int); extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); extern void send_cleanup_vector(struct irq_cfg *); -struct irq_desc; -extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *, - unsigned int *dest_id); +struct irq_data; +int __ioapic_set_affinity(struct irq_data *, const struct cpumask *, + unsigned int *dest_id); extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); extern void setup_ioapic_dest(void); diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index 1655147646a..a20365953bf 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h @@ -55,6 +55,8 @@ extern struct irq_chip i8259A_chip; struct legacy_pic { int nr_legacy_irqs; struct irq_chip *chip; + void (*mask)(unsigned int irq); + void (*unmask)(unsigned int irq); void 
(*mask_all)(void); void (*restore_mask)(void); void (*init)(int auto_eoi); diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 9cb2edb87c2..c8be4566c3d 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -170,12 +170,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); extern void probe_nr_irqs_gsi(void); -extern int setup_ioapic_entry(int apic, int irq, - struct IO_APIC_route_entry *entry, - unsigned int destination, int trigger, - int polarity, int vector, int pin); -extern void ioapic_write_entry(int apic, int pin, - struct IO_APIC_route_entry e); extern void setup_ioapic_ids_from_mpc(void); struct mp_ioapic_gsi{ diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index f275e224450..1c23360fb2d 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -3,4 +3,39 @@ #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) +#ifdef CONFIG_INTR_REMAP +static inline void prepare_irte(struct irte *irte, int vector, + unsigned int dest) +{ + memset(irte, 0, sizeof(*irte)); + + irte->present = 1; + irte->dst_mode = apic->irq_dest_mode; + /* + * Trigger mode in the IRTE will always be edge, and for IO-APIC, the + * actual level or edge trigger will be setup in the IO-APIC + * RTE. This will help simplify level triggered irq migration. + * For more details, see the comments (in io_apic.c) explainig IO-APIC + * irq migration in the presence of interrupt-remapping. + */ + irte->trigger_mode = 0; + irte->dlvry_mode = apic->irq_delivery_mode; + irte->vector = vector; + irte->dest_id = IRTE_DEST(dest); + irte->redir_hint = 1; +} +static inline bool irq_remapped(struct irq_cfg *cfg) +{ + return cfg->irq_2_iommu.iommu != NULL; +} +#else +static void prepare_irte(struct irte *irte, int vector, unsigned int dest) +{ +} +static inline bool irq_remapped(struct irq_cfg *cfg) +{ + return false; +} +#endif + #endif /* _ASM_X86_IRQ_REMAPPING_H */ diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 8dd77800ff5..42a70a2accc 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -231,34 +231,6 @@ static void apbt_restart_clocksource(struct clocksource *cs) apbt_start_counter(phy_cs_timer_id); } -/* Setup IRQ routing via IOAPIC */ -#ifdef CONFIG_SMP -static void apbt_setup_irq(struct apbt_dev *adev) -{ - struct irq_chip *chip; - struct irq_desc *desc; - - /* timer0 irq has been setup early */ - if (adev->irq == 0) - return; - desc = irq_to_desc(adev->irq); - chip = get_irq_chip(adev->irq); - disable_irq(adev->irq); - desc->status |= IRQ_MOVE_PCNTXT; - irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); - /* APB timer irqs are set up as mp_irqs, timer is edge triggerred */ - set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge"); - enable_irq(adev->irq); - if (system_state == SYSTEM_BOOTING) - if (request_irq(adev->irq, apbt_interrupt_handler, - IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, - adev->name, adev)) { - printk(KERN_ERR "Failed request IRQ for APBT%d\n", - adev->num); - } -} -#endif - static void apbt_enable_int(int n) { unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); @@ -334,6 +306,27 @@ static int __init apbt_clockevent_register(void) } #ifdef CONFIG_SMP + +static void apbt_setup_irq(struct apbt_dev *adev) +{ + /* timer0 irq has been setup early */ + if (adev->irq == 0) + return; + + if (system_state == SYSTEM_BOOTING) { + irq_modify_status(adev->irq, 0, 
IRQ_MOVE_PCNTXT); + /* APB timer irqs are set up as mp_irqs, timer is edge type */ + __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge"); + if (request_irq(adev->irq, apbt_interrupt_handler, + IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, + adev->name, adev)) { + printk(KERN_ERR "Failed request IRQ for APBT%d\n", + adev->num); + } + } else + enable_irq(adev->irq); +} + /* Should be called with per cpu */ void apbt_setup_secondary_clock(void) { @@ -343,7 +336,7 @@ void apbt_setup_secondary_clock(void) /* Don't register boot CPU clockevent */ cpu = smp_processor_id(); - if (cpu == boot_cpu_id) + if (!cpu) return; /* * We need to calculate the scaled math multiplication factor for @@ -389,10 +382,11 @@ static int apbt_cpuhp_notify(struct notifier_block *n, switch (action & 0xf) { case CPU_DEAD: + disable_irq(adev->irq); apbt_disable_int(cpu); - if (system_state == SYSTEM_RUNNING) + if (system_state == SYSTEM_RUNNING) { pr_debug("skipping APBT CPU %lu offline\n", cpu); - else if (adev) { + } else if (adev) { pr_debug("APBT clockevent for cpu %lu offline\n", cpu); free_irq(adev->irq, adev); } diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index e3b534cda49..8cf86fb3b4e 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1665,10 +1665,7 @@ int __init APIC_init_uniprocessor(void) } #endif -#ifndef CONFIG_SMP - enable_IR_x2apic(); default_setup_apic_routing(); -#endif verify_local_APIC(); connect_bsp_APIC(); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 5c5b8f3dddb..8ae808d110f 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -131,13 +131,9 @@ struct irq_pin_list { struct irq_pin_list *next; }; -static struct irq_pin_list *get_one_free_irq_2_pin(int node) +static struct irq_pin_list *alloc_irq_pin_list(int node) { - struct irq_pin_list *pin; - - pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); - - return pin; + return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); } /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ @@ -150,10 +146,7 @@ static struct irq_cfg irq_cfgx[NR_IRQS]; int __init arch_early_irq_init(void) { struct irq_cfg *cfg; - struct irq_desc *desc; - int count; - int node; - int i; + int count, node, i; if (!legacy_pic->nr_legacy_irqs) { nr_irqs_gsi = 0; @@ -162,13 +155,15 @@ int __init arch_early_irq_init(void) cfg = irq_cfgx; count = ARRAY_SIZE(irq_cfgx); - node= cpu_to_node(boot_cpu_id); + node = cpu_to_node(0); + + /* Make sure the legacy interrupts are marked in the bitmap */ + irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); for (i = 0; i < count; i++) { - desc = irq_to_desc(i); - desc->chip_data = &cfg[i]; - zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); - zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); + set_irq_chip_data(i, &cfg[i]); + zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); + zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); /* * For legacy IRQ's, start with assigning irq0 to irq15 to * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. 
@@ -183,170 +178,88 @@ int __init arch_early_irq_init(void) } #ifdef CONFIG_SPARSE_IRQ -struct irq_cfg *irq_cfg(unsigned int irq) +static struct irq_cfg *irq_cfg(unsigned int irq) { - struct irq_cfg *cfg = NULL; - struct irq_desc *desc; - - desc = irq_to_desc(irq); - if (desc) - cfg = desc->chip_data; - - return cfg; + return get_irq_chip_data(irq); } -static struct irq_cfg *get_one_free_irq_cfg(int node) +static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) { struct irq_cfg *cfg; - cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); - if (cfg) { - if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { - kfree(cfg); - cfg = NULL; - } else if (!zalloc_cpumask_var_node(&cfg->old_domain, - GFP_ATOMIC, node)) { - free_cpumask_var(cfg->domain); - kfree(cfg); - cfg = NULL; - } - } - + cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); + if (!cfg) + return NULL; + if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) + goto out_cfg; + if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) + goto out_domain; return cfg; +out_domain: + free_cpumask_var(cfg->domain); +out_cfg: + kfree(cfg); + return NULL; } -int arch_init_chip_data(struct irq_desc *desc, int node) -{ - struct irq_cfg *cfg; - - cfg = desc->chip_data; - if (!cfg) { - desc->chip_data = get_one_free_irq_cfg(node); - if (!desc->chip_data) { - printk(KERN_ERR "can not alloc irq_cfg\n"); - BUG_ON(1); - } - } - - return 0; -} - -/* for move_irq_desc */ -static void -init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) +static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { - struct irq_pin_list *old_entry, *head, *tail, *entry; - - cfg->irq_2_pin = NULL; - old_entry = old_cfg->irq_2_pin; - if (!old_entry) - return; - - entry = get_one_free_irq_2_pin(node); - if (!entry) + if (!cfg) return; + set_irq_chip_data(at, NULL); + free_cpumask_var(cfg->domain); + free_cpumask_var(cfg->old_domain); + kfree(cfg); +} - entry->apic = old_entry->apic; - entry->pin = old_entry->pin; - head = entry; - tail = entry; - old_entry = old_entry->next; - while (old_entry) { - entry = get_one_free_irq_2_pin(node); - if (!entry) { - entry = head; - while (entry) { - head = entry->next; - kfree(entry); - entry = head; - } - /* still use the old one */ - return; - } - entry->apic = old_entry->apic; - entry->pin = old_entry->pin; - tail->next = entry; - tail = entry; - old_entry = old_entry->next; - } +#else - tail->next = NULL; - cfg->irq_2_pin = head; +struct irq_cfg *irq_cfg(unsigned int irq) +{ + return irq < nr_irqs ? 
irq_cfgx + irq : NULL; } -static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) +static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) { - struct irq_pin_list *entry, *next; - - if (old_cfg->irq_2_pin == cfg->irq_2_pin) - return; + return irq_cfgx + irq; +} - entry = old_cfg->irq_2_pin; +static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { } - while (entry) { - next = entry->next; - kfree(entry); - entry = next; - } - old_cfg->irq_2_pin = NULL; -} +#endif -void arch_init_copy_chip_data(struct irq_desc *old_desc, - struct irq_desc *desc, int node) +static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) { + int res = irq_alloc_desc_at(at, node); struct irq_cfg *cfg; - struct irq_cfg *old_cfg; - - cfg = get_one_free_irq_cfg(node); - if (!cfg) - return; - - desc->chip_data = cfg; - - old_cfg = old_desc->chip_data; - - cfg->vector = old_cfg->vector; - cfg->move_in_progress = old_cfg->move_in_progress; - cpumask_copy(cfg->domain, old_cfg->domain); - cpumask_copy(cfg->old_domain, old_cfg->old_domain); - - init_copy_irq_2_pin(old_cfg, cfg, node); -} + if (res < 0) { + if (res != -EEXIST) + return NULL; + cfg = get_irq_chip_data(at); + if (cfg) + return cfg; + } -static void free_irq_cfg(struct irq_cfg *cfg) -{ - free_cpumask_var(cfg->domain); - free_cpumask_var(cfg->old_domain); - kfree(cfg); + cfg = alloc_irq_cfg(at, node); + if (cfg) + set_irq_chip_data(at, cfg); + else + irq_free_desc(at); + return cfg; } -void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) +static int alloc_irq_from(unsigned int from, int node) { - struct irq_cfg *old_cfg, *cfg; - - old_cfg = old_desc->chip_data; - cfg = desc->chip_data; - - if (old_cfg == cfg) - return; - - if (old_cfg) { - free_irq_2_pin(old_cfg, cfg); - free_irq_cfg(old_cfg); - old_desc->chip_data = NULL; - } + return irq_alloc_desc_from(from, node); } -/* end for move_irq_desc */ -#else -struct irq_cfg *irq_cfg(unsigned int irq) +static void free_irq_at(unsigned int at, struct irq_cfg *cfg) { - return irq < nr_irqs ? irq_cfgx + irq : NULL; + free_irq_cfg(at, cfg); + irq_free_desc(at); } -#endif - struct io_apic { unsigned int index; unsigned int unused[3]; @@ -451,7 +364,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) io_apic_write(apic, 0x10 + 2*pin, eu.w1); } -void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) +static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) { unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); @@ -481,7 +394,7 @@ static void ioapic_mask_entry(int apic, int pin) * fast in the common case, and fast for shared ISA-space IRQs. */ static int -add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) +__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) { struct irq_pin_list **last, *entry; @@ -493,7 +406,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) last = &entry->next; } - entry = get_one_free_irq_2_pin(node); + entry = alloc_irq_pin_list(node); if (!entry) { printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", node, apic, pin); @@ -508,7 +421,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) { - if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) + if (__add_pin_to_irq_node(cfg, node, apic, pin)) panic("IO-APIC: failed to add irq-pin. 
Can not proceed\n"); } @@ -571,11 +484,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry) IO_APIC_REDIR_LEVEL_TRIGGER, NULL); } -static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) -{ - io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); -} - static void io_apic_sync(struct irq_pin_list *entry) { /* @@ -587,44 +495,37 @@ static void io_apic_sync(struct irq_pin_list *entry) readl(&io_apic->data); } -static void __mask_IO_APIC_irq(struct irq_cfg *cfg) +static void mask_ioapic(struct irq_cfg *cfg) { + unsigned long flags; + + raw_spin_lock_irqsave(&ioapic_lock, flags); io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); + raw_spin_unlock_irqrestore(&ioapic_lock, flags); } -static void mask_IO_APIC_irq_desc(struct irq_desc *desc) +static void mask_ioapic_irq(struct irq_data *data) { - struct irq_cfg *cfg = desc->chip_data; - unsigned long flags; - - BUG_ON(!cfg); + mask_ioapic(data->chip_data); +} - raw_spin_lock_irqsave(&ioapic_lock, flags); - __mask_IO_APIC_irq(cfg); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); +static void __unmask_ioapic(struct irq_cfg *cfg) +{ + io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); } -static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) +static void unmask_ioapic(struct irq_cfg *cfg) { - struct irq_cfg *cfg = desc->chip_data; unsigned long flags; raw_spin_lock_irqsave(&ioapic_lock, flags); - __unmask_IO_APIC_irq(cfg); + __unmask_ioapic(cfg); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } -static void mask_IO_APIC_irq(unsigned int irq) +static void unmask_ioapic_irq(struct irq_data *data) { - struct irq_desc *desc = irq_to_desc(irq); - - mask_IO_APIC_irq_desc(desc); -} -static void unmask_IO_APIC_irq(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - - unmask_IO_APIC_irq_desc(desc); + unmask_ioapic(data->chip_data); } static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) @@ -694,14 +595,14 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void) struct IO_APIC_route_entry **ioapic_entries; ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, - GFP_ATOMIC); + GFP_KERNEL); if (!ioapic_entries) return 0; for (apic = 0; apic < nr_ioapics; apic++) { ioapic_entries[apic] = kzalloc(sizeof(struct IO_APIC_route_entry) * - nr_ioapic_registers[apic], GFP_ATOMIC); + nr_ioapic_registers[apic], GFP_KERNEL); if (!ioapic_entries[apic]) goto nomem; } @@ -1259,7 +1160,6 @@ void __setup_vector_irq(int cpu) /* Initialize vector_irq on a new cpu */ int irq, vector; struct irq_cfg *cfg; - struct irq_desc *desc; /* * vector_lock will make sure that we don't run into irq vector @@ -1268,9 +1168,10 @@ void __setup_vector_irq(int cpu) */ raw_spin_lock(&vector_lock); /* Mark the inuse vectors */ - for_each_irq_desc(irq, desc) { - cfg = desc->chip_data; - + for_each_active_irq(irq) { + cfg = get_irq_chip_data(irq); + if (!cfg) + continue; /* * If it is a legacy IRQ handled by the legacy PIC, this cpu * will be part of the irq_cfg's domain. 
@@ -1327,17 +1228,17 @@ static inline int IO_APIC_irq_trigger(int irq) } #endif -static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) +static void ioapic_register_intr(unsigned int irq, unsigned long trigger) { if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || trigger == IOAPIC_LEVEL) - desc->status |= IRQ_LEVEL; + irq_set_status_flags(irq, IRQ_LEVEL); else - desc->status &= ~IRQ_LEVEL; + irq_clear_status_flags(irq, IRQ_LEVEL); - if (irq_remapped(irq)) { - desc->status |= IRQ_MOVE_PCNTXT; + if (irq_remapped(get_irq_chip_data(irq))) { + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); if (trigger) set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, handle_fasteoi_irq, @@ -1358,10 +1259,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t handle_edge_irq, "edge"); } -int setup_ioapic_entry(int apic_id, int irq, - struct IO_APIC_route_entry *entry, - unsigned int destination, int trigger, - int polarity, int vector, int pin) +static int setup_ioapic_entry(int apic_id, int irq, + struct IO_APIC_route_entry *entry, + unsigned int destination, int trigger, + int polarity, int vector, int pin) { /* * add it to the IO-APIC irq-routing table: @@ -1382,21 +1283,7 @@ int setup_ioapic_entry(int apic_id, int irq, if (index < 0) panic("Failed to allocate IRTE for ioapic %d\n", apic_id); - memset(&irte, 0, sizeof(irte)); - - irte.present = 1; - irte.dst_mode = apic->irq_dest_mode; - /* - * Trigger mode in the IRTE will always be edge, and the - * actual level or edge trigger will be setup in the IO-APIC - * RTE. This will help simplify level triggered irq migration. - * For more details, see the comments above explainig IO-APIC - * irq migration in the presence of interrupt-remapping. - */ - irte.trigger_mode = 0; - irte.dlvry_mode = apic->irq_delivery_mode; - irte.vector = vector; - irte.dest_id = IRTE_DEST(destination); + prepare_irte(&irte, vector, destination); /* Set source-id of interrupt request */ set_ioapic_sid(&irte, apic_id); @@ -1431,18 +1318,14 @@ int setup_ioapic_entry(int apic_id, int irq, return 0; } -static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, - int trigger, int polarity) +static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, + struct irq_cfg *cfg, int trigger, int polarity) { - struct irq_cfg *cfg; struct IO_APIC_route_entry entry; unsigned int dest; if (!IO_APIC_IRQ(irq)) return; - - cfg = desc->chip_data; - /* * For legacy irqs, cfg->domain starts with cpu 0 for legacy * controllers like 8259. 
Now that IO-APIC can handle this irq, update @@ -1471,9 +1354,9 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq return; } - ioapic_register_intr(irq, desc, trigger); + ioapic_register_intr(irq, trigger); if (irq < legacy_pic->nr_legacy_irqs) - legacy_pic->chip->mask(irq); + legacy_pic->mask(irq); ioapic_write_entry(apic_id, pin, entry); } @@ -1484,11 +1367,9 @@ static struct { static void __init setup_IO_APIC_irqs(void) { - int apic_id, pin, idx, irq; - int notcon = 0; - struct irq_desc *desc; + int apic_id, pin, idx, irq, notcon = 0; + int node = cpu_to_node(0); struct irq_cfg *cfg; - int node = cpu_to_node(boot_cpu_id); apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); @@ -1525,19 +1406,17 @@ static void __init setup_IO_APIC_irqs(void) apic->multi_timer_check(apic_id, irq)) continue; - desc = irq_to_desc_alloc_node(irq, node); - if (!desc) { - printk(KERN_INFO "can not get irq_desc for %d\n", irq); + cfg = alloc_irq_and_cfg_at(irq, node); + if (!cfg) continue; - } - cfg = desc->chip_data; + add_pin_to_irq_node(cfg, node, apic_id, pin); /* * don't mark it in pin_programmed, so later acpi could * set it correctly when irq < 16 */ - setup_IO_APIC_irq(apic_id, pin, irq, desc, - irq_trigger(idx), irq_polarity(idx)); + setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), + irq_polarity(idx)); } if (notcon) @@ -1552,9 +1431,7 @@ static void __init setup_IO_APIC_irqs(void) */ void setup_IO_APIC_irq_extra(u32 gsi) { - int apic_id = 0, pin, idx, irq; - int node = cpu_to_node(boot_cpu_id); - struct irq_desc *desc; + int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); struct irq_cfg *cfg; /* @@ -1570,18 +1447,15 @@ void setup_IO_APIC_irq_extra(u32 gsi) return; irq = pin_2_irq(idx, apic_id, pin); -#ifdef CONFIG_SPARSE_IRQ - desc = irq_to_desc(irq); - if (desc) + + /* Only handle the non legacy irqs on secondary ioapics */ + if (apic_id == 0 || irq < NR_IRQS_LEGACY) return; -#endif - desc = irq_to_desc_alloc_node(irq, node); - if (!desc) { - printk(KERN_INFO "can not get irq_desc for %d\n", irq); + + cfg = alloc_irq_and_cfg_at(irq, node); + if (!cfg) return; - } - cfg = desc->chip_data; add_pin_to_irq_node(cfg, node, apic_id, pin); if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { @@ -1591,7 +1465,7 @@ void setup_IO_APIC_irq_extra(u32 gsi) } set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); - setup_IO_APIC_irq(apic_id, pin, irq, desc, + setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), irq_polarity(idx)); } @@ -1642,7 +1516,6 @@ __apicdebuginit(void) print_IO_APIC(void) union IO_APIC_reg_03 reg_03; unsigned long flags; struct irq_cfg *cfg; - struct irq_desc *desc; unsigned int irq; printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); @@ -1729,10 +1602,10 @@ __apicdebuginit(void) print_IO_APIC(void) } } printk(KERN_DEBUG "IRQ to pin mappings:\n"); - for_each_irq_desc(irq, desc) { + for_each_active_irq(irq) { struct irq_pin_list *entry; - cfg = desc->chip_data; + cfg = get_irq_chip_data(irq); if (!cfg) continue; entry = cfg->irq_2_pin; @@ -2239,29 +2112,26 @@ static int __init timer_irq_works(void) * an edge even if it isn't on the 8259A... 
*/ -static unsigned int startup_ioapic_irq(unsigned int irq) +static unsigned int startup_ioapic_irq(struct irq_data *data) { - int was_pending = 0; + int was_pending = 0, irq = data->irq; unsigned long flags; - struct irq_cfg *cfg; raw_spin_lock_irqsave(&ioapic_lock, flags); if (irq < legacy_pic->nr_legacy_irqs) { - legacy_pic->chip->mask(irq); + legacy_pic->mask(irq); if (legacy_pic->irq_pending(irq)) was_pending = 1; } - cfg = irq_cfg(irq); - __unmask_IO_APIC_irq(cfg); + __unmask_ioapic(data->chip_data); raw_spin_unlock_irqrestore(&ioapic_lock, flags); return was_pending; } -static int ioapic_retrigger_irq(unsigned int irq) +static int ioapic_retrigger_irq(struct irq_data *data) { - - struct irq_cfg *cfg = irq_cfg(irq); + struct irq_cfg *cfg = data->chip_data; unsigned long flags; raw_spin_lock_irqsave(&vector_lock, flags); @@ -2312,7 +2182,7 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq * With interrupt-remapping, destination information comes * from interrupt-remapping table entry. */ - if (!irq_remapped(irq)) + if (!irq_remapped(cfg)) io_apic_write(apic, 0x11 + pin*2, dest); reg = io_apic_read(apic, 0x10 + pin*2); reg &= ~IO_APIC_REDIR_VECTOR_MASK; @@ -2322,65 +2192,46 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq } /* - * Either sets desc->affinity to a valid value, and returns + * Either sets data->affinity to a valid value, and returns * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and - * leaves desc->affinity untouched. + * leaves data->affinity untouched. */ -unsigned int -set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask, - unsigned int *dest_id) +int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, + unsigned int *dest_id) { - struct irq_cfg *cfg; - unsigned int irq; + struct irq_cfg *cfg = data->chip_data; if (!cpumask_intersects(mask, cpu_online_mask)) return -1; - irq = desc->irq; - cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, mask)) + if (assign_irq_vector(data->irq, data->chip_data, mask)) return -1; - cpumask_copy(desc->affinity, mask); + cpumask_copy(data->affinity, mask); - *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); + *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); return 0; } static int -set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) +ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) { - struct irq_cfg *cfg; + unsigned int dest, irq = data->irq; unsigned long flags; - unsigned int dest; - unsigned int irq; - int ret = -1; - - irq = desc->irq; - cfg = desc->chip_data; + int ret; raw_spin_lock_irqsave(&ioapic_lock, flags); - ret = set_desc_affinity(desc, mask, &dest); + ret = __ioapic_set_affinity(data, mask, &dest); if (!ret) { /* Only the high 8 bits are valid. */ dest = SET_APIC_LOGICAL_ID(dest); - __target_IO_APIC_irq(irq, dest, cfg); + __target_IO_APIC_irq(irq, dest, data->chip_data); } raw_spin_unlock_irqrestore(&ioapic_lock, flags); - return ret; } -static int -set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - - return set_ioapic_affinity_irq_desc(desc, mask); -} - #ifdef CONFIG_INTR_REMAP /* @@ -2395,24 +2246,21 @@ set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) * the interrupt-remapping table entry. 
*/ static int -migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) +ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) { - struct irq_cfg *cfg; + struct irq_cfg *cfg = data->chip_data; + unsigned int dest, irq = data->irq; struct irte irte; - unsigned int dest; - unsigned int irq; - int ret = -1; if (!cpumask_intersects(mask, cpu_online_mask)) - return ret; + return -EINVAL; - irq = desc->irq; if (get_irte(irq, &irte)) - return ret; + return -EBUSY; - cfg = desc->chip_data; if (assign_irq_vector(irq, cfg, mask)) - return ret; + return -EBUSY; dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); @@ -2427,29 +2275,14 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) if (cfg->move_in_progress) send_cleanup_vector(cfg); - cpumask_copy(desc->affinity, mask); - + cpumask_copy(data->affinity, mask); return 0; } -/* - * Migrates the IRQ destination in the process context. - */ -static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, - const struct cpumask *mask) -{ - return migrate_ioapic_irq_desc(desc, mask); -} -static int set_ir_ioapic_affinity_irq(unsigned int irq, - const struct cpumask *mask) -{ - struct irq_desc *desc = irq_to_desc(irq); - - return set_ir_ioapic_affinity_irq_desc(desc, mask); -} #else -static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, - const struct cpumask *mask) +static inline int +ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) { return 0; } @@ -2511,10 +2344,8 @@ unlock: irq_exit(); } -static void __irq_complete_move(struct irq_desc **descp, unsigned vector) +static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) { - struct irq_desc *desc = *descp; - struct irq_cfg *cfg = desc->chip_data; unsigned me; if (likely(!cfg->move_in_progress)) @@ -2526,31 +2357,28 @@ static void __irq_complete_move(struct irq_desc **descp, unsigned vector) send_cleanup_vector(cfg); } -static void irq_complete_move(struct irq_desc **descp) +static void irq_complete_move(struct irq_cfg *cfg) { - __irq_complete_move(descp, ~get_irq_regs()->orig_ax); + __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); } void irq_force_complete_move(int irq) { - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg = desc->chip_data; + struct irq_cfg *cfg = get_irq_chip_data(irq); if (!cfg) return; - __irq_complete_move(&desc, cfg->vector); + __irq_complete_move(cfg, cfg->vector); } #else -static inline void irq_complete_move(struct irq_desc **descp) {} +static inline void irq_complete_move(struct irq_cfg *cfg) { } #endif -static void ack_apic_edge(unsigned int irq) +static void ack_apic_edge(struct irq_data *data) { - struct irq_desc *desc = irq_to_desc(irq); - - irq_complete_move(&desc); - move_native_irq(irq); + irq_complete_move(data->chip_data); + move_native_irq(data->irq); ack_APIC_irq(); } @@ -2572,10 +2400,12 @@ atomic_t irq_mis_count; * Otherwise, we simulate the EOI message manually by changing the trigger * mode to edge and then back to level, with RTE being masked during this. 
*/ -static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) +static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) { struct irq_pin_list *entry; + unsigned long flags; + raw_spin_lock_irqsave(&ioapic_lock, flags); for_each_irq_pin(entry, cfg->irq_2_pin) { if (mp_ioapics[entry->apic].apicver >= 0x20) { /* @@ -2584,7 +2414,7 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) * intr-remapping table entry. Hence for the io-apic * EOI we use the pin number. */ - if (irq_remapped(irq)) + if (irq_remapped(cfg)) io_apic_eoi(entry->apic, entry->pin); else io_apic_eoi(entry->apic, cfg->vector); @@ -2593,36 +2423,22 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) __unmask_and_level_IO_APIC_irq(entry); } } -} - -static void eoi_ioapic_irq(struct irq_desc *desc) -{ - struct irq_cfg *cfg; - unsigned long flags; - unsigned int irq; - - irq = desc->irq; - cfg = desc->chip_data; - - raw_spin_lock_irqsave(&ioapic_lock, flags); - __eoi_ioapic_irq(irq, cfg); raw_spin_unlock_irqrestore(&ioapic_lock, flags); } -static void ack_apic_level(unsigned int irq) +static void ack_apic_level(struct irq_data *data) { + struct irq_cfg *cfg = data->chip_data; + int i, do_unmask_irq = 0, irq = data->irq; struct irq_desc *desc = irq_to_desc(irq); unsigned long v; - int i; - struct irq_cfg *cfg; - int do_unmask_irq = 0; - irq_complete_move(&desc); + irq_complete_move(cfg); #ifdef CONFIG_GENERIC_PENDING_IRQ /* If we are moving the irq we need to mask it */ if (unlikely(desc->status & IRQ_MOVE_PENDING)) { do_unmask_irq = 1; - mask_IO_APIC_irq_desc(desc); + mask_ioapic(cfg); } #endif @@ -2658,7 +2474,6 @@ static void ack_apic_level(unsigned int irq) * we use the above logic (mask+edge followed by unmask+level) from * Manfred Spraul to clear the remote IRR. */ - cfg = desc->chip_data; i = cfg->vector; v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); @@ -2678,7 +2493,7 @@ static void ack_apic_level(unsigned int irq) if (!(v & (1 << (i & 0x1f)))) { atomic_inc(&irq_mis_count); - eoi_ioapic_irq(desc); + eoi_ioapic_irq(irq, cfg); } /* Now we can move and renable the irq */ @@ -2709,61 +2524,57 @@ static void ack_apic_level(unsigned int irq) * accurate and is causing problems then it is a hardware bug * and you can go talk to the chipset vendor about it. 
*/ - cfg = desc->chip_data; if (!io_apic_level_ack_pending(cfg)) move_masked_irq(irq); - unmask_IO_APIC_irq_desc(desc); + unmask_ioapic(cfg); } } #ifdef CONFIG_INTR_REMAP -static void ir_ack_apic_edge(unsigned int irq) +static void ir_ack_apic_edge(struct irq_data *data) { ack_APIC_irq(); } -static void ir_ack_apic_level(unsigned int irq) +static void ir_ack_apic_level(struct irq_data *data) { - struct irq_desc *desc = irq_to_desc(irq); - ack_APIC_irq(); - eoi_ioapic_irq(desc); + eoi_ioapic_irq(data->irq, data->chip_data); } #endif /* CONFIG_INTR_REMAP */ static struct irq_chip ioapic_chip __read_mostly = { - .name = "IO-APIC", - .startup = startup_ioapic_irq, - .mask = mask_IO_APIC_irq, - .unmask = unmask_IO_APIC_irq, - .ack = ack_apic_edge, - .eoi = ack_apic_level, + .name = "IO-APIC", + .irq_startup = startup_ioapic_irq, + .irq_mask = mask_ioapic_irq, + .irq_unmask = unmask_ioapic_irq, + .irq_ack = ack_apic_edge, + .irq_eoi = ack_apic_level, #ifdef CONFIG_SMP - .set_affinity = set_ioapic_affinity_irq, + .irq_set_affinity = ioapic_set_affinity, #endif - .retrigger = ioapic_retrigger_irq, + .irq_retrigger = ioapic_retrigger_irq, }; static struct irq_chip ir_ioapic_chip __read_mostly = { - .name = "IR-IO-APIC", - .startup = startup_ioapic_irq, - .mask = mask_IO_APIC_irq, - .unmask = unmask_IO_APIC_irq, + .name = "IR-IO-APIC", + .irq_startup = startup_ioapic_irq, + .irq_mask = mask_ioapic_irq, + .irq_unmask = unmask_ioapic_irq, #ifdef CONFIG_INTR_REMAP - .ack = ir_ack_apic_edge, - .eoi = ir_ack_apic_level, + .irq_ack = ir_ack_apic_edge, + .irq_eoi = ir_ack_apic_level, #ifdef CONFIG_SMP - .set_affinity = set_ir_ioapic_affinity_irq, + .irq_set_affinity = ir_ioapic_set_affinity, #endif #endif - .retrigger = ioapic_retrigger_irq, + .irq_retrigger = ioapic_retrigger_irq, }; static inline void init_IO_APIC_traps(void) { - int irq; - struct irq_desc *desc; struct irq_cfg *cfg; + unsigned int irq; /* * NOTE! The local APIC isn't very good at handling @@ -2776,8 +2587,8 @@ static inline void init_IO_APIC_traps(void) * Also, we've got to be careful not to trash gate * 0x80, because int 0x80 is hm, kind of importantish. ;) */ - for_each_irq_desc(irq, desc) { - cfg = desc->chip_data; + for_each_active_irq(irq) { + cfg = get_irq_chip_data(irq); if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { /* * Hmm.. We don't have an entry for this, @@ -2788,7 +2599,7 @@ static inline void init_IO_APIC_traps(void) legacy_pic->make_irq(irq); else /* Strange. Oh, well.. 
*/ - desc->chip = &no_irq_chip; + set_irq_chip(irq, &no_irq_chip); } } } @@ -2797,7 +2608,7 @@ static inline void init_IO_APIC_traps(void) * The local APIC irq-chip implementation: */ -static void mask_lapic_irq(unsigned int irq) +static void mask_lapic_irq(struct irq_data *data) { unsigned long v; @@ -2805,7 +2616,7 @@ static void mask_lapic_irq(unsigned int irq) apic_write(APIC_LVT0, v | APIC_LVT_MASKED); } -static void unmask_lapic_irq(unsigned int irq) +static void unmask_lapic_irq(struct irq_data *data) { unsigned long v; @@ -2813,21 +2624,21 @@ static void unmask_lapic_irq(unsigned int irq) apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); } -static void ack_lapic_irq(unsigned int irq) +static void ack_lapic_irq(struct irq_data *data) { ack_APIC_irq(); } static struct irq_chip lapic_chip __read_mostly = { .name = "local-APIC", - .mask = mask_lapic_irq, - .unmask = unmask_lapic_irq, - .ack = ack_lapic_irq, + .irq_mask = mask_lapic_irq, + .irq_unmask = unmask_lapic_irq, + .irq_ack = ack_lapic_irq, }; -static void lapic_register_intr(int irq, struct irq_desc *desc) +static void lapic_register_intr(int irq) { - desc->status &= ~IRQ_LEVEL; + irq_clear_status_flags(irq, IRQ_LEVEL); set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, "edge"); } @@ -2930,9 +2741,8 @@ int timer_through_8259 __initdata; */ static inline void __init check_timer(void) { - struct irq_desc *desc = irq_to_desc(0); - struct irq_cfg *cfg = desc->chip_data; - int node = cpu_to_node(boot_cpu_id); + struct irq_cfg *cfg = get_irq_chip_data(0); + int node = cpu_to_node(0); int apic1, pin1, apic2, pin2; unsigned long flags; int no_pin1 = 0; @@ -2942,7 +2752,7 @@ static inline void __init check_timer(void) /* * get/set the timer IRQ vector: */ - legacy_pic->chip->mask(0); + legacy_pic->mask(0); assign_irq_vector(0, cfg, apic->target_cpus()); /* @@ -3001,7 +2811,7 @@ static inline void __init check_timer(void) add_pin_to_irq_node(cfg, node, apic1, pin1); setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); } else { - /* for edge trigger, setup_IO_APIC_irq already + /* for edge trigger, setup_ioapic_irq already * leave it unmasked. * so only need to unmask if it is level-trigger * do we really have level trigger timer? @@ -3009,12 +2819,12 @@ static inline void __init check_timer(void) int idx; idx = find_irq_entry(apic1, pin1, mp_INT); if (idx != -1 && irq_trigger(idx)) - unmask_IO_APIC_irq_desc(desc); + unmask_ioapic(cfg); } if (timer_irq_works()) { if (nmi_watchdog == NMI_IO_APIC) { setup_nmi(); - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); } if (disable_timer_pin_1 > 0) clear_IO_APIC_pin(0, pin1); @@ -3037,14 +2847,14 @@ static inline void __init check_timer(void) */ replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); timer_through_8259 = 1; if (nmi_watchdog == NMI_IO_APIC) { - legacy_pic->chip->mask(0); + legacy_pic->mask(0); setup_nmi(); - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); } goto out; } @@ -3052,7 +2862,7 @@ static inline void __init check_timer(void) * Cleanup, just in case ... */ local_irq_disable(); - legacy_pic->chip->mask(0); + legacy_pic->mask(0); clear_IO_APIC_pin(apic2, pin2); apic_printk(APIC_QUIET, KERN_INFO "....... 
failed.\n"); } @@ -3069,16 +2879,16 @@ static inline void __init check_timer(void) apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...\n"); - lapic_register_intr(0, desc); + lapic_register_intr(0); apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); if (timer_irq_works()) { apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } local_irq_disable(); - legacy_pic->chip->mask(0); + legacy_pic->mask(0); apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); @@ -3244,49 +3054,42 @@ device_initcall(ioapic_init_sysfs); /* * Dynamic irq allocate and deallocation */ -unsigned int create_irq_nr(unsigned int irq_want, int node) +unsigned int create_irq_nr(unsigned int from, int node) { - /* Allocate an unused irq */ - unsigned int irq; - unsigned int new; + struct irq_cfg *cfg; unsigned long flags; - struct irq_cfg *cfg_new = NULL; - struct irq_desc *desc_new = NULL; - - irq = 0; - if (irq_want < nr_irqs_gsi) - irq_want = nr_irqs_gsi; - - raw_spin_lock_irqsave(&vector_lock, flags); - for (new = irq_want; new < nr_irqs; new++) { - desc_new = irq_to_desc_alloc_node(new, node); - if (!desc_new) { - printk(KERN_INFO "can not get irq_desc for %d\n", new); - continue; - } - cfg_new = desc_new->chip_data; - - if (cfg_new->vector != 0) - continue; + unsigned int ret = 0; + int irq; - desc_new = move_irq_desc(desc_new, node); - cfg_new = desc_new->chip_data; + if (from < nr_irqs_gsi) + from = nr_irqs_gsi; - if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) - irq = new; - break; + irq = alloc_irq_from(from, node); + if (irq < 0) + return 0; + cfg = alloc_irq_cfg(irq, node); + if (!cfg) { + free_irq_at(irq, NULL); + return 0; } - raw_spin_unlock_irqrestore(&vector_lock, flags); - if (irq > 0) - dynamic_irq_init_keep_chip_data(irq); + raw_spin_lock_irqsave(&vector_lock, flags); + if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) + ret = irq; + raw_spin_unlock_irqrestore(&vector_lock, flags); - return irq; + if (ret) { + set_irq_chip_data(irq, cfg); + irq_clear_status_flags(irq, IRQ_NOREQUEST); + } else { + free_irq_at(irq, cfg); + } + return ret; } int create_irq(void) { - int node = cpu_to_node(boot_cpu_id); + int node = cpu_to_node(0); unsigned int irq_want; int irq; @@ -3301,14 +3104,17 @@ int create_irq(void) void destroy_irq(unsigned int irq) { + struct irq_cfg *cfg = get_irq_chip_data(irq); unsigned long flags; - dynamic_irq_cleanup_keep_chip_data(irq); + irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); - free_irte(irq); + if (intr_remapping_enabled) + free_irte(irq); raw_spin_lock_irqsave(&vector_lock, flags); - __clear_irq_vector(irq, get_irq_chip_data(irq)); + __clear_irq_vector(irq, cfg); raw_spin_unlock_irqrestore(&vector_lock, flags); + free_irq_at(irq, cfg); } /* @@ -3332,7 +3138,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); - if (irq_remapped(irq)) { + if (irq_remapped(get_irq_chip_data(irq))) { struct irte irte; int ir_index; u16 sub_handle; @@ -3340,14 +3146,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, ir_index = map_irq_to_irte_handle(irq, &sub_handle); BUG_ON(ir_index == -1); - memset (&irte, 0, sizeof(irte)); - - irte.present = 1; - irte.dst_mode = apic->irq_dest_mode; - irte.trigger_mode = 0; /* edge */ - irte.dlvry_mode = apic->irq_delivery_mode; - irte.vector = 
cfg->vector; - irte.dest_id = IRTE_DEST(dest); + prepare_irte(&irte, cfg->vector, dest); /* Set source-id of interrupt request */ if (pdev) @@ -3392,26 +3191,24 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, } #ifdef CONFIG_SMP -static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) +static int +msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; + struct irq_cfg *cfg = data->chip_data; struct msi_msg msg; unsigned int dest; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(data, mask, &dest)) return -1; - cfg = desc->chip_data; - - get_cached_msi_msg_desc(desc, &msg); + __get_cached_msi_msg(data->msi_desc, &msg); msg.data &= ~MSI_DATA_VECTOR_MASK; msg.data |= MSI_DATA_VECTOR(cfg->vector); msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; msg.address_lo |= MSI_ADDR_DEST_ID(dest); - write_msi_msg_desc(desc, &msg); + __write_msi_msg(data->msi_desc, &msg); return 0; } @@ -3421,17 +3218,17 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) * done in the process context using interrupt-remapping hardware. */ static int -ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) +ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) { - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg = desc->chip_data; - unsigned int dest; + struct irq_cfg *cfg = data->chip_data; + unsigned int dest, irq = data->irq; struct irte irte; if (get_irte(irq, &irte)) return -1; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(data, mask, &dest)) return -1; irte.vector = cfg->vector; @@ -3461,27 +3258,27 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) * which implement the MSI or MSI-X Capability Structure. 
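
The MSI affinity hunk above also shows the new irq_set_affinity signature: the callback gets the irq_data, the requested mask and a force flag, and the cached MSI message is read and written through data->msi_desc with the msi_desc based helpers instead of the old irq_desc based ones. A compressed sketch of that flow, using only helpers visible in this patch; example_msi_set_affinity is an illustrative name and error handling is reduced to the minimum:

        static int example_msi_set_affinity(struct irq_data *data,
                                            const struct cpumask *mask, bool force)
        {
                struct irq_cfg *cfg = data->chip_data;
                struct msi_msg msg;
                unsigned int dest;

                if (__ioapic_set_affinity(data, mask, &dest))
                        return -1;

                __get_cached_msi_msg(data->msi_desc, &msg);     /* cached copy, no config space read */

                msg.data &= ~MSI_DATA_VECTOR_MASK;
                msg.data |= MSI_DATA_VECTOR(cfg->vector);
                msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
                msg.address_lo |= MSI_ADDR_DEST_ID(dest);

                __write_msi_msg(data->msi_desc, &msg);          /* push the new routing to the device */
                return 0;
        }
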
*/ static struct irq_chip msi_chip = { - .name = "PCI-MSI", - .unmask = unmask_msi_irq, - .mask = mask_msi_irq, - .ack = ack_apic_edge, + .name = "PCI-MSI", + .irq_unmask = unmask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_ack = ack_apic_edge, #ifdef CONFIG_SMP - .set_affinity = set_msi_irq_affinity, + .irq_set_affinity = msi_set_affinity, #endif - .retrigger = ioapic_retrigger_irq, + .irq_retrigger = ioapic_retrigger_irq, }; static struct irq_chip msi_ir_chip = { - .name = "IR-PCI-MSI", - .unmask = unmask_msi_irq, - .mask = mask_msi_irq, + .name = "IR-PCI-MSI", + .irq_unmask = unmask_msi_irq, + .irq_mask = mask_msi_irq, #ifdef CONFIG_INTR_REMAP - .ack = ir_ack_apic_edge, + .irq_ack = ir_ack_apic_edge, #ifdef CONFIG_SMP - .set_affinity = ir_set_msi_irq_affinity, + .irq_set_affinity = ir_msi_set_affinity, #endif #endif - .retrigger = ioapic_retrigger_irq, + .irq_retrigger = ioapic_retrigger_irq, }; /* @@ -3513,8 +3310,8 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) { - int ret; struct msi_msg msg; + int ret; ret = msi_compose_msg(dev, irq, &msg, -1); if (ret < 0) @@ -3523,12 +3320,8 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) set_irq_msi(irq, msidesc); write_msi_msg(irq, &msg); - if (irq_remapped(irq)) { - struct irq_desc *desc = irq_to_desc(irq); - /* - * irq migration in process context - */ - desc->status |= IRQ_MOVE_PCNTXT; + if (irq_remapped(get_irq_chip_data(irq))) { + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); } else set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); @@ -3540,13 +3333,10 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { - unsigned int irq; - int ret, sub_handle; + int node, ret, sub_handle, index = 0; + unsigned int irq, irq_want; struct msi_desc *msidesc; - unsigned int irq_want; struct intel_iommu *iommu = NULL; - int index = 0; - int node; /* x86 doesn't support multiple MSI yet */ if (type == PCI_CAP_ID_MSI && nvec > 1) @@ -3606,18 +3396,17 @@ void arch_teardown_msi_irq(unsigned int irq) #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) #ifdef CONFIG_SMP -static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +static int +dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) { - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; + struct irq_cfg *cfg = data->chip_data; + unsigned int dest, irq = data->irq; struct msi_msg msg; - unsigned int dest; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(data, mask, &dest)) return -1; - cfg = desc->chip_data; - dmar_msi_read(irq, &msg); msg.data &= ~MSI_DATA_VECTOR_MASK; @@ -3633,14 +3422,14 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) #endif /* CONFIG_SMP */ static struct irq_chip dmar_msi_type = { - .name = "DMAR_MSI", - .unmask = dmar_msi_unmask, - .mask = dmar_msi_mask, - .ack = ack_apic_edge, + .name = "DMAR_MSI", + .irq_unmask = dmar_msi_unmask, + .irq_mask = dmar_msi_mask, + .irq_ack = ack_apic_edge, #ifdef CONFIG_SMP - .set_affinity = dmar_msi_set_affinity, + .irq_set_affinity = dmar_msi_set_affinity, #endif - .retrigger = ioapic_retrigger_irq, + .irq_retrigger = ioapic_retrigger_irq, }; int arch_setup_dmar_msi(unsigned int irq) @@ -3661,26 
+3450,24 @@ int arch_setup_dmar_msi(unsigned int irq) #ifdef CONFIG_HPET_TIMER #ifdef CONFIG_SMP -static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +static int hpet_msi_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) { - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; + struct irq_cfg *cfg = data->chip_data; struct msi_msg msg; unsigned int dest; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(data, mask, &dest)) return -1; - cfg = desc->chip_data; - - hpet_msi_read(irq, &msg); + hpet_msi_read(data->handler_data, &msg); msg.data &= ~MSI_DATA_VECTOR_MASK; msg.data |= MSI_DATA_VECTOR(cfg->vector); msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; msg.address_lo |= MSI_ADDR_DEST_ID(dest); - hpet_msi_write(irq, &msg); + hpet_msi_write(data->handler_data, &msg); return 0; } @@ -3688,34 +3475,33 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) #endif /* CONFIG_SMP */ static struct irq_chip ir_hpet_msi_type = { - .name = "IR-HPET_MSI", - .unmask = hpet_msi_unmask, - .mask = hpet_msi_mask, + .name = "IR-HPET_MSI", + .irq_unmask = hpet_msi_unmask, + .irq_mask = hpet_msi_mask, #ifdef CONFIG_INTR_REMAP - .ack = ir_ack_apic_edge, + .irq_ack = ir_ack_apic_edge, #ifdef CONFIG_SMP - .set_affinity = ir_set_msi_irq_affinity, + .irq_set_affinity = ir_msi_set_affinity, #endif #endif - .retrigger = ioapic_retrigger_irq, + .irq_retrigger = ioapic_retrigger_irq, }; static struct irq_chip hpet_msi_type = { .name = "HPET_MSI", - .unmask = hpet_msi_unmask, - .mask = hpet_msi_mask, - .ack = ack_apic_edge, + .irq_unmask = hpet_msi_unmask, + .irq_mask = hpet_msi_mask, + .irq_ack = ack_apic_edge, #ifdef CONFIG_SMP - .set_affinity = hpet_msi_set_affinity, + .irq_set_affinity = hpet_msi_set_affinity, #endif - .retrigger = ioapic_retrigger_irq, + .irq_retrigger = ioapic_retrigger_irq, }; int arch_setup_hpet_msi(unsigned int irq, unsigned int id) { - int ret; struct msi_msg msg; - struct irq_desc *desc = irq_to_desc(irq); + int ret; if (intr_remapping_enabled) { struct intel_iommu *iommu = map_hpet_to_ir(id); @@ -3733,9 +3519,9 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) if (ret < 0) return ret; - hpet_msi_write(irq, &msg); - desc->status |= IRQ_MOVE_PCNTXT; - if (irq_remapped(irq)) + hpet_msi_write(get_irq_data(irq), &msg); + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); + if (irq_remapped(get_irq_chip_data(irq))) set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, handle_edge_irq, "edge"); else @@ -3768,33 +3554,30 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) write_ht_irq_msg(irq, &msg); } -static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) +static int +ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; + struct irq_cfg *cfg = data->chip_data; unsigned int dest; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(data, mask, &dest)) return -1; - cfg = desc->chip_data; - - target_ht_irq(irq, dest, cfg->vector); - + target_ht_irq(data->irq, dest, cfg->vector); return 0; } #endif static struct irq_chip ht_irq_chip = { - .name = "PCI-HT", - .mask = mask_ht_irq, - .unmask = unmask_ht_irq, - .ack = ack_apic_edge, + .name = "PCI-HT", + .irq_mask = mask_ht_irq, + .irq_unmask = unmask_ht_irq, + .irq_ack = ack_apic_edge, #ifdef CONFIG_SMP - .set_affinity = set_ht_irq_affinity, + .irq_set_affinity = ht_set_affinity, 
#endif - .retrigger = ioapic_retrigger_irq, + .irq_retrigger = ioapic_retrigger_irq, }; int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) @@ -3885,14 +3668,13 @@ int __init arch_probe_nr_irqs(void) if (nr < nr_irqs) nr_irqs = nr; - return 0; + return NR_IRQS_LEGACY; } #endif static int __io_apic_set_pci_routing(struct device *dev, int irq, struct io_apic_irq_attr *irq_attr) { - struct irq_desc *desc; struct irq_cfg *cfg; int node; int ioapic, pin; @@ -3908,13 +3690,11 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, if (dev) node = dev_to_node(dev); else - node = cpu_to_node(boot_cpu_id); + node = cpu_to_node(0); - desc = irq_to_desc_alloc_node(irq, node); - if (!desc) { - printk(KERN_INFO "can not get irq_desc %d\n", irq); + cfg = alloc_irq_and_cfg_at(irq, node); + if (!cfg) return 0; - } pin = irq_attr->ioapic_pin; trigger = irq_attr->trigger; @@ -3924,15 +3704,14 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, * IRQs < 16 are already in the irq_2_pin[] map */ if (irq >= legacy_pic->nr_legacy_irqs) { - cfg = desc->chip_data; - if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { + if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { printk(KERN_INFO "can not add pin %d for irq %d\n", pin, irq); return 0; } } - setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); + setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); return 0; } @@ -4125,14 +3904,14 @@ void __init setup_ioapic_dest(void) */ if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) - mask = desc->affinity; + mask = desc->irq_data.affinity; else mask = apic->target_cpus(); if (intr_remapping_enabled) - set_ir_ioapic_affinity_irq_desc(desc, mask); + ir_ioapic_set_affinity(&desc->irq_data, mask, false); else - set_ioapic_affinity_irq_desc(desc, mask); + ioapic_set_affinity(&desc->irq_data, mask, false); } } @@ -4316,19 +4095,18 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) void __init pre_init_apic_IRQ0(void) { struct irq_cfg *cfg; - struct irq_desc *desc; printk(KERN_INFO "Early APIC setup for system timer0\n"); #ifndef CONFIG_SMP phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); #endif - desc = irq_to_desc_alloc_node(0, 0); + /* Make sure the irq descriptor is set up */ + cfg = alloc_irq_and_cfg_at(0, 0); setup_local_APIC(); - cfg = irq_cfg(0); add_pin_to_irq_node(cfg, 0, 0, 0); set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); - setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); + setup_ioapic_irq(0, 0, 0, cfg, 0, 0); } diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index a43f71cb30f..c90041ccb74 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c @@ -178,7 +178,7 @@ int __init check_nmi_watchdog(void) error: if (nmi_watchdog == NMI_IO_APIC) { if (!timer_through_8259) - legacy_pic->chip->mask(0); + legacy_pic->mask(0); on_each_cpu(__acpi_nmi_disable, NULL, 1); } diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 83e9be4778e..f9e4e6a5407 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -54,6 +54,9 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb) */ void __init default_setup_apic_routing(void) { + + enable_IR_x2apic(); + #ifdef CONFIG_X86_X2APIC if (x2apic_mode #ifdef CONFIG_X86_UV diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ba5f62f45f0..a8b4d91b839 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -148,7 +148,7 
@@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP /* calling is from identify_secondary_cpu() ? */ - if (c->cpu_index == boot_cpu_id) + if (!c->cpu_index) return; /* diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f2f9ac7da25..15c671385f5 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -665,7 +665,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) this_cpu->c_early_init(c); #ifdef CONFIG_SMP - c->cpu_index = boot_cpu_id; + c->cpu_index = 0; #endif filter_cpuid_features(c, false); } diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b4389441efb..695f17731e2 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -170,7 +170,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP /* calling is from identify_secondary_cpu() ? */ - if (c->cpu_index == boot_cpu_id) + if (!c->cpu_index) return; /* diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index ebdb85cf268..76b8cd953de 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -97,7 +97,6 @@ static void __init nvidia_bugs(int num, int slot, int func) } #if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC) -#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC) static u32 __init ati_ixp4x0_rev(int num, int slot, int func) { u32 d; @@ -115,7 +114,6 @@ static u32 __init ati_ixp4x0_rev(int num, int slot, int func) d &= 0xff; return d; } -#endif static void __init ati_bugs(int num, int slot, int func) { diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 7494999141b..efaf906daf9 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -440,9 +440,9 @@ static int hpet_legacy_next_event(unsigned long delta, static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); static struct hpet_dev *hpet_devs; -void hpet_msi_unmask(unsigned int irq) +void hpet_msi_unmask(struct irq_data *data) { - struct hpet_dev *hdev = get_irq_data(irq); + struct hpet_dev *hdev = data->handler_data; unsigned int cfg; /* unmask it */ @@ -451,10 +451,10 @@ void hpet_msi_unmask(unsigned int irq) hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } -void hpet_msi_mask(unsigned int irq) +void hpet_msi_mask(struct irq_data *data) { + struct hpet_dev *hdev = data->handler_data; unsigned int cfg; - struct hpet_dev *hdev = get_irq_data(irq); /* mask it */ cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); @@ -462,18 +462,14 @@ void hpet_msi_mask(unsigned int irq) hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } -void hpet_msi_write(unsigned int irq, struct msi_msg *msg) +void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg) { - struct hpet_dev *hdev = get_irq_data(irq); - hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); } -void hpet_msi_read(unsigned int irq, struct msi_msg *msg) +void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg) { - struct hpet_dev *hdev = get_irq_data(irq); - msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); msg->address_hi = 0; diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index cafa7c80ac9..20757cb2efa 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -29,24 +29,10 @@ * plus some generic x86 specific things if generic specifics makes * any sense at all. 
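
The HPET MSI hunks above use a third per-interrupt pointer besides chip_data and msi_desc: data->handler_data carries the driver-private hpet_dev, so mask and unmask no longer need get_irq_data(irq) and the read/write helpers can take the device pointer directly. A sketch of that access pattern; struct my_dev and example_mask are placeholder names, not symbols from this patch:

        struct my_dev {
                unsigned int num;               /* driver-private state, e.g. a timer index */
        };

        static void example_mask(struct irq_data *data)
        {
                struct my_dev *dev = data->handler_data;  /* pointer the driver stored for this irq */

                /* program the device registers for 'dev' to mask its interrupt ... */
        }
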
*/ +static void init_8259A(int auto_eoi); static int i8259A_auto_eoi; DEFINE_RAW_SPINLOCK(i8259A_lock); -static void mask_and_ack_8259A(unsigned int); -static void mask_8259A(void); -static void unmask_8259A(void); -static void disable_8259A_irq(unsigned int irq); -static void enable_8259A_irq(unsigned int irq); -static void init_8259A(int auto_eoi); -static int i8259A_irq_pending(unsigned int irq); - -struct irq_chip i8259A_chip = { - .name = "XT-PIC", - .mask = disable_8259A_irq, - .disable = disable_8259A_irq, - .unmask = enable_8259A_irq, - .mask_ack = mask_and_ack_8259A, -}; /* * 8259A PIC functions to handle ISA devices: @@ -68,7 +54,7 @@ unsigned int cached_irq_mask = 0xffff; */ unsigned long io_apic_irqs; -static void disable_8259A_irq(unsigned int irq) +static void mask_8259A_irq(unsigned int irq) { unsigned int mask = 1 << irq; unsigned long flags; @@ -82,7 +68,12 @@ static void disable_8259A_irq(unsigned int irq) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } -static void enable_8259A_irq(unsigned int irq) +static void disable_8259A_irq(struct irq_data *data) +{ + mask_8259A_irq(data->irq); +} + +static void unmask_8259A_irq(unsigned int irq) { unsigned int mask = ~(1 << irq); unsigned long flags; @@ -96,6 +87,11 @@ static void enable_8259A_irq(unsigned int irq) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } +static void enable_8259A_irq(struct irq_data *data) +{ + unmask_8259A_irq(data->irq); +} + static int i8259A_irq_pending(unsigned int irq) { unsigned int mask = 1<<irq; @@ -117,7 +113,7 @@ static void make_8259A_irq(unsigned int irq) disable_irq_nosync(irq); io_apic_irqs &= ~(1<<irq); set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, - "XT"); + i8259A_chip.name); enable_irq(irq); } @@ -150,8 +146,9 @@ static inline int i8259A_irq_real(unsigned int irq) * first, _then_ send the EOI, and the order of EOI * to the two 8259s is important! */ -static void mask_and_ack_8259A(unsigned int irq) +static void mask_and_ack_8259A(struct irq_data *data) { + unsigned int irq = data->irq; unsigned int irqmask = 1 << irq; unsigned long flags; @@ -223,6 +220,14 @@ spurious_8259A_irq: } } +struct irq_chip i8259A_chip = { + .name = "XT-PIC", + .irq_mask = disable_8259A_irq, + .irq_disable = disable_8259A_irq, + .irq_unmask = enable_8259A_irq, + .irq_mask_ack = mask_and_ack_8259A, +}; + static char irq_trigger[2]; /** * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ @@ -342,9 +347,9 @@ static void init_8259A(int auto_eoi) * In AEOI mode we just have to mask the interrupt * when acking. 
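
The 8259A conversion above keeps the low-level helpers on the old (unsigned int irq) signature so the legacy_pic mask/unmask function pointers can still use them, and adds thin adapters for the irq_data based irq_chip methods. The same pattern in miniature; my_mask_hw, my_irq_mask and my_chip are illustrative names only:

        static void my_mask_hw(unsigned int irq)
        {
                /* touch the controller registers for 'irq' ... */
        }

        static void my_irq_mask(struct irq_data *data)
        {
                my_mask_hw(data->irq);          /* adapt irq_data to the number-based helper */
        }

        static struct irq_chip my_chip = {
                .name           = "example",
                .irq_mask       = my_irq_mask,
        };
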
*/ - i8259A_chip.mask_ack = disable_8259A_irq; + i8259A_chip.irq_mask_ack = disable_8259A_irq; else - i8259A_chip.mask_ack = mask_and_ack_8259A; + i8259A_chip.irq_mask_ack = mask_and_ack_8259A; udelay(100); /* wait for 8259A to initialize */ @@ -363,14 +368,6 @@ static void init_8259A(int auto_eoi) static void legacy_pic_noop(void) { }; static void legacy_pic_uint_noop(unsigned int unused) { }; static void legacy_pic_int_noop(int unused) { }; - -static struct irq_chip dummy_pic_chip = { - .name = "dummy pic", - .mask = legacy_pic_uint_noop, - .unmask = legacy_pic_uint_noop, - .disable = legacy_pic_uint_noop, - .mask_ack = legacy_pic_uint_noop, -}; static int legacy_pic_irq_pending_noop(unsigned int irq) { return 0; @@ -378,7 +375,9 @@ static int legacy_pic_irq_pending_noop(unsigned int irq) struct legacy_pic null_legacy_pic = { .nr_legacy_irqs = 0, - .chip = &dummy_pic_chip, + .chip = &dummy_irq_chip, + .mask = legacy_pic_uint_noop, + .unmask = legacy_pic_uint_noop, .mask_all = legacy_pic_noop, .restore_mask = legacy_pic_noop, .init = legacy_pic_int_noop, @@ -389,7 +388,9 @@ struct legacy_pic null_legacy_pic = { struct legacy_pic default_legacy_pic = { .nr_legacy_irqs = NR_IRQS_LEGACY, .chip = &i8259A_chip, - .mask_all = mask_8259A, + .mask = mask_8259A_irq, + .unmask = unmask_8259A_irq, + .mask_all = mask_8259A, .restore_mask = unmask_8259A, .init = init_8259A, .irq_pending = i8259A_irq_pending, diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 91fd0c70a18..d765bdc4807 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -159,7 +159,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%*d: ", prec, i); for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); - seq_printf(p, " %8s", desc->chip->name); + seq_printf(p, " %8s", desc->irq_data.chip->name); seq_printf(p, "-%-8s", desc->name); if (action) { @@ -282,6 +282,7 @@ void fixup_irqs(void) unsigned int irq, vector; static int warned; struct irq_desc *desc; + struct irq_data *data; for_each_irq_desc(irq, desc) { int break_affinity = 0; @@ -296,7 +297,8 @@ void fixup_irqs(void) /* interrupt's are disabled at this point */ raw_spin_lock(&desc->lock); - affinity = desc->affinity; + data = &desc->irq_data; + affinity = data->affinity; if (!irq_has_action(irq) || cpumask_equal(affinity, cpu_online_mask)) { raw_spin_unlock(&desc->lock); @@ -315,16 +317,16 @@ void fixup_irqs(void) affinity = cpu_all_mask; } - if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) - desc->chip->mask(irq); + if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) + data->chip->irq_mask(data); - if (desc->chip->set_affinity) - desc->chip->set_affinity(irq, affinity); + if (data->chip->irq_set_affinity) + data->chip->irq_set_affinity(data, affinity, true); else if (!(warned++)) set_affinity = 0; - if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) - desc->chip->unmask(irq); + if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) + data->chip->irq_unmask(data); raw_spin_unlock(&desc->lock); @@ -355,10 +357,10 @@ void fixup_irqs(void) if (irr & (1 << (vector % 32))) { irq = __get_cpu_var(vector_irq)[vector]; - desc = irq_to_desc(irq); + data = irq_get_irq_data(irq); raw_spin_lock(&desc->lock); - if (desc->chip->retrigger) - desc->chip->retrigger(irq); + if (data->chip->irq_retrigger) + data->chip->irq_retrigger(data); raw_spin_unlock(&desc->lock); } } diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 990ae7cfc57..a91ab503e24 100644 --- 
a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -100,6 +100,8 @@ int vector_used_by_percpu_irq(unsigned int vector) void __init init_ISA_irqs(void) { + struct irq_chip *chip = legacy_pic->chip; + const char *name = chip->name; int i; #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) @@ -107,19 +109,8 @@ void __init init_ISA_irqs(void) #endif legacy_pic->init(0); - /* - * 16 old-style INTA-cycle interrupts: - */ - for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) { - struct irq_desc *desc = irq_to_desc(i); - - desc->status = IRQ_DISABLED; - desc->action = NULL; - desc->depth = 1; - - set_irq_chip_and_handler_name(i, &i8259A_chip, - handle_level_irq, "XT"); - } + for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) + set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); } void __init init_IRQ(void) diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 035c8c52918..b3ea9db39db 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -36,7 +36,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd, if (!page) goto out; pud = (pud_t *)page_address(page); - memset(pud, 0, PAGE_SIZE); + clear_page(pud); set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE)); } pud = pud_offset(pgd, addr); @@ -45,7 +45,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd, if (!page) goto out; pmd = (pmd_t *)page_address(page); - memset(pmd, 0, PAGE_SIZE); + clear_page(pmd); set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); } pmd = pmd_offset(pud, addr); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e3af342fe83..7a4cf14223b 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -84,7 +84,7 @@ static int __init reboot_setup(char *str) } /* we will leave sorting out the final value when we are ready to reboot, since we might not - have set up boot_cpu_id or smp_num_cpu */ + have detected BSP APIC ID or smp_num_cpu */ break; #endif /* CONFIG_SMP */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c3a4fbb2b99..7d5ee08c982 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -125,7 +125,6 @@ unsigned long max_pfn_mapped; RESERVE_BRK(dmi_alloc, 65536); #endif -unsigned int boot_cpu_id __read_mostly; static __initdata unsigned long _brk_start = (unsigned long)__brk_base; unsigned long _brk_end = (unsigned long)__brk_base; diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index a60df9ae645..2335c15c93a 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -253,7 +253,7 @@ void __init setup_per_cpu_areas(void) * Up to this point, the boot CPU has been using .init.data * area. Reload any changed state for the boot CPU. 
*/ - if (cpu == boot_cpu_id) + if (!cpu) switch_to_new_gdt(cpu); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8b3bfc4dd70..864b386f6c0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -324,9 +324,9 @@ notrace static void __cpuinit start_secondary(void *unused) check_tsc_sync_target(); if (nmi_watchdog == NMI_IO_APIC) { - legacy_pic->chip->mask(0); + legacy_pic->mask(0); enable_NMI_through_LVT0(); - legacy_pic->chip->unmask(0); + legacy_pic->unmask(0); } /* This must be done before setting cpu_online_mask */ @@ -1109,8 +1109,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) } set_cpu_sibling_map(0); - enable_IR_x2apic(); - default_setup_apic_routing(); if (smp_sanity_check(max_cpus) < 0) { printk(KERN_INFO "SMP disabled\n"); @@ -1118,6 +1116,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) goto out; } + default_setup_apic_routing(); + preempt_disable(); if (read_apic_id() != boot_cpu_physical_apicid) { panic("Boot APIC ID in local APIC unexpected (%d vs %d)", diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index 1132129db79..7b24460917d 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c @@ -28,34 +28,21 @@ struct uv_irq_2_mmr_pnode{ static spinlock_t uv_irq_lock; static struct rb_root uv_irq_root; -static int uv_set_irq_affinity(unsigned int, const struct cpumask *); +static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool); -static void uv_noop(unsigned int irq) -{ -} - -static unsigned int uv_noop_ret(unsigned int irq) -{ - return 0; -} +static void uv_noop(struct irq_data *data) { } -static void uv_ack_apic(unsigned int irq) +static void uv_ack_apic(struct irq_data *data) { ack_APIC_irq(); } static struct irq_chip uv_irq_chip = { - .name = "UV-CORE", - .startup = uv_noop_ret, - .shutdown = uv_noop, - .enable = uv_noop, - .disable = uv_noop, - .ack = uv_noop, - .mask = uv_noop, - .unmask = uv_noop, - .eoi = uv_ack_apic, - .end = uv_noop, - .set_affinity = uv_set_irq_affinity, + .name = "UV-CORE", + .irq_mask = uv_noop, + .irq_unmask = uv_noop, + .irq_eoi = uv_ack_apic, + .irq_set_affinity = uv_set_irq_affinity, }; /* @@ -144,26 +131,22 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long mmr_offset, int limit) { const struct cpumask *eligible_cpu = cpumask_of(cpu); - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; - int mmr_pnode; + struct irq_cfg *cfg = get_irq_chip_data(irq); unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; - int err; + int mmr_pnode, err; BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); - cfg = irq_cfg(irq); - err = assign_irq_vector(irq, cfg, eligible_cpu); if (err != 0) return err; if (limit == UV_AFFINITY_CPU) - desc->status |= IRQ_NO_BALANCING; + irq_set_status_flags(irq, IRQ_NO_BALANCING); else - desc->status |= IRQ_MOVE_PCNTXT; + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, irq_name); @@ -206,17 +189,17 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); } -static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) +static int +uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) { - struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg = desc->chip_data; + struct irq_cfg *cfg = data->chip_data; unsigned int dest; - 
unsigned long mmr_value; + unsigned long mmr_value, mmr_offset; struct uv_IO_APIC_route_entry *entry; - unsigned long mmr_offset; int mmr_pnode; - if (set_desc_affinity(desc, mask, &dest)) + if (__ioapic_set_affinity(data, mask, &dest)) return -1; mmr_value = 0; @@ -231,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) entry->dest = dest; /* Get previously stored MMR and pnode of hub sourcing interrupts */ - if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) + if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode)) return -1; uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index e680ea52db9..3371bd053b8 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -66,10 +66,7 @@ static void __init visws_time_init(void) } /* Replaces the default init_ISA_irqs in the generic setup */ -static void __init visws_pre_intr_init(void) -{ - init_VISWS_APIC_irqs(); -} +static void __init visws_pre_intr_init(void); /* Quirk for machine specific memory setup. */ @@ -429,67 +426,34 @@ static int is_co_apic(unsigned int irq) /* * This is the SGI Cobalt (IO-)APIC: */ - -static void enable_cobalt_irq(unsigned int irq) +static void enable_cobalt_irq(struct irq_data *data) { - co_apic_set(is_co_apic(irq), irq); + co_apic_set(is_co_apic(data->irq), data->irq); } -static void disable_cobalt_irq(unsigned int irq) +static void disable_cobalt_irq(struct irq_data *data) { - int entry = is_co_apic(irq); + int entry = is_co_apic(data->irq); co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); co_apic_read(CO_APIC_LO(entry)); } -/* - * "irq" really just serves to identify the device. Here is where we - * map this to the Cobalt APIC entry where it's physically wired. - * This is called via request_irq -> setup_irq -> irq_desc->startup() - */ -static unsigned int startup_cobalt_irq(unsigned int irq) +static void ack_cobalt_irq(struct irq_data *data) { unsigned long flags; - struct irq_desc *desc = irq_to_desc(irq); spin_lock_irqsave(&cobalt_lock, flags); - if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) - desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING); - enable_cobalt_irq(irq); - spin_unlock_irqrestore(&cobalt_lock, flags); - return 0; -} - -static void ack_cobalt_irq(unsigned int irq) -{ - unsigned long flags; - - spin_lock_irqsave(&cobalt_lock, flags); - disable_cobalt_irq(irq); + disable_cobalt_irq(data); apic_write(APIC_EOI, APIC_EIO_ACK); spin_unlock_irqrestore(&cobalt_lock, flags); } -static void end_cobalt_irq(unsigned int irq) -{ - unsigned long flags; - struct irq_desc *desc = irq_to_desc(irq); - - spin_lock_irqsave(&cobalt_lock, flags); - if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) - enable_cobalt_irq(irq); - spin_unlock_irqrestore(&cobalt_lock, flags); -} - static struct irq_chip cobalt_irq_type = { - .name = "Cobalt-APIC", - .startup = startup_cobalt_irq, - .shutdown = disable_cobalt_irq, - .enable = enable_cobalt_irq, - .disable = disable_cobalt_irq, - .ack = ack_cobalt_irq, - .end = end_cobalt_irq, + .name = "Cobalt-APIC", + .irq_enable = enable_cobalt_irq, + .irq_disable = disable_cobalt_irq, + .irq_ack = ack_cobalt_irq, }; @@ -503,35 +467,34 @@ static struct irq_chip cobalt_irq_type = { * interrupt controller type, and through a special virtual interrupt- * controller. Device drivers only see the virtual interrupt sources. 
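
Another recurring substitution, visible in the UV and HPET paths above as well as in the local APIC timer setup, is that code outside the irq core no longer writes desc->status directly; it calls irq_set_status_flags() or irq_clear_status_flags() on the interrupt number. A minimal sketch mirroring the UV usage; example_restrict_irq is an illustrative name:

        static void example_restrict_irq(unsigned int irq, bool percpu)
        {
                if (percpu)
                        irq_set_status_flags(irq, IRQ_NO_BALANCING);    /* keep the balancer away */
                else
                        irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);     /* migrate from process context */
        }
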
*/ -static unsigned int startup_piix4_master_irq(unsigned int irq) +static unsigned int startup_piix4_master_irq(struct irq_data *data) { legacy_pic->init(0); - - return startup_cobalt_irq(irq); + enable_cobalt_irq(data); } -static void end_piix4_master_irq(unsigned int irq) +static void end_piix4_master_irq(struct irq_data *data) { unsigned long flags; spin_lock_irqsave(&cobalt_lock, flags); - enable_cobalt_irq(irq); + enable_cobalt_irq(data); spin_unlock_irqrestore(&cobalt_lock, flags); } static struct irq_chip piix4_master_irq_type = { - .name = "PIIX4-master", - .startup = startup_piix4_master_irq, - .ack = ack_cobalt_irq, - .end = end_piix4_master_irq, + .name = "PIIX4-master", + .irq_startup = startup_piix4_master_irq, + .irq_ack = ack_cobalt_irq, }; +static void pii4_mask(struct irq_data *data) { } static struct irq_chip piix4_virtual_irq_type = { - .name = "PIIX4-virtual", + .name = "PIIX4-virtual", + .mask = pii4_mask, }; - /* * PIIX4-8259 master/virtual functions to handle interrupt requests * from legacy devices: floppy, parallel, serial, rtc. @@ -549,9 +512,8 @@ static struct irq_chip piix4_virtual_irq_type = { */ static irqreturn_t piix4_master_intr(int irq, void *dev_id) { - int realirq; - struct irq_desc *desc; unsigned long flags; + int realirq; raw_spin_lock_irqsave(&i8259A_lock, flags); @@ -592,18 +554,10 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) raw_spin_unlock_irqrestore(&i8259A_lock, flags); - desc = irq_to_desc(realirq); - /* * handle this 'virtual interrupt' as a Cobalt one now. */ - kstat_incr_irqs_this_cpu(realirq, desc); - - if (likely(desc->action != NULL)) - handle_IRQ_event(realirq, desc->action); - - if (!(desc->status & IRQ_DISABLED)) - legacy_pic->chip->unmask(realirq); + generic_handle_irq(realirq); return IRQ_HANDLED; @@ -624,41 +578,35 @@ static struct irqaction cascade_action = { static inline void set_piix4_virtual_irq_type(void) { - piix4_virtual_irq_type.shutdown = i8259A_chip.mask; piix4_virtual_irq_type.enable = i8259A_chip.unmask; piix4_virtual_irq_type.disable = i8259A_chip.mask; + piix4_virtual_irq_type.unmask = i8259A_chip.unmask; } -void init_VISWS_APIC_irqs(void) +static void __init visws_pre_intr_init(void) { int i; - for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { - struct irq_desc *desc = irq_to_desc(i); - - desc->status = IRQ_DISABLED; - desc->action = 0; - desc->depth = 1; + set_piix4_virtual_irq_type(); - if (i == 0) { - desc->chip = &cobalt_irq_type; - } - else if (i == CO_IRQ_IDE0) { - desc->chip = &cobalt_irq_type; - } - else if (i == CO_IRQ_IDE1) { - desc->chip = &cobalt_irq_type; - } - else if (i == CO_IRQ_8259) { - desc->chip = &piix4_master_irq_type; - } - else if (i < CO_IRQ_APIC0) { - set_piix4_virtual_irq_type(); - desc->chip = &piix4_virtual_irq_type; - } - else if (IS_CO_APIC(i)) { - desc->chip = &cobalt_irq_type; - } + for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { + struct irq_chip *chip = NULL; + + if (i == 0) + chip = &cobalt_irq_type; + else if (i == CO_IRQ_IDE0) + chip = &cobalt_irq_type; + else if (i == CO_IRQ_IDE1) + >chip = &cobalt_irq_type; + else if (i == CO_IRQ_8259) + chip = &piix4_master_irq_type; + else if (i < CO_IRQ_APIC0) + chip = &piix4_virtual_irq_type; + else if (IS_CO_APIC(i)) + chip = &cobalt_irq_type; + + if (chip) + set_irq_chip(i, chip); } setup_irq(CO_IRQ_8259, &master_action); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 77d8c0f4817..22b06f7660f 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1056,14 +1056,13 @@ int 
kvm_create_lapic(struct kvm_vcpu *vcpu) vcpu->arch.apic = apic; - apic->regs_page = alloc_page(GFP_KERNEL); + apic->regs_page = alloc_page(GFP_KERNEL|__GFP_ZERO); if (apic->regs_page == NULL) { printk(KERN_ERR "malloc apic regs error for vcpu %x\n", vcpu->vcpu_id); goto nomem_free_apic; } apic->regs = page_address(apic->regs_page); - memset(apic->regs, 0, PAGE_SIZE); apic->vcpu = vcpu; hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 9d5f5584845..73b1e1a1f48 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -791,22 +791,22 @@ static void lguest_flush_tlb_kernel(void) * simple as setting a bit. We don't actually "ack" interrupts as such, we * just mask and unmask them. I wonder if we should be cleverer? */ -static void disable_lguest_irq(unsigned int irq) +static void disable_lguest_irq(struct irq_data *data) { - set_bit(irq, lguest_data.blocked_interrupts); + set_bit(data->irq, lguest_data.blocked_interrupts); } -static void enable_lguest_irq(unsigned int irq) +static void enable_lguest_irq(struct irq_data *data) { - clear_bit(irq, lguest_data.blocked_interrupts); + clear_bit(data->irq, lguest_data.blocked_interrupts); } /* This structure describes the lguest IRQ controller. */ static struct irq_chip lguest_irq_controller = { .name = "lguest", - .mask = disable_lguest_irq, - .mask_ack = disable_lguest_irq, - .unmask = enable_lguest_irq, + .irq_mask = disable_lguest_irq, + .irq_mask_ack = disable_lguest_irq, + .irq_unmask = enable_lguest_irq, }; /* @@ -838,12 +838,12 @@ static void __init lguest_init_IRQ(void) * rather than set them in lguest_init_IRQ we are called here every time an * lguest device needs an interrupt. * - * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should + * FIXME: irq_alloc_desc_at() can fail due to lack of memory, we should * pass that up! 
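
The lguest FIXME above refers to the descriptor allocation change that runs through this series: sparse irq descriptors are reserved with irq_alloc_desc_at() and released with irq_free_desc(), replacing irq_to_desc_alloc_node() and dynamic_irq_cleanup(). A sketch of the pairing; example_claim_irq, example_release_irq and my_chip are illustrative names, and the level flow handler is only an example:

        static int example_claim_irq(unsigned int irq, int node)
        {
                int res = irq_alloc_desc_at(irq, node); /* reserve exactly this number */

                if (res < 0)
                        return res;                     /* already taken or out of memory */

                set_irq_chip_and_handler_name(irq, &my_chip,
                                              handle_level_irq, "level");
                return 0;
        }

        static void example_release_irq(unsigned int irq)
        {
                irq_free_desc(irq);
        }
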
*/ void lguest_setup_irq(unsigned int irq) { - irq_to_desc_alloc_node(irq, 0); + irq_alloc_desc_at(irq, 0); set_irq_chip_and_handler_name(irq, &lguest_irq_controller, handle_level_irq, "level"); } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index bca79091b9d..558f2d33207 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -67,7 +67,7 @@ static __init void *alloc_low_page(void) panic("alloc_low_page: ran out of memory"); adr = __va(pfn * PAGE_SIZE); - memset(adr, 0, PAGE_SIZE); + clear_page(adr); return adr; } @@ -558,7 +558,7 @@ char swsusp_pg_dir[PAGE_SIZE] static inline void save_pg_dir(void) { - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE); + copy_page(swsusp_pg_dir, swapper_pg_dir); } #else /* !CONFIG_ACPI_SLEEP */ static inline void save_pg_dir(void) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9a6674689a2..7c48ad4faca 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -293,7 +293,7 @@ static __ref void *alloc_low_page(unsigned long *phys) panic("alloc_low_page: ran out of memory"); adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); - memset(adr, 0, PAGE_SIZE); + clear_page(adr); *phys = pfn * PAGE_SIZE; return adr; } diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c index 970ed579d4e..240f86462a8 100644 --- a/arch/x86/mm/k8topology_64.c +++ b/arch/x86/mm/k8topology_64.c @@ -54,8 +54,8 @@ static __init int find_northbridge(void) static __init void early_get_boot_cpu_id(void) { /* - * need to get boot_cpu_id so can use that to create apicid_to_node - * in k8_scan_nodes() + * need to get the APIC ID of the BSP so can use that to + * create apicid_to_node in k8_scan_nodes() */ #ifdef CONFIG_X86_MPPARSE /* @@ -212,7 +212,7 @@ int __init k8_scan_nodes(void) bits = boot_cpu_data.x86_coreid_bits; cores = (1<<bits); apicid_base = 0; - /* need to get boot_cpu_id early for system with apicid lifting */ + /* get the APIC ID of the BSP early for systems with apicid lifting */ early_get_boot_cpu_id(); if (boot_cpu_physical_apicid > 0) { pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid); diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index c64a5d387de..87508886cbb 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %14s", irq_desc[i].chip->typename); + seq_printf(p, " %14s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) diff --git a/drivers/isdn/act2000/act2000.h b/drivers/isdn/act2000/act2000.h index d4c50512a1f..88c9423500d 100644 --- a/drivers/isdn/act2000/act2000.h +++ b/drivers/isdn/act2000/act2000.h @@ -141,9 +141,9 @@ typedef struct irq_data_isa { __u8 rcvhdr[8]; } irq_data_isa; -typedef union irq_data { +typedef union act2000_irq_data { irq_data_isa isa; -} irq_data; +} act2000_irq_data; /* * Per card driver data @@ -176,7 +176,7 @@ typedef struct act2000_card { char *status_buf_read; char *status_buf_write; char *status_buf_end; - irq_data idat; /* Data used for IRQ handler */ + act2000_irq_data idat; /* Data used for IRQ handler */ isdn_if interface; /* Interface to upper layer */ char regname[35]; /* Name used for request_region */ } act2000_card; diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 6f9afcd5ca4..b133378d4dc 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -801,6 
+801,16 @@ static void closecard(int cardnr) ll_unload(csta); } +static irqreturn_t card_irq(int intno, void *dev_id) +{ + struct IsdnCardState *cs = dev_id; + irqreturn_t ret = cs->irq_func(intno, cs); + + if (ret == IRQ_HANDLED) + cs->irq_cnt++; + return ret; +} + static int init_card(struct IsdnCardState *cs) { int irq_cnt, cnt = 3, ret; @@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs) ret = cs->cardmsg(cs, CARD_INIT, NULL); return(ret); } - irq_cnt = kstat_irqs(cs->irq); + irq_cnt = cs->irq_cnt = 0; printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], cs->irq, irq_cnt); - if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { + if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) { printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", cs->irq); return 1; @@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs) /* Timeout 10ms */ msleep(10); printk(KERN_INFO "%s: IRQ %d count %d\n", - CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); - if (kstat_irqs(cs->irq) == irq_cnt) { + CardType[cs->typ], cs->irq, cs->irq_cnt); + if (cs->irq_cnt == irq_cnt) { printk(KERN_WARNING "%s: IRQ(%d) getting no interrupts during init %d\n", CardType[cs->typ], cs->irq, 4 - cnt); diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h index 832a87855ff..32ab3924aa7 100644 --- a/drivers/isdn/hisax/hisax.h +++ b/drivers/isdn/hisax/hisax.h @@ -959,6 +959,7 @@ struct IsdnCardState { u_long event; struct work_struct tqueue; struct timer_list dbusytimer; + unsigned int irq_cnt; #ifdef ERROR_STATISTIC int err_crc; int err_tx; diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 097f24d8bce..b9fda7018ce 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -78,7 +78,7 @@ struct sih { u8 irq_lines; /* number of supported irq lines */ /* SIR ignored -- set interrupt, for testing only */ - struct irq_data { + struct sih_irq_data { u8 isr_offset; u8 imr_offset; } mask[2]; @@ -810,7 +810,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) twl4030_irq_chip = dummy_irq_chip; twl4030_irq_chip.name = "twl4030"; - twl4030_sih_irq_chip.ack = dummy_irq_chip.ack; + twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; for (i = irq_base; i < irq_end; i++) { set_irq_chip_and_handler(i, &twl4030_irq_chip, diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 0a19708074c..3de3a436a43 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -1221,9 +1221,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) } } -void dmar_msi_unmask(unsigned int irq) +void dmar_msi_unmask(struct irq_data *data) { - struct intel_iommu *iommu = get_irq_data(irq); + struct intel_iommu *iommu = irq_data_get_irq_data(data); unsigned long flag; /* unmask it */ @@ -1234,10 +1234,10 @@ void dmar_msi_unmask(unsigned int irq) spin_unlock_irqrestore(&iommu->register_lock, flag); } -void dmar_msi_mask(unsigned int irq) +void dmar_msi_mask(struct irq_data *data) { unsigned long flag; - struct intel_iommu *iommu = get_irq_data(irq); + struct intel_iommu *iommu = irq_data_get_irq_data(data); /* mask it */ spin_lock_irqsave(&iommu->register_lock, flag); diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 98abf8b9129..834842aa5bb 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c @@ -57,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) *msg = cfg->msg; } -void mask_ht_irq(unsigned int irq) +void mask_ht_irq(struct irq_data *data) { - struct ht_irq_cfg 
*cfg; - struct ht_irq_msg msg; - - cfg = get_irq_data(irq); + struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); + struct ht_irq_msg msg = cfg->msg; - msg = cfg->msg; msg.address_lo |= 1; - write_ht_irq_msg(irq, &msg); + write_ht_irq_msg(data->irq, &msg); } -void unmask_ht_irq(unsigned int irq) +void unmask_ht_irq(struct irq_data *data) { - struct ht_irq_cfg *cfg; - struct ht_irq_msg msg; - - cfg = get_irq_data(irq); + struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); + struct ht_irq_msg msg = cfg->msg; - msg = cfg->msg; msg.address_lo &= ~1; - write_ht_irq_msg(irq, &msg); + write_ht_irq_msg(data->irq, &msg); } /** diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index fd1d2867cdc..ec87cd66f3e 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c @@ -46,109 +46,24 @@ static __init int setup_intremap(char *str) } early_param("intremap", setup_intremap); -struct irq_2_iommu { - struct intel_iommu *iommu; - u16 irte_index; - u16 sub_handle; - u8 irte_mask; -}; - -#ifdef CONFIG_GENERIC_HARDIRQS -static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) -{ - struct irq_2_iommu *iommu; - - iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); - printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); - - return iommu; -} - -static struct irq_2_iommu *irq_2_iommu(unsigned int irq) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - - if (WARN_ON_ONCE(!desc)) - return NULL; - - return desc->irq_2_iommu; -} - -static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) -{ - struct irq_desc *desc; - struct irq_2_iommu *irq_iommu; - - desc = irq_to_desc(irq); - if (!desc) { - printk(KERN_INFO "can not get irq_desc for %d\n", irq); - return NULL; - } - - irq_iommu = desc->irq_2_iommu; - - if (!irq_iommu) - desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq)); - - return desc->irq_2_iommu; -} - -#else /* !CONFIG_SPARSE_IRQ */ - -static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; - -static struct irq_2_iommu *irq_2_iommu(unsigned int irq) -{ - if (irq < nr_irqs) - return &irq_2_iommuX[irq]; - - return NULL; -} -static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) -{ - return irq_2_iommu(irq); -} -#endif - static DEFINE_SPINLOCK(irq_2_ir_lock); -static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) -{ - struct irq_2_iommu *irq_iommu; - - irq_iommu = irq_2_iommu(irq); - - if (!irq_iommu) - return NULL; - - if (!irq_iommu->iommu) - return NULL; - - return irq_iommu; -} - -int irq_remapped(int irq) +static struct irq_2_iommu *irq_2_iommu(unsigned int irq) { - return valid_irq_2_iommu(irq) != NULL; + struct irq_cfg *cfg = get_irq_chip_data(irq); + return cfg ? 
&cfg->irq_2_iommu : NULL; } int get_irte(int irq, struct irte *entry) { - int index; - struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); unsigned long flags; + int index; - if (!entry) + if (!entry || !irq_iommu) return -1; spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return -1; - } index = irq_iommu->irte_index + irq_iommu->sub_handle; *entry = *(irq_iommu->iommu->ir_table->base + index); @@ -160,20 +75,14 @@ int get_irte(int irq, struct irte *entry) int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) { struct ir_table *table = iommu->ir_table; - struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); u16 index, start_index; unsigned int mask = 0; unsigned long flags; int i; - if (!count) - return -1; - -#ifndef CONFIG_SPARSE_IRQ - /* protect irq_2_iommu_alloc later */ - if (irq >= nr_irqs) + if (!count || !irq_iommu) return -1; -#endif /* * start the IRTE search from index 0. @@ -214,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) for (i = index; i < index + count; i++) table->base[i].present = 1; - irq_iommu = irq_2_iommu_alloc(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - printk(KERN_ERR "can't allocate irq_2_iommu\n"); - return -1; - } - irq_iommu->iommu = iommu; irq_iommu->irte_index = index; irq_iommu->sub_handle = 0; @@ -244,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) int map_irq_to_irte_handle(int irq, u16 *sub_handle) { - int index; - struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); unsigned long flags; + int index; - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); + if (!irq_iommu) return -1; - } + spin_lock_irqsave(&irq_2_ir_lock, flags); *sub_handle = irq_iommu->sub_handle; index = irq_iommu->irte_index; spin_unlock_irqrestore(&irq_2_ir_lock, flags); @@ -263,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle) int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) { - struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); unsigned long flags; - spin_lock_irqsave(&irq_2_ir_lock, flags); - - irq_iommu = irq_2_iommu_alloc(irq); - - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - printk(KERN_ERR "can't allocate irq_2_iommu\n"); + if (!irq_iommu) return -1; - } + + spin_lock_irqsave(&irq_2_ir_lock, flags); irq_iommu->iommu = iommu; irq_iommu->irte_index = index; @@ -286,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) return 0; } -int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) -{ - struct irq_2_iommu *irq_iommu; - unsigned long flags; - - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return -1; - } - - irq_iommu->iommu = NULL; - irq_iommu->irte_index = 0; - irq_iommu->sub_handle = 0; - irq_2_iommu(irq)->irte_mask = 0; - - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return 0; -} - int modify_irte(int irq, struct irte *irte_modified) { - int rc; - int index; - struct irte *irte; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); struct intel_iommu *iommu; - struct irq_2_iommu *irq_iommu; unsigned long flags; + struct irte 
*irte; + int rc, index; - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); + if (!irq_iommu) return -1; - } + + spin_lock_irqsave(&irq_2_ir_lock, flags); iommu = irq_iommu->iommu; @@ -339,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified) return rc; } -int flush_irte(int irq) -{ - int rc; - int index; - struct intel_iommu *iommu; - struct irq_2_iommu *irq_iommu; - unsigned long flags; - - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return -1; - } - - iommu = irq_iommu->iommu; - - index = irq_iommu->irte_index + irq_iommu->sub_handle; - - rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return rc; -} - struct intel_iommu *map_hpet_to_ir(u8 hpet_id) { int i; @@ -420,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) int free_irte(int irq) { - int rc = 0; - struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); unsigned long flags; + int rc; - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); + if (!irq_iommu) return -1; - } + + spin_lock_irqsave(&irq_2_ir_lock, flags); rc = clear_entries(irq_iommu); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 69b7be33b3a..5fcf5aec680 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -170,33 +170,31 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag) desc->masked = __msix_mask_irq(desc, flag); } -static void msi_set_mask_bit(unsigned irq, u32 flag) +static void msi_set_mask_bit(struct irq_data *data, u32 flag) { - struct msi_desc *desc = get_irq_msi(irq); + struct msi_desc *desc = irq_data_get_msi(data); if (desc->msi_attrib.is_msix) { msix_mask_irq(desc, flag); readl(desc->mask_base); /* Flush write to device */ } else { - unsigned offset = irq - desc->dev->irq; + unsigned offset = data->irq - desc->dev->irq; msi_mask_irq(desc, 1 << offset, flag << offset); } } -void mask_msi_irq(unsigned int irq) +void mask_msi_irq(struct irq_data *data) { - msi_set_mask_bit(irq, 1); + msi_set_mask_bit(data, 1); } -void unmask_msi_irq(unsigned int irq) +void unmask_msi_irq(struct irq_data *data) { - msi_set_mask_bit(irq, 0); + msi_set_mask_bit(data, 0); } -void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) +void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { - struct msi_desc *entry = get_irq_desc_msi(desc); - BUG_ON(entry->dev->current_state != PCI_D0); if (entry->msi_attrib.is_msix) { @@ -227,15 +225,13 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void read_msi_msg(unsigned int irq, struct msi_msg *msg) { - struct irq_desc *desc = irq_to_desc(irq); + struct msi_desc *entry = get_irq_msi(irq); - read_msi_msg_desc(desc, msg); + __read_msi_msg(entry, msg); } -void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) +void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { - struct msi_desc *entry = get_irq_desc_msi(desc); - /* Assert that the cache is valid, assuming that * valid messages are not all-zeroes. 
*/ BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | @@ -246,15 +242,13 @@ void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { - struct irq_desc *desc = irq_to_desc(irq); + struct msi_desc *entry = get_irq_msi(irq); - get_cached_msi_msg_desc(desc, msg); + __get_cached_msi_msg(entry, msg); } -void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) +void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { - struct msi_desc *entry = get_irq_desc_msi(desc); - if (entry->dev->current_state != PCI_D0) { /* Don't touch the hardware now */ } else if (entry->msi_attrib.is_msix) { @@ -292,9 +286,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void write_msi_msg(unsigned int irq, struct msi_msg *msg) { - struct irq_desc *desc = irq_to_desc(irq); + struct msi_desc *entry = get_irq_msi(irq); - write_msi_msg_desc(desc, msg); + __write_msi_msg(entry, msg); } static void free_msi_irqs(struct pci_dev *dev) diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 13365ba3521..7d24b0d94ed 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -338,30 +338,29 @@ static void unmask_evtchn(int port) static int find_unbound_irq(void) { - int irq; - struct irq_desc *desc; + struct irq_data *data; + int irq, res; for (irq = 0; irq < nr_irqs; irq++) { - desc = irq_to_desc(irq); + data = irq_get_irq_data(irq); /* only 0->15 have init'd desc; handle irq > 16 */ - if (desc == NULL) + if (!data) break; - if (desc->chip == &no_irq_chip) + if (data->chip == &no_irq_chip) break; - if (desc->chip != &xen_dynamic_chip) + if (data->chip != &xen_dynamic_chip) continue; if (irq_info[irq].type == IRQT_UNBOUND) - break; + return irq; } if (irq == nr_irqs) panic("No available IRQ to bind to: increase nr_irqs!\n"); - desc = irq_to_desc_alloc_node(irq, 0); - if (WARN_ON(desc == NULL)) - return -1; + res = irq_alloc_desc_at(irq, 0); - dynamic_irq_init_keep_chip_data(irq); + if (WARN_ON(res != irq)) + return -1; return irq; } @@ -495,7 +494,7 @@ static void unbind_from_irq(unsigned int irq) if (irq_info[irq].type != IRQT_UNBOUND) { irq_info[irq] = mk_unbound_info(); - dynamic_irq_cleanup(irq); + irq_free_desc(irq); } spin_unlock(&irq_mapping_update_lock); diff --git a/include/linux/dmar.h b/include/linux/dmar.h index d7cecc90ed3..51651b76d40 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -106,6 +106,7 @@ struct irte { __u64 high; }; }; + #ifdef CONFIG_INTR_REMAP extern int intr_remapping_enabled; extern int intr_remapping_supported(void); @@ -119,11 +120,8 @@ extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 sub_handle); extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); -extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index); -extern int flush_irte(int irq); extern int free_irte(int irq); -extern int irq_remapped(int irq); extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); extern struct intel_iommu *map_ioapic_to_ir(int apic); extern struct intel_iommu *map_hpet_to_ir(u8 id); @@ -177,7 +175,6 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) return 0; } -#define irq_remapped(irq) (0) #define enable_intr_remapping(mode) (-1) #define disable_intr_remapping() (0) #define reenable_intr_remapping(mode) (0) @@ -187,8 +184,9 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) /* Can't use the 
common MSI interrupt functions * since DMAR is not a pci device */ -extern void dmar_msi_unmask(unsigned int irq); -extern void dmar_msi_mask(unsigned int irq); +struct irq_data; +extern void dmar_msi_unmask(struct irq_data *data); +extern void dmar_msi_mask(struct irq_data *data); extern void dmar_msi_read(int irq, struct msi_msg *msg); extern void dmar_msi_write(int irq, struct msi_msg *msg); extern int dmar_set_interrupt(struct intel_iommu *iommu); diff --git a/include/linux/htirq.h b/include/linux/htirq.h index c96ea46737d..70a1dbbf209 100644 --- a/include/linux/htirq.h +++ b/include/linux/htirq.h @@ -9,8 +9,9 @@ struct ht_irq_msg { /* Helper functions.. */ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); -void mask_ht_irq(unsigned int irq); -void unmask_ht_irq(unsigned int irq); +struct irq_data; +void mask_ht_irq(struct irq_data *data); +void unmask_ht_irq(struct irq_data *data); /* The arch hook for getting things started */ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index a0384a4d1e6..19988983aea 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -641,11 +641,8 @@ static inline void init_irq_proc(void) struct seq_file; int show_interrupts(struct seq_file *p, void *v); -struct irq_desc; - extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); -extern int arch_init_chip_data(struct irq_desc *desc, int node); #endif diff --git a/include/linux/irq.h b/include/linux/irq.h index c03243ad84b..e9639115dff 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -72,6 +72,10 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ +#define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL) + #ifdef CONFIG_IRQ_PER_CPU # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) @@ -80,36 +84,77 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING #endif -struct proc_dir_entry; struct msi_desc; /** + * struct irq_data - per irq and irq chip data passed down to chip functions + * @irq: interrupt number + * @node: node index useful for balancing + * @chip: low level interrupt hardware access + * @handler_data: per-IRQ data for the irq_chip methods + * @chip_data: platform-specific per-chip private data for the chip + * methods, to allow shared chip implementations + * @msi_desc: MSI descriptor + * @affinity: IRQ affinity on SMP + * + * The fields here need to overlay the ones in irq_desc until we + * cleaned up the direct references and switched everything over to + * irq_data. 
+ */ +struct irq_data { + unsigned int irq; + unsigned int node; + struct irq_chip *chip; + void *handler_data; + void *chip_data; + struct msi_desc *msi_desc; +#ifdef CONFIG_SMP + cpumask_var_t affinity; +#endif +}; + +/** * struct irq_chip - hardware interrupt chip descriptor * * @name: name for /proc/interrupts - * @startup: start up the interrupt (defaults to ->enable if NULL) - * @shutdown: shut down the interrupt (defaults to ->disable if NULL) - * @enable: enable the interrupt (defaults to chip->unmask if NULL) - * @disable: disable the interrupt - * @ack: start of a new interrupt - * @mask: mask an interrupt source - * @mask_ack: ack and mask an interrupt source - * @unmask: unmask an interrupt source - * @eoi: end of interrupt - chip level - * @end: end of interrupt - flow level - * @set_affinity: set the CPU affinity on SMP machines - * @retrigger: resend an IRQ to the CPU - * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ - * @set_wake: enable/disable power-management wake-on of an IRQ + * @startup: deprecated, replaced by irq_startup + * @shutdown: deprecated, replaced by irq_shutdown + * @enable: deprecated, replaced by irq_enable + * @disable: deprecated, replaced by irq_disable + * @ack: deprecated, replaced by irq_ack + * @mask: deprecated, replaced by irq_mask + * @mask_ack: deprecated, replaced by irq_mask_ack + * @unmask: deprecated, replaced by irq_unmask + * @eoi: deprecated, replaced by irq_eoi + * @end: deprecated, will go away with __do_IRQ() + * @set_affinity: deprecated, replaced by irq_set_affinity + * @retrigger: deprecated, replaced by irq_retrigger + * @set_type: deprecated, replaced by irq_set_type + * @set_wake: deprecated, replaced by irq_set_wake + * @bus_lock: deprecated, replaced by irq_bus_lock + * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock * - * @bus_lock: function to lock access to slow bus (i2c) chips - * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips + * @irq_startup: start up the interrupt (defaults to ->enable if NULL) + * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) + * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) + * @irq_disable: disable the interrupt + * @irq_ack: start of a new interrupt + * @irq_mask: mask an interrupt source + * @irq_mask_ack: ack and mask an interrupt source + * @irq_unmask: unmask an interrupt source + * @irq_eoi: end of interrupt + * @irq_set_affinity: set the CPU affinity on SMP machines + * @irq_retrigger: resend an IRQ to the CPU + * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) 
of an IRQ + * @irq_set_wake: enable/disable power-management wake-on of an IRQ + * @irq_bus_lock: function to lock access to slow bus (i2c) chips + * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips * * @release: release function solely used by UML - * @typename: obsoleted by name, kept as migration helper */ struct irq_chip { const char *name; +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED unsigned int (*startup)(unsigned int irq); void (*shutdown)(unsigned int irq); void (*enable)(unsigned int irq); @@ -130,154 +175,66 @@ struct irq_chip { void (*bus_lock)(unsigned int irq); void (*bus_sync_unlock)(unsigned int irq); +#endif + unsigned int (*irq_startup)(struct irq_data *data); + void (*irq_shutdown)(struct irq_data *data); + void (*irq_enable)(struct irq_data *data); + void (*irq_disable)(struct irq_data *data); + + void (*irq_ack)(struct irq_data *data); + void (*irq_mask)(struct irq_data *data); + void (*irq_mask_ack)(struct irq_data *data); + void (*irq_unmask)(struct irq_data *data); + void (*irq_eoi)(struct irq_data *data); + + int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); + int (*irq_retrigger)(struct irq_data *data); + int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); + int (*irq_set_wake)(struct irq_data *data, unsigned int on); + + void (*irq_bus_lock)(struct irq_data *data); + void (*irq_bus_sync_unlock)(struct irq_data *data); /* Currently used only by UML, might disappear one day.*/ #ifdef CONFIG_IRQ_RELEASE_METHOD void (*release)(unsigned int irq, void *dev_id); #endif - /* - * For compatibility, ->typename is copied into ->name. - * Will disappear. - */ - const char *typename; }; -struct timer_rand_state; -struct irq_2_iommu; -/** - * struct irq_desc - interrupt descriptor - * @irq: interrupt number for this descriptor - * @timer_rand_state: pointer to timer rand state struct - * @kstat_irqs: irq stats per cpu - * @irq_2_iommu: iommu with this irq - * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] - * @chip: low level interrupt hardware access - * @msi_desc: MSI descriptor - * @handler_data: per-IRQ data for the irq_chip methods - * @chip_data: platform-specific per-chip private data for the chip - * methods, to allow shared chip implementations - * @action: the irq action chain - * @status: status information - * @depth: disable-depth, for nested irq_disable() calls - * @wake_depth: enable depth, for multiple set_irq_wake() callers - * @irq_count: stats field to detect stalled irqs - * @last_unhandled: aging timer for unhandled count - * @irqs_unhandled: stats field for spurious unhandled interrupts - * @lock: locking for SMP - * @affinity: IRQ affinity on SMP - * @node: node index useful for balancing - * @pending_mask: pending rebalanced interrupts - * @threads_active: number of irqaction threads currently running - * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers - * @dir: /proc/irq/ procfs entry - * @name: flow handler name for /proc/interrupts output - */ -struct irq_desc { - unsigned int irq; - struct timer_rand_state *timer_rand_state; - unsigned int *kstat_irqs; -#ifdef CONFIG_INTR_REMAP - struct irq_2_iommu *irq_2_iommu; -#endif - irq_flow_handler_t handle_irq; - struct irq_chip *chip; - struct msi_desc *msi_desc; - void *handler_data; - void *chip_data; - struct irqaction *action; /* IRQ action list */ - unsigned int status; /* IRQ status */ - - unsigned int depth; /* nested irq disables */ - unsigned int wake_depth; /* nested wake enables 
*/ - unsigned int irq_count; /* For detecting broken IRQs */ - unsigned long last_unhandled; /* Aging timer for unhandled count */ - unsigned int irqs_unhandled; - raw_spinlock_t lock; -#ifdef CONFIG_SMP - cpumask_var_t affinity; - const struct cpumask *affinity_hint; - unsigned int node; -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_var_t pending_mask; -#endif -#endif - atomic_t threads_active; - wait_queue_head_t wait_for_threads; -#ifdef CONFIG_PROC_FS - struct proc_dir_entry *dir; -#endif - const char *name; -} ____cacheline_internodealigned_in_smp; +/* This include will go away once we isolated irq_desc usage to core code */ +#include <linux/irqdesc.h> -extern void arch_init_copy_chip_data(struct irq_desc *old_desc, - struct irq_desc *desc, int node); -extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); +/* + * Pick up the arch-dependent methods: + */ +#include <asm/hw_irq.h> -#ifndef CONFIG_SPARSE_IRQ -extern struct irq_desc irq_desc[NR_IRQS]; +#ifndef NR_IRQS_LEGACY +# define NR_IRQS_LEGACY 0 #endif -#ifdef CONFIG_NUMA_IRQ_DESC -extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); -#else -static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) -{ - return desc; -} +#ifndef ARCH_IRQ_INIT_FLAGS +# define ARCH_IRQ_INIT_FLAGS 0 #endif -extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); - -/* - * Pick up the arch-dependent methods: - */ -#include <asm/hw_irq.h> +#define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) +struct irqaction; extern int setup_irq(unsigned int irq, struct irqaction *new); extern void remove_irq(unsigned int irq, struct irqaction *act); #ifdef CONFIG_GENERIC_HARDIRQS -#ifdef CONFIG_SMP - -#ifdef CONFIG_GENERIC_PENDING_IRQ - +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) void move_native_irq(int irq); void move_masked_irq(int irq); - -#else /* CONFIG_GENERIC_PENDING_IRQ */ - -static inline void move_irq(int irq) -{ -} - -static inline void move_native_irq(int irq) -{ -} - -static inline void move_masked_irq(int irq) -{ -} - -#endif /* CONFIG_GENERIC_PENDING_IRQ */ - -#else /* CONFIG_SMP */ - -#define move_native_irq(x) -#define move_masked_irq(x) - -#endif /* CONFIG_SMP */ +#else +static inline void move_native_irq(int irq) { } +static inline void move_masked_irq(int irq) { } +#endif extern int no_irq_affinity; -static inline int irq_balancing_disabled(unsigned int irq) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - return desc->status & IRQ_NO_BALANCING_MASK; -} - /* Handle irq action chains: */ extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); @@ -293,42 +250,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); -/* - * Monolithic do_IRQ implementation. - */ -#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ -extern unsigned int __do_IRQ(unsigned int irq); -#endif - -/* - * Architectures call this to let the generic IRQ layer - * handle an interrupt. If the descriptor is attached to an - * irqchip-style controller then we call the ->handle_irq() handler, - * and it calls __do_IRQ() if it's attached to an irqtype-style controller. 
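
To make the new interface above concrete: a chip driver written against this series implements the irq_* callbacks on struct irq_data instead of taking a bare irq number, and recovers its private state through irq_data_get_irq_chip_data(). The following is a minimal hedged sketch, not part of the patch; the foo_* names, FOO_IRQ_BASE and the register offsets are invented for illustration, and only interfaces introduced or kept by this series are used.

#include <linux/io.h>
#include <linux/irq.h>

#define FOO_IRQ_BASE	64	/* hypothetical first Linux irq of this chip */
#define FOO_MASK_SET	0x04	/* hypothetical: write 1 to mask a source */
#define FOO_MASK_CLR	0x08	/* hypothetical: write 1 to unmask a source */
#define FOO_ACK		0x0c	/* hypothetical: write 1 to ack a source */

struct foo_intc {
	void __iomem *regs;	/* hypothetical register window */
};

static void foo_irq_mask(struct irq_data *data)
{
	struct foo_intc *intc = irq_data_get_irq_chip_data(data);

	writel(1 << (data->irq - FOO_IRQ_BASE), intc->regs + FOO_MASK_SET);
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct foo_intc *intc = irq_data_get_irq_chip_data(data);

	writel(1 << (data->irq - FOO_IRQ_BASE), intc->regs + FOO_MASK_CLR);
}

static void foo_irq_ack(struct irq_data *data)
{
	struct foo_intc *intc = irq_data_get_irq_chip_data(data);

	writel(1 << (data->irq - FOO_IRQ_BASE), intc->regs + FOO_ACK);
}

static struct irq_chip foo_irq_chip = {
	.name = "FOO",
	.irq_mask = foo_irq_mask,
	.irq_unmask = foo_irq_unmask,
	.irq_ack = foo_irq_ack,
};

static void foo_intc_setup_one(struct foo_intc *intc, unsigned int irq)
{
	set_irq_chip_data(irq, intc);
	set_irq_chip_and_handler(irq, &foo_irq_chip, handle_level_irq);
}

A chip that sparse, with only mask/unmask/ack, stays valid because irq_chip_set_defaults() (see the kernel/irq/chip.c hunks later in this diff) fills in irq_startup, irq_shutdown, irq_enable and irq_disable from the generic defaults.
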
- */ -static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) -{ -#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ - desc->handle_irq(irq, desc); -#else - if (likely(desc->handle_irq)) - desc->handle_irq(irq, desc); - else - __do_IRQ(irq); -#endif -} - -static inline void generic_handle_irq(unsigned int irq) -{ - generic_handle_irq_desc(irq, irq_to_desc(irq)); -} - /* Handling of unhandled and spurious interrupts: */ extern void note_interrupt(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret); -/* Resending of interrupts :*/ -void check_irq_resend(struct irq_desc *desc, unsigned int irq); /* Enable/disable irq debugging output: */ extern int noirqdebug_setup(char *str); @@ -351,16 +276,6 @@ extern void __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, const char *name); -/* caller has locked the irq_desc and both params are valid */ -static inline void __set_irq_handler_unlocked(int irq, - irq_flow_handler_t handler) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - desc->handle_irq = handler; -} - /* * Set a highlevel flow handler for a given IRQ: */ @@ -384,141 +299,121 @@ set_irq_chained_handler(unsigned int irq, extern void set_irq_nested_thread(unsigned int irq, int nest); -extern void set_irq_noprobe(unsigned int irq); -extern void set_irq_probe(unsigned int irq); +void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); + +static inline void irq_set_status_flags(unsigned int irq, unsigned long set) +{ + irq_modify_status(irq, 0, set); +} + +static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) +{ + irq_modify_status(irq, clr, 0); +} + +static inline void set_irq_noprobe(unsigned int irq) +{ + irq_modify_status(irq, 0, IRQ_NOPROBE); +} + +static inline void set_irq_probe(unsigned int irq) +{ + irq_modify_status(irq, IRQ_NOPROBE, 0); +} /* Handle dynamic irq creation and destruction */ extern unsigned int create_irq_nr(unsigned int irq_want, int node); extern int create_irq(void); extern void destroy_irq(unsigned int irq); -/* Test to see if a driver has successfully requested an irq */ -static inline int irq_has_action(unsigned int irq) +/* + * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and + * irq_free_desc instead. 
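
The irq_alloc_desc*() and irq_free_desc() replacements named in the comment above are declared a little further down in this same linux/irq.h hunk. As a hedged sketch of the intended usage, mirroring the drivers/xen/events.c conversion earlier in this diff; foo_setup_virq()/foo_teardown_virq() are invented, the fixed node 0 is arbitrary, and foo_irq_chip refers to the chip sketch given earlier:

#include <linux/irq.h>

/* Claim the descriptor for a specific, pre-determined interrupt number. */
static int foo_setup_virq(unsigned int virq)
{
	int res = irq_alloc_desc_at(virq, 0);	/* node 0, just for the sketch */

	if (res < 0)
		return res;	/* out of range or already allocated */

	set_irq_chip_and_handler(virq, &foo_irq_chip, handle_edge_irq);
	return 0;
}

static void foo_teardown_virq(unsigned int virq)
{
	irq_free_desc(virq);
}
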
+ */ +extern void dynamic_irq_cleanup(unsigned int irq); +static inline void dynamic_irq_init(unsigned int irq) { - struct irq_desc *desc = irq_to_desc(irq); - return desc->action != NULL; + dynamic_irq_cleanup(irq); } -/* Dynamic irq helper functions */ -extern void dynamic_irq_init(unsigned int irq); -void dynamic_irq_init_keep_chip_data(unsigned int irq); -extern void dynamic_irq_cleanup(unsigned int irq); -void dynamic_irq_cleanup_keep_chip_data(unsigned int irq); - /* Set/get chip/data for an IRQ: */ extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); extern int set_irq_data(unsigned int irq, void *data); extern int set_irq_chip_data(unsigned int irq, void *data); extern int set_irq_type(unsigned int irq, unsigned int type); extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); +extern struct irq_data *irq_get_irq_data(unsigned int irq); -#define get_irq_chip(irq) (irq_to_desc(irq)->chip) -#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data) -#define get_irq_data(irq) (irq_to_desc(irq)->handler_data) -#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) - -#define get_irq_desc_chip(desc) ((desc)->chip) -#define get_irq_desc_chip_data(desc) ((desc)->chip_data) -#define get_irq_desc_data(desc) ((desc)->handler_data) -#define get_irq_desc_msi(desc) ((desc)->msi_desc) - -#endif /* CONFIG_GENERIC_HARDIRQS */ - -#endif /* !CONFIG_S390 */ - -#ifdef CONFIG_SMP -/** - * alloc_desc_masks - allocate cpumasks for irq_desc - * @desc: pointer to irq_desc struct - * @node: node which will be handling the cpumasks - * @boot: true if need bootmem - * - * Allocates affinity and pending_mask cpumask if required. - * Returns true if successful (or not required). - */ -static inline bool alloc_desc_masks(struct irq_desc *desc, int node, - bool boot) +static inline struct irq_chip *get_irq_chip(unsigned int irq) { - gfp_t gfp = GFP_ATOMIC; - - if (boot) - gfp = GFP_NOWAIT; - -#ifdef CONFIG_CPUMASK_OFFSTACK - if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) - return false; + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->chip : NULL; +} -#ifdef CONFIG_GENERIC_PENDING_IRQ - if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { - free_cpumask_var(desc->affinity); - return false; - } -#endif -#endif - return true; +static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) +{ + return d->chip; } -static inline void init_desc_masks(struct irq_desc *desc) +static inline void *get_irq_chip_data(unsigned int irq) { - cpumask_setall(desc->affinity); -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_clear(desc->pending_mask); -#endif + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->chip_data : NULL; } -/** - * init_copy_desc_masks - copy cpumasks for irq_desc - * @old_desc: pointer to old irq_desc struct - * @new_desc: pointer to new irq_desc struct - * - * Insures affinity and pending_masks are copied to new irq_desc. - * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the - * irq_desc struct so the copy is redundant. - */ +static inline void *irq_data_get_irq_chip_data(struct irq_data *d) +{ + return d->chip_data; +} -static inline void init_copy_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) +static inline void *get_irq_data(unsigned int irq) { -#ifdef CONFIG_CPUMASK_OFFSTACK - cpumask_copy(new_desc->affinity, old_desc->affinity); + struct irq_data *d = irq_get_irq_data(irq); + return d ? 
d->handler_data : NULL; +} -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); -#endif -#endif +static inline void *irq_data_get_irq_data(struct irq_data *d) +{ + return d->handler_data; } -static inline void free_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) +static inline struct msi_desc *get_irq_msi(unsigned int irq) { - free_cpumask_var(old_desc->affinity); + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->msi_desc : NULL; +} -#ifdef CONFIG_GENERIC_PENDING_IRQ - free_cpumask_var(old_desc->pending_mask); -#endif +static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) +{ + return d->msi_desc; } -#else /* !CONFIG_SMP */ +int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); +void irq_free_descs(unsigned int irq, unsigned int cnt); +int irq_reserve_irqs(unsigned int from, unsigned int cnt); -static inline bool alloc_desc_masks(struct irq_desc *desc, int node, - bool boot) +static inline int irq_alloc_desc(int node) { - return true; + return irq_alloc_descs(-1, 0, 1, node); } -static inline void init_desc_masks(struct irq_desc *desc) +static inline int irq_alloc_desc_at(unsigned int at, int node) { + return irq_alloc_descs(at, at, 1, node); } -static inline void init_copy_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) +static inline int irq_alloc_desc_from(unsigned int from, int node) { + return irq_alloc_descs(-1, from, 1, node); } -static inline void free_desc_masks(struct irq_desc *old_desc, - struct irq_desc *new_desc) +static inline void irq_free_desc(unsigned int irq) { + irq_free_descs(irq, 1); } -#endif /* CONFIG_SMP */ + +#endif /* CONFIG_GENERIC_HARDIRQS */ + +#endif /* !CONFIG_S390 */ #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h new file mode 100644 index 00000000000..979c68cc745 --- /dev/null +++ b/include/linux/irqdesc.h @@ -0,0 +1,159 @@ +#ifndef _LINUX_IRQDESC_H +#define _LINUX_IRQDESC_H + +/* + * Core internal functions to deal with irq descriptors + * + * This include will move to kernel/irq once we cleaned up the tree. + * For now it's included from <linux/irq.h> + */ + +struct proc_dir_entry; +struct timer_rand_state; +/** + * struct irq_desc - interrupt descriptor + * @irq_data: per irq and chip data passed down to chip functions + * @timer_rand_state: pointer to timer rand state struct + * @kstat_irqs: irq stats per cpu + * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] + * @action: the irq action chain + * @status: status information + * @depth: disable-depth, for nested irq_disable() calls + * @wake_depth: enable depth, for multiple set_irq_wake() callers + * @irq_count: stats field to detect stalled irqs + * @last_unhandled: aging timer for unhandled count + * @irqs_unhandled: stats field for spurious unhandled interrupts + * @lock: locking for SMP + * @pending_mask: pending rebalanced interrupts + * @threads_active: number of irqaction threads currently running + * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers + * @dir: /proc/irq/ procfs entry + * @name: flow handler name for /proc/interrupts output + */ +struct irq_desc { + +#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED + struct irq_data irq_data; +#else + /* + * This union will go away, once we fixed the direct access to + * irq_desc all over the place. The direct fields are a 1:1 + * overlay of irq_data. 
+ */ + union { + struct irq_data irq_data; + struct { + unsigned int irq; + unsigned int node; + struct irq_chip *chip; + void *handler_data; + void *chip_data; + struct msi_desc *msi_desc; +#ifdef CONFIG_SMP + cpumask_var_t affinity; +#endif + }; + }; +#endif + + struct timer_rand_state *timer_rand_state; + unsigned int *kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; /* IRQ action list */ + unsigned int status; /* IRQ status */ + + unsigned int depth; /* nested irq disables */ + unsigned int wake_depth; /* nested wake enables */ + unsigned int irq_count; /* For detecting broken IRQs */ + unsigned long last_unhandled; /* Aging timer for unhandled count */ + unsigned int irqs_unhandled; + raw_spinlock_t lock; +#ifdef CONFIG_SMP + const struct cpumask *affinity_hint; +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_var_t pending_mask; +#endif +#endif + atomic_t threads_active; + wait_queue_head_t wait_for_threads; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *dir; +#endif + const char *name; +} ____cacheline_internodealigned_in_smp; + +#ifndef CONFIG_SPARSE_IRQ +extern struct irq_desc irq_desc[NR_IRQS]; +#endif + +/* Will be removed once the last users in power and sh are gone */ +extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); +static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) +{ + return desc; +} + +#ifdef CONFIG_GENERIC_HARDIRQS + +#define get_irq_desc_chip(desc) ((desc)->irq_data.chip) +#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) +#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) +#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) + +/* + * Monolithic do_IRQ implementation. + */ +#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ +extern unsigned int __do_IRQ(unsigned int irq); +#endif + +/* + * Architectures call this to let the generic IRQ layer + * handle an interrupt. If the descriptor is attached to an + * irqchip-style controller then we call the ->handle_irq() handler, + * and it calls __do_IRQ() if it's attached to an irqtype-style controller. 
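
generic_handle_irq(), re-added just below in its new home, is the call a cascaded-interrupt demultiplexer makes once per pending child interrupt. A hedged sketch follows, reusing the invented struct foo_intc and FOO_IRQ_BASE from the chip example earlier; FOO_PENDING and the wiring shown in the trailing comment are likewise hypothetical:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/types.h>

#define FOO_PENDING	0x00	/* hypothetical pending-status register */

static void foo_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
	struct foo_intc *intc = get_irq_desc_data(desc);	/* handler_data */
	u32 pending = readl(intc->regs + FOO_PENDING);

	while (pending) {
		unsigned int bit = __ffs(pending);

		generic_handle_irq(FOO_IRQ_BASE + bit);
		pending &= ~(1U << bit);
	}
}

/*
 * Wired up on the parent line with, e.g.:
 *	set_irq_data(parent_irq, intc);
 *	set_irq_chained_handler(parent_irq, foo_cascade_handler);
 */
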
+ */ +static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) +{ +#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ + desc->handle_irq(irq, desc); +#else + if (likely(desc->handle_irq)) + desc->handle_irq(irq, desc); + else + __do_IRQ(irq); +#endif +} + +static inline void generic_handle_irq(unsigned int irq) +{ + generic_handle_irq_desc(irq, irq_to_desc(irq)); +} + +/* Test to see if a driver has successfully requested an irq */ +static inline int irq_has_action(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + return desc->action != NULL; +} + +static inline int irq_balancing_disabled(unsigned int irq) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status & IRQ_NO_BALANCING_MASK; +} + +/* caller has locked the irq_desc and both params are valid */ +static inline void __set_irq_handler_unlocked(int irq, + irq_flow_handler_t handler) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + desc->handle_irq = handler; +} +#endif + +#endif diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 7bf89bc8cbc..05aa8c23483 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -25,6 +25,7 @@ extern int nr_irqs; extern struct irq_desc *irq_to_desc(unsigned int irq); +unsigned int irq_get_next_irq(unsigned int offset); # define for_each_irq_desc(irq, desc) \ for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ @@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq); #define irq_node(irq) 0 #endif +# define for_each_active_irq(irq) \ + for (irq = irq_get_next_irq(0); irq < nr_irqs; \ + irq = irq_get_next_irq(irq + 1)) + #endif /* CONFIG_GENERIC_HARDIRQS */ #define for_each_irq_nr(irq) \ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 06aed8305bf..17d050ce7ab 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -424,14 +424,6 @@ do { \ #endif /* CONFIG_LOCKDEP */ -#ifdef CONFIG_GENERIC_HARDIRQS -extern void early_init_irq_lock_class(void); -#else -static inline void early_init_irq_lock_class(void) -{ -} -#endif - #ifdef CONFIG_TRACE_IRQFLAGS extern void early_boot_irqs_off(void); extern void early_boot_irqs_on(void); diff --git a/include/linux/msi.h b/include/linux/msi.h index 91b05c17185..05acced439a 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -10,12 +10,13 @@ struct msi_msg { }; /* Helper functions */ -struct irq_desc; -extern void mask_msi_irq(unsigned int irq); -extern void unmask_msi_irq(unsigned int irq); -extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); -extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); -extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); +struct irq_data; +struct msi_desc; +extern void mask_msi_irq(struct irq_data *data); +extern void unmask_msi_irq(struct irq_data *data); +extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); diff --git a/init/Kconfig b/init/Kconfig index 2de5b1cbadd..1df1a87cc59 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -332,6 +332,8 @@ config AUDIT_TREE depends on AUDITSYSCALL select FSNOTIFY +source "kernel/irq/Kconfig" + menu "RCU Subsystem" 
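
Stepping back to the irqnr.h hunk above: the new for_each_active_irq() iterator, built on irq_get_next_irq(), visits only interrupts that actually have a descriptor instead of scanning every number up to nr_irqs. A hedged sketch of a caller, assuming CONFIG_GENERIC_HARDIRQS; the function name is invented:

#include <linux/irq.h>
#include <linux/kernel.h>

static void foo_dump_irq_chips(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_chip *chip = get_irq_chip(irq);

		pr_info("irq %u: chip %s\n", irq, chip ? chip->name : "none");
	}
}
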
choice diff --git a/init/main.c b/init/main.c index 94ab488039a..9684c9670b4 100644 --- a/init/main.c +++ b/init/main.c @@ -556,7 +556,6 @@ asmlinkage void __init start_kernel(void) local_irq_disable(); early_boot_irqs_off(); - early_init_irq_lock_class(); /* * Interrupts are still disabled. Do necessary setups, then diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig new file mode 100644 index 00000000000..31d766bf5d2 --- /dev/null +++ b/kernel/irq/Kconfig @@ -0,0 +1,53 @@ +config HAVE_GENERIC_HARDIRQS + def_bool n + +if HAVE_GENERIC_HARDIRQS +menu "IRQ subsystem" +# +# Interrupt subsystem related configuration options +# +config GENERIC_HARDIRQS + def_bool y + +config GENERIC_HARDIRQS_NO__DO_IRQ + def_bool y + +# Select this to disable the deprecated stuff +config GENERIC_HARDIRQS_NO_DEPRECATED + def_bool n + +# Options selectable by the architecture code +config HAVE_SPARSE_IRQ + def_bool n + +config GENERIC_IRQ_PROBE + def_bool n + +config GENERIC_PENDING_IRQ + def_bool n + +config AUTO_IRQ_AFFINITY + def_bool n + +config IRQ_PER_CPU + def_bool n + +config HARDIRQS_SW_RESEND + def_bool n + +config SPARSE_IRQ + bool "Support sparse irq numbering" + depends on HAVE_SPARSE_IRQ + ---help--- + + Sparse irq numbering is useful for distro kernels that want + to define a high CONFIG_NR_CPUS value but still want to have + low kernel memory footprint on smaller machines. + + ( Sparse irqs can also be beneficial on NUMA boxes, as they spread + out the interrupt descriptors in a more NUMA-friendly way. ) + + If you don't know what to do here, say N. + +endmenu +endif diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index 7d047808419..54329cd7b3e 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile @@ -1,7 +1,6 @@ -obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o +obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o -obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o obj-$(CONFIG_PM_SLEEP) += pm.o diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 2295a31ef11..505798f86c3 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -57,9 +57,10 @@ unsigned long probe_irq_on(void) * Some chips need to know about probing in * progress: */ - if (desc->chip->set_type) - desc->chip->set_type(i, IRQ_TYPE_PROBE); - desc->chip->startup(i); + if (desc->irq_data.chip->irq_set_type) + desc->irq_data.chip->irq_set_type(&desc->irq_data, + IRQ_TYPE_PROBE); + desc->irq_data.chip->irq_startup(&desc->irq_data); } raw_spin_unlock_irq(&desc->lock); } @@ -76,7 +77,7 @@ unsigned long probe_irq_on(void) raw_spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { desc->status |= IRQ_AUTODETECT | IRQ_WAITING; - if (desc->chip->startup(i)) + if (desc->irq_data.chip->irq_startup(&desc->irq_data)) desc->status |= IRQ_PENDING; } raw_spin_unlock_irq(&desc->lock); @@ -98,7 +99,7 @@ unsigned long probe_irq_on(void) /* It triggered already - consider it spurious. 
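
The kernel/irq/autoprobe.c hunk continues directly below; only its internals are switched to the irq_data callbacks, while the driver-facing probe_irq_on()/probe_irq_off() contract is unchanged. For context, a hedged sketch of that classic probing sequence; the device poke is left as a comment because it is entirely device specific:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

static int foo_probe_irq(void)
{
	unsigned long cookie;
	int irq;

	cookie = probe_irq_on();	/* arm all unclaimed, probeable lines */

	/* hypothetical: poke the device here so that it raises its interrupt */
	mdelay(10);

	irq = probe_irq_off(cookie);	/* > 0: exactly one line fired */
	return irq > 0 ? irq : -ENODEV;
}
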
*/ if (!(status & IRQ_WAITING)) { desc->status = status & ~IRQ_AUTODETECT; - desc->chip->shutdown(i); + desc->irq_data.chip->irq_shutdown(&desc->irq_data); } else if (i < 32) mask |= 1 << i; @@ -137,7 +138,7 @@ unsigned int probe_irq_mask(unsigned long val) mask |= 1 << i; desc->status = status & ~IRQ_AUTODETECT; - desc->chip->shutdown(i); + desc->irq_data.chip->irq_shutdown(&desc->irq_data); } raw_spin_unlock_irq(&desc->lock); } @@ -181,7 +182,7 @@ int probe_irq_off(unsigned long val) nr_of_irqs++; } desc->status = status & ~IRQ_AUTODETECT; - desc->chip->shutdown(i); + desc->irq_data.chip->irq_shutdown(&desc->irq_data); } raw_spin_unlock_irq(&desc->lock); } diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index b7091d5ca2f..baa5c4acad8 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -18,108 +18,6 @@ #include "internals.h" -static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) -{ - struct irq_desc *desc; - unsigned long flags; - - desc = irq_to_desc(irq); - if (!desc) { - WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); - return; - } - - /* Ensure we don't have left over values from a previous use of this irq */ - raw_spin_lock_irqsave(&desc->lock, flags); - desc->status = IRQ_DISABLED; - desc->chip = &no_irq_chip; - desc->handle_irq = handle_bad_irq; - desc->depth = 1; - desc->msi_desc = NULL; - desc->handler_data = NULL; - if (!keep_chip_data) - desc->chip_data = NULL; - desc->action = NULL; - desc->irq_count = 0; - desc->irqs_unhandled = 0; -#ifdef CONFIG_SMP - cpumask_setall(desc->affinity); -#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_clear(desc->pending_mask); -#endif -#endif - raw_spin_unlock_irqrestore(&desc->lock, flags); -} - -/** - * dynamic_irq_init - initialize a dynamically allocated irq - * @irq: irq number to initialize - */ -void dynamic_irq_init(unsigned int irq) -{ - dynamic_irq_init_x(irq, false); -} - -/** - * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq - * @irq: irq number to initialize - * - * does not set irq_to_desc(irq)->chip_data to NULL - */ -void dynamic_irq_init_keep_chip_data(unsigned int irq) -{ - dynamic_irq_init_x(irq, true); -} - -static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data) -{ - struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - - if (!desc) { - WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); - return; - } - - raw_spin_lock_irqsave(&desc->lock, flags); - if (desc->action) { - raw_spin_unlock_irqrestore(&desc->lock, flags); - WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", - irq); - return; - } - desc->msi_desc = NULL; - desc->handler_data = NULL; - if (!keep_chip_data) - desc->chip_data = NULL; - desc->handle_irq = handle_bad_irq; - desc->chip = &no_irq_chip; - desc->name = NULL; - clear_kstat_irqs(desc); - raw_spin_unlock_irqrestore(&desc->lock, flags); -} - -/** - * dynamic_irq_cleanup - cleanup a dynamically allocated irq - * @irq: irq number to initialize - */ -void dynamic_irq_cleanup(unsigned int irq) -{ - dynamic_irq_cleanup_x(irq, false); -} - -/** - * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq - * @irq: irq number to initialize - * - * does not set irq_to_desc(irq)->chip_data to NULL - */ -void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) -{ - dynamic_irq_cleanup_x(irq, true); -} - - /** * set_irq_chip - set the irq chip for an irq * @irq: irq number @@ -140,7 +38,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip) raw_spin_lock_irqsave(&desc->lock, flags); 
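
Later in this file's hunks the open-coded set_irq_noprobe()/set_irq_probe() bodies are folded into the new irq_modify_status(), with irq_set_status_flags()/irq_clear_status_flags() added as inline callers in the linux/irq.h hunk earlier. A hedged sketch of how a driver is expected to use those wrappers, for example to keep a line disabled across request_irq(); foo_interrupt() and the opaque device pointer are invented, and the set_irq_chip() hunk resumes right after this aside:

#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* stub handler for the sketch */
}

/* Request the line but keep it disabled until the hardware is initialised. */
static int foo_claim_irq(unsigned int irq, void *dev)
{
	int ret;

	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	ret = request_irq(irq, foo_interrupt, 0, "foo", dev);
	if (ret) {
		irq_clear_status_flags(irq, IRQ_NOAUTOEN);
		return ret;
	}

	/* ... bring up the device ... */
	enable_irq(irq);
	return 0;
}
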
irq_chip_set_defaults(chip); - desc->chip = chip; + desc->irq_data.chip = chip; raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; @@ -193,7 +91,7 @@ int set_irq_data(unsigned int irq, void *data) } raw_spin_lock_irqsave(&desc->lock, flags); - desc->handler_data = data; + desc->irq_data.handler_data = data; raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -218,7 +116,7 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry) } raw_spin_lock_irqsave(&desc->lock, flags); - desc->msi_desc = entry; + desc->irq_data.msi_desc = entry; if (entry) entry->irq = irq; raw_spin_unlock_irqrestore(&desc->lock, flags); @@ -243,19 +141,27 @@ int set_irq_chip_data(unsigned int irq, void *data) return -EINVAL; } - if (!desc->chip) { + if (!desc->irq_data.chip) { printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); return -EINVAL; } raw_spin_lock_irqsave(&desc->lock, flags); - desc->chip_data = data; + desc->irq_data.chip_data = data; raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } EXPORT_SYMBOL(set_irq_chip_data); +struct irq_data *irq_get_irq_data(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + return desc ? &desc->irq_data : NULL; +} +EXPORT_SYMBOL_GPL(irq_get_irq_data); + /** * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq * @@ -287,93 +193,216 @@ EXPORT_SYMBOL_GPL(set_irq_nested_thread); /* * default enable function */ -static void default_enable(unsigned int irq) +static void default_enable(struct irq_data *data) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc = irq_data_to_desc(data); - desc->chip->unmask(irq); + desc->irq_data.chip->irq_unmask(&desc->irq_data); desc->status &= ~IRQ_MASKED; } /* * default disable function */ -static void default_disable(unsigned int irq) +static void default_disable(struct irq_data *data) { } /* * default startup function */ -static unsigned int default_startup(unsigned int irq) +static unsigned int default_startup(struct irq_data *data) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc = irq_data_to_desc(data); - desc->chip->enable(irq); + desc->irq_data.chip->irq_enable(data); return 0; } /* * default shutdown function */ -static void default_shutdown(unsigned int irq) +static void default_shutdown(struct irq_data *data) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc = irq_data_to_desc(data); - desc->chip->mask(irq); + desc->irq_data.chip->irq_mask(&desc->irq_data); desc->status |= IRQ_MASKED; } +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED +/* Temporary migration helpers */ +static void compat_irq_mask(struct irq_data *data) +{ + data->chip->mask(data->irq); +} + +static void compat_irq_unmask(struct irq_data *data) +{ + data->chip->unmask(data->irq); +} + +static void compat_irq_ack(struct irq_data *data) +{ + data->chip->ack(data->irq); +} + +static void compat_irq_mask_ack(struct irq_data *data) +{ + data->chip->mask_ack(data->irq); +} + +static void compat_irq_eoi(struct irq_data *data) +{ + data->chip->eoi(data->irq); +} + +static void compat_irq_enable(struct irq_data *data) +{ + data->chip->enable(data->irq); +} + +static void compat_irq_disable(struct irq_data *data) +{ + data->chip->disable(data->irq); +} + +static void compat_irq_shutdown(struct irq_data *data) +{ + data->chip->shutdown(data->irq); +} + +static unsigned int compat_irq_startup(struct irq_data *data) +{ + return data->chip->startup(data->irq); +} + +static int compat_irq_set_affinity(struct irq_data *data, + const struct cpumask *dest, 
bool force) +{ + return data->chip->set_affinity(data->irq, dest); +} + +static int compat_irq_set_type(struct irq_data *data, unsigned int type) +{ + return data->chip->set_type(data->irq, type); +} + +static int compat_irq_set_wake(struct irq_data *data, unsigned int on) +{ + return data->chip->set_wake(data->irq, on); +} + +static int compat_irq_retrigger(struct irq_data *data) +{ + return data->chip->retrigger(data->irq); +} + +static void compat_bus_lock(struct irq_data *data) +{ + data->chip->bus_lock(data->irq); +} + +static void compat_bus_sync_unlock(struct irq_data *data) +{ + data->chip->bus_sync_unlock(data->irq); +} +#endif + /* * Fixup enable/disable function pointers */ void irq_chip_set_defaults(struct irq_chip *chip) { - if (!chip->enable) - chip->enable = default_enable; - if (!chip->disable) - chip->disable = default_disable; - if (!chip->startup) - chip->startup = default_startup; +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED /* - * We use chip->disable, when the user provided its own. When - * we have default_disable set for chip->disable, then we need + * Compat fixup functions need to be before we set the + * defaults for enable/disable/startup/shutdown + */ + if (chip->enable) + chip->irq_enable = compat_irq_enable; + if (chip->disable) + chip->irq_disable = compat_irq_disable; + if (chip->shutdown) + chip->irq_shutdown = compat_irq_shutdown; + if (chip->startup) + chip->irq_startup = compat_irq_startup; +#endif + /* + * The real defaults + */ + if (!chip->irq_enable) + chip->irq_enable = default_enable; + if (!chip->irq_disable) + chip->irq_disable = default_disable; + if (!chip->irq_startup) + chip->irq_startup = default_startup; + /* + * We use chip->irq_disable, when the user provided its own. When + * we have default_disable set for chip->irq_disable, then we need * to use default_shutdown, otherwise the irq line is not * disabled on free_irq(): */ - if (!chip->shutdown) - chip->shutdown = chip->disable != default_disable ? - chip->disable : default_shutdown; - if (!chip->name) - chip->name = chip->typename; + if (!chip->irq_shutdown) + chip->irq_shutdown = chip->irq_disable != default_disable ? 
+ chip->irq_disable : default_shutdown; + +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED if (!chip->end) chip->end = dummy_irq_chip.end; + + /* + * Now fix up the remaining compat handlers + */ + if (chip->bus_lock) + chip->irq_bus_lock = compat_bus_lock; + if (chip->bus_sync_unlock) + chip->irq_bus_sync_unlock = compat_bus_sync_unlock; + if (chip->mask) + chip->irq_mask = compat_irq_mask; + if (chip->unmask) + chip->irq_unmask = compat_irq_unmask; + if (chip->ack) + chip->irq_ack = compat_irq_ack; + if (chip->mask_ack) + chip->irq_mask_ack = compat_irq_mask_ack; + if (chip->eoi) + chip->irq_eoi = compat_irq_eoi; + if (chip->set_affinity) + chip->irq_set_affinity = compat_irq_set_affinity; + if (chip->set_type) + chip->irq_set_type = compat_irq_set_type; + if (chip->set_wake) + chip->irq_set_wake = compat_irq_set_wake; + if (chip->retrigger) + chip->irq_retrigger = compat_irq_retrigger; +#endif } -static inline void mask_ack_irq(struct irq_desc *desc, int irq) +static inline void mask_ack_irq(struct irq_desc *desc) { - if (desc->chip->mask_ack) - desc->chip->mask_ack(irq); + if (desc->irq_data.chip->irq_mask_ack) + desc->irq_data.chip->irq_mask_ack(&desc->irq_data); else { - desc->chip->mask(irq); - if (desc->chip->ack) - desc->chip->ack(irq); + desc->irq_data.chip->irq_mask(&desc->irq_data); + if (desc->irq_data.chip->irq_ack) + desc->irq_data.chip->irq_ack(&desc->irq_data); } desc->status |= IRQ_MASKED; } -static inline void mask_irq(struct irq_desc *desc, int irq) +static inline void mask_irq(struct irq_desc *desc) { - if (desc->chip->mask) { - desc->chip->mask(irq); + if (desc->irq_data.chip->irq_mask) { + desc->irq_data.chip->irq_mask(&desc->irq_data); desc->status |= IRQ_MASKED; } } -static inline void unmask_irq(struct irq_desc *desc, int irq) +static inline void unmask_irq(struct irq_desc *desc) { - if (desc->chip->unmask) { - desc->chip->unmask(irq); + if (desc->irq_data.chip->irq_unmask) { + desc->irq_data.chip->irq_unmask(&desc->irq_data); desc->status &= ~IRQ_MASKED; } } @@ -476,7 +505,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) irqreturn_t action_ret; raw_spin_lock(&desc->lock); - mask_ack_irq(desc, irq); + mask_ack_irq(desc); if (unlikely(desc->status & IRQ_INPROGRESS)) goto out_unlock; @@ -502,7 +531,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) desc->status &= ~IRQ_INPROGRESS; if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) - unmask_irq(desc, irq); + unmask_irq(desc); out_unlock: raw_spin_unlock(&desc->lock); } @@ -539,7 +568,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) action = desc->action; if (unlikely(!action || (desc->status & IRQ_DISABLED))) { desc->status |= IRQ_PENDING; - mask_irq(desc, irq); + mask_irq(desc); goto out; } @@ -554,7 +583,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out: - desc->chip->eoi(irq); + desc->irq_data.chip->irq_eoi(&desc->irq_data); raw_spin_unlock(&desc->lock); } @@ -590,14 +619,13 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || !desc->action)) { desc->status |= (IRQ_PENDING | IRQ_MASKED); - mask_ack_irq(desc, irq); + mask_ack_irq(desc); goto out_unlock; } kstat_incr_irqs_this_cpu(irq, desc); /* Start handling the irq */ - if (desc->chip->ack) - desc->chip->ack(irq); + desc->irq_data.chip->irq_ack(&desc->irq_data); /* Mark the IRQ currently in progress.*/ desc->status |= IRQ_INPROGRESS; @@ -607,7 +635,7 @@ 
handle_edge_irq(unsigned int irq, struct irq_desc *desc) irqreturn_t action_ret; if (unlikely(!action)) { - mask_irq(desc, irq); + mask_irq(desc); goto out_unlock; } @@ -619,7 +647,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) if (unlikely((desc->status & (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == (IRQ_PENDING | IRQ_MASKED))) { - unmask_irq(desc, irq); + unmask_irq(desc); } desc->status &= ~IRQ_PENDING; @@ -650,15 +678,15 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) kstat_incr_irqs_this_cpu(irq, desc); - if (desc->chip->ack) - desc->chip->ack(irq); + if (desc->irq_data.chip->irq_ack) + desc->irq_data.chip->irq_ack(&desc->irq_data); action_ret = handle_IRQ_event(irq, desc->action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - if (desc->chip->eoi) - desc->chip->eoi(irq); + if (desc->irq_data.chip->irq_eoi) + desc->irq_data.chip->irq_eoi(&desc->irq_data); } void @@ -676,7 +704,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, if (!handle) handle = handle_bad_irq; - else if (desc->chip == &no_irq_chip) { + else if (desc->irq_data.chip == &no_irq_chip) { printk(KERN_WARNING "Trying to install %sinterrupt handler " "for IRQ%d\n", is_chained ? "chained " : "", irq); /* @@ -686,16 +714,16 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, * prevent us to setup the interrupt at all. Switch it to * dummy_irq_chip for easy transition. */ - desc->chip = &dummy_irq_chip; + desc->irq_data.chip = &dummy_irq_chip; } - chip_bus_lock(irq, desc); + chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, flags); /* Uninstall? */ if (handle == handle_bad_irq) { - if (desc->chip != &no_irq_chip) - mask_ack_irq(desc, irq); + if (desc->irq_data.chip != &no_irq_chip) + mask_ack_irq(desc); desc->status |= IRQ_DISABLED; desc->depth = 1; } @@ -706,10 +734,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, desc->status &= ~IRQ_DISABLED; desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; desc->depth = 0; - desc->chip->startup(irq); + desc->irq_data.chip->irq_startup(&desc->irq_data); } raw_spin_unlock_irqrestore(&desc->lock, flags); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } EXPORT_SYMBOL_GPL(__set_irq_handler); @@ -729,32 +757,20 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, __set_irq_handler(irq, handle, 0, name); } -void set_irq_noprobe(unsigned int irq) +void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; - if (!desc) { - printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq); + if (!desc) return; - } - - raw_spin_lock_irqsave(&desc->lock, flags); - desc->status |= IRQ_NOPROBE; - raw_spin_unlock_irqrestore(&desc->lock, flags); -} - -void set_irq_probe(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - if (!desc) { - printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq); - return; - } + /* Sanitize flags */ + set &= IRQF_MODIFY_MASK; + clr &= IRQF_MODIFY_MASK; raw_spin_lock_irqsave(&desc->lock, flags); - desc->status &= ~IRQ_NOPROBE; + desc->status &= ~clr; + desc->status |= set; raw_spin_unlock_irqrestore(&desc->lock, flags); } diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c new file mode 100644 index 00000000000..20dc5474947 --- /dev/null +++ b/kernel/irq/dummychip.c @@ -0,0 +1,68 @@ +/* + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 
2005-2006, Thomas Gleixner, Russell King + * + * This file contains the dummy interrupt chip implementation + */ +#include <linux/interrupt.h> +#include <linux/irq.h> + +#include "internals.h" + +/* + * What should we do if we get a hw irq event on an illegal vector? + * Each architecture has to answer this themself. + */ +static void ack_bad(struct irq_data *data) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + print_irq_desc(data->irq, desc); + ack_bad_irq(data->irq); +} + +/* + * NOP functions + */ +static void noop(struct irq_data *data) { } + +static unsigned int noop_ret(struct irq_data *data) +{ + return 0; +} + +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED +static void compat_noop(unsigned int irq) { } +#define END_INIT .end = compat_noop +#else +#define END_INIT +#endif + +/* + * Generic no controller implementation + */ +struct irq_chip no_irq_chip = { + .name = "none", + .irq_startup = noop_ret, + .irq_shutdown = noop, + .irq_enable = noop, + .irq_disable = noop, + .irq_ack = ack_bad, + END_INIT +}; + +/* + * Generic dummy implementation which can be used for + * real dumb interrupt sources + */ +struct irq_chip dummy_irq_chip = { + .name = "dummy", + .irq_startup = noop_ret, + .irq_shutdown = noop, + .irq_enable = noop, + .irq_disable = noop, + .irq_ack = noop, + .irq_mask = noop, + .irq_unmask = noop, + END_INIT +}; diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 27e5c691122..e2347eb6330 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -11,24 +11,15 @@ */ #include <linux/irq.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/module.h> #include <linux/random.h> +#include <linux/sched.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> -#include <linux/rculist.h> -#include <linux/hash.h> -#include <linux/radix-tree.h> + #include <trace/events/irq.h> #include "internals.h" -/* - * lockdep: we want to handle all irq_desc locks as a single lock-class: - */ -struct lock_class_key irq_desc_lock_class; - /** * handle_bad_irq - handle spurious and unhandled irqs * @irq: the interrupt number @@ -43,304 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc) ack_bad_irq(irq); } -#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) -static void __init init_irq_default_affinity(void) -{ - alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); - cpumask_setall(irq_default_affinity); -} -#else -static void __init init_irq_default_affinity(void) -{ -} -#endif - -/* - * Linux has a controller-independent interrupt architecture. - * Every controller has a 'controller-template', that is used - * by the main code to do the right thing. Each driver-visible - * interrupt source is transparently wired to the appropriate - * controller. Thus drivers need not be aware of the - * interrupt-controller. - * - * The code is designed to be easily extended with new/different - * interrupt controllers, without having to do assembly magic or - * having to touch the generic code. 
- * - * Controller mappings for all interrupt sources: - */ -int nr_irqs = NR_IRQS; -EXPORT_SYMBOL_GPL(nr_irqs); - -#ifdef CONFIG_SPARSE_IRQ - -static struct irq_desc irq_desc_init = { - .irq = -1, - .status = IRQ_DISABLED, - .chip = &no_irq_chip, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), -}; - -void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) -{ - void *ptr; - - ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), - GFP_ATOMIC, node); - - /* - * don't overwite if can not get new one - * init_copy_kstat_irqs() could still use old one - */ - if (ptr) { - printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); - desc->kstat_irqs = ptr; - } -} - -static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) -{ - memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); - - raw_spin_lock_init(&desc->lock); - desc->irq = irq; -#ifdef CONFIG_SMP - desc->node = node; -#endif - lockdep_set_class(&desc->lock, &irq_desc_lock_class); - init_kstat_irqs(desc, node, nr_cpu_ids); - if (!desc->kstat_irqs) { - printk(KERN_ERR "can not alloc kstat_irqs\n"); - BUG_ON(1); - } - if (!alloc_desc_masks(desc, node, false)) { - printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); - BUG_ON(1); - } - init_desc_masks(desc); - arch_init_chip_data(desc, node); -} - -/* - * Protect the sparse_irqs: - */ -DEFINE_RAW_SPINLOCK(sparse_irq_lock); - -static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); - -static void set_irq_desc(unsigned int irq, struct irq_desc *desc) -{ - radix_tree_insert(&irq_desc_tree, irq, desc); -} - -struct irq_desc *irq_to_desc(unsigned int irq) -{ - return radix_tree_lookup(&irq_desc_tree, irq); -} - -void replace_irq_desc(unsigned int irq, struct irq_desc *desc) -{ - void **ptr; - - ptr = radix_tree_lookup_slot(&irq_desc_tree, irq); - if (ptr) - radix_tree_replace_slot(ptr, desc); -} - -static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { - [0 ... 
NR_IRQS_LEGACY-1] = { - .irq = -1, - .status = IRQ_DISABLED, - .chip = &no_irq_chip, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), - } -}; - -static unsigned int *kstat_irqs_legacy; - -int __init early_irq_init(void) -{ - struct irq_desc *desc; - int legacy_count; - int node; - int i; - - init_irq_default_affinity(); - - /* initialize nr_irqs based on nr_cpu_ids */ - arch_probe_nr_irqs(); - printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); - - desc = irq_desc_legacy; - legacy_count = ARRAY_SIZE(irq_desc_legacy); - node = first_online_node; - - /* allocate based on nr_cpu_ids */ - kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * - sizeof(int), GFP_NOWAIT, node); - - for (i = 0; i < legacy_count; i++) { - desc[i].irq = i; -#ifdef CONFIG_SMP - desc[i].node = node; -#endif - desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; - lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); - alloc_desc_masks(&desc[i], node, true); - init_desc_masks(&desc[i]); - set_irq_desc(i, &desc[i]); - } - - return arch_early_irq_init(); -} - -struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) -{ - struct irq_desc *desc; - unsigned long flags; - - if (irq >= nr_irqs) { - WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", - irq, nr_irqs); - return NULL; - } - - desc = irq_to_desc(irq); - if (desc) - return desc; - - raw_spin_lock_irqsave(&sparse_irq_lock, flags); - - /* We have to check it to avoid races with another CPU */ - desc = irq_to_desc(irq); - if (desc) - goto out_unlock; - - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); - - printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); - if (!desc) { - printk(KERN_ERR "can not alloc irq_desc\n"); - BUG_ON(1); - } - init_one_irq_desc(irq, desc, node); - - set_irq_desc(irq, desc); - -out_unlock: - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); - - return desc; -} - -#else /* !CONFIG_SPARSE_IRQ */ - -struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { - [0 ... NR_IRQS-1] = { - .status = IRQ_DISABLED, - .chip = &no_irq_chip, - .handle_irq = handle_bad_irq, - .depth = 1, - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), - } -}; - -static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; -int __init early_irq_init(void) -{ - struct irq_desc *desc; - int count; - int i; - - init_irq_default_affinity(); - - printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); - - desc = irq_desc; - count = ARRAY_SIZE(irq_desc); - - for (i = 0; i < count; i++) { - desc[i].irq = i; - alloc_desc_masks(&desc[i], 0, true); - init_desc_masks(&desc[i]); - desc[i].kstat_irqs = kstat_irqs_all[i]; - } - return arch_early_irq_init(); -} - -struct irq_desc *irq_to_desc(unsigned int irq) -{ - return (irq < NR_IRQS) ? irq_desc + irq : NULL; -} - -struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) -{ - return irq_to_desc(irq); -} -#endif /* !CONFIG_SPARSE_IRQ */ - -void clear_kstat_irqs(struct irq_desc *desc) -{ - memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); -} - -/* - * What should we do if we get a hw irq event on an illegal vector? - * Each architecture has to answer this themself. 
- */ -static void ack_bad(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - - print_irq_desc(irq, desc); - ack_bad_irq(irq); -} - -/* - * NOP functions - */ -static void noop(unsigned int irq) -{ -} - -static unsigned int noop_ret(unsigned int irq) -{ - return 0; -} - -/* - * Generic no controller implementation - */ -struct irq_chip no_irq_chip = { - .name = "none", - .startup = noop_ret, - .shutdown = noop, - .enable = noop, - .disable = noop, - .ack = ack_bad, - .end = noop, -}; - -/* - * Generic dummy implementation which can be used for - * real dumb interrupt sources - */ -struct irq_chip dummy_irq_chip = { - .name = "dummy", - .startup = noop_ret, - .shutdown = noop, - .enable = noop, - .disable = noop, - .ack = noop, - .mask = noop, - .unmask = noop, - .end = noop, -}; - /* * Special, empty irq handler: */ @@ -457,20 +150,20 @@ unsigned int __do_IRQ(unsigned int irq) /* * No locking required for CPU-local interrupts: */ - if (desc->chip->ack) - desc->chip->ack(irq); + if (desc->irq_data.chip->ack) + desc->irq_data.chip->ack(irq); if (likely(!(desc->status & IRQ_DISABLED))) { action_ret = handle_IRQ_event(irq, desc->action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); } - desc->chip->end(irq); + desc->irq_data.chip->end(irq); return 1; } raw_spin_lock(&desc->lock); - if (desc->chip->ack) - desc->chip->ack(irq); + if (desc->irq_data.chip->ack) + desc->irq_data.chip->ack(irq); /* * REPLAY is when Linux resends an IRQ that was dropped earlier * WAITING is used by probe to mark irqs that are being tested @@ -530,27 +223,9 @@ out: * The ->end() handler has to deal with interrupts which got * disabled while the handler was running. */ - desc->chip->end(irq); + desc->irq_data.chip->end(irq); raw_spin_unlock(&desc->lock); return 1; } #endif - -void early_init_irq_lock_class(void) -{ - struct irq_desc *desc; - int i; - - for_each_irq_desc(i, desc) { - lockdep_set_class(&desc->lock, &irq_desc_lock_class); - } -} - -unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) -{ - struct irq_desc *desc = irq_to_desc(irq); - return desc ? 
desc->kstat_irqs[cpu] : 0; -} -EXPORT_SYMBOL(kstat_irqs_cpu); - diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index c63f3bc88f0..4571ae7e085 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -1,9 +1,12 @@ /* * IRQ subsystem internal functions and variables: */ +#include <linux/irqdesc.h> extern int noirqdebug; +#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) + /* Set default functions for irq_chip structures: */ extern void irq_chip_set_defaults(struct irq_chip *chip); @@ -15,21 +18,19 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); -extern struct lock_class_key irq_desc_lock_class; extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); -extern void clear_kstat_irqs(struct irq_desc *desc); -extern raw_spinlock_t sparse_irq_lock; -#ifdef CONFIG_SPARSE_IRQ -void replace_irq_desc(unsigned int irq, struct irq_desc *desc); -#endif +/* Resending of interrupts :*/ +void check_irq_resend(struct irq_desc *desc, unsigned int irq); #ifdef CONFIG_PROC_FS extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); +extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); extern void register_handler_proc(unsigned int irq, struct irqaction *action); extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); #else static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } +static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { } static inline void register_handler_proc(unsigned int irq, struct irqaction *action) { } static inline void unregister_handler_proc(unsigned int irq, @@ -40,17 +41,27 @@ extern int irq_select_affinity_usr(unsigned int irq); extern void irq_set_thread_affinity(struct irq_desc *desc); +#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED +static inline void irq_end(unsigned int irq, struct irq_desc *desc) +{ + if (desc->irq_data.chip && desc->irq_data.chip->end) + desc->irq_data.chip->end(irq); +} +#else +static inline void irq_end(unsigned int irq, struct irq_desc *desc) { } +#endif + /* Inline functions for support of irq chips on slow busses */ -static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) +static inline void chip_bus_lock(struct irq_desc *desc) { - if (unlikely(desc->chip->bus_lock)) - desc->chip->bus_lock(irq); + if (unlikely(desc->irq_data.chip->irq_bus_lock)) + desc->irq_data.chip->irq_bus_lock(&desc->irq_data); } -static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) +static inline void chip_bus_sync_unlock(struct irq_desc *desc) { - if (unlikely(desc->chip->bus_sync_unlock)) - desc->chip->bus_sync_unlock(irq); + if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) + desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); } /* @@ -67,8 +78,8 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); printk("->handle_irq(): %p, ", desc->handle_irq); print_symbol("%s\n", (unsigned long)desc->handle_irq); - printk("->chip(): %p, ", desc->chip); - print_symbol("%s\n", (unsigned long)desc->chip); + printk("->irq_data.chip(): %p, ", desc->irq_data.chip); + print_symbol("%s\n", (unsigned long)desc->irq_data.chip); printk("->action(): %p\n", desc->action); if (desc->action) { 
printk("->action->handler(): %p, ", desc->action->handler); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c new file mode 100644 index 00000000000..9d917ff7267 --- /dev/null +++ b/kernel/irq/irqdesc.c @@ -0,0 +1,395 @@ +/* + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King + * + * This file contains the interrupt descriptor management code + * + * Detailed information is available in Documentation/DocBook/genericirq + * + */ +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> +#include <linux/radix-tree.h> +#include <linux/bitmap.h> + +#include "internals.h" + +/* + * lockdep: we want to handle all irq_desc locks as a single lock-class: + */ +static struct lock_class_key irq_desc_lock_class; + +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) +static void __init init_irq_default_affinity(void) +{ + alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); + cpumask_setall(irq_default_affinity); +} +#else +static void __init init_irq_default_affinity(void) +{ +} +#endif + +#ifdef CONFIG_SMP +static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) +{ + if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) + return -ENOMEM; + +#ifdef CONFIG_GENERIC_PENDING_IRQ + if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { + free_cpumask_var(desc->irq_data.affinity); + return -ENOMEM; + } +#endif + return 0; +} + +static void desc_smp_init(struct irq_desc *desc, int node) +{ + desc->irq_data.node = node; + cpumask_copy(desc->irq_data.affinity, irq_default_affinity); +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_clear(desc->pending_mask); +#endif +} + +static inline int desc_node(struct irq_desc *desc) +{ + return desc->irq_data.node; +} + +#else +static inline int +alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } +static inline void desc_smp_init(struct irq_desc *desc, int node) { } +static inline int desc_node(struct irq_desc *desc) { return 0; } +#endif + +static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) +{ + desc->irq_data.irq = irq; + desc->irq_data.chip = &no_irq_chip; + desc->irq_data.chip_data = NULL; + desc->irq_data.handler_data = NULL; + desc->irq_data.msi_desc = NULL; + desc->status = IRQ_DEFAULT_INIT_FLAGS; + desc->handle_irq = handle_bad_irq; + desc->depth = 1; + desc->irq_count = 0; + desc->irqs_unhandled = 0; + desc->name = NULL; + memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); + desc_smp_init(desc, node); +} + +int nr_irqs = NR_IRQS; +EXPORT_SYMBOL_GPL(nr_irqs); + +static DEFINE_MUTEX(sparse_irq_lock); +static DECLARE_BITMAP(allocated_irqs, NR_IRQS); + +#ifdef CONFIG_SPARSE_IRQ + +static RADIX_TREE(irq_desc_tree, GFP_KERNEL); + +static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) +{ + radix_tree_insert(&irq_desc_tree, irq, desc); +} + +struct irq_desc *irq_to_desc(unsigned int irq) +{ + return radix_tree_lookup(&irq_desc_tree, irq); +} + +static void delete_irq_desc(unsigned int irq) +{ + radix_tree_delete(&irq_desc_tree, irq); +} + +#ifdef CONFIG_SMP +static void free_masks(struct irq_desc *desc) +{ +#ifdef CONFIG_GENERIC_PENDING_IRQ + free_cpumask_var(desc->pending_mask); +#endif + free_cpumask_var(desc->irq_data.affinity); +} +#else +static inline void free_masks(struct irq_desc *desc) { } +#endif + +static struct irq_desc *alloc_desc(int irq, int node) +{ + struct irq_desc *desc; + gfp_t 
gfp = GFP_KERNEL; + + desc = kzalloc_node(sizeof(*desc), gfp, node); + if (!desc) + return NULL; + /* allocate based on nr_cpu_ids */ + desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs), + gfp, node); + if (!desc->kstat_irqs) + goto err_desc; + + if (alloc_masks(desc, gfp, node)) + goto err_kstat; + + raw_spin_lock_init(&desc->lock); + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + + desc_set_defaults(irq, desc, node); + + return desc; + +err_kstat: + kfree(desc->kstat_irqs); +err_desc: + kfree(desc); + return NULL; +} + +static void free_desc(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + unregister_irq_proc(irq, desc); + + mutex_lock(&sparse_irq_lock); + delete_irq_desc(irq); + mutex_unlock(&sparse_irq_lock); + + free_masks(desc); + kfree(desc->kstat_irqs); + kfree(desc); +} + +static int alloc_descs(unsigned int start, unsigned int cnt, int node) +{ + struct irq_desc *desc; + int i; + + for (i = 0; i < cnt; i++) { + desc = alloc_desc(start + i, node); + if (!desc) + goto err; + mutex_lock(&sparse_irq_lock); + irq_insert_desc(start + i, desc); + mutex_unlock(&sparse_irq_lock); + } + return start; + +err: + for (i--; i >= 0; i--) + free_desc(start + i); + + mutex_lock(&sparse_irq_lock); + bitmap_clear(allocated_irqs, start, cnt); + mutex_unlock(&sparse_irq_lock); + return -ENOMEM; +} + +struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) +{ + int res = irq_alloc_descs(irq, irq, 1, node); + + if (res == -EEXIST || res == irq) + return irq_to_desc(irq); + return NULL; +} + +int __init early_irq_init(void) +{ + int i, initcnt, node = first_online_node; + struct irq_desc *desc; + + init_irq_default_affinity(); + + /* Let arch update nr_irqs and return the nr of preallocated irqs */ + initcnt = arch_probe_nr_irqs(); + printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); + + for (i = 0; i < initcnt; i++) { + desc = alloc_desc(i, node); + set_bit(i, allocated_irqs); + irq_insert_desc(i, desc); + } + return arch_early_irq_init(); +} + +#else /* !CONFIG_SPARSE_IRQ */ + +struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { + [0 ... NR_IRQS-1] = { + .status = IRQ_DEFAULT_INIT_FLAGS, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), + } +}; + +static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; +int __init early_irq_init(void) +{ + int count, i, node = first_online_node; + struct irq_desc *desc; + + init_irq_default_affinity(); + + printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); + + desc = irq_desc; + count = ARRAY_SIZE(irq_desc); + + for (i = 0; i < count; i++) { + desc[i].irq_data.irq = i; + desc[i].irq_data.chip = &no_irq_chip; + desc[i].kstat_irqs = kstat_irqs_all[i]; + alloc_masks(desc + i, GFP_KERNEL, node); + desc_smp_init(desc + i, node); + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); + } + return arch_early_irq_init(); +} + +struct irq_desc *irq_to_desc(unsigned int irq) +{ + return (irq < NR_IRQS) ? 
irq_desc + irq : NULL; +} + +struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) +{ + return irq_to_desc(irq); +} + +static void free_desc(unsigned int irq) +{ + dynamic_irq_cleanup(irq); +} + +static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) +{ + return start; +} +#endif /* !CONFIG_SPARSE_IRQ */ + +/* Dynamic interrupt handling */ + +/** + * irq_free_descs - free irq descriptors + * @from: Start of descriptor range + * @cnt: Number of consecutive irqs to free + */ +void irq_free_descs(unsigned int from, unsigned int cnt) +{ + int i; + + if (from >= nr_irqs || (from + cnt) > nr_irqs) + return; + + for (i = 0; i < cnt; i++) + free_desc(from + i); + + mutex_lock(&sparse_irq_lock); + bitmap_clear(allocated_irqs, from, cnt); + mutex_unlock(&sparse_irq_lock); +} + +/** + * irq_alloc_descs - allocate and initialize a range of irq descriptors + * @irq: Allocate for specific irq number if irq >= 0 + * @from: Start the search from this irq number + * @cnt: Number of consecutive irqs to allocate. + * @node: Preferred node on which the irq descriptor should be allocated + * + * Returns the first irq number or error code + */ +int __ref +irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) +{ + int start, ret; + + if (!cnt) + return -EINVAL; + + mutex_lock(&sparse_irq_lock); + + start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); + ret = -EEXIST; + if (irq >=0 && start != irq) + goto err; + + ret = -ENOMEM; + if (start >= nr_irqs) + goto err; + + bitmap_set(allocated_irqs, start, cnt); + mutex_unlock(&sparse_irq_lock); + return alloc_descs(start, cnt, node); + +err: + mutex_unlock(&sparse_irq_lock); + return ret; +} + +/** + * irq_reserve_irqs - mark irqs allocated + * @from: mark from irq number + * @cnt: number of irqs to mark + * + * Returns 0 on success or an appropriate error code + */ +int irq_reserve_irqs(unsigned int from, unsigned int cnt) +{ + unsigned int start; + int ret = 0; + + if (!cnt || (from + cnt) > nr_irqs) + return -EINVAL; + + mutex_lock(&sparse_irq_lock); + start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); + if (start == from) + bitmap_set(allocated_irqs, start, cnt); + else + ret = -EEXIST; + mutex_unlock(&sparse_irq_lock); + return ret; +} + +/** + * irq_get_next_irq - get next allocated irq number + * @offset: where to start the search + * + * Returns next irq number after offset or nr_irqs if none is found. + */ +unsigned int irq_get_next_irq(unsigned int offset) +{ + return find_next_bit(allocated_irqs, nr_irqs, offset); +} + +/** + * dynamic_irq_cleanup - cleanup a dynamically allocated irq + * @irq: irq number to initialize + */ +void dynamic_irq_cleanup(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + raw_spin_lock_irqsave(&desc->lock, flags); + desc_set_defaults(irq, desc, desc_node(desc)); + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) +{ + struct irq_desc *desc = irq_to_desc(irq); + return desc ? 
desc->kstat_irqs[cpu] : 0; +} diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index c3003e9d91a..644e8d5fa36 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || - !desc->chip->set_affinity) + if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || + !desc->irq_data.chip->irq_set_affinity) return 0; return 1; @@ -109,17 +109,18 @@ void irq_set_thread_affinity(struct irq_desc *desc) int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) { struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip = desc->irq_data.chip; unsigned long flags; - if (!desc->chip->set_affinity) + if (!chip->irq_set_affinity) return -EINVAL; raw_spin_lock_irqsave(&desc->lock, flags); #ifdef CONFIG_GENERIC_PENDING_IRQ if (desc->status & IRQ_MOVE_PCNTXT) { - if (!desc->chip->set_affinity(irq, cpumask)) { - cpumask_copy(desc->affinity, cpumask); + if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { + cpumask_copy(desc->irq_data.affinity, cpumask); irq_set_thread_affinity(desc); } } @@ -128,8 +129,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) cpumask_copy(desc->pending_mask, cpumask); } #else - if (!desc->chip->set_affinity(irq, cpumask)) { - cpumask_copy(desc->affinity, cpumask); + if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { + cpumask_copy(desc->irq_data.affinity, cpumask); irq_set_thread_affinity(desc); } #endif @@ -168,16 +169,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) * one of the targets is online. */ if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { - if (cpumask_any_and(desc->affinity, cpu_online_mask) + if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) < nr_cpu_ids) goto set_affinity; else desc->status &= ~IRQ_AFFINITY_SET; } - cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); + cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); set_affinity: - desc->chip->set_affinity(irq, desc->affinity); + desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false); return 0; } @@ -223,7 +224,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) if (!desc->depth++) { desc->status |= IRQ_DISABLED; - desc->chip->disable(irq); + desc->irq_data.chip->irq_disable(&desc->irq_data); } } @@ -246,11 +247,11 @@ void disable_irq_nosync(unsigned int irq) if (!desc) return; - chip_bus_lock(irq, desc); + chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, flags); __disable_irq(desc, irq, false); raw_spin_unlock_irqrestore(&desc->lock, flags); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } EXPORT_SYMBOL(disable_irq_nosync); @@ -313,7 +314,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) * IRQ line is re-enabled. * * This function may be called from IRQ context only when - * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! + * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 
*/ void enable_irq(unsigned int irq) { @@ -323,11 +324,11 @@ void enable_irq(unsigned int irq) if (!desc) return; - chip_bus_lock(irq, desc); + chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, flags); __enable_irq(desc, irq, false); raw_spin_unlock_irqrestore(&desc->lock, flags); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } EXPORT_SYMBOL(enable_irq); @@ -336,8 +337,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) struct irq_desc *desc = irq_to_desc(irq); int ret = -ENXIO; - if (desc->chip->set_wake) - ret = desc->chip->set_wake(irq, on); + if (desc->irq_data.chip->irq_set_wake) + ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); return ret; } @@ -429,12 +430,12 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc) } int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, - unsigned long flags) + unsigned long flags) { int ret; - struct irq_chip *chip = desc->chip; + struct irq_chip *chip = desc->irq_data.chip; - if (!chip || !chip->set_type) { + if (!chip || !chip->irq_set_type) { /* * IRQF_TRIGGER_* but the PIC does not support multiple * flow-types? @@ -445,11 +446,11 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, } /* caller masked out all except trigger mode flags */ - ret = chip->set_type(irq, flags); + ret = chip->irq_set_type(&desc->irq_data, flags); if (ret) - pr_err("setting trigger mode %d for irq %u failed (%pF)\n", - (int)flags, irq, chip->set_type); + pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", + flags, irq, chip->irq_set_type); else { if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) flags |= IRQ_LEVEL; @@ -457,8 +458,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); desc->status |= flags; - if (chip != desc->chip) - irq_chip_set_defaults(desc->chip); + if (chip != desc->irq_data.chip) + irq_chip_set_defaults(desc->irq_data.chip); } return ret; @@ -507,7 +508,7 @@ static int irq_wait_for_interrupt(struct irqaction *action) static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) { again: - chip_bus_lock(irq, desc); + chip_bus_lock(desc); raw_spin_lock_irq(&desc->lock); /* @@ -521,17 +522,17 @@ again: */ if (unlikely(desc->status & IRQ_INPROGRESS)) { raw_spin_unlock_irq(&desc->lock); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); cpu_relax(); goto again; } if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { desc->status &= ~IRQ_MASKED; - desc->chip->unmask(irq); + desc->irq_data.chip->irq_unmask(&desc->irq_data); } raw_spin_unlock_irq(&desc->lock); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } #ifdef CONFIG_SMP @@ -556,7 +557,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) } raw_spin_lock_irq(&desc->lock); - cpumask_copy(mask, desc->affinity); + cpumask_copy(mask, desc->irq_data.affinity); raw_spin_unlock_irq(&desc->lock); set_cpus_allowed_ptr(current, mask); @@ -657,7 +658,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (!desc) return -EINVAL; - if (desc->chip == &no_irq_chip) + if (desc->irq_data.chip == &no_irq_chip) return -ENOSYS; /* * Some drivers like serial.c use request_irq() heavily, @@ -752,7 +753,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) } if (!shared) { - irq_chip_set_defaults(desc->chip); + irq_chip_set_defaults(desc->irq_data.chip); init_waitqueue_head(&desc->wait_for_threads); @@ -779,7 +780,7 
@@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (!(desc->status & IRQ_NOAUTOEN)) { desc->depth = 0; desc->status &= ~IRQ_DISABLED; - desc->chip->startup(irq); + desc->irq_data.chip->irq_startup(&desc->irq_data); } else /* Undo nested disables: */ desc->depth = 1; @@ -912,17 +913,17 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) /* Currently used only by UML, might disappear one day: */ #ifdef CONFIG_IRQ_RELEASE_METHOD - if (desc->chip->release) - desc->chip->release(irq, dev_id); + if (desc->irq_data.chip->release) + desc->irq_data.chip->release(irq, dev_id); #endif /* If this was the last handler, shut down the IRQ line: */ if (!desc->action) { desc->status |= IRQ_DISABLED; - if (desc->chip->shutdown) - desc->chip->shutdown(irq); + if (desc->irq_data.chip->irq_shutdown) + desc->irq_data.chip->irq_shutdown(&desc->irq_data); else - desc->chip->disable(irq); + desc->irq_data.chip->irq_disable(&desc->irq_data); } #ifdef CONFIG_SMP @@ -997,9 +998,9 @@ void free_irq(unsigned int irq, void *dev_id) if (!desc) return; - chip_bus_lock(irq, desc); + chip_bus_lock(desc); kfree(__free_irq(irq, dev_id)); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); } EXPORT_SYMBOL(free_irq); @@ -1086,9 +1087,9 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, action->name = devname; action->dev_id = dev_id; - chip_bus_lock(irq, desc); + chip_bus_lock(desc); retval = __setup_irq(irq, desc, action); - chip_bus_sync_unlock(irq, desc); + chip_bus_sync_unlock(desc); if (retval) kfree(action); diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 24196228083..1d254194048 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -7,6 +7,7 @@ void move_masked_irq(int irq) { struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip = desc->irq_data.chip; if (likely(!(desc->status & IRQ_MOVE_PENDING))) return; @@ -24,7 +25,7 @@ void move_masked_irq(int irq) if (unlikely(cpumask_empty(desc->pending_mask))) return; - if (!desc->chip->set_affinity) + if (!chip->irq_set_affinity) return; assert_raw_spin_locked(&desc->lock); @@ -43,8 +44,9 @@ void move_masked_irq(int irq) */ if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)) - if (!desc->chip->set_affinity(irq, desc->pending_mask)) { - cpumask_copy(desc->affinity, desc->pending_mask); + if (!chip->irq_set_affinity(&desc->irq_data, + desc->pending_mask, false)) { + cpumask_copy(desc->irq_data.affinity, desc->pending_mask); irq_set_thread_affinity(desc); } @@ -61,8 +63,8 @@ void move_native_irq(int irq) if (unlikely(desc->status & IRQ_DISABLED)) return; - desc->chip->mask(irq); + desc->irq_data.chip->irq_mask(&desc->irq_data); move_masked_irq(irq); - desc->chip->unmask(irq); + desc->irq_data.chip->irq_unmask(&desc->irq_data); } diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c deleted file mode 100644 index 65d3845665a..00000000000 --- a/kernel/irq/numa_migrate.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * NUMA irq-desc migration code - * - * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to - * the new "home node" of the IRQ. 
- */ - -#include <linux/irq.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/random.h> -#include <linux/interrupt.h> -#include <linux/kernel_stat.h> - -#include "internals.h" - -static void init_copy_kstat_irqs(struct irq_desc *old_desc, - struct irq_desc *desc, - int node, int nr) -{ - init_kstat_irqs(desc, node, nr); - - if (desc->kstat_irqs != old_desc->kstat_irqs) - memcpy(desc->kstat_irqs, old_desc->kstat_irqs, - nr * sizeof(*desc->kstat_irqs)); -} - -static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) -{ - if (old_desc->kstat_irqs == desc->kstat_irqs) - return; - - kfree(old_desc->kstat_irqs); - old_desc->kstat_irqs = NULL; -} - -static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, - struct irq_desc *desc, int node) -{ - memcpy(desc, old_desc, sizeof(struct irq_desc)); - if (!alloc_desc_masks(desc, node, false)) { - printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " - "for migration.\n", irq); - return false; - } - raw_spin_lock_init(&desc->lock); - desc->node = node; - lockdep_set_class(&desc->lock, &irq_desc_lock_class); - init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); - init_copy_desc_masks(old_desc, desc); - arch_init_copy_chip_data(old_desc, desc, node); - return true; -} - -static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) -{ - free_kstat_irqs(old_desc, desc); - free_desc_masks(old_desc, desc); - arch_free_chip_data(old_desc, desc); -} - -static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, - int node) -{ - struct irq_desc *desc; - unsigned int irq; - unsigned long flags; - - irq = old_desc->irq; - - raw_spin_lock_irqsave(&sparse_irq_lock, flags); - - /* We have to check it to avoid races with another CPU */ - desc = irq_to_desc(irq); - - if (desc && old_desc != desc) - goto out_unlock; - - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); - if (!desc) { - printk(KERN_ERR "irq %d: can not get new irq_desc " - "for migration.\n", irq); - /* still use old one */ - desc = old_desc; - goto out_unlock; - } - if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) { - /* still use old one */ - kfree(desc); - desc = old_desc; - goto out_unlock; - } - - replace_irq_desc(irq, desc); - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); - - /* free the old one */ - free_one_irq_desc(old_desc, desc); - kfree(old_desc); - - return desc; - -out_unlock: - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); - - return desc; -} - -struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) -{ - /* those static or target node is -1, do not move them */ - if (desc->irq < NR_IRQS_LEGACY || node == -1) - return desc; - - if (desc->node != node) - desc = __real_move_irq_desc(desc, node); - - return desc; -} - diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 09a2ee540bd..01b1d3a8898 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -21,7 +21,7 @@ static struct proc_dir_entry *root_irq_dir; static int irq_affinity_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long)m->private); - const struct cpumask *mask = desc->affinity; + const struct cpumask *mask = desc->irq_data.affinity; #ifdef CONFIG_GENERIC_PENDING_IRQ if (desc->status & IRQ_MOVE_PENDING) @@ -65,7 +65,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, cpumask_var_t new_value; int err; - if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || + if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity || 
irq_balancing_disabled(irq)) return -EIO; @@ -185,7 +185,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long) m->private); - seq_printf(m, "%d\n", desc->node); + seq_printf(m, "%d\n", desc->irq_data.node); return 0; } @@ -269,7 +269,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) { char name [MAX_NAMELEN]; - if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) return; memset(name, 0, MAX_NAMELEN); @@ -297,6 +297,24 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) &irq_spurious_proc_fops, (void *)(long)irq); } +void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) +{ + char name [MAX_NAMELEN]; + + if (!root_irq_dir || !desc->dir) + return; +#ifdef CONFIG_SMP + remove_proc_entry("smp_affinity", desc->dir); + remove_proc_entry("affinity_hint", desc->dir); + remove_proc_entry("node", desc->dir); +#endif + remove_proc_entry("spurious", desc->dir); + + memset(name, 0, MAX_NAMELEN); + sprintf(name, "%u", irq); + remove_proc_entry(name, root_irq_dir); +} + #undef MAX_NAMELEN void unregister_handler_proc(unsigned int irq, struct irqaction *action) diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 090c3763f3a..891115a929a 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) /* * Make sure the interrupt is enabled, before resending it: */ - desc->chip->enable(irq); + desc->irq_data.chip->irq_enable(&desc->irq_data); /* * We do not resend level type interrupts. Level type @@ -70,7 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; - if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { + if (!desc->irq_data.chip->irq_retrigger || + !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { #ifdef CONFIG_HARDIRQS_SW_RESEND /* Set it pending and activate the softirq: */ set_bit(irq, irqs_resend); diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 89fb90ae534..3089d3b9d5f 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -14,6 +14,8 @@ #include <linux/moduleparam.h> #include <linux/timer.h> +#include "internals.h" + static int irqfixup __read_mostly; #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) @@ -78,8 +80,8 @@ static int try_one_irq(int irq, struct irq_desc *desc) * If we did actual work for the real IRQ line we must let the * IRQ controller clean up too */ - if (work && desc->chip && desc->chip->end) - desc->chip->end(irq); + if (work) + irq_end(irq, desc); raw_spin_unlock(&desc->lock); return ok; @@ -254,7 +256,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, printk(KERN_EMERG "Disabling IRQ #%d\n", irq); desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; desc->depth++; - desc->chip->disable(irq); + desc->irq_data.chip->irq_disable(&desc->irq_data); mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); diff --git a/kernel/softirq.c b/kernel/softirq.c index 07b4f1b1a73..d19b1c9aa7c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -886,17 +886,14 @@ int __init __weak early_irq_init(void) return 0; } +#ifdef CONFIG_GENERIC_HARDIRQS int __init __weak arch_probe_nr_irqs(void) { - return 0; + return NR_IRQS_LEGACY; } int __init __weak arch_early_irq_init(void) { return 0; } - -int __weak 
arch_init_chip_data(struct irq_desc *desc, int node) -{ - return 0; -} +#endif
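As a rough illustration of the irq_data based chip callbacks that the conversion above switches to (irq_ack/irq_mask/irq_unmask taking a struct irq_data * instead of an irq number), a minimal driver-side chip might look like the sketch below. It is not part of the patch: the "example_" names, the register offsets and the MMIO base are hypothetical, and the registration of the chip is left out; only the callback signatures follow the converted code.

/*
 * Illustrative sketch, not part of this patch: a minimal irq_chip using
 * the new irq_data based callbacks. All "example_" names, offsets and
 * the MMIO base are hypothetical.
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/bitops.h>

static void __iomem *example_base;	/* hypothetical MMIO base */

static void example_irq_mask(struct irq_data *data)
{
	/* data->irq is the Linux interrupt number, as in the code above */
	writel(BIT(data->irq), example_base + 0x04);	/* hypothetical MASK reg */
}

static void example_irq_unmask(struct irq_data *data)
{
	writel(BIT(data->irq), example_base + 0x08);	/* hypothetical UNMASK reg */
}

static void example_irq_ack(struct irq_data *data)
{
	writel(BIT(data->irq), example_base + 0x0c);	/* hypothetical ACK reg */
}

static struct irq_chip example_irq_chip = {
	.name		= "example",
	.irq_ack	= example_irq_ack,
	.irq_mask	= example_irq_mask,
	.irq_unmask	= example_irq_unmask,
};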
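Similarly, the new descriptor allocator added in kernel/irq/irqdesc.c (irq_alloc_descs()/irq_free_descs()) could be used along the lines of the sketch below. Again this is only an illustration under assumptions: the count and the starting search point are made up, and the declarations are assumed to be reachable via <linux/irq.h>, which is not part of this excerpt.

/*
 * Illustrative sketch, not part of this patch: claim a small block of
 * interrupt descriptors and release it again.
 */
#include <linux/irq.h>

static int example_claim_irq_range(int node)
{
	int irq;

	/* first argument < 0: no fixed number required, search upwards from 64 */
	irq = irq_alloc_descs(-1, 64, 4, node);
	if (irq < 0)
		return irq;	/* negative error code, e.g. -ENOMEM, as documented above */

	/*
	 * irq .. irq + 3 are now marked in the allocated_irqs bitmap and,
	 * with CONFIG_SPARSE_IRQ, have freshly initialized descriptors.
	 */

	irq_free_descs(irq, 4);
	return 0;
}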