author      Linus Torvalds <torvalds@linux-foundation.org>   2016-12-12 20:23:11 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>   2016-12-12 20:23:11 -0800
commit      f082f02c4731900a5065de69eb0d8cb5aab66196 (patch)
tree        b7d3c90cb087d1ebcc0ab945bdb618c78b38051d /drivers
parent      9465d9cc31fa732089cd8bec9f1bdfcdc174a5ce (diff)
parent      2cae3a1ed36ded9b4c8859feeea73827f1c2130d (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "The irq department provides:

   - a major update to the auto affinity management code, which is used
     by multi-queue devices

   - move of the microblaze irq chip driver into the common driver code
     so it can be shared between microblaze, powerpc and MIPS

   - a series of updates to the ARM GICV3 interrupt controller

   - the usual pile of fixes and small improvements all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  powerpc/virtex: Use generic xilinx irqchip driver
  irqchip/xilinx: Try to fall back if xlnx,kind-of-intr not provided
  irqchip/xilinx: Add support for parent intc
  irqchip/xilinx: Rename get_irq to xintc_get_irq
  irqchip/xilinx: Restructure and use jump label api
  irqchip/xilinx: Clean up print messages
  microblaze/irqchip: Move intc driver to irqchip
  ARM: virt: Select ARM_GIC_V3_ITS
  ARM: gic-v3-its: Add 32bit support to GICv3 ITS
  irqchip/gic-v3-its: Specialise readq and writeq accesses
  irqchip/gic-v3-its: Specialise flush_dcache operation
  irqchip/gic-v3-its: Narrow down Entry Size when used as a divider
  irqchip/gic-v3-its: Change unsigned types for AArch32 compatibility
  irqchip/gic-v3: Use nops macro for Cavium ThunderX erratum 23154
  irqchip/gic-v3: Convert arm64 GIC accessors to {read,write}_sysreg_s
  genirq/msi: Drop artificial PCI dependency
  irqchip/bcm7038-l1: Implement irq_cpu_offline() callback
  genirq/affinity: Use default affinity mask for reserved vectors
  genirq/affinity: Take reserved vectors into account when spreading irqs
  PCI: Remove the irq_affinity mask from struct pci_dev
  ...
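To make the affinity rework concrete: drivers no longer populate the (now removed) dev->irq_affinity mask; they describe reserved vectors through struct irq_affinity and pass it to the new pci_alloc_irq_vectors_affinity() introduced in the drivers/pci/msi.c hunks below. A hypothetical multi-queue driver (the foo_* names and vector counts are illustrative, not taken from this merge) might do:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_admin_isr(int irq, void *data);	/* assumed to exist */

static int foo_setup_irqs(struct pci_dev *pdev, int nr_queues)
{
	/* Reserve vector 0 for admin work; only the remaining queue
	 * vectors are spread across the online CPUs. */
	struct irq_affinity affd = { .pre_vectors = 1 };
	int nvecs;

	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, nr_queues + 1,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &affd);
	if (nvecs < 0)
		return nvecs;

	/* pci_irq_vector() translates a vector index into a Linux IRQ */
	return request_irq(pci_irq_vector(pdev, 0), foo_admin_isr, 0,
			   "foo-admin", pdev);
}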
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/irqchip/Kconfig            |   4
-rw-r--r--   drivers/irqchip/Makefile           |   1
-rw-r--r--   drivers/irqchip/irq-bcm7038-l1.c   |  26
-rw-r--r--   drivers/irqchip/irq-gic-v3-its.c   |  75
-rw-r--r--   drivers/irqchip/irq-xilinx-intc.c  | 241
-rw-r--r--   drivers/pci/msi.c                  |  78
6 files changed, 352 insertions, 73 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bc0af3307bbf..ae96731cd2fb 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -211,6 +211,10 @@ config XTENSA_MX
bool
select IRQ_DOMAIN
+config XILINX_INTC
+ bool
+ select IRQ_DOMAIN
+
config IRQ_CROSSBAR
bool
help
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e4dbfc85abdb..0e55d94065bf 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
+obj-$(CONFIG_XILINX_INTC) += irq-xilinx-intc.o
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 353c54986211..c2662a1bfdd3 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -215,6 +215,31 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
return 0;
}
+static void bcm7038_l1_cpu_offline(struct irq_data *d)
+{
+ struct cpumask *mask = irq_data_get_affinity_mask(d);
+ int cpu = smp_processor_id();
+ cpumask_t new_affinity;
+
+ /* This CPU was not in the affinity mask */
+ if (!cpumask_test_cpu(cpu, mask))
+ return;
+
+ if (cpumask_weight(mask) > 1) {
+ /*
+ * Multiple CPU affinity, remove this CPU from the affinity
+ * mask
+ */
+ cpumask_copy(&new_affinity, mask);
+ cpumask_clear_cpu(cpu, &new_affinity);
+ } else {
+ /* Only CPU in the mask: fall back to the lowest online CPU */
+ cpumask_clear(&new_affinity);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+ }
+ irq_set_affinity_locked(d, &new_affinity, false);
+}
+
static int __init bcm7038_l1_init_one(struct device_node *dn,
unsigned int idx,
struct bcm7038_l1_chip *intc)
@@ -266,6 +291,7 @@ static struct irq_chip bcm7038_l1_irq_chip = {
.irq_mask = bcm7038_l1_mask,
.irq_unmask = bcm7038_l1_unmask,
.irq_set_affinity = bcm7038_l1_set_affinity,
+ .irq_cpu_offline = bcm7038_l1_cpu_offline,
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
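Nothing in this file invokes bcm7038_l1_cpu_offline() directly: on these MIPS-based set-top SoCs the CPU-hotplug teardown path is expected to call the generic irq_cpu_offline() helper, which walks the active descriptors and hands each one to the chip callback. A hedged paraphrase of that helper's shape (see kernel/irq/manage.c in the tree for the authoritative version):

void irq_cpu_offline(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;
		unsigned long flags;

		raw_spin_lock_irqsave(&desc->lock, flags);
		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline)
			chip->irq_cpu_offline(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}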
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c5dee300e8a3..69b040f47d56 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -37,7 +37,6 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
-#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>
@@ -196,7 +195,7 @@ typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
- cmd->raw_cmd[0] &= ~0xffUL;
+ cmd->raw_cmd[0] &= ~0xffULL;
cmd->raw_cmd[0] |= cmd_nr;
}
@@ -208,43 +207,43 @@ static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
- cmd->raw_cmd[1] &= ~0xffffffffUL;
+ cmd->raw_cmd[1] &= ~0xffffffffULL;
cmd->raw_cmd[1] |= id;
}
static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
- cmd->raw_cmd[1] &= 0xffffffffUL;
+ cmd->raw_cmd[1] &= 0xffffffffULL;
cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}
static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
- cmd->raw_cmd[1] &= ~0x1fUL;
+ cmd->raw_cmd[1] &= ~0x1fULL;
cmd->raw_cmd[1] |= size & 0x1f;
}
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
- cmd->raw_cmd[2] &= ~0xffffffffffffUL;
- cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
+ cmd->raw_cmd[2] &= ~0xffffffffffffULL;
+ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00ULL;
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
- cmd->raw_cmd[2] &= ~(1UL << 63);
+ cmd->raw_cmd[2] &= ~(1ULL << 63);
cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
}
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
- cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
- cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
+ cmd->raw_cmd[2] &= ~(0xffffffffULL << 16);
+ cmd->raw_cmd[2] |= (target_addr & (0xffffffffULL << 16));
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
- cmd->raw_cmd[2] &= ~0xffffUL;
+ cmd->raw_cmd[2] &= ~0xffffULL;
cmd->raw_cmd[2] |= col;
}
@@ -433,7 +432,7 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
* the ITS.
*/
if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
- __flush_dcache_area(cmd, sizeof(*cmd));
+ gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
else
dsb(ishst);
}
@@ -602,7 +601,7 @@ static void lpi_set_config(struct irq_data *d, bool enable)
* Humpf...
*/
if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
- __flush_dcache_area(cfg, sizeof(*cfg));
+ gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
else
dsb(ishst);
its_send_inv(its_dev, id);
@@ -657,8 +656,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
its = its_dev->its;
addr = its->phys_base + GITS_TRANSLATER;
- msg->address_lo = addr & ((1UL << 32) - 1);
- msg->address_hi = addr >> 32;
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
msg->data = its_get_event_id(d);
iommu_dma_map_msi_msg(d->irq, msg);
@@ -817,7 +816,7 @@ static int __init its_alloc_lpi_tables(void)
LPI_PROPBASE_SZ);
/* Make sure the GIC will observe the written configuration */
- __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
+ gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
return 0;
}
@@ -836,7 +835,7 @@ static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
u32 idx = baser - its->tables;
- return readq_relaxed(its->base + GITS_BASER + (idx << 3));
+ return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}
static void its_write_baser(struct its_node *its, struct its_baser *baser,
@@ -844,7 +843,7 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser,
{
u32 idx = baser - its->tables;
- writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
+ gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
baser->val = its_read_baser(its, baser);
}
@@ -910,7 +909,7 @@ retry_baser:
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
- __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
@@ -935,9 +934,9 @@ retry_baser:
}
if (val != tmp) {
- pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
+ pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
- (unsigned long) val, (unsigned long) tmp);
+ val, tmp);
free_pages((unsigned long)base, order);
return -ENXIO;
}
@@ -948,7 +947,7 @@ retry_baser:
tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
- &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
indirect ? "indirect" : "flat", (int)esz,
@@ -983,7 +982,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
* which is reported by ITS hardware times lvl1 table
* entry size.
*/
- ids -= ilog2(psz / esz);
+ ids -= ilog2(psz / (int)esz);
esz = GITS_LVL1_ENTRY_SIZE;
}
}
@@ -998,7 +997,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
new_order = max_t(u32, get_order(esz << ids), new_order);
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
- ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
&its->phys_base, its->device_ids, ids);
}
@@ -1102,7 +1101,7 @@ static void its_cpu_init_lpis(void)
}
/* Make sure the GIC will observe the zero-ed page */
- __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
+ gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
paddr = page_to_phys(pend_page);
pr_info("CPU%d: using LPI pending table @%pa\n",
@@ -1126,8 +1125,8 @@ static void its_cpu_init_lpis(void)
GICR_PROPBASER_WaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
- writeq_relaxed(val, rbase + GICR_PROPBASER);
- tmp = readq_relaxed(rbase + GICR_PROPBASER);
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+ tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
@@ -1139,7 +1138,7 @@ static void its_cpu_init_lpis(void)
val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
GICR_PROPBASER_CACHEABILITY_MASK);
val |= GICR_PROPBASER_nC;
- writeq_relaxed(val, rbase + GICR_PROPBASER);
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
}
pr_info_once("GIC: using cache flushing for LPI property table\n");
gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
@@ -1150,8 +1149,8 @@ static void its_cpu_init_lpis(void)
GICR_PENDBASER_InnerShareable |
GICR_PENDBASER_WaWb);
- writeq_relaxed(val, rbase + GICR_PENDBASER);
- tmp = readq_relaxed(rbase + GICR_PENDBASER);
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
/*
@@ -1161,7 +1160,7 @@ static void its_cpu_init_lpis(void)
val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
GICR_PENDBASER_CACHEABILITY_MASK);
val |= GICR_PENDBASER_nC;
- writeq_relaxed(val, rbase + GICR_PENDBASER);
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
}
/* Enable LPIs */
@@ -1287,13 +1286,13 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
/* Flush Lvl2 table to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
- __flush_dcache_area(page_address(page), baser->psz);
+ gic_flush_dcache_to_poc(page_address(page), baser->psz);
table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
- __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
+ gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
/* Ensure updated table contents are visible to ITS hardware */
dsb(sy);
@@ -1340,7 +1339,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
return NULL;
}
- __flush_dcache_area(itt, sz);
+ gic_flush_dcache_to_poc(itt, sz);
dev->its = its;
dev->itt = itt;
@@ -1717,8 +1716,8 @@ static int __init its_probe_one(struct resource *res,
(ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
GITS_CBASER_VALID);
- writeq_relaxed(baser, its->base + GITS_CBASER);
- tmp = readq_relaxed(its->base + GITS_CBASER);
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
+ tmp = gits_read_cbaser(its->base + GITS_CBASER);
if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
@@ -1730,13 +1729,13 @@ static int __init its_probe_one(struct resource *res,
baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
GITS_CBASER_CACHEABILITY_MASK);
baser |= GITS_CBASER_nC;
- writeq_relaxed(baser, its->base + GITS_CBASER);
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
}
pr_info("ITS: using cache flushing for cmd queue\n");
its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
}
- writeq_relaxed(0, its->base + GITS_CWRITER);
+ gits_write_cwriter(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
err = its_init_domain(handle, its);
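The wholesale UL-to-ULL conversion in the encoders above is what "irqchip/gic-v3-its: Change unsigned types for AArch32 compatibility" is about: on a 32-bit kernel, unsigned long is 32 bits wide, so a complemented UL constant zero-extends when combined with a u64 and silently clears the upper word. A minimal standalone sketch of the failure mode (illustrative values, not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw = 0xaabbccddeeff0011ULL;

	/* With a 32-bit unsigned long, ~0xffUL evaluates to 0xffffff00
	 * and zero-extends to 0x00000000ffffff00 before the AND. */
	uint32_t mask32 = ~0xffU;
	uint64_t bad  = raw & mask32;	/* upper 32 bits wiped */
	uint64_t good = raw & ~0xffULL;	/* only the low byte cleared */

	printf("bad  = %016llx\n", (unsigned long long)bad);	/* 00000000eeff0000 */
	printf("good = %016llx\n", (unsigned long long)good);	/* aabbccddeeff0000 */
	return 0;
}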
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
new file mode 100644
index 000000000000..3db7ab1c9741
--- /dev/null
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012-2013 Xilinx, Inc.
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <linux/of_irq.h>
+
+/* No one else should require these constants, so define them locally here. */
+#define ISR 0x00 /* Interrupt Status Register */
+#define IPR 0x04 /* Interrupt Pending Register */
+#define IER 0x08 /* Interrupt Enable Register */
+#define IAR 0x0c /* Interrupt Acknowledge Register */
+#define SIE 0x10 /* Set Interrupt Enable bits */
+#define CIE 0x14 /* Clear Interrupt Enable bits */
+#define IVR 0x18 /* Interrupt Vector Register */
+#define MER 0x1c /* Master Enable Register */
+
+#define MER_ME (1<<0)
+#define MER_HIE (1<<1)
+
+static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
+
+struct xintc_irq_chip {
+ void __iomem *base;
+ struct irq_domain *root_domain;
+ u32 intr_mask;
+};
+
+static struct xintc_irq_chip *xintc_irqc;
+
+static void xintc_write(int reg, u32 data)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ iowrite32be(data, xintc_irqc->base + reg);
+ else
+ iowrite32(data, xintc_irqc->base + reg);
+}
+
+static unsigned int xintc_read(int reg)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ return ioread32be(xintc_irqc->base + reg);
+ else
+ return ioread32(xintc_irqc->base + reg);
+}
+
+static void intc_enable_or_unmask(struct irq_data *d)
+{
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
+
+ /* Ack level irqs here: they cannot be acked in the ack
+ * callback, since handle_level_irq() acks the irq before
+ * calling the interrupt handler.
+ */
+ if (irqd_is_level_type(d))
+ xintc_write(IAR, mask);
+
+ xintc_write(SIE, mask);
+}
+
+static void intc_disable_or_mask(struct irq_data *d)
+{
+ pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
+ xintc_write(CIE, 1 << d->hwirq);
+}
+
+static void intc_ack(struct irq_data *d)
+{
+ pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
+ xintc_write(IAR, 1 << d->hwirq);
+}
+
+static void intc_mask_ack(struct irq_data *d)
+{
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
+ xintc_write(CIE, mask);
+ xintc_write(IAR, mask);
+}
+
+static struct irq_chip intc_dev = {
+ .name = "Xilinx INTC",
+ .irq_unmask = intc_enable_or_unmask,
+ .irq_mask = intc_disable_or_mask,
+ .irq_ack = intc_ack,
+ .irq_mask_ack = intc_mask_ack,
+};
+
+unsigned int xintc_get_irq(void)
+{
+ unsigned int hwirq, irq = -1;
+
+ hwirq = xintc_read(IVR);
+ if (hwirq != -1U)
+ irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+
+ pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+ return irq;
+}
+
+static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ if (xintc_irqc->intr_mask & (1 << hw)) {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ return 0;
+}
+
+static const struct irq_domain_ops xintc_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = xintc_map,
+};
+
+static void xil_intc_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+ do {
+ pending = xintc_get_irq();
+ if (pending == -1U)
+ break;
+ generic_handle_irq(pending);
+ } while (true);
+ chained_irq_exit(chip, desc);
+}
+
+static int __init xilinx_intc_of_init(struct device_node *intc,
+ struct device_node *parent)
+{
+ u32 nr_irq;
+ int ret, irq;
+ struct xintc_irq_chip *irqc;
+
+ if (xintc_irqc) {
+ pr_err("irq-xilinx: Multiple instances aren't supported\n");
+ return -EINVAL;
+ }
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ xintc_irqc = irqc;
+
+ irqc->base = of_iomap(intc, 0);
+ BUG_ON(!irqc->base);
+
+ ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+ if (ret < 0) {
+ pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
+ goto err_alloc;
+ }
+
+ ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
+ if (ret < 0) {
+ pr_warn("irq-xilinx: unable to read xlnx,kind-of-intr\n");
+ irqc->intr_mask = 0;
+ }
+
+ if (irqc->intr_mask >> nr_irq)
+ pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+
+ pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n",
+ intc->full_name, nr_irq, irqc->intr_mask);
+
+
+ /*
+ * Disable all external interrupts until they are
+ * explicitly requested.
+ */
+ xintc_write(IER, 0);
+
+ /* Acknowledge any pending interrupts just in case. */
+ xintc_write(IAR, 0xffffffff);
+
+ /* Turn on the Master Enable. */
+ xintc_write(MER, MER_HIE | MER_ME);
+ if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
+ static_branch_enable(&xintc_is_be);
+ xintc_write(MER, MER_HIE | MER_ME);
+ }
+
+ irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+ &xintc_irq_domain_ops, irqc);
+ if (!irqc->root_domain) {
+ pr_err("irq-xilinx: Unable to create IRQ domain\n");
+ goto err_alloc;
+ }
+
+ if (parent) {
+ irq = irq_of_parse_and_map(intc, 0);
+ if (irq) {
+ irq_set_chained_handler_and_data(irq,
+ xil_intc_irq_handler,
+ irqc);
+ } else {
+ pr_err("irq-xilinx: interrupts property not in DT\n");
+ ret = -EINVAL;
+ goto err_alloc;
+ }
+ } else {
+ irq_set_default_host(irqc->root_domain);
+ }
+
+ return 0;
+
+err_alloc:
+ xintc_irqc = NULL;
+ kfree(irqc);
+ return ret;
+
+}
+
+IRQCHIP_DECLARE(xilinx_intc_xps, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
+IRQCHIP_DECLARE(xilinx_intc_opb, "xlnx,opb-intc-1.00.c", xilinx_intc_of_init);
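One detail worth noting in the new driver: the MER write near the end of xilinx_intc_of_init() doubles as a byte-order probe. The controller is first accessed little-endian; if the enable bits do not read back, the xintc_is_be static key is flipped and the write is repeated, after which every xintc_read()/xintc_write() takes the big-endian branch at essentially no cost thanks to the jump label API. The idiom, distilled into a standalone sketch (using the MER_* constants and helpers defined above; assumes the register is otherwise accessible):

/* Byte-order probe: MER's enable bits only stick when the
 * endianness of the access matches the hardware instance. */
static void __init xintc_detect_endianness(void)
{
	xintc_write(MER, MER_HIE | MER_ME);	/* little-endian attempt */
	if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
		static_branch_enable(&xintc_is_be);
		xintc_write(MER, MER_HIE | MER_ME);	/* retry big-endian */
	}
}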
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index ad70507cfb56..dd27f73a45fc 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -551,14 +551,14 @@ error_attrs:
}
static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
+msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
{
struct cpumask *masks = NULL;
struct msi_desc *entry;
u16 control;
- if (affinity) {
- masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (affd) {
+ masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
@@ -618,7 +618,8 @@ static int msi_verify_entries(struct pci_dev *dev)
* an error, and a positive return value indicates the number of interrupts
* which could have been allocated.
*/
-static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ const struct irq_affinity *affd)
{
struct msi_desc *entry;
int ret;
@@ -626,7 +627,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
- entry = msi_setup_entry(dev, nvec, affinity);
+ entry = msi_setup_entry(dev, nvec, affd);
if (!entry)
return -ENOMEM;
@@ -690,14 +691,14 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec,
- bool affinity)
+ const struct irq_affinity *affd)
{
struct cpumask *curmsk, *masks = NULL;
struct msi_desc *entry;
int ret, i;
- if (affinity) {
- masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (affd) {
+ masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
@@ -753,14 +754,14 @@ static void msix_program_entries(struct pci_dev *dev,
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
- * @affinity: flag to indicate cpu irq affinity mask should be set
+ * @affd: Optional pointer to enable automatic affinity assignment
*
* Setup the MSI-X capability structure of device function with a
* single MSI-X irq. A return of zero indicates the successful setup of
* requested MSI-X entries with allocated irqs or non-zero for otherwise.
**/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, bool affinity)
+ int nvec, const struct irq_affinity *affd)
{
int ret;
u16 control;
@@ -775,7 +776,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
if (!base)
return -ENOMEM;
- ret = msix_setup_entries(dev, base, entries, nvec, affinity);
+ ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
return ret;
@@ -956,7 +957,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
EXPORT_SYMBOL(pci_msix_vec_count);
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, bool affinity)
+ int nvec, const struct irq_affinity *affd)
{
int nr_entries;
int i, j;
@@ -988,7 +989,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
}
- return msix_capability_init(dev, entries, nvec, affinity);
+ return msix_capability_init(dev, entries, nvec, affd);
}
/**
@@ -1008,7 +1009,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
**/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
- return __pci_enable_msix(dev, entries, nvec, false);
+ return __pci_enable_msix(dev, entries, nvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix);
@@ -1059,9 +1060,8 @@ int pci_msi_enabled(void)
EXPORT_SYMBOL(pci_msi_enabled);
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
- unsigned int flags)
+ const struct irq_affinity *affd)
{
- bool affinity = flags & PCI_IRQ_AFFINITY;
int nvec;
int rc;
@@ -1090,14 +1090,13 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
nvec = maxvec;
for (;;) {
- if (affinity) {
- nvec = irq_calc_affinity_vectors(dev->irq_affinity,
- nvec);
+ if (affd) {
+ nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
- rc = msi_capability_init(dev, nvec, affinity);
+ rc = msi_capability_init(dev, nvec, affd);
if (rc == 0)
return nvec;
@@ -1124,29 +1123,27 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
**/
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
- return __pci_enable_msi_range(dev, minvec, maxvec, 0);
+ return __pci_enable_msi_range(dev, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msi_range);
static int __pci_enable_msix_range(struct pci_dev *dev,
- struct msix_entry *entries, int minvec, int maxvec,
- unsigned int flags)
+ struct msix_entry *entries, int minvec,
+ int maxvec, const struct irq_affinity *affd)
{
- bool affinity = flags & PCI_IRQ_AFFINITY;
int rc, nvec = maxvec;
if (maxvec < minvec)
return -ERANGE;
for (;;) {
- if (affinity) {
- nvec = irq_calc_affinity_vectors(dev->irq_affinity,
- nvec);
+ if (affd) {
+ nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
- rc = __pci_enable_msix(dev, entries, nvec, affinity);
+ rc = __pci_enable_msix(dev, entries, nvec, affd);
if (rc == 0)
return nvec;
@@ -1177,16 +1174,17 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec)
{
- return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
+ return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix_range);
/**
- * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
* @dev: PCI device to operate on
* @min_vecs: minimum number of vectors required (must be >= 1)
* @max_vecs: maximum (desired) number of vectors
* @flags: flags or quirks for the allocation
+ * @affd: optional description of the affinity requirements
*
* Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
* vectors if available, and fall back to a single legacy vector
@@ -1198,20 +1196,30 @@ EXPORT_SYMBOL(pci_enable_msix_range);
* To get the Linux IRQ number used for a vector that can be passed to
* request_irq() use the pci_irq_vector() helper.
*/
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
- unsigned int max_vecs, unsigned int flags)
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *affd)
{
+ static const struct irq_affinity msi_default_affd;
int vecs = -ENOSPC;
+ if (flags & PCI_IRQ_AFFINITY) {
+ if (!affd)
+ affd = &msi_default_affd;
+ } else {
+ if (WARN_ON(affd))
+ affd = NULL;
+ }
+
if (flags & PCI_IRQ_MSIX) {
vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
- flags);
+ affd);
if (vecs > 0)
return vecs;
}
if (flags & PCI_IRQ_MSI) {
- vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+ vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
if (vecs > 0)
return vecs;
}
@@ -1224,7 +1232,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
return vecs;
}
-EXPORT_SYMBOL(pci_alloc_irq_vectors);
+EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
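Existing callers of pci_alloc_irq_vectors() keep working: the old name survives as a wrapper that passes a NULL affinity descriptor. That hunk lives in include/linux/pci.h and therefore falls outside this drivers/-limited diff, but presumably reads along these lines:

/* Sketch of the compatibility wrapper (include/linux/pci.h, not shown
 * in this diff); a NULL affd selects the default spreading rules. */
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
		      unsigned int max_vecs, unsigned int flags)
{
	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
					      flags, NULL);
}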
/**
* pci_free_irq_vectors - free previously allocated IRQs for a device