author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-23 14:15:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-23 14:15:07 -0700
commit     0b0c9d3a5872e8a02a071c6f0775ee6bf00a1206 (patch)
tree       12f6cc1eedffd62e4dfa3b0d41b2c643462426db /drivers/iommu/amd_iommu_init.c
parent     fc2bb8d1cde1296d210a0f1ff9ee979a447d0a34 (diff)
parent     7de473066f1512e52ea806e3c9698e5ea325b26c (diff)
Merge tag 'iommu-updates-v3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
"The IOMMU updates for this round are not very large patch-wise. But
they contain two new IOMMU drivers for the ARM Tegra 2 and 3
platforms. Besides that there are also a few patches for the AMD
IOMMU which prepare the driver for adding intr-remapping support and a
couple of fixes."
* tag 'iommu-updates-v3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu/amd: Fix section mismatch
iommu/amd: Move interrupt setup code into seperate function
iommu/amd: Make sure IOMMU interrupts are re-enabled on resume
iommu/amd: Fix section warning for prealloc_protection_domains
iommu/amd: Don't initialize IOMMUv2 resources when not required
iommu/amd: Update git-tree in MAINTAINERS
iommu/tegra-gart: fix spin_unlock in map failure path
iommu/amd: Fix double free of mem-region in error-path
iommu/amd: Split amd_iommu_init function
ARM: IOMMU: Tegra30: Add iommu_ops for SMMU driver
ARM: IOMMU: Tegra20: Add iommu_ops for GART driver
Diffstat (limited to 'drivers/iommu/amd_iommu_init.c')
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 187
1 file changed, 123 insertions(+), 64 deletions(-)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index a35e98ad972..c56790375e0 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -196,6 +196,8 @@ static u32 rlookup_table_size;	/* size if the rlookup table */
  */
 extern void iommu_flush_all_caches(struct amd_iommu *iommu);
 
+static int amd_iommu_enable_interrupts(void);
+
 static inline void update_last_devid(u16 devid)
 {
         if (devid > amd_iommu_last_bdf)
@@ -358,8 +360,6 @@ static void iommu_disable(struct amd_iommu *iommu)
  */
 static u8 * __init iommu_map_mmio_space(u64 address)
 {
-        u8 *ret;
-
         if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
                 pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
                         address);
@@ -367,13 +367,7 @@ static u8 * __init iommu_map_mmio_space(u64 address)
                 return NULL;
         }
 
-        ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
-        if (ret != NULL)
-                return ret;
-
-        release_mem_region(address, MMIO_REGION_LENGTH);
-
-        return NULL;
+        return ioremap_nocache(address, MMIO_REGION_LENGTH);
 }
 
 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -1131,8 +1125,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 {
         int r;
 
-        if (pci_enable_msi(iommu->dev))
-                return 1;
+        r = pci_enable_msi(iommu->dev);
+        if (r)
+                return r;
 
         r = request_threaded_irq(iommu->dev->irq,
                                  amd_iommu_int_handler,
@@ -1142,27 +1137,36 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 
         if (r) {
                 pci_disable_msi(iommu->dev);
-                return 1;
+                return r;
         }
 
         iommu->int_enabled = true;
-        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
-
-        if (iommu->ppr_log != NULL)
-                iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
 
         return 0;
 }
 
 static int iommu_init_msi(struct amd_iommu *iommu)
 {
+        int ret;
+
         if (iommu->int_enabled)
-                return 0;
+                goto enable_faults;
 
         if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
-                return iommu_setup_msi(iommu);
+                ret = iommu_setup_msi(iommu);
+        else
+                ret = -ENODEV;
 
-        return 1;
+        if (ret)
+                return ret;
+
+enable_faults:
+        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
+        if (iommu->ppr_log != NULL)
+                iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
+        return 0;
 }
 
 /****************************************************************************
@@ -1381,7 +1385,6 @@ static void enable_iommus(void)
                 iommu_enable_ppr_log(iommu);
                 iommu_enable_gt(iommu);
                 iommu_set_exclusion_range(iommu);
-                iommu_init_msi(iommu);
                 iommu_enable(iommu);
                 iommu_flush_all_caches(iommu);
         }
@@ -1409,6 +1412,8 @@ static void amd_iommu_resume(void)
 
         /* re-load the hardware */
         enable_iommus();
+
+        amd_iommu_enable_interrupts();
 }
 
 static int amd_iommu_suspend(void)
@@ -1424,10 +1429,40 @@ static struct syscore_ops amd_iommu_syscore_ops = {
         .resume = amd_iommu_resume,
 };
 
+static void __init free_on_init_error(void)
+{
+        amd_iommu_uninit_devices();
+
+        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+                   get_order(MAX_DOMAIN_ID/8));
+
+        free_pages((unsigned long)amd_iommu_rlookup_table,
+                   get_order(rlookup_table_size));
+
+        free_pages((unsigned long)amd_iommu_alias_table,
+                   get_order(alias_table_size));
+
+        free_pages((unsigned long)amd_iommu_dev_table,
+                   get_order(dev_table_size));
+
+        free_iommu_all();
+
+        free_unity_maps();
+
+#ifdef CONFIG_GART_IOMMU
+        /*
+         * We failed to initialize the AMD IOMMU - try fallback to GART
+         * if possible.
+         */
+        gart_iommu_init();
+
+#endif
+}
+
 /*
- * This is the core init function for AMD IOMMU hardware in the system.
- * This function is called from the generic x86 DMA layer initialization
- * code.
+ * This is the hardware init function for AMD IOMMU in the system.
+ * This function is called either from amd_iommu_init or from the interrupt
+ * remapping setup code.
  *
  * This function basically parses the ACPI table for AMD IOMMU (IVRS)
  * three times:
@@ -1446,16 +1481,21 @@ static struct syscore_ops amd_iommu_syscore_ops = {
  *   remapping requirements parsed out of the ACPI table in
  *   this last pass.
  *
- * After that the hardware is initialized and ready to go. In the last
- * step we do some Linux specific things like registering the driver in
- * the dma_ops interface and initializing the suspend/resume support
- * functions. Finally it prints some information about AMD IOMMUs and
- * the driver state and enables the hardware.
+ * After everything is set up the IOMMUs are enabled and the necessary
+ * hotplug and suspend notifiers are registered.
  */
-static int __init amd_iommu_init(void)
+int __init amd_iommu_init_hardware(void)
 {
         int i, ret = 0;
 
+        if (!amd_iommu_detected)
+                return -ENODEV;
+
+        if (amd_iommu_dev_table != NULL) {
+                /* Hardware already initialized */
+                return 0;
+        }
+
         /*
          * First parse ACPI tables to find the largest Bus/Dev/Func
          * we need to handle. Upon this information the shared data
@@ -1472,9 +1512,8 @@ static int __init amd_iommu_init(void)
         alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
         rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
 
-        ret = -ENOMEM;
-
         /* Device table - directly used by all IOMMUs */
+        ret = -ENOMEM;
         amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                       get_order(dev_table_size));
         if (amd_iommu_dev_table == NULL)
@@ -1546,20 +1585,65 @@ static int __init amd_iommu_init(void)
 
         enable_iommus();
 
+        amd_iommu_init_notifier();
+
+        register_syscore_ops(&amd_iommu_syscore_ops);
+
+out:
+        return ret;
+
+free:
+        free_on_init_error();
+
+        return ret;
+}
+
+static int amd_iommu_enable_interrupts(void)
+{
+        struct amd_iommu *iommu;
+        int ret = 0;
+
+        for_each_iommu(iommu) {
+                ret = iommu_init_msi(iommu);
+                if (ret)
+                        goto out;
+        }
+
+out:
+        return ret;
+}
+
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ *
+ * The function calls amd_iommu_init_hardware() to setup and enable the
+ * IOMMU hardware if this has not happened yet. After that the driver
+ * registers for the DMA-API and for the IOMMU-API as necessary.
+ */
+static int __init amd_iommu_init(void)
+{
+        int ret = 0;
+
+        ret = amd_iommu_init_hardware();
+        if (ret)
+                goto out;
+
+        ret = amd_iommu_enable_interrupts();
+        if (ret)
+                goto free;
+
         if (iommu_pass_through)
                 ret = amd_iommu_init_passthrough();
         else
                 ret = amd_iommu_init_dma_ops();
 
         if (ret)
-                goto free_disable;
+                goto free;
 
         amd_iommu_init_api();
 
-        amd_iommu_init_notifier();
-
-        register_syscore_ops(&amd_iommu_syscore_ops);
-
         if (iommu_pass_through)
                 goto out;
 
@@ -1569,39 +1653,14 @@ static int __init amd_iommu_init(void)
                 printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
         x86_platform.iommu_shutdown = disable_iommus;
+
 out:
         return ret;
 
-free_disable:
-        disable_iommus();
-
 free:
-        amd_iommu_uninit_devices();
-
-        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-                   get_order(MAX_DOMAIN_ID/8));
-
-        free_pages((unsigned long)amd_iommu_rlookup_table,
-                   get_order(rlookup_table_size));
-
-        free_pages((unsigned long)amd_iommu_alias_table,
-                   get_order(alias_table_size));
-
-        free_pages((unsigned long)amd_iommu_dev_table,
-                   get_order(dev_table_size));
-
-        free_iommu_all();
-
-        free_unity_maps();
-
-#ifdef CONFIG_GART_IOMMU
-        /*
-         * We failed to initialize the AMD IOMMU - try fallback to GART
-         * if possible.
-         */
-        gart_iommu_init();
+        disable_iommus();
 
-#endif
+        free_on_init_error();
         goto out;
 }
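The net effect of the "iommu/amd: Split amd_iommu_init function" change above is that amd_iommu_init() no longer does everything itself: hardware setup moves into amd_iommu_init_hardware() (so the future interrupt-remapping code can call it as well), MSI setup moves into amd_iommu_enable_interrupts() (which amd_iommu_resume() re-runs), and the error path funnels through free_on_init_error(). The sketch below is a minimal, self-contained mock of that call flow; the three top-level function names and the goto-based error handling follow the patch, while every body is a stand-in stub rather than the real driver code.

/*
 * Standalone sketch of the split init flow.  All bodies are stubs; only the
 * ordering of the calls and the error paths mirror the patch above.
 */
#include <stdio.h>

static int hardware_initialized;   /* stands in for the amd_iommu_dev_table check */

static int amd_iommu_init_hardware(void)
{
        if (hardware_initialized)
                return 0;   /* already set up, e.g. by the irq-remapping path */

        /* real driver: parse the IVRS table, allocate the device/alias/rlookup
         * tables, enable_iommus(), register notifier and syscore (resume) ops */
        hardware_initialized = 1;
        return 0;
}

static int amd_iommu_enable_interrupts(void)
{
        /* real driver: iommu_init_msi() for every IOMMU; run again on resume */
        return 0;
}

static int amd_iommu_init(void)
{
        int ret;

        ret = amd_iommu_init_hardware();
        if (ret)
                goto out;

        ret = amd_iommu_enable_interrupts();
        if (ret)
                goto free;

        /* real driver: DMA-API or passthrough setup, amd_iommu_init_api() */
        goto out;

free:
        /* real driver: disable_iommus() followed by free_on_init_error() */
out:
        return ret;
}

int main(void)
{
        printf("amd_iommu_init() returned %d\n", amd_iommu_init());
        return 0;
}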