path: root/drivers
author     Linus Torvalds <torvalds@linux-foundation.org>    2013-09-03 16:24:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-09-03 16:24:35 -0700
commit     a9238741987386bb549d61572973c7e62b2a4145 (patch)
tree       4e49f9c472f86b88cd569a088f7c0ac87ce8b78a /drivers
parent     40031da445fb4d269af9c7c445b2adf674f171e7 (diff)
parent     e89c33168aad32436da842ddda307dcc31c0c4e2 (diff)
Merge tag 'pci-v3.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI changes from Bjorn Helgaas:

 PCI device hotplug:
   - Use PCIe native hotplug, not ACPI hotplug, when possible (Neil Horman)
   - Assign resources on per-host bridge basis (Yinghai Lu)

 MPS (Max Payload Size):
   - Allow larger MPS settings below hotplug-capable Root Port (Yijing Wang)
   - Add warnings about unsafe MPS settings (Yijing Wang)
   - Simplify interface and messages (Bjorn Helgaas)

 SR-IOV:
   - Return -ENOSYS on non-SR-IOV devices (Stefan Assmann)
   - Update NumVFs register when disabling SR-IOV (Yijing Wang)

 Virtualization:
   - Add bus and slot reset support (Alex Williamson)
   - Fix ACS (Access Control Services) issues (Alex Williamson)

 Miscellaneous:
   - Simplify PCIe Capability accessors (Bjorn Helgaas)
   - Add pcibios_pm_ops for arch-specific hibernate stuff (Sebastian Ott)
   - Disable decoding during BAR sizing only when necessary (Zoltan Kiss)
   - Delay enabling bridges until they're needed (Yinghai Lu)
   - Split Designware support into Synopsys and Exynos parts (Jingoo Han)
   - Convert class code to use dev_groups (Greg Kroah-Hartman)
   - Cleanup Designware and Exynos I/O access wrappers (Seungwon Jeon)
   - Fix bridge I/O window alignment (Bjorn Helgaas)
   - Add pci_wait_for_pending_transaction() (Casey Leedom)
   - Use devm_ioremap_resource() in Marvell driver (Tushar Behera)

* tag 'pci-v3.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (63 commits)
  PCI/ACPI: Fix _OSC ordering to allow PCIe hotplug use when available
  PCI: exynos: Add I/O access wrappers
  PCI: designware: Drop "addr" arg from dw_pcie_readl_rc()/dw_pcie_writel_rc()
  PCI: Remove pcie_cap_has_devctl()
  PCI: Support PCIe Capability Slot registers only for ports with slots
  PCI: Remove PCIe Capability version checks
  PCI: Allow PCIe Capability link-related register access for switches
  PCI: Add offsets of PCIe capability registers
  PCI: Tidy bitmasks and spacing of PCIe capability definitions
  PCI: Remove obsolete comment reference to pci_pcie_cap2()
  PCI: Clarify PCI_EXP_TYPE_PCI_BRIDGE comment
  PCI: Rename PCIe capability definitions to follow convention
  PCI: Warn if unsafe MPS settings detected
  PCI: Fix MPS peer-to-peer DMA comment syntax
  PCI: Disable decoding for BAR sizing only when it was actually enabled
  PCI: Add comment about needing pci_msi_off() even when CONFIG_PCI_MSI=n
  PCI: Add pcibios_pm_ops for optional arch-specific hibernate functionality
  PCI: Don't restrict MPS for slots below Root Ports
  PCI: Simplify MPS test for Downstream Port
  PCI: Remove unnecessary check for pcie_get_mps() failure
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_root.c | 67
-rw-r--r--  drivers/mfd/rts5227.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 18
-rw-r--r--  drivers/parisc/lba_pci.c | 1
-rw-r--r--  drivers/pci/access.c | 26
-rw-r--r--  drivers/pci/bus.c | 19
-rw-r--r--  drivers/pci/host/Kconfig | 1
-rw-r--r--  drivers/pci/host/Makefile | 3
-rw-r--r--  drivers/pci/host/pci-exynos.c | 552
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 7
-rw-r--r--  drivers/pci/host/pcie-designware.c | 1044
-rw-r--r--  drivers/pci/host/pcie-designware.h | 65
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 1
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 1
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 12
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 31
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c | 5
-rw-r--r--  drivers/pci/iov.c | 23
-rw-r--r--  drivers/pci/pci-driver.c | 43
-rw-r--r--  drivers/pci/pci-sysfs.c | 32
-rw-r--r--  drivers/pci/pci.c | 573
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/pcie/Kconfig | 2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h | 1
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 35
-rw-r--r--  drivers/pci/probe.c | 80
-rw-r--r--  drivers/pci/quirks.c | 147
-rw-r--r--  drivers/pci/setup-bus.c | 176
-rw-r--r--  drivers/pcmcia/cardbus.c | 1
30 files changed, 1848 insertions, 1124 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 5917839321b8..d3874f425653 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -378,6 +378,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
struct acpi_pci_root *root;
u32 flags, base_flags;
acpi_handle handle = device->handle;
+ bool no_aspm = false, clear_aspm = false;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -437,27 +438,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);
- /*
- * TBD: Need PCI interface for enumeration/configuration of roots.
- */
-
- /*
- * Scan the Root Bridge
- * --------------------
- * Must do this prior to any attempt to bind the root device, as the
- * PCI namespace does not get created until this call is made (and
- * thus the root bridge's pci_dev does not exist).
- */
- root->bus = pci_acpi_scan_root(root);
- if (!root->bus) {
- dev_err(&device->dev,
- "Bus %04x:%02x not present in PCI namespace\n",
- root->segment, (unsigned int)root->secondary.start);
- result = -ENODEV;
- goto end;
- }
-
- /* Indicate support for various _OSC capabilities. */
if (pci_ext_cfg_avail())
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled()) {
@@ -471,7 +451,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (ACPI_FAILURE(status)) {
dev_info(&device->dev, "ACPI _OSC support "
"notification failed, disabling PCIe ASPM\n");
- pcie_no_aspm();
+ no_aspm = true;
flags = base_flags;
}
}
@@ -503,7 +483,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
* We have ASPM control, but the FADT indicates
* that it's unsupported. Clear it.
*/
- pcie_clear_aspm(root->bus);
+ clear_aspm = true;
}
} else {
dev_info(&device->dev,
@@ -512,7 +492,14 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_format_exception(status), flags);
dev_info(&device->dev,
"ACPI _OSC control for PCIe not granted, disabling ASPM\n");
- pcie_no_aspm();
+ /*
+ * We want to disable ASPM here, but aspm_disabled
+ * needs to remain in its state from boot so that we
+ * properly handle PCIe 1.1 devices. So we set this
+ * flag here, to defer the action until after the ACPI
+ * root scan.
+ */
+ no_aspm = true;
}
} else {
dev_info(&device->dev,
@@ -520,16 +507,40 @@ static int acpi_pci_root_add(struct acpi_device *device,
"(_OSC support mask: 0x%02x)\n", flags);
}
+ /*
+ * TBD: Need PCI interface for enumeration/configuration of roots.
+ */
+
+ /*
+ * Scan the Root Bridge
+ * --------------------
+ * Must do this prior to any attempt to bind the root device, as the
+ * PCI namespace does not get created until this call is made (and
+ * thus the root bridge's pci_dev does not exist).
+ */
+ root->bus = pci_acpi_scan_root(root);
+ if (!root->bus) {
+ dev_err(&device->dev,
+ "Bus %04x:%02x not present in PCI namespace\n",
+ root->segment, (unsigned int)root->secondary.start);
+ result = -ENODEV;
+ goto end;
+ }
+
+ if (clear_aspm) {
+ dev_info(&device->dev, "Disabling ASPM (FADT indicates it is unsupported)\n");
+ pcie_clear_aspm(root->bus);
+ }
+ if (no_aspm)
+ pcie_no_aspm();
+
pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
if (system_state != SYSTEM_BOOTING) {
pcibios_resource_survey_bus(root->bus);
- pci_assign_unassigned_bus_resources(root->bus);
-
- /* need to after hot-added ioapic is registered */
- pci_enable_bridges(root->bus);
+ pci_assign_unassigned_root_bus_resources(root->bus);
}
pci_bus_add_devices(root->bus);
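[Editor's note] The pci_root.c hunks above reorder acpi_pci_root_add(): the _OSC outcome is only recorded in local flags, the root bridge is scanned, and the ASPM decision is applied once root->bus exists. A condensed paraphrase of that ordering follows (not verbatim kernel code; the function name is illustrative and the flags are assumed to come from the _OSC negotiation shown above):

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-aspm.h>

/* Sketch: apply a previously recorded ASPM decision only after the scan. */
static int example_apply_aspm_after_scan(struct acpi_pci_root *root,
                                         bool no_aspm, bool clear_aspm)
{
        /* Scan the root bridge first so root->bus and its pci_devs exist. */
        root->bus = pci_acpi_scan_root(root);
        if (!root->bus)
                return -ENODEV;

        /* Now the deferred decision can act on the scanned bus. */
        if (clear_aspm)
                pcie_clear_aspm(root->bus);
        if (no_aspm)
                pcie_no_aspm();

        return 0;
}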
diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c
index fc831dcb1480..164b7faa70c9 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/mfd/rts5227.c
@@ -44,7 +44,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
/* Configure LTR */
pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cap);
- if (cap & PCI_EXP_LTR_EN)
+ if (cap & PCI_EXP_DEVCTL2_LTR_EN)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3);
/* Configure OBFF */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03);
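[Editor's note] The one-line rts5227 change belongs to "PCI: Rename PCIe capability definitions to follow convention". A minimal sketch of the same check in driver code, assuming only the pcie_capability_*() accessor and the renamed PCI_EXP_DEVCTL2_LTR_EN bit:

#include <linux/pci.h>

/* Sketch: test whether LTR is enabled in the Device Control 2 register. */
static bool example_ltr_enabled(struct pci_dev *pdev)
{
        u16 devctl2 = 0;

        /* The accessor hides the PCIe capability offset lookup. */
        pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &devctl2);

        /* PCI_EXP_DEVCTL2_LTR_EN is the new name for PCI_EXP_LTR_EN. */
        return devctl2 & PCI_EXP_DEVCTL2_LTR_EN;
}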
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 1627a4e09c32..815f2dea6337 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9960,8 +9960,6 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
static int bnx2x_do_flr(struct bnx2x *bp)
{
- int i;
- u16 status;
struct pci_dev *dev = bp->pdev;
if (CHIP_IS_E1x(bp)) {
@@ -9976,20 +9974,8 @@ static int bnx2x_do_flr(struct bnx2x *bp)
return -EINVAL;
}
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dev_err(&dev->dev,
- "transaction is not cleared; proceeding with reset anyway\n");
-
-clear:
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
BNX2X_DEV_INFO("Initiating FLR\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
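[Editor's note] The bnx2x hunk above shows how the new pci_wait_for_pending_transaction() helper is meant to be used. A hedged sketch of that pattern; the return convention is inferred from the hunk (non-zero once no transactions are pending, zero on timeout):

#include <linux/pci.h>

/* Sketch: wait for outstanding transactions before a function-level reset. */
static void example_flr_prepare(struct pci_dev *pdev)
{
        /*
         * Replaces the driver's open-coded PCI_EXP_DEVSTA_TRPND polling
         * loop; the helper waits with a bounded backoff before giving up.
         */
        if (!pci_wait_for_pending_transaction(pdev))
                dev_err(&pdev->dev,
                        "transactions still pending; resetting anyway\n");
}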
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 19f6f70c67d3..37e71ff6408d 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1590,7 +1590,6 @@ lba_driver_probe(struct parisc_device *dev)
lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
}
- pci_enable_bridges(lba_bus);
/*
** Once PCI register ops has walked the bus, access to config
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 1cc23661f79b..0857ca981fae 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -475,37 +475,33 @@ static inline int pcie_cap_version(const struct pci_dev *dev)
return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}
-static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
-{
- return true;
-}
-
static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
- return pcie_cap_version(dev) > 1 ||
+ return type == PCI_EXP_TYPE_ENDPOINT ||
+ type == PCI_EXP_TYPE_LEG_END ||
type == PCI_EXP_TYPE_ROOT_PORT ||
- type == PCI_EXP_TYPE_ENDPOINT ||
- type == PCI_EXP_TYPE_LEG_END;
+ type == PCI_EXP_TYPE_UPSTREAM ||
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
+ type == PCI_EXP_TYPE_PCI_BRIDGE ||
+ type == PCI_EXP_TYPE_PCIE_BRIDGE;
}
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
- return pcie_cap_version(dev) > 1 ||
- type == PCI_EXP_TYPE_ROOT_PORT ||
- (type == PCI_EXP_TYPE_DOWNSTREAM &&
- pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT);
+ return (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_DOWNSTREAM) &&
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
- return pcie_cap_version(dev) > 1 ||
- type == PCI_EXP_TYPE_ROOT_PORT ||
+ return type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_RC_EC;
}
@@ -520,7 +516,7 @@ static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_DEVSTA:
- return pcie_cap_has_devctl(dev);
+ return true;
case PCI_EXP_LNKCAP:
case PCI_EXP_LNKCTL:
case PCI_EXP_LNKSTA:
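[Editor's note] The access.c rework above ties register applicability to the port type rather than the capability version. For callers the accessor contract is unchanged: reads of registers a port does not implement are expected to behave as if the register read back zero, so drivers need no per-type checks. A minimal sketch under that assumption:

#include <linux/pci.h>

/* Sketch: read Link Status without caring whether the port has link regs. */
static u16 example_read_link_status(struct pci_dev *pdev)
{
        u16 lnksta = 0;

        /* For non-applicable registers the accessor leaves lnksta at 0. */
        pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);

        return lnksta;
}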
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index b1ff02ab4f13..fc1b74013743 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -216,24 +216,6 @@ void pci_bus_add_devices(const struct pci_bus *bus)
}
}
-void pci_enable_bridges(struct pci_bus *bus)
-{
- struct pci_dev *dev;
- int retval;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->subordinate) {
- if (!pci_is_enabled(dev)) {
- retval = pci_enable_device(dev);
- if (retval)
- dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", retval);
- pci_set_master(dev);
- }
- pci_enable_bridges(dev->subordinate);
- }
- }
-}
-
/** pci_walk_bus - walk devices on/under bus, calling callback.
* @top bus whose devices should be walked
* @cb callback to be called for each device found
@@ -301,4 +283,3 @@ EXPORT_SYMBOL(pci_bus_put);
EXPORT_SYMBOL(pci_bus_alloc_resource);
EXPORT_SYMBOL_GPL(pci_bus_add_device);
EXPORT_SYMBOL(pci_bus_add_devices);
-EXPORT_SYMBOL(pci_enable_bridges);
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 1184ff6fe864..e5ba4eb4e5b3 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -4,6 +4,7 @@ menu "PCI host controller drivers"
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
depends on ARCH_MVEBU || ARCH_KIRKWOOD
+ depends on OF
config PCIE_DW
bool
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 086d8500e849..ab79ccb5bbff 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
new file mode 100644
index 000000000000..94e096bb2d0a
--- /dev/null
+++ b/drivers/pci/host/pci-exynos.c
@@ -0,0 +1,552 @@
+/*
+ * PCIe host controller driver for Samsung EXYNOS SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
+
+struct exynos_pcie {
+ void __iomem *elbi_base;
+ void __iomem *phy_base;
+ void __iomem *block_base;
+ int reset_gpio;
+ struct clk *clk;
+ struct clk *bus_clk;
+ struct pcie_port pp;
+};
+
+/* PCIe ELBI registers */
+#define PCIE_IRQ_PULSE 0x000
+#define IRQ_INTA_ASSERT (0x1 << 0)
+#define IRQ_INTB_ASSERT (0x1 << 2)
+#define IRQ_INTC_ASSERT (0x1 << 4)
+#define IRQ_INTD_ASSERT (0x1 << 6)
+#define PCIE_IRQ_LEVEL 0x004
+#define PCIE_IRQ_SPECIAL 0x008
+#define PCIE_IRQ_EN_PULSE 0x00c
+#define PCIE_IRQ_EN_LEVEL 0x010
+#define PCIE_IRQ_EN_SPECIAL 0x014
+#define PCIE_PWR_RESET 0x018
+#define PCIE_CORE_RESET 0x01c
+#define PCIE_CORE_RESET_ENABLE (0x1 << 0)
+#define PCIE_STICKY_RESET 0x020
+#define PCIE_NONSTICKY_RESET 0x024
+#define PCIE_APP_INIT_RESET 0x028
+#define PCIE_APP_LTSSM_ENABLE 0x02c
+#define PCIE_ELBI_RDLH_LINKUP 0x064
+#define PCIE_ELBI_LTSSM_ENABLE 0x1
+#define PCIE_ELBI_SLV_AWMISC 0x11c
+#define PCIE_ELBI_SLV_ARMISC 0x120
+#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
+
+/* PCIe Purple registers */
+#define PCIE_PHY_GLOBAL_RESET 0x000
+#define PCIE_PHY_COMMON_RESET 0x004
+#define PCIE_PHY_CMN_REG 0x008
+#define PCIE_PHY_MAC_RESET 0x00c
+#define PCIE_PHY_PLL_LOCKED 0x010
+#define PCIE_PHY_TRSVREG_RESET 0x020
+#define PCIE_PHY_TRSV_RESET 0x024
+
+/* PCIe PHY registers */
+#define PCIE_PHY_IMPEDANCE 0x004
+#define PCIE_PHY_PLL_DIV_0 0x008
+#define PCIE_PHY_PLL_BIAS 0x00c
+#define PCIE_PHY_DCC_FEEDBACK 0x014
+#define PCIE_PHY_PLL_DIV_1 0x05c
+#define PCIE_PHY_TRSV0_EMP_LVL 0x084
+#define PCIE_PHY_TRSV0_DRV_LVL 0x088
+#define PCIE_PHY_TRSV0_RXCDR 0x0ac
+#define PCIE_PHY_TRSV0_LVCC 0x0dc
+#define PCIE_PHY_TRSV1_EMP_LVL 0x144
+#define PCIE_PHY_TRSV1_RXCDR 0x16c
+#define PCIE_PHY_TRSV1_LVCC 0x19c
+#define PCIE_PHY_TRSV2_EMP_LVL 0x204
+#define PCIE_PHY_TRSV2_RXCDR 0x22c
+#define PCIE_PHY_TRSV2_LVCC 0x25c
+#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
+#define PCIE_PHY_TRSV3_RXCDR 0x2ec
+#define PCIE_PHY_TRSV3_LVCC 0x31c
+
+static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->elbi_base + reg);
+}
+
+static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->elbi_base + reg);
+}
+
+static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->phy_base + reg);
+}
+
+static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->phy_base + reg);
+}
+
+static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->block_base + reg);
+}
+
+static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->block_base + reg);
+}
+
+static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (on) {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
+ } else {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
+ }
+}
+
+static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (on) {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
+ } else {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
+ }
+}
+
+static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
+ val &= ~PCIE_CORE_RESET_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_PWR_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_STICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
+}
+
+static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
+ val |= PCIE_CORE_RESET_ENABLE;
+
+ exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_STICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_NONSTICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_APP_INIT_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_APP_INIT_RESET);
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
+}
+
+static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
+}
+
+static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_CMN_REG);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSVREG_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
+}
+
+static void exynos_pcie_init_phy(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ /* DCC feedback control off */
+ exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);
+
+ /* set TX/RX impedance */
+ exynos_phy_writel(exynos_pcie, 0xd5, PCIE_PHY_IMPEDANCE);
+
+ /* set 50Mhz PHY clock */
+ exynos_phy_writel(exynos_pcie, 0x14, PCIE_PHY_PLL_DIV_0);
+ exynos_phy_writel(exynos_pcie, 0x12, PCIE_PHY_PLL_DIV_1);
+
+ /* set TX Differential output for lane 0 */
+ exynos_phy_writel(exynos_pcie, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
+
+ /* set TX Pre-emphasis Level Control for lane 0 to minimum */
+ exynos_phy_writel(exynos_pcie, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
+
+ /* set RX clock and data recovery bandwidth */
+ exynos_phy_writel(exynos_pcie, 0xe7, PCIE_PHY_PLL_BIAS);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV0_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV1_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV2_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV3_RXCDR);
+
+ /* change TX Pre-emphasis Level Control for lanes */
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
+
+ /* set LVCC */
+ exynos_phy_writel(exynos_pcie, 0x20, PCIE_PHY_TRSV0_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV1_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV2_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
+}
+
+static void exynos_pcie_assert_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (exynos_pcie->reset_gpio >= 0)
+ devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "RESET");
+ return;
+}
+
+static int exynos_pcie_establish_link(struct pcie_port *pp)
+{
+ u32 val;
+ int count = 0;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "Link already up\n");
+ return 0;
+ }
+
+ /* assert reset signals */
+ exynos_pcie_assert_core_reset(pp);
+ exynos_pcie_assert_phy_reset(pp);
+
+ /* de-assert phy reset */
+ exynos_pcie_deassert_phy_reset(pp);
+
+ /* initialize phy */
+ exynos_pcie_init_phy(pp);
+
+ /* pulse for common reset */
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
+ udelay(500);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
+
+ /* de-assert core reset */
+ exynos_pcie_deassert_core_reset(pp);
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert reset signal */
+ exynos_pcie_assert_reset(pp);
+
+ /* assert LTSSM enable */
+ exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
+ PCIE_APP_LTSSM_ENABLE);
+
+ /* check if the link is up or not */
+ while (!dw_pcie_link_up(pp)) {
+ mdelay(100);
+ count++;
+ if (count == 10) {
+ while (exynos_phy_readl(exynos_pcie,
+ PCIE_PHY_PLL_LOCKED) == 0) {
+ val = exynos_blk_readl(exynos_pcie,
+ PCIE_PHY_PLL_LOCKED);
+ dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
+ }
+ dev_err(pp->dev, "PCIe Link Fail\n");
+ return -EINVAL;
+ }
+ }
+
+ dev_info(pp->dev, "Link up\n");
+
+ return 0;
+}
+
+static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
+ return;
+}
+
+static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ /* enable INTX interrupt */
+ val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT,
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_PULSE);
+ return;
+}
+
+static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ exynos_pcie_clear_irq_pulse(pp);
+ return IRQ_HANDLED;
+}
+
+static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ exynos_pcie_enable_irq_pulse(pp);
+ return;
+}
+
+static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
+ void __iomem *dbi_base, u32 *val)
+{
+ exynos_pcie_sideband_dbi_r_mode(pp, true);
+ *val = readl(dbi_base);
+ exynos_pcie_sideband_dbi_r_mode(pp, false);
+ return;
+}
+
+static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
+ u32 val, void __iomem *dbi_base)
+{
+ exynos_pcie_sideband_dbi_w_mode(pp, true);
+ writel(val, dbi_base);
+ exynos_pcie_sideband_dbi_w_mode(pp, false);
+ return;
+}
+
+static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ int ret;
+
+ exynos_pcie_sideband_dbi_r_mode(pp, true);
+ ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+ exynos_pcie_sideband_dbi_r_mode(pp, false);
+ return ret;
+}
+
+static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+ u32 val)
+{
+ int ret;
+
+ exynos_pcie_sideband_dbi_w_mode(pp, true);
+ ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val);
+ exynos_pcie_sideband_dbi_w_mode(pp, false);
+ return ret;
+}
+
+static int exynos_pcie_link_up(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
+
+ if (val == PCIE_ELBI_LTSSM_ENABLE)
+ return 1;
+
+ return 0;
+}
+
+static void exynos_pcie_host_init(struct pcie_port *pp)
+{
+ exynos_pcie_establish_link(pp);
+ exynos_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops exynos_pcie_host_ops = {
+ .readl_rc = exynos_pcie_readl_rc,
+ .writel_rc = exynos_pcie_writel_rc,
+ .rd_own_conf = exynos_pcie_rd_own_conf,
+ .wr_own_conf = exynos_pcie_wr_own_conf,
+ .link_up = exynos_pcie_link_up,
+ .host_init = exynos_pcie_host_init,
+};
+
+static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
+{
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (!pp->irq) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+ ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
+ IRQF_SHARED, "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &exynos_pcie_host_ops;
+
+ spin_lock_init(&pp->conf_lock);
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init exynos_pcie_probe(struct platform_device *pdev)
+{
+ struct exynos_pcie *exynos_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *elbi_base;
+ struct resource *phy_base;
+ struct resource *block_base;
+ int ret;
+
+ exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
+ GFP_KERNEL);
+ if (!exynos_pcie) {
+ dev_err(&pdev->dev, "no memory for exynos pcie\n");
+ return -ENOMEM;
+ }
+
+ pp = &exynos_pcie->pp;
+
+ pp->dev = &pdev->dev;
+
+ exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+
+ exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(exynos_pcie->clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(exynos_pcie->clk);
+ }
+ ret = clk_prepare_enable(exynos_pcie->clk);
+ if (ret)
+ return ret;
+
+ exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(exynos_pcie->bus_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
+ ret = PTR_ERR(exynos_pcie->bus_clk);
+ goto fail_clk;
+ }
+ ret = clk_prepare_enable(exynos_pcie->bus_clk);
+ if (ret)
+ goto fail_clk;
+
+ elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+ if (IS_ERR(exynos_pcie->elbi_base))
+ return PTR_ERR(exynos_pcie->elbi_base);
+
+ phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ if (IS_ERR(exynos_pcie->phy_base))
+ return PTR_ERR(exynos_pcie->phy_base);
+
+ block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
+ if (IS_ERR(exynos_pcie->block_base))
+ return PTR_ERR(exynos_pcie->block_base);
+
+ ret = add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto fail_bus_clk;
+
+ platform_set_drvdata(pdev, exynos_pcie);
+ return 0;
+
+fail_bus_clk:
+ clk_disable_unprepare(exynos_pcie->bus_clk);
+fail_clk:
+ clk_disable_unprepare(exynos_pcie->clk);
+ return ret;
+}
+
+static int __exit exynos_pcie_remove(struct platform_device *pdev)
+{
+ struct exynos_pcie *exynos_pcie = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(exynos_pcie->bus_clk);
+ clk_disable_unprepare(exynos_pcie->clk);
+
+ return 0;
+}
+
+static const struct of_device_id exynos_pcie_of_match[] = {
+ { .compatible = "samsung,exynos5440-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
+
+static struct platform_driver exynos_pcie_driver = {
+ .remove = __exit_p(exynos_pcie_remove),
+ .driver = {
+ .name = "exynos-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(exynos_pcie_of_match),
+ },
+};
+
+/* Exynos PCIe driver does not allow module unload */
+
+static int __init pcie_init(void)
+{
+ return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
+}
+subsys_initcall(pcie_init);
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 7bf3926aecc0..ce1543a584a3 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -725,9 +725,9 @@ mvebu_pcie_map_registers(struct platform_device *pdev,
ret = of_address_to_resource(np, 0, &regs);
if (ret)
- return NULL;
+ return ERR_PTR(ret);
- return devm_request_and_ioremap(&pdev->dev, &regs);
+ return devm_ioremap_resource(&pdev->dev, &regs);
}
static int __init mvebu_pcie_probe(struct platform_device *pdev)
@@ -817,9 +817,10 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
continue;
port->base = mvebu_pcie_map_registers(pdev, child, port);
- if (!port->base) {
+ if (IS_ERR(port->base)) {
dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
port->port, port->lane);
+ port->base = NULL;
continue;
}
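[Editor's note] The pci-mvebu.c hunks above are the "Use devm_ioremap_resource() in Marvell driver" item: the helper validates the resource, logs its own error message, and returns an ERR_PTR() value instead of NULL. A generic sketch of that idiom (function name is illustrative, not from the driver):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch: map a platform MEM resource with managed (devm) lifetime. */
static int example_map_regs(struct platform_device *pdev,
                            void __iomem **base)
{
        struct resource *res;
        void __iomem *addr;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(addr))
                return PTR_ERR(addr);   /* the helper already logged why */

        *base = addr;
        return 0;
}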
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 26bdbda8ff90..c10e9ac9bbbc 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -1,5 +1,5 @@
/*
- * PCIe host controller driver for Samsung EXYNOS SoCs
+ * Synopsys Designware PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -11,74 +11,28 @@
* published by the Free Software Foundation.
*/
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
-#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
-#include <linux/platform_device.h>
-#include <linux/resource.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
#include <linux/types.h>
-struct pcie_port_info {
- u32 cfg0_size;
- u32 cfg1_size;
- u32 io_size;
- u32 mem_size;
- phys_addr_t io_bus_addr;
- phys_addr_t mem_bus_addr;
-};
-
-struct pcie_port {
- struct device *dev;
- u8 controller;
- u8 root_bus_nr;
- void __iomem *dbi_base;
- void __iomem *elbi_base;
- void __iomem *phy_base;
- void __iomem *purple_base;
- u64 cfg0_base;
- void __iomem *va_cfg0_base;
- u64 cfg1_base;
- void __iomem *va_cfg1_base;
- u64 io_base;
- u64 mem_base;
- spinlock_t conf_lock;
- struct resource cfg;
- struct resource io;
- struct resource mem;
- struct pcie_port_info config;
- struct clk *clk;
- struct clk *bus_clk;
- int irq;
- int reset_gpio;
-};
-
-/*
- * Exynos PCIe IP consists of Synopsys specific part and Exynos
- * specific part. Only core block is a Synopsys designware part;
- * other parts are Exynos specific.
- */
+#include "pcie-designware.h"
/* Synopsis specific PCIE configuration registers */
#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_MODE_MASK (0x3f << 16)
+#define PORT_LINK_MODE_1_LANES (0x1 << 16)
+#define PORT_LINK_MODE_2_LANES (0x3 << 16)
#define PORT_LINK_MODE_4_LANES (0x7 << 16)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
-#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x7 << 8)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
#define PCIE_MSI_ADDR_LO 0x820
#define PCIE_MSI_ADDR_HI 0x824
@@ -108,69 +62,16 @@ struct pcie_port {
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C
-/* Exynos specific PCIE configuration registers */
-
-/* PCIe ELBI registers */
-#define PCIE_IRQ_PULSE 0x000
-#define IRQ_INTA_ASSERT (0x1 << 0)
-#define IRQ_INTB_ASSERT (0x1 << 2)
-#define IRQ_INTC_ASSERT (0x1 << 4)
-#define IRQ_INTD_ASSERT (0x1 << 6)
-#define PCIE_IRQ_LEVEL 0x004
-#define PCIE_IRQ_SPECIAL 0x008
-#define PCIE_IRQ_EN_PULSE 0x00c
-#define PCIE_IRQ_EN_LEVEL 0x010
-#define PCIE_IRQ_EN_SPECIAL 0x014
-#define PCIE_PWR_RESET 0x018
-#define PCIE_CORE_RESET 0x01c
-#define PCIE_CORE_RESET_ENABLE (0x1 << 0)
-#define PCIE_STICKY_RESET 0x020
-#define PCIE_NONSTICKY_RESET 0x024
-#define PCIE_APP_INIT_RESET 0x028
-#define PCIE_APP_LTSSM_ENABLE 0x02c
-#define PCIE_ELBI_RDLH_LINKUP 0x064
-#define PCIE_ELBI_LTSSM_ENABLE 0x1
-#define PCIE_ELBI_SLV_AWMISC 0x11c
-#define PCIE_ELBI_SLV_ARMISC 0x120
-#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
-
-/* PCIe Purple registers */
-#define PCIE_PHY_GLOBAL_RESET 0x000
-#define PCIE_PHY_COMMON_RESET 0x004
-#define PCIE_PHY_CMN_REG 0x008
-#define PCIE_PHY_MAC_RESET 0x00c
-#define PCIE_PHY_PLL_LOCKED 0x010
-#define PCIE_PHY_TRSVREG_RESET 0x020
-#define PCIE_PHY_TRSV_RESET 0x024
-
-/* PCIe PHY registers */
-#define PCIE_PHY_IMPEDANCE 0x004
-#define PCIE_PHY_PLL_DIV_0 0x008
-#define PCIE_PHY_PLL_BIAS 0x00c
-#define PCIE_PHY_DCC_FEEDBACK 0x014
-#define PCIE_PHY_PLL_DIV_1 0x05c
-#define PCIE_PHY_TRSV0_EMP_LVL 0x084
-#define PCIE_PHY_TRSV0_DRV_LVL 0x088
-#define PCIE_PHY_TRSV0_RXCDR 0x0ac
-#define PCIE_PHY_TRSV0_LVCC 0x0dc
-#define PCIE_PHY_TRSV1_EMP_LVL 0x144
-#define PCIE_PHY_TRSV1_RXCDR 0x16c
-#define PCIE_PHY_TRSV1_LVCC 0x19c
-#define PCIE_PHY_TRSV2_EMP_LVL 0x204
-#define PCIE_PHY_TRSV2_RXCDR 0x22c
-#define PCIE_PHY_TRSV2_LVCC 0x25c
-#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
-#define PCIE_PHY_TRSV3_RXCDR 0x2ec
-#define PCIE_PHY_TRSV3_LVCC 0x31c
-
-static struct hw_pci exynos_pci;
+static struct hw_pci dw_pci;
+
+unsigned long global_io_offset;
static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
return sys->private_data;
}
-static inline int cfg_read(void *addr, int where, int size, u32 *val)
+int cfg_read(void __iomem *addr, int where, int size, u32 *val)
{
*val = readl(addr);
@@ -184,7 +85,7 @@ static inline int cfg_read(void *addr, int where, int size, u32 *val)
return PCIBIOS_SUCCESSFUL;
}
-static inline int cfg_write(void *addr, int where, int size, u32 val)
+int cfg_write(void __iomem *addr, int where, int size, u32 val)
{
if (size == 4)
writel(val, addr);
@@ -198,155 +99,217 @@ static inline int cfg_write(void *addr, int where, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
{
- u32 val;
-
- if (on) {
- val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
- val |= PCIE_ELBI_SLV_DBI_ENABLE;
- writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
- } else {
- val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
- val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
- }
-}
-
-static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
-{
- u32 val;
-
- if (on) {
- val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
- val |= PCIE_ELBI_SLV_DBI_ENABLE;
- writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
- } else {
- val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
- val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
- }
-}
-
-static inline void readl_rc(struct pcie_port *pp, void *dbi_base, u32 *val)
-{
- exynos_pcie_sideband_dbi_r_mode(pp, true);
- *val = readl(dbi_base);
- exynos_pcie_sideband_dbi_r_mode(pp, false);
- return;
+ if (pp->ops->readl_rc)
+ pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
+ else
+ *val = readl(pp->dbi_base + reg);
}
-static inline void writel_rc(struct pcie_port *pp, u32 val, void *dbi_base)
+static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
{
- exynos_pcie_sideband_dbi_w_mode(pp, true);
- writel(val, dbi_base);
- exynos_pcie_sideband_dbi_w_mode(pp, false);
- return;
+ if (pp->ops->writel_rc)
+ pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
+ else
+ writel(val, pp->dbi_base + reg);
}
-static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
int ret;
- exynos_pcie_sideband_dbi_r_mode(pp, true);
- ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
- exynos_pcie_sideband_dbi_r_mode(pp, false);
+ if (pp->ops->rd_own_conf)
+ ret = pp->ops->rd_own_conf(pp, where, size, val);
+ else
+ ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+
return ret;
}
-static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
u32 val)
{
int ret;
- exynos_pcie_sideband_dbi_w_mode(pp, true);
- ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val);
- exynos_pcie_sideband_dbi_w_mode(pp, false);
+ if (pp->ops->wr_own_conf)
+ ret = pp->ops->wr_own_conf(pp, where, size, val);
+ else
+ ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size,
+ val);
+
return ret;
}
-static void exynos_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
+int dw_pcie_link_up(struct pcie_port *pp)
+{
+ if (pp->ops->link_up)
+ return pp->ops->link_up(pp);
+ else
+ return 0;
+}
+
+int __init dw_pcie_host_init(struct pcie_port *pp)
{
+ struct device_node *np = pp->dev->of_node;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
u32 val;
- void __iomem *dbi_base = pp->dbi_base;
- /* Program viewport 0 : OUTBOUND : CFG0 */
- val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
- writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
- writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE);
- writel_rc(pp, (pp->cfg0_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
- writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
- dbi_base + PCIE_ATU_LIMIT);
- writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
- writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
- writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1);
- val = PCIE_ATU_ENABLE;
- writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(pp->dev, "missing ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the I/O and memory ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
+ if (restype == IORESOURCE_IO) {
+ of_pci_range_to_resource(&range, np, &pp->io);
+ pp->io.name = "I/O";
+ pp->io.start = max_t(resource_size_t,
+ PCIBIOS_MIN_IO,
+ range.pci_addr + global_io_offset);
+ pp->io.end = min_t(resource_size_t,
+ IO_SPACE_LIMIT,
+ range.pci_addr + range.size
+ + global_io_offset);
+ pp->config.io_size = resource_size(&pp->io);
+ pp->config.io_bus_addr = range.pci_addr;
+ }
+ if (restype == IORESOURCE_MEM) {
+ of_pci_range_to_resource(&range, np, &pp->mem);
+ pp->mem.name = "MEM";
+ pp->config.mem_size = resource_size(&pp->mem);
+ pp->config.mem_bus_addr = range.pci_addr;
+ }
+ if (restype == 0) {
+ of_pci_range_to_resource(&range, np, &pp->cfg);
+ pp->config.cfg0_size = resource_size(&pp->cfg)/2;
+ pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+ }
+ }
+
+ if (!pp->dbi_base) {
+ pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
+ resource_size(&pp->cfg));
+ if (!pp->dbi_base) {
+ dev_err(pp->dev, "error with ioremap\n");
+ return -ENOMEM;
+ }
+ }
+
+ pp->cfg0_base = pp->cfg.start;
+ pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+ pp->io_base = pp->io.start;
+ pp->mem_base = pp->mem.start;
+
+ pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+ pp->config.cfg0_size);
+ if (!pp->va_cfg0_base) {
+ dev_err(pp->dev, "error with ioremap in function\n");
+ return -ENOMEM;
+ }
+ pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+ pp->config.cfg1_size);
+ if (!pp->va_cfg1_base) {
+ dev_err(pp->dev, "error with ioremap\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
+ dev_err(pp->dev, "Failed to parse the number of lanes\n");
+ return -EINVAL;
+ }
+
+ if (pp->ops->host_init)
+ pp->ops->host_init(pp);
+
+ dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+ /* program correct class for RC */
+ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+ dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+
+ dw_pci.nr_controllers = 1;
+ dw_pci.private_data = (void **)&pp;
+
+ pci_common_init(&dw_pci);
+ pci_assign_unassigned_resources();
+#ifdef CONFIG_PCI_DOMAINS
+ dw_pci.domain++;
+#endif
+
+ return 0;
}
-static void exynos_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
+static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
{
- u32 val;
- void __iomem *dbi_base = pp->dbi_base;
+ /* Program viewport 0 : OUTBOUND : CFG0 */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, pp->cfg0_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
+{
/* Program viewport 1 : OUTBOUND : CFG1 */
- val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
- writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
- writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1);
- val = PCIE_ATU_ENABLE;
- writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
- writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE);
- writel_rc(pp, (pp->cfg1_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
- writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
- dbi_base + PCIE_ATU_LIMIT);
- writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
- writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
}
-static void exynos_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
{
- u32 val;
- void __iomem *dbi_base = pp->dbi_base;
-
/* Program viewport 0 : OUTBOUND : MEM */
- val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
- writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
- writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1);
- val = PCIE_ATU_ENABLE;
- writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
- writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE);
- writel_rc(pp, (pp->mem_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
- writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
- dbi_base + PCIE_ATU_LIMIT);
- writel_rc(pp, pp->config.mem_bus_addr,
- dbi_base + PCIE_ATU_LOWER_TARGET);
- writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
- dbi_base + PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
}
-static void exynos_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
{
- u32 val;
- void __iomem *dbi_base = pp->dbi_base;
-
/* Program viewport 1 : OUTBOUND : IO */
- val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
- writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
- writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1);
- val = PCIE_ATU_ENABLE;
- writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
- writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE);
- writel_rc(pp, (pp->io_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
- writel_rc(pp, pp->io_base + pp->config.io_size - 1,
- dbi_base + PCIE_ATU_LIMIT);
- writel_rc(pp, pp->config.io_bus_addr,
- dbi_base + PCIE_ATU_LOWER_TARGET);
- writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
- dbi_base + PCIE_ATU_UPPER_TARGET);
-}
-
-static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
+}
+
+static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
int ret = PCIBIOS_SUCCESSFUL;
@@ -357,19 +320,19 @@ static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
- exynos_pcie_prog_viewport_cfg0(pp, busdev);
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
ret = cfg_read(pp->va_cfg0_base + address, where, size, val);
- exynos_pcie_prog_viewport_mem_outbound(pp);
+ dw_pcie_prog_viewport_mem_outbound(pp);
} else {
- exynos_pcie_prog_viewport_cfg1(pp, busdev);
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
ret = cfg_read(pp->va_cfg1_base + address, where, size, val);
- exynos_pcie_prog_viewport_io_outbound(pp);
+ dw_pcie_prog_viewport_io_outbound(pp);
}
return ret;
}
-static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
int ret = PCIBIOS_SUCCESSFUL;
@@ -380,59 +343,25 @@ static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
- exynos_pcie_prog_viewport_cfg0(pp, busdev);
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
ret = cfg_write(pp->va_cfg0_base + address, where, size, val);
- exynos_pcie_prog_viewport_mem_outbound(pp);
+ dw_pcie_prog_viewport_mem_outbound(pp);
} else {
- exynos_pcie_prog_viewport_cfg1(pp, busdev);
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
ret = cfg_write(pp->va_cfg1_base + address, where, size, val);
- exynos_pcie_prog_viewport_io_outbound(pp);
+ dw_pcie_prog_viewport_io_outbound(pp);
}
return ret;
}
-static unsigned long global_io_offset;
-
-static int exynos_pcie_setup(int nr, struct pci_sys_data *sys)
-{
- struct pcie_port *pp;
-
- pp = sys_to_pcie(sys);
-
- if (!pp)
- return 0;
-
- if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
- sys->io_offset = global_io_offset - pp->config.io_bus_addr;
- pci_ioremap_io(sys->io_offset, pp->io.start);
- global_io_offset += SZ_64K;
- pci_add_resource_offset(&sys->resources, &pp->io,
- sys->io_offset);
- }
-
- sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
- pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
-
- return 1;
-}
-
-static int exynos_pcie_link_up(struct pcie_port *pp)
-{
- u32 val = readl(pp->elbi_base + PCIE_ELBI_RDLH_LINKUP);
-
- if (val == PCIE_ELBI_LTSSM_ENABLE)
- return 1;
- return 0;
-}
-
-static int exynos_pcie_valid_config(struct pcie_port *pp,
+static int dw_pcie_valid_config(struct pcie_port *pp,
struct pci_bus *bus, int dev)
{
/* If there is no link, then there is no device */
if (bus->number != pp->root_bus_nr) {
- if (!exynos_pcie_link_up(pp))
+ if (!dw_pcie_link_up(pp))
return 0;
}
@@ -450,7 +379,7 @@ static int exynos_pcie_valid_config(struct pcie_port *pp,
return 1;
}
-static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct pcie_port *pp = sys_to_pcie(bus->sysdata);
@@ -462,23 +391,23 @@ static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
return -EINVAL;
}
- if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
+ if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
spin_lock_irqsave(&pp->conf_lock, flags);
if (bus->number != pp->root_bus_nr)
- ret = exynos_pcie_rd_other_conf(pp, bus, devfn,
+ ret = dw_pcie_rd_other_conf(pp, bus, devfn,
where, size, val);
else
- ret = exynos_pcie_rd_own_conf(pp, where, size, val);
+ ret = dw_pcie_rd_own_conf(pp, where, size, val);
spin_unlock_irqrestore(&pp->conf_lock, flags);
return ret;
}
-static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct pcie_port *pp = sys_to_pcie(bus->sysdata);
@@ -490,34 +419,56 @@ static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
return -EINVAL;
}
- if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
+ if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
spin_lock_irqsave(&pp->conf_lock, flags);
if (bus->number != pp->root_bus_nr)
- ret = exynos_pcie_wr_other_conf(pp, bus, devfn,
+ ret = dw_pcie_wr_other_conf(pp, bus, devfn,
where, size, val);
else
- ret = exynos_pcie_wr_own_conf(pp, where, size, val);
+ ret = dw_pcie_wr_own_conf(pp, where, size, val);
spin_unlock_irqrestore(&pp->conf_lock, flags);
return ret;
}
-static struct pci_ops exynos_pcie_ops = {
- .read = exynos_pcie_rd_conf,
- .write = exynos_pcie_wr_conf,
+static struct pci_ops dw_pcie_ops = {
+ .read = dw_pcie_rd_conf,
+ .write = dw_pcie_wr_conf,
};
-static struct pci_bus *exynos_pcie_scan_bus(int nr,
- struct pci_sys_data *sys)
+int dw_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct pcie_port *pp;
+
+ pp = sys_to_pcie(sys);
+
+ if (!pp)
+ return 0;
+
+ if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
+ sys->io_offset = global_io_offset - pp->config.io_bus_addr;
+ pci_ioremap_io(sys->io_offset, pp->io.start);
+ global_io_offset += SZ_64K;
+ pci_add_resource_offset(&sys->resources, &pp->io,
+ sys->io_offset);
+ }
+
+ sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
+ pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+
+ return 1;
+}
+
+struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
struct pci_bus *bus;
struct pcie_port *pp = sys_to_pcie(sys);
if (pp) {
pp->root_bus_nr = sys->busnr;
- bus = pci_scan_root_bus(NULL, sys->busnr, &exynos_pcie_ops,
+ bus = pci_scan_root_bus(NULL, sys->busnr, &dw_pcie_ops,
sys, &sys->resources);
} else {
bus = NULL;
@@ -527,531 +478,88 @@ static struct pci_bus *exynos_pcie_scan_bus(int nr,
return bus;
}
-static int exynos_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
return pp->irq;
}
-static struct hw_pci exynos_pci = {
- .setup = exynos_pcie_setup,
- .scan = exynos_pcie_scan_bus,
- .map_irq = exynos_pcie_map_irq,
+static struct hw_pci dw_pci = {
+ .setup = dw_pcie_setup,
+ .scan = dw_pcie_scan_bus,
+ .map_irq = dw_pcie_map_irq,
};
-static void exynos_pcie_setup_rc(struct pcie_port *pp)
+void dw_pcie_setup_rc(struct pcie_port *pp)
{
struct pcie_port_info *config = &pp->config;
- void __iomem *dbi_base = pp->dbi_base;
u32 val;
u32 membase;
u32 memlimit;
/* set the number of lines as 4 */
- readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val);
+ dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
val &= ~PORT_LINK_MODE_MASK;
- val |= PORT_LINK_MODE_4_LANES;
- writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL);
+ switch (pp->lanes) {
+ case 1:
+ val |= PORT_LINK_MODE_1_LANES;
+ break;
+ case 2:
+ val |= PORT_LINK_MODE_2_LANES;
+ break;
+ case 4:
+ val |= PORT_LINK_MODE_4_LANES;
+ break;
+ }
+ dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
/* set link width speed control register */
- readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
+ dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
- val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
- writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ switch (pp->lanes) {
+ case 1:
+ val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ break;
+ case 2:
+ val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+ break;
+ case 4:
+ val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+ break;
+ }
+ dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
/* setup RC BARs */
- writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0);
- writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1);
+ dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
+ dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1);
/* setup interrupt pins */
- readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val);
+ dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
val &= 0xffff00ff;
val |= 0x00000100;
- writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE);
+ dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
/* setup bus numbers */
- readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val);
+ dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
val &= 0xff000000;
val |= 0x00010100;
- writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS);
+ dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
/* setup memory base, memory limit */
membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
val = memlimit | membase;
- writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE);
+ dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
/* setup command register */
- readl_rc(pp, dbi_base + PCI_COMMAND, &val);
+ dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
- writel_rc(pp, val, dbi_base + PCI_COMMAND);
-}
-
-static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
-{
- u32 val;
- void __iomem *elbi_base = pp->elbi_base;
-
- val = readl(elbi_base + PCIE_CORE_RESET);
- val &= ~PCIE_CORE_RESET_ENABLE;
- writel(val, elbi_base + PCIE_CORE_RESET);
- writel(0, elbi_base + PCIE_PWR_RESET);
- writel(0, elbi_base + PCIE_STICKY_RESET);
- writel(0, elbi_base + PCIE_NONSTICKY_RESET);
-}
-
-static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
-{
- u32 val;
- void __iomem *elbi_base = pp->elbi_base;
- void __iomem *purple_base = pp->purple_base;
-
- val = readl(elbi_base + PCIE_CORE_RESET);
- val |= PCIE_CORE_RESET_ENABLE;
- writel(val, elbi_base + PCIE_CORE_RESET);
- writel(1, elbi_base + PCIE_STICKY_RESET);
- writel(1, elbi_base + PCIE_NONSTICKY_RESET);
- writel(1, elbi_base + PCIE_APP_INIT_RESET);
- writel(0, elbi_base + PCIE_APP_INIT_RESET);
- writel(1, purple_base + PCIE_PHY_MAC_RESET);
-}
-
-static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
-{
- void __iomem *purple_base = pp->purple_base;
-
- writel(0, purple_base + PCIE_PHY_MAC_RESET);
- writel(1, purple_base + PCIE_PHY_GLOBAL_RESET);
-}
-
-static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
-{
- void __iomem *elbi_base = pp->elbi_base;
- void __iomem *purple_base = pp->purple_base;
-
- writel(0, purple_base + PCIE_PHY_GLOBAL_RESET);
- writel(1, elbi_base + PCIE_PWR_RESET);
- writel(0, purple_base + PCIE_PHY_COMMON_RESET);
- writel(0, purple_base + PCIE_PHY_CMN_REG);
- writel(0, purple_base + PCIE_PHY_TRSVREG_RESET);
- writel(0, purple_base + PCIE_PHY_TRSV_RESET);
-}
-
-static void exynos_pcie_init_phy(struct pcie_port *pp)
-{
- void __iomem *phy_base = pp->phy_base;
-
- /* DCC feedback control off */
- writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK);
-
- /* set TX/RX impedance */
- writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE);
-
- /* set 50Mhz PHY clock */
- writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0);
- writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1);
-
- /* set TX Differential output for lane 0 */
- writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL);
-
- /* set TX Pre-emphasis Level Control for lane 0 to minimum */
- writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
-
- /* set RX clock and data recovery bandwidth */
- writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS);
- writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR);
- writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR);
- writel(0x82, phy_base + PCIE_PHY_TRSV2_RXCDR);
- writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR);
-
- /* change TX Pre-emphasis Level Control for lanes */
- writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
- writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL);
- writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL);
- writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL);
-
- /* set LVCC */
- writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC);
- writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC);
- writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC);
- writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC);
-}
-
-static void exynos_pcie_assert_reset(struct pcie_port *pp)
-{
- if (pp->reset_gpio >= 0)
- devm_gpio_request_one(pp->dev, pp->reset_gpio,
- GPIOF_OUT_INIT_HIGH, "RESET");
- return;
-}
-
-static int exynos_pcie_establish_link(struct pcie_port *pp)
-{
- u32 val;
- int count = 0;
- void __iomem *elbi_base = pp->elbi_base;
- void __iomem *purple_base = pp->purple_base;
- void __iomem *phy_base = pp->phy_base;
-
- if (exynos_pcie_link_up(pp)) {
- dev_err(pp->dev, "Link already up\n");
- return 0;
- }
-
- /* assert reset signals */
- exynos_pcie_assert_core_reset(pp);
- exynos_pcie_assert_phy_reset(pp);
-
- /* de-assert phy reset */
- exynos_pcie_deassert_phy_reset(pp);
-
- /* initialize phy */
- exynos_pcie_init_phy(pp);
-
- /* pulse for common reset */
- writel(1, purple_base + PCIE_PHY_COMMON_RESET);
- udelay(500);
- writel(0, purple_base + PCIE_PHY_COMMON_RESET);
-
- /* de-assert core reset */
- exynos_pcie_deassert_core_reset(pp);
-
- /* setup root complex */
- exynos_pcie_setup_rc(pp);
-
- /* assert reset signal */
- exynos_pcie_assert_reset(pp);
-
- /* assert LTSSM enable */
- writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE);
-
- /* check if the link is up or not */
- while (!exynos_pcie_link_up(pp)) {
- mdelay(100);
- count++;
- if (count == 10) {
- while (readl(phy_base + PCIE_PHY_PLL_LOCKED) == 0) {
- val = readl(purple_base + PCIE_PHY_PLL_LOCKED);
- dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
- }
- dev_err(pp->dev, "PCIe Link Fail\n");
- return -EINVAL;
- }
- }
-
- dev_info(pp->dev, "Link up\n");
-
- return 0;
-}
-
-static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
-{
- u32 val;
- void __iomem *elbi_base = pp->elbi_base;
-
- val = readl(elbi_base + PCIE_IRQ_PULSE);
- writel(val, elbi_base + PCIE_IRQ_PULSE);
- return;
-}
-
-static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
-{
- u32 val;
- void __iomem *elbi_base = pp->elbi_base;
-
- /* enable INTX interrupt */
- val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
- IRQ_INTC_ASSERT | IRQ_INTD_ASSERT,
- writel(val, elbi_base + PCIE_IRQ_EN_PULSE);
- return;
-}
-
-static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
-{
- struct pcie_port *pp = arg;
-
- exynos_pcie_clear_irq_pulse(pp);
- return IRQ_HANDLED;
-}
-
-static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
-{
- exynos_pcie_enable_irq_pulse(pp);
- return;
-}
-
-static void exynos_pcie_host_init(struct pcie_port *pp)
-{
- struct pcie_port_info *config = &pp->config;
- u32 val;
-
- /* Keep first 64K for IO */
- pp->cfg0_base = pp->cfg.start;
- pp->cfg1_base = pp->cfg.start + config->cfg0_size;
- pp->io_base = pp->io.start;
- pp->mem_base = pp->mem.start;
-
- /* enable link */
- exynos_pcie_establish_link(pp);
-
- exynos_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
-
- /* program correct class for RC */
- exynos_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
-
- exynos_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
- val |= PORT_LOGIC_SPEED_CHANGE;
- exynos_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
-
- exynos_pcie_enable_interrupts(pp);
-}
-
-static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
-{
- struct resource *elbi_base;
- struct resource *phy_base;
- struct resource *purple_base;
- int ret;
-
- elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!elbi_base) {
- dev_err(&pdev->dev, "couldn't get elbi base resource\n");
- return -EINVAL;
- }
- pp->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
- if (IS_ERR(pp->elbi_base))
- return PTR_ERR(pp->elbi_base);
-
- phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!phy_base) {
- dev_err(&pdev->dev, "couldn't get phy base resource\n");
- return -EINVAL;
- }
- pp->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
- if (IS_ERR(pp->phy_base))
- return PTR_ERR(pp->phy_base);
-
- purple_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (!purple_base) {
- dev_err(&pdev->dev, "couldn't get purple base resource\n");
- return -EINVAL;
- }
- pp->purple_base = devm_ioremap_resource(&pdev->dev, purple_base);
- if (IS_ERR(pp->purple_base))
- return PTR_ERR(pp->purple_base);
-
- pp->irq = platform_get_irq(pdev, 1);
- if (!pp->irq) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -ENODEV;
- }
- ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
- IRQF_SHARED, "exynos-pcie", pp);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return ret;
- }
-
- pp->dbi_base = devm_ioremap(&pdev->dev, pp->cfg.start,
- resource_size(&pp->cfg));
- if (!pp->dbi_base) {
- dev_err(&pdev->dev, "error with ioremap\n");
- return -ENOMEM;
- }
-
- pp->root_bus_nr = -1;
-
- spin_lock_init(&pp->conf_lock);
- exynos_pcie_host_init(pp);
- pp->va_cfg0_base = devm_ioremap(&pdev->dev, pp->cfg0_base,
- pp->config.cfg0_size);
- if (!pp->va_cfg0_base) {
- dev_err(pp->dev, "error with ioremap in function\n");
- return -ENOMEM;
- }
- pp->va_cfg1_base = devm_ioremap(&pdev->dev, pp->cfg1_base,
- pp->config.cfg1_size);
- if (!pp->va_cfg1_base) {
- dev_err(pp->dev, "error with ioremap\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int __init exynos_pcie_probe(struct platform_device *pdev)
-{
- struct pcie_port *pp;
- struct device_node *np = pdev->dev.of_node;
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- int ret;
-
- pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
- if (!pp) {
- dev_err(&pdev->dev, "no memory for pcie port\n");
- return -ENOMEM;
- }
-
- pp->dev = &pdev->dev;
-
- if (of_pci_range_parser_init(&parser, np)) {
- dev_err(&pdev->dev, "missing ranges property\n");
- return -EINVAL;
- }
-
- /* Get the I/O and memory ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
- if (restype == IORESOURCE_IO) {
- of_pci_range_to_resource(&range, np, &pp->io);
- pp->io.name = "I/O";
- pp->io.start = max_t(resource_size_t,
- PCIBIOS_MIN_IO,
- range.pci_addr + global_io_offset);
- pp->io.end = min_t(resource_size_t,
- IO_SPACE_LIMIT,
- range.pci_addr + range.size
- + global_io_offset);
- pp->config.io_size = resource_size(&pp->io);
- pp->config.io_bus_addr = range.pci_addr;
- }
- if (restype == IORESOURCE_MEM) {
- of_pci_range_to_resource(&range, np, &pp->mem);
- pp->mem.name = "MEM";
- pp->config.mem_size = resource_size(&pp->mem);
- pp->config.mem_bus_addr = range.pci_addr;
- }
- if (restype == 0) {
- of_pci_range_to_resource(&range, np, &pp->cfg);
- pp->config.cfg0_size = resource_size(&pp->cfg)/2;
- pp->config.cfg1_size = resource_size(&pp->cfg)/2;
- }
- }
-
- pp->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
-
- pp->clk = devm_clk_get(&pdev->dev, "pcie");
- if (IS_ERR(pp->clk)) {
- dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(pp->clk);
- }
- ret = clk_prepare_enable(pp->clk);
- if (ret)
- return ret;
-
- pp->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
- if (IS_ERR(pp->bus_clk)) {
- dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
- ret = PTR_ERR(pp->bus_clk);
- goto fail_clk;
- }
- ret = clk_prepare_enable(pp->bus_clk);
- if (ret)
- goto fail_clk;
-
- ret = add_pcie_port(pp, pdev);
- if (ret < 0)
- goto fail_bus_clk;
-
- pp->controller = exynos_pci.nr_controllers;
- exynos_pci.nr_controllers = 1;
- exynos_pci.private_data = (void **)&pp;
-
- pci_common_init(&exynos_pci);
- pci_assign_unassigned_resources();
-#ifdef CONFIG_PCI_DOMAINS
- exynos_pci.domain++;
-#endif
-
- platform_set_drvdata(pdev, pp);
- return 0;
-
-fail_bus_clk:
- clk_disable_unprepare(pp->bus_clk);
-fail_clk:
- clk_disable_unprepare(pp->clk);
- return ret;
-}
-
-static int __exit exynos_pcie_remove(struct platform_device *pdev)
-{
- struct pcie_port *pp = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(pp->bus_clk);
- clk_disable_unprepare(pp->clk);
-
- return 0;
-}
-
-static const struct of_device_id exynos_pcie_of_match[] = {
- { .compatible = "samsung,exynos5440-pcie", },
- {},
-};
-MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
-
-static struct platform_driver exynos_pcie_driver = {
- .remove = __exit_p(exynos_pcie_remove),
- .driver = {
- .name = "exynos-pcie",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(exynos_pcie_of_match),
- },
-};
-
-static int exynos_pcie_abort(unsigned long addr, unsigned int fsr,
- struct pt_regs *regs)
-{
- unsigned long pc = instruction_pointer(regs);
- unsigned long instr = *(unsigned long *)pc;
-
- WARN_ONCE(1, "pcie abort\n");
-
- /*
- * If the instruction being executed was a read,
- * make it look like it read all-ones.
- */
- if ((instr & 0x0c100000) == 0x04100000) {
- int reg = (instr >> 12) & 15;
- unsigned long val;
-
- if (instr & 0x00400000)
- val = 255;
- else
- val = -1;
-
- regs->uregs[reg] = val;
- regs->ARM_pc += 4;
- return 0;
- }
-
- if ((instr & 0x0e100090) == 0x00100090) {
- int reg = (instr >> 12) & 15;
-
- regs->uregs[reg] = -1;
- regs->ARM_pc += 4;
- return 0;
- }
-
- return 1;
-}
-
-/* Exynos PCIe driver does not allow module unload */
-
-static int __init pcie_init(void)
-{
- hook_fault_code(16 + 6, exynos_pcie_abort, SIGBUS, 0,
- "imprecise external abort");
-
- platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
-
- return 0;
+ dw_pcie_writel_rc(pp, val, PCI_COMMAND);
}
-subsys_initcall(pcie_init);
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Samsung PCIe host controller driver");
+MODULE_DESCRIPTION("Designware PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
new file mode 100644
index 000000000000..133820f1da97
--- /dev/null
+++ b/drivers/pci/host/pcie-designware.h
@@ -0,0 +1,65 @@
+/*
+ * Synopsys Designware PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct pcie_port_info {
+ u32 cfg0_size;
+ u32 cfg1_size;
+ u32 io_size;
+ u32 mem_size;
+ phys_addr_t io_bus_addr;
+ phys_addr_t mem_bus_addr;
+};
+
+struct pcie_port {
+ struct device *dev;
+ u8 root_bus_nr;
+ void __iomem *dbi_base;
+ u64 cfg0_base;
+ void __iomem *va_cfg0_base;
+ u64 cfg1_base;
+ void __iomem *va_cfg1_base;
+ u64 io_base;
+ u64 mem_base;
+ spinlock_t conf_lock;
+ struct resource cfg;
+ struct resource io;
+ struct resource mem;
+ struct pcie_port_info config;
+ int irq;
+ u32 lanes;
+ struct pcie_host_ops *ops;
+};
+
+struct pcie_host_ops {
+ void (*readl_rc)(struct pcie_port *pp,
+ void __iomem *dbi_base, u32 *val);
+ void (*writel_rc)(struct pcie_port *pp,
+ u32 val, void __iomem *dbi_base);
+ int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
+ int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+ int (*link_up)(struct pcie_port *pp);
+ void (*host_init)(struct pcie_port *pp);
+};
+
+extern unsigned long global_io_offset;
+
+int cfg_read(void __iomem *addr, int where, int size, u32 *val);
+int cfg_write(void __iomem *addr, int where, int size, u32 val);
+int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
+int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
+int dw_pcie_link_up(struct pcie_port *pp);
+void dw_pcie_setup_rc(struct pcie_port *pp);
+int dw_pcie_host_init(struct pcie_port *pp);
+int dw_pcie_setup(int nr, struct pci_sys_data *sys);
+struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys);
+int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
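The header above is the whole contract between the shared Synopsys core and a platform glue driver: the glue fills in a pcie_host_ops and hands its pcie_port to dw_pcie_host_init(). A minimal sketch of that wiring follows, using only the symbols declared above; everything named my_* is a hypothetical placeholder, not part of this series.

/* Hypothetical glue wiring -- a sketch, not part of this patch; assumes "pcie-designware.h". */
static int my_pcie_link_up(struct pcie_port *pp)
{
	/* poll a vendor-specific link-status register here */
	return 1;
}

static void my_pcie_host_init(struct pcie_port *pp)
{
	/* bring up clocks/PHY and train the link, then program the RC */
	dw_pcie_setup_rc(pp);
}

static struct pcie_host_ops my_pcie_host_ops = {
	.link_up	= my_pcie_link_up,
	.host_init	= my_pcie_host_init,
};

static int my_pcie_add_port(struct pcie_port *pp)
{
	pp->ops = &my_pcie_host_ops;
	return dw_pcie_host_init(pp);
}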
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 8054ddcdaed0..f6488adf3af1 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -572,7 +572,6 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
acpiphp_set_acpi_region(slot);
- pci_enable_bridges(bus);
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Assume that newly added devices are powered on already. */
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 7fb326983ed6..541bbe6d5343 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -155,6 +155,7 @@ void pciehp_green_led_off(struct slot *slot);
void pciehp_green_led_blink(struct slot *slot);
int pciehp_check_link_status(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
+int pciehp_reset_slot(struct slot *slot, int probe);
static inline const char *slot_name(struct slot *slot)
{
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7d72c5e2eba9..f4a18f51a29c 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -69,6 +69,7 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
+static int reset_slot (struct hotplug_slot *slot, int probe);
/**
* release_slot - free up the memory used by a slot
@@ -111,6 +112,7 @@ static int init_slot(struct controller *ctrl)
ops->disable_slot = disable_slot;
ops->get_power_status = get_power_status;
ops->get_adapter_status = get_adapter_status;
+ ops->reset_slot = reset_slot;
if (MRL_SENS(ctrl))
ops->get_latch_status = get_latch_status;
if (ATTN_LED(ctrl)) {
@@ -223,6 +225,16 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return pciehp_get_adapter_status(slot, value);
}
+static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ return pciehp_reset_slot(slot, probe);
+}
+
static int pciehp_probe(struct pcie_device *dev)
{
int rc;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b2255736ac81..51f56ef4ab6f 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -749,6 +749,37 @@ static void pcie_disable_notification(struct controller *ctrl)
ctrl_warn(ctrl, "Cannot disable software notification\n");
}
+/*
+ * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
+ * bus reset of the bridge, but if the slot supports surprise removal we need
+ * to disable presence detection around the bus reset and clear any spurious
+ * events after.
+ */
+int pciehp_reset_slot(struct slot *slot, int probe)
+{
+ struct controller *ctrl = slot->ctrl;
+
+ if (probe)
+ return 0;
+
+ if (HP_SUPR_RM(ctrl)) {
+ pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_PDCE);
+ if (pciehp_poll_mode)
+ del_timer_sync(&ctrl->poll_timer);
+ }
+
+ pci_reset_bridge_secondary_bus(ctrl->pcie->port);
+
+ if (HP_SUPR_RM(ctrl)) {
+ pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC);
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE);
+ if (pciehp_poll_mode)
+ int_poll_timeout(ctrl->poll_timer.data);
+ }
+
+ return 0;
+}
+
int pcie_init_notification(struct controller *ctrl)
{
if (pciehp_request_irq(ctrl))
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index fec2d5b75440..16f920352317 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -160,9 +160,8 @@ void pci_configure_slot(struct pci_dev *dev)
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
- if (dev->bus && dev->bus->self)
- pcie_bus_configure_settings(dev->bus,
- dev->bus->self->pcie_mpss);
+ if (dev->bus)
+ pcie_bus_configure_settings(dev->bus);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index de8ffacf9c9b..21a7182dccd4 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -286,7 +286,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
return -EINVAL;
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (nr_virtfn > 1 && !stride))
@@ -324,7 +323,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
if (!pdev->is_physfn) {
pci_dev_put(pdev);
- return -ENODEV;
+ return -ENOSYS;
}
rc = sysfs_create_link(&dev->dev.kobj,
@@ -334,6 +333,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return rc;
}
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
@@ -368,6 +368,7 @@ failed:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
ssleep(1);
pci_cfg_access_unlock(dev);
@@ -401,6 +402,7 @@ static void sriov_disable(struct pci_dev *dev)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
iov->num_VFs = 0;
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
}
static int sriov_init(struct pci_dev *dev, int pos)
@@ -662,7 +664,7 @@ int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
might_sleep();
if (!dev->is_physfn)
- return -ENODEV;
+ return -ENOSYS;
return sriov_enable(dev, nr_virtfn);
}
@@ -722,7 +724,7 @@ EXPORT_SYMBOL_GPL(pci_num_vf);
* @dev: the PCI device
*
* Returns number of VFs belonging to this device that are assigned to a guest.
- * If device is not a physical function returns -ENODEV.
+ * If device is not a physical function returns 0.
*/
int pci_vfs_assigned(struct pci_dev *dev)
{
@@ -767,12 +769,15 @@ EXPORT_SYMBOL_GPL(pci_vfs_assigned);
* device's mutex held.
*
* Returns 0 if PF is an SRIOV-capable device and
- * value of numvfs valid. If not a PF with VFS, return -EINVAL;
+ * value of numvfs valid. If not a PF return -ENOSYS;
+ * if numvfs is invalid return -EINVAL;
* if VFs already enabled, return -EBUSY.
*/
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
- if (!dev->is_physfn || (numvfs > dev->sriov->total_VFs))
+ if (!dev->is_physfn)
+ return -ENOSYS;
+ if (numvfs > dev->sriov->total_VFs)
return -EINVAL;
/* Shouldn't change if VFs already enabled */
@@ -786,17 +791,17 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
/**
- * pci_sriov_get_totalvfs -- get total VFs supported on this devic3
+ * pci_sriov_get_totalvfs -- get total VFs supported on this device
* @dev: the PCI PF device
*
* For a PCIe device with SRIOV support, return the PCIe
* SRIOV capability value of TotalVFs or the value of driver_max_VFs
- * if the driver reduced it. Otherwise, -EINVAL.
+ * if the driver reduced it. Otherwise 0.
*/
int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
if (!dev->is_physfn)
- return -EINVAL;
+ return 0;
if (dev->sriov->driver_max_VFs)
return dev->sriov->driver_max_VFs;
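With the iov.c changes above, the SR-IOV entry points now distinguish "not a physical function" (-ENOSYS) from "invalid argument" (-EINVAL) and "VFs busy" (-EBUSY). A hedged sketch of a caller relying on those conventions; the wrapper name is hypothetical and not part of this series.

/* Illustration only; my_enable_vfs() is not in this patch. */
static int my_enable_vfs(struct pci_dev *pdev, int num_vfs)
{
	int rc;

	if (pci_vfs_assigned(pdev) > 0)
		return -EBUSY;		/* existing VFs are assigned to a guest */

	rc = pci_enable_sriov(pdev, num_vfs);
	if (rc == -ENOSYS)
		dev_info(&pdev->dev, "not an SR-IOV physical function\n");
	return rc;
}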
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index e6515e21afa3..98f7b9b89507 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -763,6 +763,13 @@ static int pci_pm_resume(struct device *dev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
+
+/*
+ * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
+ * a hibernate transition
+ */
+struct dev_pm_ops __weak pcibios_pm_ops;
+
static int pci_pm_freeze(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -786,6 +793,9 @@ static int pci_pm_freeze(struct device *dev)
return error;
}
+ if (pcibios_pm_ops.freeze)
+ return pcibios_pm_ops.freeze(dev);
+
return 0;
}
@@ -811,6 +821,9 @@ static int pci_pm_freeze_noirq(struct device *dev)
pci_pm_set_unknown_state(pci_dev);
+ if (pcibios_pm_ops.freeze_noirq)
+ return pcibios_pm_ops.freeze_noirq(dev);
+
return 0;
}
@@ -820,6 +833,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (pcibios_pm_ops.thaw_noirq) {
+ error = pcibios_pm_ops.thaw_noirq(dev);
+ if (error)
+ return error;
+ }
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
@@ -837,6 +856,12 @@ static int pci_pm_thaw(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
+ if (pcibios_pm_ops.thaw) {
+ error = pcibios_pm_ops.thaw(dev);
+ if (error)
+ return error;
+ }
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
@@ -878,6 +903,9 @@ static int pci_pm_poweroff(struct device *dev)
Fixup:
pci_fixup_device(pci_fixup_suspend, pci_dev);
+ if (pcibios_pm_ops.poweroff)
+ return pcibios_pm_ops.poweroff(dev);
+
return 0;
}
@@ -911,6 +939,9 @@ static int pci_pm_poweroff_noirq(struct device *dev)
if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+ if (pcibios_pm_ops.poweroff_noirq)
+ return pcibios_pm_ops.poweroff_noirq(dev);
+
return 0;
}
@@ -920,6 +951,12 @@ static int pci_pm_restore_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (pcibios_pm_ops.restore_noirq) {
+ error = pcibios_pm_ops.restore_noirq(dev);
+ if (error)
+ return error;
+ }
+
pci_pm_default_resume_early(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
@@ -937,6 +974,12 @@ static int pci_pm_restore(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
+ if (pcibios_pm_ops.restore) {
+ error = pcibios_pm_ops.restore(dev);
+ if (error)
+ return error;
+ }
+
/*
* This is necessary for the hibernation error path in which restore is
* called without restoring the standard config registers of the device.
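The pcibios_pm_ops hook added above is a __weak, empty dev_pm_ops, so the extra calls cost nothing unless an architecture overrides it. One way an arch might plug in, sketched under that assumption; the freeze handler shown is hypothetical, not taken from this patch.

/* Arch-side sketch; a strong definition overrides the __weak one in pci-driver.c. */
static int my_arch_pci_freeze(struct device *dev)
{
	/* arch-specific quiesce work before the hibernation image is created */
	return 0;
}

struct dev_pm_ops pcibios_pm_ops = {
	.freeze = my_arch_pci_freeze,
	/* .thaw, .poweroff, .restore and the *_noirq hooks as needed */
};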
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c0dbe1f61362..7128cfdd64aa 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -131,19 +131,19 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
return ret;
}
-static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t cpuaffinity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
}
+static DEVICE_ATTR_RO(cpuaffinity);
-static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t cpulistaffinity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
}
+static DEVICE_ATTR_RO(cpulistaffinity);
/* show resources */
static ssize_t
@@ -379,6 +379,7 @@ dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
}
return count;
}
+static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);
#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
@@ -514,11 +515,20 @@ struct device_attribute pci_dev_attrs[] = {
__ATTR_NULL,
};
-struct device_attribute pcibus_dev_attrs[] = {
- __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store),
- __ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL),
- __ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL),
- __ATTR_NULL,
+static struct attribute *pcibus_attrs[] = {
+ &dev_attr_rescan.attr,
+ &dev_attr_cpuaffinity.attr,
+ &dev_attr_cpulistaffinity.attr,
+ NULL,
+};
+
+static const struct attribute_group pcibus_group = {
+ .attrs = pcibus_attrs,
+};
+
+const struct attribute_group *pcibus_groups[] = {
+ &pcibus_group,
+ NULL,
};
static ssize_t
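For readers unfamiliar with the dev_groups conversion above: DEVICE_ATTR_RO(cpuaffinity) generates the dev_attr_cpuaffinity object that pcibus_attrs[] collects. A rough gloss of the expansion (my paraphrase of the core macro, not text from this patch):

/*
 * DEVICE_ATTR_RO(cpuaffinity) expands roughly to:
 *   struct device_attribute dev_attr_cpuaffinity =
 *           __ATTR(cpuaffinity, S_IRUGO, cpuaffinity_show, NULL);
 * which is what pcibus_attrs[] and pcibus_groups[] hand to the driver core.
 */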
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e37fea6e178d..b821a62958fd 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
+#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"
@@ -1145,6 +1146,24 @@ int pci_reenable_device(struct pci_dev *dev)
return 0;
}
+static void pci_enable_bridge(struct pci_dev *dev)
+{
+ int retval;
+
+ if (!dev)
+ return;
+
+ pci_enable_bridge(dev->bus->self);
+
+ if (pci_is_enabled(dev))
+ return;
+ retval = pci_enable_device(dev);
+ if (retval)
+ dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
+ retval);
+ pci_set_master(dev);
+}
+
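A gloss on the recursion above (my reading of the code, not text from the patch): pci_enable_bridge() walks upstream through dev->bus->self and stops at the root, where the upstream pointer is NULL, so bridges are enabled from the root down before the endpoint itself is enabled.

/*
 * Call order when enabling an endpoint behind nested bridges A -> B -> ep:
 *   pci_enable_device(ep)
 *     -> pci_enable_bridge(B)
 *          -> pci_enable_bridge(A)
 *               -> pci_enable_bridge(NULL)   recursion stops at the root
 *          enable + set_master(A), then enable + set_master(B), then ep
 */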
static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
int err;
@@ -1165,6 +1184,8 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
if (atomic_inc_return(&dev->enable_cnt) > 1)
return 0; /* already enabled */
+ pci_enable_bridge(dev->bus->self);
+
/* only skip sriov related */
for (i = 0; i <= PCI_ROM_RESOURCE; i++)
if (dev->resource[i].flags & flags)
@@ -1992,7 +2013,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
}
/**
- * pci_add_save_buffer - allocate buffer for saving given capability registers
+ * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
* @dev: the PCI device
* @cap: the capability to allocate the buffer for
* @size: requested size of the buffer
@@ -2095,9 +2116,9 @@ void pci_enable_ido(struct pci_dev *dev, unsigned long type)
u16 ctrl = 0;
if (type & PCI_EXP_IDO_REQUEST)
- ctrl |= PCI_EXP_IDO_REQ_EN;
+ ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
if (type & PCI_EXP_IDO_COMPLETION)
- ctrl |= PCI_EXP_IDO_CMP_EN;
+ ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
if (ctrl)
pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
@@ -2113,9 +2134,9 @@ void pci_disable_ido(struct pci_dev *dev, unsigned long type)
u16 ctrl = 0;
if (type & PCI_EXP_IDO_REQUEST)
- ctrl |= PCI_EXP_IDO_REQ_EN;
+ ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
if (type & PCI_EXP_IDO_COMPLETION)
- ctrl |= PCI_EXP_IDO_CMP_EN;
+ ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
if (ctrl)
pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
@@ -2147,7 +2168,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
int ret;
pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
- if (!(cap & PCI_EXP_OBFF_MASK))
+ if (!(cap & PCI_EXP_DEVCAP2_OBFF_MASK))
return -ENOTSUPP; /* no OBFF support at all */
/* Make sure the topology supports OBFF as well */
@@ -2158,17 +2179,17 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
}
pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
- if (cap & PCI_EXP_OBFF_WAKE)
- ctrl |= PCI_EXP_OBFF_WAKE_EN;
+ if (cap & PCI_EXP_DEVCAP2_OBFF_WAKE)
+ ctrl |= PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
else {
switch (type) {
case PCI_EXP_OBFF_SIGNAL_L0:
- if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
- ctrl |= PCI_EXP_OBFF_MSGA_EN;
+ if (!(ctrl & PCI_EXP_DEVCTL2_OBFF_WAKE_EN))
+ ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGA_EN;
break;
case PCI_EXP_OBFF_SIGNAL_ALWAYS:
- ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
- ctrl |= PCI_EXP_OBFF_MSGB_EN;
+ ctrl &= ~PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
+ ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGB_EN;
break;
default:
WARN(1, "bad OBFF signal type\n");
@@ -2189,7 +2210,8 @@ EXPORT_SYMBOL(pci_enable_obff);
*/
void pci_disable_obff(struct pci_dev *dev)
{
- pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
+ pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_OBFF_WAKE_EN);
}
EXPORT_SYMBOL(pci_disable_obff);
@@ -2237,7 +2259,8 @@ int pci_enable_ltr(struct pci_dev *dev)
return ret;
}
- return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
+ return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
}
EXPORT_SYMBOL(pci_enable_ltr);
@@ -2254,7 +2277,8 @@ void pci_disable_ltr(struct pci_dev *dev)
if (!pci_ltr_supported(dev))
return;
- pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
+ pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
}
EXPORT_SYMBOL(pci_disable_ltr);
@@ -2359,6 +2383,27 @@ void pci_enable_acs(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
+static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+ int pos;
+ u16 cap, ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return false;
+
+ /*
+ * Except for egress control, capabilities are either required
+ * or only required if controllable. Features missing from the
+ * capability field can therefore be assumed to be hard-wired enabled.
+ */
+ pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
+ acs_flags &= (cap | PCI_ACS_EC);
+
+ pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+ return (ctrl & acs_flags) == acs_flags;
+}
+
/**
* pci_acs_enabled - test ACS against required flags for a given device
* @pdev: device to test
@@ -2366,36 +2411,76 @@ void pci_enable_acs(struct pci_dev *dev)
*
* Return true if the device supports the provided flags. Automatically
* filters out flags that are not implemented on multifunction devices.
+ *
+ * Note that this interface checks the effective ACS capabilities of the
+ * device rather than the actual capabilities. For instance, most single
+ * function endpoints are not required to support ACS because they have no
+ * opportunity for peer-to-peer access. We therefore return 'true'
+ * regardless of whether the device exposes an ACS capability. This makes
+ * it much easier for callers of this function to ignore the actual type
+ * or topology of the device when testing ACS support.
*/
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
- int pos, ret;
- u16 ctrl;
+ int ret;
ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
if (ret >= 0)
return ret > 0;
+ /*
+ * Conventional PCI and PCI-X devices never support ACS, either
+ * effectively or actually. The shared bus topology implies that
+ * any device on the bus can receive or snoop DMA.
+ */
if (!pci_is_pcie(pdev))
return false;
- /* Filter out flags not applicable to multifunction */
- if (pdev->multifunction)
- acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
- PCI_ACS_EC | PCI_ACS_DT);
-
- if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pdev->multifunction) {
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
- if (!pos)
- return false;
+ switch (pci_pcie_type(pdev)) {
+ /*
+ * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
+ * but since their primary interface is PCI/X, we conservatively
+ * handle them as we would a non-PCIe device.
+ */
+ case PCI_EXP_TYPE_PCIE_BRIDGE:
+ /*
+ * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
+ * applicable... must never implement an ACS Extended Capability...".
+ * This seems arbitrary, but we take a conservative interpretation
+ * of this statement.
+ */
+ case PCI_EXP_TYPE_PCI_BRIDGE:
+ case PCI_EXP_TYPE_RC_EC:
+ return false;
+ /*
+ * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
+ * implement ACS in order to indicate their peer-to-peer capabilities,
+ * regardless of whether they are single- or multi-function devices.
+ */
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ case PCI_EXP_TYPE_ROOT_PORT:
+ return pci_acs_flags_enabled(pdev, acs_flags);
+ /*
+ * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
+ * implemented by the remaining PCIe types to indicate peer-to-peer
+ * capabilities, but only when they are part of a multifunction
+ * device. The footnote for section 6.12 indicates the specific
+ * PCIe types included here.
+ */
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_LEG_END:
+ case PCI_EXP_TYPE_RC_END:
+ if (!pdev->multifunction)
+ break;
- pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
- if ((ctrl & acs_flags) != acs_flags)
- return false;
+ return pci_acs_flags_enabled(pdev, acs_flags);
}
+ /*
+ * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
+ * to single function devices with the exception of downstream ports.
+ */
return true;
}
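The practical effect of the pci_acs_enabled() rework above is that callers can ask a topology question without caring about device type. For instance, a caller deciding whether peer-to-peer traffic from a device is forced upstream might check request and completion redirect; the helper name below is hypothetical and only illustrates the flag usage.

/* Sketch only; my_dev_is_isolated() is not part of this patch. */
static bool my_dev_is_isolated(struct pci_dev *pdev)
{
	/* RR and CR force peer-to-peer transactions up to the root complex */
	return pci_acs_enabled(pdev, PCI_ACS_RR | PCI_ACS_CR);
}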
@@ -3059,18 +3144,23 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
/**
- * pci_msi_off - disables any msi or msix capabilities
+ * pci_msi_off - disables any MSI or MSI-X capabilities
* @dev: the PCI device to operate on
*
- * If you want to use msi see pci_enable_msi and friends.
- * This is a lower level primitive that allows us to disable
- * msi operation at the device level.
+ * If you want to use MSI, see pci_enable_msi() and friends.
+ * This is a lower-level primitive that allows us to disable
+ * MSI operation at the device level.
*/
void pci_msi_off(struct pci_dev *dev)
{
int pos;
u16 control;
+ /*
+ * This looks like it could go in msi.c, but we need it even when
+ * CONFIG_PCI_MSI=n. For the same reason, we can't use
+ * dev->msi_cap or dev->msix_cap here.
+ */
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (pos) {
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -3098,19 +3188,17 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
-static int pcie_flr(struct pci_dev *dev, int probe)
+/**
+ * pci_wait_for_pending_transaction - waits for pending transaction
+ * @dev: the PCI device to operate on
+ *
+ * Return 0 if the transaction is still pending, 1 otherwise.
+ */
+int pci_wait_for_pending_transaction(struct pci_dev *dev)
{
int i;
- u32 cap;
u16 status;
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
- if (!(cap & PCI_EXP_DEVCAP_FLR))
- return -ENOTTY;
-
- if (probe)
- return 0;
-
/* Wait for Transaction Pending bit clean */
for (i = 0; i < 4; i++) {
if (i)
@@ -3118,13 +3206,27 @@ static int pcie_flr(struct pci_dev *dev, int probe)
pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
+ return 1;
}
- dev_err(&dev->dev, "transaction is not cleared; "
- "proceeding with reset anyway\n");
+ return 0;
+}
+EXPORT_SYMBOL(pci_wait_for_pending_transaction);
+
+static int pcie_flr(struct pci_dev *dev, int probe)
+{
+ u32 cap;
+
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
+ if (!(cap & PCI_EXP_DEVCAP_FLR))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
-clear:
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
@@ -3215,9 +3317,42 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return 0;
}
-static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+/**
+ * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
+ * @dev: Bridge device
+ *
+ * Use the bridge control register to assert reset on the secondary bus.
+ * Devices on the secondary bus are left in power-on state.
+ */
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
+
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+ /*
+ * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
+ * this to 2ms to ensure that we meet the minimum requirement.
+ */
+ msleep(2);
+
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+
+ /*
+ * Trhfa for conventional PCI is 2^25 clock cycles.
+ * Assuming a minimum 33MHz clock this results in a 1s
+ * delay before we can consider subordinate devices to
+ * be re-initialized. PCIe has some ways to shorten this,
+ * but we don't make use of them yet.
+ */
+ ssleep(1);
+}
+EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
struct pci_dev *pdev;
if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
@@ -3230,18 +3365,40 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
if (probe)
return 0;
- pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
- ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
- msleep(100);
-
- ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
- msleep(100);
+ pci_reset_bridge_secondary_bus(dev->bus->self);
return 0;
}
+static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
+{
+ int rc = -ENOTTY;
+
+ if (!hotplug || !try_module_get(hotplug->ops->owner))
+ return rc;
+
+ if (hotplug->ops->reset_slot)
+ rc = hotplug->ops->reset_slot(hotplug, probe);
+
+ module_put(hotplug->ops->owner);
+
+ return rc;
+}
+
+static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+{
+ struct pci_dev *pdev;
+
+ if (dev->subordinate || !dev->slot)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev && pdev->slot == dev->slot)
+ return -ENOTTY;
+
+ return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
+}
+
static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
int rc;
@@ -3264,27 +3421,65 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe)
if (rc != -ENOTTY)
goto done;
+ rc = pci_dev_reset_slot_function(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
rc = pci_parent_bus_reset(dev, probe);
done:
return rc;
}
+static void pci_dev_lock(struct pci_dev *dev)
+{
+ pci_cfg_access_lock(dev);
+ /* block PM suspend, driver probe, etc. */
+ device_lock(&dev->dev);
+}
+
+static void pci_dev_unlock(struct pci_dev *dev)
+{
+ device_unlock(&dev->dev);
+ pci_cfg_access_unlock(dev);
+}
+
+static void pci_dev_save_and_disable(struct pci_dev *dev)
+{
+ /*
+ * Wake-up device prior to save. PM registers default to D0 after
+ * reset and a simple register restore doesn't reliably return
+ * to a non-D0 state anyway.
+ */
+ pci_set_power_state(dev, PCI_D0);
+
+ pci_save_state(dev);
+ /*
+ * Disable the device by clearing the Command register, except for
+ * INTx-disable which is set. This not only disables MMIO and I/O port
+ * BARs, but also prevents the device from being Bus Master, preventing
+ * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
+ * compliant devices, INTx-disable prevents legacy interrupts.
+ */
+ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+}
+
+static void pci_dev_restore(struct pci_dev *dev)
+{
+ pci_restore_state(dev);
+}
+
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
int rc;
- if (!probe) {
- pci_cfg_access_lock(dev);
- /* block PM suspend, driver probe, etc. */
- device_lock(&dev->dev);
- }
+ if (!probe)
+ pci_dev_lock(dev);
rc = __pci_dev_reset(dev, probe);
- if (!probe) {
- device_unlock(&dev->dev);
- pci_cfg_access_unlock(dev);
- }
+ if (!probe)
+ pci_dev_unlock(dev);
+
return rc;
}
/**
@@ -3375,22 +3570,249 @@ int pci_reset_function(struct pci_dev *dev)
if (rc)
return rc;
- pci_save_state(dev);
-
- /*
- * both INTx and MSI are disabled after the Interrupt Disable bit
- * is set and the Bus Master bit is cleared.
- */
- pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+ pci_dev_save_and_disable(dev);
rc = pci_dev_reset(dev, 0);
- pci_restore_state(dev);
+ pci_dev_restore(dev);
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
+/* Lock devices from the top of the tree down */
+static void pci_bus_lock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
+ }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_bus_unlock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+}
+
+/* Lock devices from the top of the tree down */
+static void pci_slot_lock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
+ }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_slot_unlock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_bus_save_and_disable(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_save_and_disable(dev);
+ if (dev->subordinate)
+ pci_bus_save_and_disable(dev->subordinate);
+ }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_bus_restore(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_restore(dev);
+ if (dev->subordinate)
+ pci_bus_restore(dev->subordinate);
+ }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_slot_save_and_disable(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_save_and_disable(dev);
+ if (dev->subordinate)
+ pci_bus_save_and_disable(dev->subordinate);
+ }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_slot_restore(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_restore(dev);
+ if (dev->subordinate)
+ pci_bus_restore(dev->subordinate);
+ }
+}
+
+static int pci_slot_reset(struct pci_slot *slot, int probe)
+{
+ int rc;
+
+ if (!slot)
+ return -ENOTTY;
+
+ if (!probe)
+ pci_slot_lock(slot);
+
+ might_sleep();
+
+ rc = pci_reset_hotplug_slot(slot->hotplug, probe);
+
+ if (!probe)
+ pci_slot_unlock(slot);
+
+ return rc;
+}
+
+/**
+ * pci_probe_reset_slot - probe whether a PCI slot can be reset
+ * @slot: PCI slot to probe
+ *
+ * Return 0 if slot can be reset, negative if a slot reset is not supported.
+ */
+int pci_probe_reset_slot(struct pci_slot *slot)
+{
+ return pci_slot_reset(slot, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
+
+/**
+ * pci_reset_slot - reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * A PCI bus may host multiple slots, each slot may support a reset mechanism
+ * independent of other slots. For instance, some slots may support slot power
+ * control. In the case of a 1:1 bus to slot architecture, this function may
+ * wrap the bus reset to avoid spurious slot related events such as hotplug.
+ * Generally a slot reset should be attempted before a bus reset. All of the
+ * functions of the slot and any subordinate buses behind the slot are reset
+ * through this function. PCI config space of all devices in the slot and
+ * behind the slot is saved before and restored after reset.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_slot(struct pci_slot *slot)
+{
+ int rc;
+
+ rc = pci_slot_reset(slot, 1);
+ if (rc)
+ return rc;
+
+ pci_slot_save_and_disable(slot);
+
+ rc = pci_slot_reset(slot, 0);
+
+ pci_slot_restore(slot);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_slot);
+
+static int pci_bus_reset(struct pci_bus *bus, int probe)
+{
+ if (!bus->self)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_bus_lock(bus);
+
+ might_sleep();
+
+ pci_reset_bridge_secondary_bus(bus->self);
+
+ pci_bus_unlock(bus);
+
+ return 0;
+}
+
+/**
+ * pci_probe_reset_bus - probe whether a PCI bus can be reset
+ * @bus: PCI bus to probe
+ *
+ * Return 0 if bus can be reset, negative if a bus reset is not supported.
+ */
+int pci_probe_reset_bus(struct pci_bus *bus)
+{
+ return pci_bus_reset(bus, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
+
+/**
+ * pci_reset_bus - reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Do a bus reset on the given bus and any subordinate buses, saving
+ * and restoring state of all devices.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_bus(struct pci_bus *bus)
+{
+ int rc;
+
+ rc = pci_bus_reset(bus, 1);
+ if (rc)
+ return rc;
+
+ pci_bus_save_and_disable(bus);
+
+ rc = pci_bus_reset(bus, 0);
+
+ pci_bus_restore(bus);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_bus);
+
/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
@@ -3525,8 +3947,6 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
int mps = pcie_get_mps(dev);
- if (mps < 0)
- return mps;
if (mps < rq)
rq = mps;
}
@@ -3543,7 +3963,6 @@ EXPORT_SYMBOL(pcie_set_readrq);
* @dev: PCI device to query
*
* Returns maximum payload size in bytes
- * or appropriate error value.
*/
int pcie_get_mps(struct pci_dev *dev)
{
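The slot and bus reset interfaces added to pci.c above are meant to be probed first and then used, preferring the finer-grained slot reset and falling back to a full bus reset. A hedged sketch of that calling pattern; the wrapper name is hypothetical and not part of this series.

/* Sketch of a caller; my_reset_below_bridge() is not in this patch. */
static int my_reset_below_bridge(struct pci_dev *pdev)
{
	struct pci_slot *slot = pdev->slot;

	if (slot && pci_probe_reset_slot(slot) == 0)
		return pci_reset_slot(slot);	/* avoids spurious hotplug events */

	if (pci_probe_reset_bus(pdev->bus) == 0)
		return pci_reset_bus(pdev->bus);

	return -ENOTTY;
}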
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d1182c4a754e..816c297f170c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -151,7 +151,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
}
extern struct device_attribute pci_dev_attrs[];
-extern struct device_attribute pcibus_dev_attrs[];
+extern const struct attribute_group *pcibus_groups[];
extern struct device_type pci_dev_type;
extern struct bus_attribute pci_bus_attrs[];
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 3b94cfcfa03b..7958e59d6077 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -2,7 +2,7 @@
# PCI Express Port Bus Configuration
#
config PCIEPORTBUS
- bool "PCI Express support"
+ bool "PCI Express Port Bus support"
depends on PCI
help
This automatically enables PCI Express Port Bus support. Users can
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 76ef634caf6f..0bf82a20a0fb 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -352,7 +352,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- aer_do_secondary_bus_reset(dev);
+ pci_reset_bridge_secondary_bus(dev);
dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 90ea3e88041f..84420b7c9456 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -106,7 +106,6 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
extern struct bus_type pcie_port_bus_type;
-void aer_do_secondary_bus_reset(struct pci_dev *dev);
int aer_init(struct pcie_device *dev);
void aer_isr(struct work_struct *work);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 8b68ae59b7b6..85ca36f2136d 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -367,39 +367,6 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
}
/**
- * aer_do_secondary_bus_reset - perform secondary bus reset
- * @dev: pointer to bridge's pci_dev data structure
- *
- * Invoked when performing link reset at Root Port or Downstream Port.
- */
-void aer_do_secondary_bus_reset(struct pci_dev *dev)
-{
- u16 p2p_ctrl;
-
- /* Assert Secondary Bus Reset */
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
- p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /*
- * we should send hot reset message for 2ms to allow it time to
- * propagate to all downstream ports
- */
- msleep(2);
-
- /* De-assert Secondary Bus Reset */
- p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /*
- * System software must wait for at least 100ms from the end
- * of a reset of one or more device before it is permitted
- * to issue Configuration Requests to those devices.
- */
- msleep(200);
-}
-
-/**
* default_reset_link - default reset function
* @dev: pointer to pci_dev data structure
*
@@ -408,7 +375,7 @@ void aer_do_secondary_bus_reset(struct pci_dev *dev)
*/
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
- aer_do_secondary_bus_reset(dev);
+ pci_reset_bridge_secondary_bus(dev);
dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 46ada5c098eb..eeb50bd62402 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -96,7 +96,7 @@ static void release_pcibus_dev(struct device *dev)
static struct class pcibus_class = {
.name = "pci_bus",
.dev_release = &release_pcibus_dev,
- .dev_attrs = pcibus_dev_attrs,
+ .dev_groups = pcibus_groups,
};
static int __init pcibus_class_init(void)
@@ -156,6 +156,8 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
return flags;
}
+#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
/**
* pci_read_base - read a PCI BAR
* @dev: the PCI device
@@ -178,8 +180,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
/* No printks while decoding is disabled! */
if (!dev->mmio_always_on) {
pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
- pci_write_config_word(dev, PCI_COMMAND,
- orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
+ if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+ }
}
res->name = pci_name(dev);
@@ -293,7 +297,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
fail:
res->flags = 0;
out:
- if (!dev->mmio_always_on)
+ if (!dev->mmio_always_on &&
+ (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
if (bar_too_big)
@@ -1491,24 +1496,23 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)
if (!pci_is_pcie(dev))
return 0;
- /* For PCIE hotplug enabled slots not connected directly to a
- * PCI-E root port, there can be problems when hotplugging
- * devices. This is due to the possibility of hotplugging a
- * device into the fabric with a smaller MPS that the devices
- * currently running have configured. Modifying the MPS on the
- * running devices could cause a fatal bus error due to an
- * incoming frame being larger than the newly configured MPS.
- * To work around this, the MPS for the entire fabric must be
- * set to the minimum size. Any devices hotplugged into this
- * fabric will have the minimum MPS set. If the PCI hotplug
- * slot is directly connected to the root port and there are not
- * other devices on the fabric (which seems to be the most
- * common case), then this is not an issue and MPS discovery
- * will occur as normal.
+ /*
+ * We don't have a way to change MPS settings on devices that have
+ * drivers attached. A hot-added device might support only the minimum
+ * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
+ * where devices may be hot-added, we limit the fabric MPS to 128 so
+ * hot-added devices will work correctly.
+ *
+ * However, if we hot-add a device to a slot directly below a Root
+ * Port, it's impossible for there to be other existing devices below
+ * the port. We don't limit the MPS in this case because we can
+ * reconfigure MPS on both the Root Port and the hot-added device,
+ * and there are no other devices involved.
+ *
+ * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
*/
- if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
- (dev->bus->self &&
- pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
+ if (dev->is_hotplug_bridge &&
+ pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
*smpss = 0;
if (*smpss > dev->pcie_mpss)
@@ -1583,6 +1587,22 @@ static void pcie_write_mrrs(struct pci_dev *dev)
"with pci=pcie_bus_safe.\n");
}
+static void pcie_bus_detect_mps(struct pci_dev *dev)
+{
+ struct pci_dev *bridge = dev->bus->self;
+ int mps, p_mps;
+
+ if (!bridge)
+ return;
+
+ mps = pcie_get_mps(dev);
+ p_mps = pcie_get_mps(bridge);
+
+ if (mps != p_mps)
+ dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps, pci_name(bridge), p_mps);
+}
+
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
int mps, orig_mps;
@@ -1590,13 +1610,18 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
if (!pci_is_pcie(dev))
return 0;
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
+ pcie_bus_detect_mps(dev);
+ return 0;
+ }
+
mps = 128 << *(u8 *)data;
orig_mps = pcie_get_mps(dev);
pcie_write_mps(dev, mps);
pcie_write_mrrs(dev);
- dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
+ dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), "
"Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
orig_mps, pcie_get_readrq(dev));
@@ -1607,25 +1632,25 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
* parents then children fashion. If this changes, then this code will not
* work as designed.
*/
-void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
+void pcie_bus_configure_settings(struct pci_bus *bus)
{
u8 smpss;
- if (!pci_is_pcie(bus->self))
+ if (!bus->self)
return;
- if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+ if (!pci_is_pcie(bus->self))
return;
/* FIXME - Peer to peer DMA is possible, though the endpoint would need
- * to be aware to the MPS of the destination. To work around this,
+ * to be aware of the MPS of the destination. To work around this,
* simply force the MPS of the entire system to the smallest possible.
*/
if (pcie_bus_config == PCIE_BUS_PEER2PEER)
smpss = 0;
if (pcie_bus_config == PCIE_BUS_SAFE) {
- smpss = mpss;
+ smpss = bus->self->pcie_mpss;
pcie_find_smpss(bus->self, &smpss);
pci_walk_bus(bus, pcie_find_smpss, &smpss);
@@ -1979,7 +2004,6 @@ unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
max = pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
- pci_enable_bridges(bus);
pci_bus_add_devices(bus);
return max;
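The MPS values handled in the probe.c changes above are carried as the PCIe Device Control encoding, where a stored code of 0 means the 128-byte minimum and each increment doubles the size; the mapping below is my gloss of the "128 << code" arithmetic already used in pcie_bus_configure_set(), not text from the patch.

/* size in bytes = 128 << code; the hotplug-safe path above forces code 0 */
static int mps_code_to_bytes(u8 code)
{
	return 128 << code;	/* 0 -> 128, 1 -> 256, ..., 5 -> 4096 */
}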
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e85d23044ae0..f6c31fabf3af 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3126,9 +3126,6 @@ static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
- int i;
- u16 status;
-
/*
* http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
*
@@ -3140,20 +3137,9 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
if (probe)
return 0;
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dev_err(&dev->dev, "transaction is not cleared; "
- "proceeding with reset anyway\n");
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
-clear:
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
@@ -3208,6 +3194,83 @@ reset_complete:
return 0;
}
+/*
+ * Device-specific reset method for Chelsio T4-based adapters.
+ */
+static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
+{
+ u16 old_command;
+ u16 msix_flags;
+
+ /*
+ * If this isn't a Chelsio T4-based device, return -ENOTTY indicating
+ * that we have no device-specific reset method.
+ */
+ if ((dev->device & 0xf000) != 0x4000)
+ return -ENOTTY;
+
+ /*
+ * If this is the "probe" phase, return 0 indicating that we can
+ * reset this device.
+ */
+ if (probe)
+ return 0;
+
+ /*
+ * T4 can wedge if there are DMAs in flight within the chip and Bus
+ * Master has been disabled. We need to have it on till the Function
+ * Level Reset completes. (BUS_MASTER is disabled in
+ * pci_reset_function()).
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &old_command);
+ pci_write_config_word(dev, PCI_COMMAND,
+ old_command | PCI_COMMAND_MASTER);
+
+ /*
+ * Perform the actual device function reset, saving and restoring
+ * configuration information around the reset.
+ */
+ pci_save_state(dev);
+
+ /*
+ * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts
+ * are disabled when an MSI-X interrupt message needs to be delivered.
+ * So we briefly re-enable MSI-X interrupts for the duration of the
+ * FLR. The pci_restore_state() below will restore the original
+ * MSI-X state.
+ */
+ pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
+ if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
+ pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
+ msix_flags |
+ PCI_MSIX_FLAGS_ENABLE |
+ PCI_MSIX_FLAGS_MASKALL);
+
+ /*
+ * Start of pcie_flr() code sequence. This reset code is a copy of
+ * the guts of pcie_flr() because that's not an exported function.
+ */
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
+
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ msleep(100);
+
+ /*
+ * End of pcie_flr() code sequence.
+ */
+
+ /*
+ * Restore the configuration information (BAR values, etc.) including
+ * the original PCI Configuration Space Command word, and return
+ * success.
+ */
+ pci_restore_state(dev);
+ pci_write_config_word(dev, PCI_COMMAND, old_command);
+ return 0;
+}
+
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
@@ -3221,6 +3284,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
reset_intel_generic_dev },
+ { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ reset_chelsio_generic_dev },
{ 0 }
};
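
The pci_dev_reset_methods[] table above is how device-specific resets are wired up: each handler is first called with probe set, where it only reports whether it can reset the device (0) or does not apply (-ENOTTY), and then with probe clear to actually perform the reset. A hedged sketch of a hypothetical handler following that convention (reset_example_dev, the 0xabcd/0x1234 IDs and the table entry are made up; the FLR sequence mirrors the Intel 82599 and Chelsio handlers above):

#include <linux/delay.h>
#include <linux/pci.h>

/* Hypothetical handler: probe != 0 asks "can we reset?", probe == 0 does it. */
static int reset_example_dev(struct pci_dev *dev, int probe)
{
	if (dev->device != 0x1234)	/* made-up device ID */
		return -ENOTTY;		/* not ours: no device-specific reset */

	if (probe)
		return 0;		/* reset is supported */

	if (!pci_wait_for_pending_transaction(dev))
		dev_err(&dev->dev, "transactions still pending; resetting anyway\n");

	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
	msleep(100);			/* allow the Function Level Reset to complete */
	return 0;
}

/* ...registered with a table entry such as { 0xabcd, 0x1234, reset_example_dev }, */
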
@@ -3295,11 +3360,61 @@ struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
return pci_dev_get(dev);
}
+/*
+ * AMD has indicated that the devices below do not support peer-to-peer
+ * in any system where they are found in the southbridge with an AMD
+ * IOMMU in the system. Multifunction devices that do not support
+ * peer-to-peer between functions can claim to support a subset of ACS.
+ * Such devices effectively enable request redirect (RR) and completion
+ * redirect (CR) since all transactions are redirected to the upstream
+ * root complex.
+ *
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
+ *
+ * 1002:4385 SBx00 SMBus Controller
+ * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
+ * 1002:4383 SBx00 Azalia (Intel HDA)
+ * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
+ * 1002:4384 SBx00 PCI to PCI Bridge
+ * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
+ */
+static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_table_header *header = NULL;
+ acpi_status status;
+
+ /* Targeting multifunction devices on the SB (appears on root bus) */
+ if (!dev->multifunction || !pci_is_root_bus(dev->bus))
+ return -ENODEV;
+
+ /* The IVRS table describes the AMD IOMMU */
+ status = acpi_get_table("IVRS", 0, &header);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /* Filter out flags not applicable to multifunction */
+ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
+
+ return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+#else
+ return -ENODEV;
+#endif
+}
+
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
+ { PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
{ 0 }
};
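
The pci_dev_acs_enabled[] table above feeds pci_acs_enabled(), which callers such as the IOMMU grouping and VFIO code use to decide whether DMA from a device can bypass the IOMMU via peer-to-peer. A rough sketch of such a check, assuming pci_acs_path_enabled() behaves as in this kernel (it walks upstream from the given device toward the root complex, requiring the given ACS flags, or an equivalent quirk like pci_quirk_amd_sb_acs() above, at each step); the helper name and the exact flag set are illustrative:

#include <linux/pci.h>

/* Sketch: is peer-to-peer traffic from this device redirected upstream all
 * the way to the root complex, i.e. can the IOMMU see and police its DMA? */
static bool device_is_isolated(struct pci_dev *dev)
{
	u16 flags = PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	return pci_acs_path_enabled(dev, NULL, flags);
}
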
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 64a7de22d9af..bc26d7990cc3 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -814,14 +814,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
- unsigned long size = 0, size0 = 0, size1 = 0;
+ resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
- resource_size_t min_align, io_align, align;
+ resource_size_t min_align, align;
if (!b_res)
return;
- io_align = min_align = window_alignment(bus, IORESOURCE_IO);
+ min_align = window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -848,9 +848,6 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
}
}
- if (min_align > io_align)
- min_align = io_align;
-
size0 = calculate_iosize(size, min_size, size1,
resource_size(b_res), min_align);
if (children_add_size > add_size)
@@ -874,8 +871,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
- "%pR to %pR add_size %lx\n", b_res,
- &bus->busn_res, size1-size0);
+ "%pR to %pR add_size %llx\n", b_res,
+ &bus->busn_res,
+ (unsigned long long)size1-size0);
}
}
@@ -905,6 +903,8 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
* pbus_size_mem() - size the memory window of a given bus
*
* @bus : the bus
+ * @mask: mask applied to a resource's flags when comparing with @type
+ * @type: resource type of the bridge window to size
* @min_size : the minimum memory window that must be allocated
* @add_size : additional optional memory window
* @realloc_head : track the additional memory window on this list
@@ -1364,39 +1364,21 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
}
}
-static int __init pci_bus_get_depth(struct pci_bus *bus)
+static int pci_bus_get_depth(struct pci_bus *bus)
{
int depth = 0;
- struct pci_dev *dev;
+ struct pci_bus *child_bus;
- list_for_each_entry(dev, &bus->devices, bus_list) {
+ list_for_each_entry(child_bus, &bus->children, node) {
int ret;
- struct pci_bus *b = dev->subordinate;
- if (!b)
- continue;
- ret = pci_bus_get_depth(b);
+ ret = pci_bus_get_depth(child_bus);
if (ret + 1 > depth)
depth = ret + 1;
}
return depth;
}
-static int __init pci_get_max_depth(void)
-{
- int depth = 0;
- struct pci_bus *bus;
-
- list_for_each_entry(bus, &pci_root_buses, node) {
- int ret;
-
- ret = pci_bus_get_depth(bus);
- if (ret > depth)
- depth = ret;
- }
-
- return depth;
-}
/*
* -1: undefined, will auto detect later
@@ -1413,7 +1395,7 @@ enum enable_type {
auto_enabled,
};
-static enum enable_type pci_realloc_enable __initdata = undefined;
+static enum enable_type pci_realloc_enable = undefined;
void __init pci_realloc_get_opt(char *str)
{
if (!strncmp(str, "off", 3))
@@ -1421,45 +1403,64 @@ void __init pci_realloc_get_opt(char *str)
else if (!strncmp(str, "on", 2))
pci_realloc_enable = user_enabled;
}
-static bool __init pci_realloc_enabled(void)
+static bool pci_realloc_enabled(enum enable_type enable)
{
- return pci_realloc_enable >= user_enabled;
+ return enable >= user_enabled;
}
-static void __init pci_realloc_detect(void)
-{
#if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO)
- struct pci_dev *dev = NULL;
-
- if (pci_realloc_enable != undefined)
- return;
-
- for_each_pci_dev(dev) {
- int i;
+static int iov_resources_unassigned(struct pci_dev *dev, void *data)
+{
+ int i;
+ bool *unassigned = data;
- for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) {
- struct resource *r = &dev->resource[i];
+ for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) {
+ struct resource *r = &dev->resource[i];
+ struct pci_bus_region region;
- /* Not assigned, or rejected by kernel ? */
- if (r->flags && !r->start) {
- pci_realloc_enable = auto_enabled;
+ /* Not assigned or rejected by kernel? */
+ if (!r->flags)
+ continue;
- return;
- }
+ pcibios_resource_to_bus(dev, &region, r);
+ if (!region.start) {
+ *unassigned = true;
+ return 1; /* return early from pci_walk_bus() */
}
}
-#endif
+
+ return 0;
}
+static enum enable_type pci_realloc_detect(struct pci_bus *bus,
+ enum enable_type enable_local)
+{
+ bool unassigned = false;
+
+ if (enable_local != undefined)
+ return enable_local;
+
+ pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
+ if (unassigned)
+ return auto_enabled;
+
+ return enable_local;
+}
+#else
+static enum enable_type pci_realloc_detect(struct pci_bus *bus,
+ enum enable_type enable_local)
+{
+ return enable_local;
+}
+#endif
+
/*
* the first try will not touch PCI bridge resources
* the second and later tries will clear small leaf bridge resources
* it stops at the maximum bus depth if no good assignment can be found
*/
-void __init
-pci_assign_unassigned_resources(void)
+void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
{
- struct pci_bus *bus;
LIST_HEAD(realloc_head); /* list of resources that
want additional resources */
struct list_head *add_list = NULL;
@@ -1470,15 +1471,17 @@ pci_assign_unassigned_resources(void)
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
int pci_try_num = 1;
+ enum enable_type enable_local;
/* if reallocation is enabled (by boot option or auto-detection), allow multiple tries */
- pci_realloc_detect();
- if (pci_realloc_enabled()) {
- int max_depth = pci_get_max_depth();
+ enable_local = pci_realloc_detect(bus, pci_realloc_enable);
+ if (pci_realloc_enabled(enable_local)) {
+ int max_depth = pci_bus_get_depth(bus);
pci_try_num = max_depth + 1;
- printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
- max_depth, pci_try_num);
+ dev_printk(KERN_DEBUG, &bus->dev,
+ "max bus depth: %d pci_try_num: %d\n",
+ max_depth, pci_try_num);
}
again:
@@ -1490,32 +1493,30 @@ again:
add_list = &realloc_head;
/* Depth first, calculate sizes and alignments of all
subordinate buses. */
- list_for_each_entry(bus, &pci_root_buses, node)
- __pci_bus_size_bridges(bus, add_list);
+ __pci_bus_size_bridges(bus, add_list);
/* Depth last, allocate resources and update the hardware. */
- list_for_each_entry(bus, &pci_root_buses, node)
- __pci_bus_assign_resources(bus, add_list, &fail_head);
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
if (add_list)
BUG_ON(!list_empty(add_list));
tried_times++;
/* any device complain? */
if (list_empty(&fail_head))
- goto enable_and_dump;
+ goto dump;
if (tried_times >= pci_try_num) {
- if (pci_realloc_enable == undefined)
- printk(KERN_INFO "Some PCI device resources are unassigned, try booting with pci=realloc\n");
- else if (pci_realloc_enable == auto_enabled)
- printk(KERN_INFO "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+ if (enable_local == undefined)
+ dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ else if (enable_local == auto_enabled)
+ dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
free_list(&fail_head);
- goto enable_and_dump;
+ goto dump;
}
- printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
- tried_times + 1);
+ dev_printk(KERN_DEBUG, &bus->dev,
+ "No. %d try to assign unassigned res\n", tried_times + 1);
/* from the third try on, release the whole subtree, not just leaf bridges */
if ((tried_times + 1) > 2)
@@ -1525,12 +1526,11 @@ again:
* Try to release leaf bridge resources that don't fit the resources of
* child devices under that bridge
*/
- list_for_each_entry(fail_res, &fail_head, list) {
- bus = fail_res->dev->bus;
- pci_bus_release_bridge_resources(bus,
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
fail_res->flags & type_mask,
rel_type);
- }
+
/* restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
@@ -1545,14 +1545,17 @@ again:
goto again;
-enable_and_dump:
- /* Depth last, update the hardware. */
- list_for_each_entry(bus, &pci_root_buses, node)
- pci_enable_bridges(bus);
-
+dump:
/* dump the resource on buses */
- list_for_each_entry(bus, &pci_root_buses, node)
- pci_bus_dump_resources(bus);
+ pci_bus_dump_resources(bus);
+}
+
+void __init pci_assign_unassigned_resources(void)
+{
+ struct pci_bus *root_bus;
+
+ list_for_each_entry(root_bus, &pci_root_buses, node)
+ pci_assign_unassigned_root_bus_resources(root_bus);
}
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
@@ -1589,13 +1592,11 @@ again:
* Try to release leaf bridge resources that don't fit the resources of
* child devices under that bridge
*/
- list_for_each_entry(fail_res, &fail_head, list) {
- struct pci_bus *bus = fail_res->dev->bus;
- unsigned long flags = fail_res->flags;
-
- pci_bus_release_bridge_resources(bus, flags & type_mask,
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & type_mask,
whole_subtree);
- }
+
/* restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
@@ -1615,7 +1616,6 @@ enable_all:
if (retval)
dev_err(&bridge->dev, "Error reenabling bridge (%d)\n", retval);
pci_set_master(bridge);
- pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
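
With resource assignment now available per host bridge and pci_enable_bridges() dropped from the scan paths, bringing up one root bus reduces to scan, assign, add, with bridges enabled lazily when a device behind them is enabled. A sketch of that order, mirroring the pci_rescan_bus() hunk earlier in this diff (the wrapper name bring_up_root_bus is illustrative):

#include <linux/pci.h>

/* Illustrative bring-up order for a single root bus. */
static void bring_up_root_bus(struct pci_bus *bus)
{
	pci_scan_child_bus(bus);			/* enumerate everything below the root */
	pci_assign_unassigned_root_bus_resources(bus);	/* per-host-bridge, not all of pci_root_buses */
	/* no pci_enable_bridges() here: bridges are now enabled on demand */
	pci_bus_add_devices(bus);			/* register devices with the driver core */
}
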
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 9d3ac998fc1f..b2a98cdbd0d2 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -91,7 +91,6 @@ int __ref cb_alloc(struct pcmcia_socket *s)
if (s->tune_bridge)
s->tune_bridge(s, bus);
- pci_enable_bridges(bus);
pci_bus_add_devices(bus);
return 0;