Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile | 17
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r--  arch/powerpc/kernel/btext.c | 2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_ppc970.S | 16
-rw-r--r--  arch/powerpc/kernel/cputable.c | 143
-rw-r--r--  arch/powerpc/kernel/crash.c | 63
-rw-r--r--  arch/powerpc/kernel/dma_64.c | 249
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 51
-rw-r--r--  arch/powerpc/kernel/head_32.S | 7
-rw-r--r--  arch/powerpc/kernel/head_64.S | 207
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 9
-rw-r--r--  arch/powerpc/kernel/idle.c | 7
-rw-r--r--  arch/powerpc/kernel/idle_power4.S | 8
-rw-r--r--  arch/powerpc/kernel/io.c | 105
-rw-r--r--  arch/powerpc/kernel/iomap.c | 2
-rw-r--r--  arch/powerpc/kernel/iommu.c | 83
-rw-r--r--  arch/powerpc/kernel/irq.c | 80
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 2
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 74
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 124
-rw-r--r--  arch/powerpc/kernel/module_32.c | 50
-rw-r--r--  arch/powerpc/kernel/module_64.c | 60
-rw-r--r--  arch/powerpc/kernel/of_device.c | 177
-rw-r--r--  arch/powerpc/kernel/of_platform.c | 489
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 247
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 122
-rw-r--r--  arch/powerpc/kernel/pci_direct_iommu.c | 98
-rw-r--r--  arch/powerpc/kernel/pci_iommu.c | 164
-rw-r--r--  arch/powerpc/kernel/perfmon_fsl_booke.c | 221
-rw-r--r--  arch/powerpc/kernel/pmc.c | 2
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 6
-rw-r--r--  arch/powerpc/kernel/proc_ppc64.c | 6
-rw-r--r--  arch/powerpc/kernel/process.c | 10
-rw-r--r--  arch/powerpc/kernel/prom.c | 174
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 20
-rw-r--r--  arch/powerpc/kernel/prom_parse.c | 290
-rw-r--r--  arch/powerpc/kernel/rtas.c | 34
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 67
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 35
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 12
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 27
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 2
-rw-r--r--  arch/powerpc/kernel/smp-tbsync.c | 5
-rw-r--r--  arch/powerpc/kernel/smp.c | 1
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c | 1
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 100
-rw-r--r--  arch/powerpc/kernel/time.c | 112
-rw-r--r--  arch/powerpc/kernel/traps.c | 85
-rw-r--r--  arch/powerpc/kernel/vdso.c | 45
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S | 12
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S | 6
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S | 10
-rw-r--r--  arch/powerpc/kernel/vio.c | 98
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 15
54 files changed, 2119 insertions, 1936 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 8b133afbdc2..d2ded19e406 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -17,11 +17,11 @@ obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o cpu_setup_ppc970.o \
- firmware.o sysfs.o
+ firmware.o sysfs.o nvram_64.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
-obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o
+obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
procfs-$(CONFIG_PPC64) := proc_ppc64.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64) := rtas_pci.o
@@ -32,13 +32,11 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
-obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o
obj32-$(CONFIG_MODULES) += module_32.o
-obj-$(CONFIG_E500) += perfmon_fsl_booke.o
ifeq ($(CONFIG_PPC_MERGE),y)
@@ -60,11 +58,11 @@ obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
+
module-$(CONFIG_PPC64) += module_64.o
obj-$(CONFIG_MODULES) += $(module-y)
-pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \
- pci_direct_iommu.o iomap.o
+pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o
pci32-$(CONFIG_PPC32) := pci_32.o
obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y)
kexec-$(CONFIG_PPC64) := machine_kexec_64.o
@@ -73,8 +71,13 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o $(kexec-y)
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
+ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
+obj-y += iomap.o
+endif
+
ifeq ($(CONFIG_PPC_ISERIES),y)
-$(obj)/head_64.o: $(obj)/lparmap.s
+extra-y += lparmap.s
+$(obj)/head_64.o: $(obj)/lparmap.s
AFLAGS_head_64.o += -I$(obj)
endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d06f378597b..e96521530d2 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -118,7 +118,8 @@ int main(void)
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
- DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
+ DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
+ DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 995fcef156f..93f21aaf7c8 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -182,7 +182,7 @@ int btext_initialize(struct device_node *np)
prop = get_property(np, "linux,bootx-linebytes", NULL);
if (prop == NULL)
prop = get_property(np, "linebytes", NULL);
- if (prop)
+ if (prop && *prop != 0xffffffffu)
pitch = *prop;
if (pitch == 1)
pitch = 0x1000;
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 652594891d5..bf118c38575 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -83,6 +83,22 @@ _GLOBAL(__setup_cpu_ppc970)
rldimi r0,r11,52,8 /* set NAP and DPM */
li r11,0
rldimi r0,r11,32,31 /* clear EN_ATTN */
+ b load_hids /* Jump to shared code */
+
+
+_GLOBAL(__setup_cpu_ppc970MP)
+ /* Do nothing if not running in HV mode */
+ mfmsr r0
+ rldicl. r0,r0,4,63
+ beqlr
+
+ mfspr r0,SPRN_HID0
+ li r11,0x15 /* clear DOZE and SLEEP */
+ rldimi r0,r11,52,6 /* set DEEPNAP, NAP and DPM */
+ li r11,0
+ rldimi r0,r11,32,31 /* clear EN_ATTN */
+
+load_hids:
mtspr SPRN_HID0,r0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 47a613cdd77..b742013bb9d 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -18,6 +18,7 @@
#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
+#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
struct cpu_spec* cur_cpu_spec = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
@@ -41,6 +42,7 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_ppc970(void);
#endif /* CONFIG_PPC64 */
@@ -73,7 +75,7 @@ extern void __restore_cpu_ppc970(void);
#define PPC_FEATURE_SPE_COMP 0
#endif
-struct cpu_spec cpu_specs[] = {
+static struct cpu_spec cpu_specs[] = {
#ifdef CONFIG_PPC64
{ /* Power3 */
.pvr_mask = 0xffff0000,
@@ -221,8 +223,23 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_ppc970,
+ .cpu_setup = __setup_cpu_ppc970MP,
.cpu_restore = __restore_cpu_ppc970,
+ .oprofile_cpu_type = "ppc64/970MP",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "ppc970",
+ },
+ { /* PPC970GX */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00450000,
+ .cpu_name = "PPC970GX",
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+ .cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
@@ -260,15 +277,50 @@ struct cpu_spec cpu_specs[] = {
.oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5+",
},
+ { /* POWER6 in P5+ mode; 2.04-compliant processor */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000001,
+ .cpu_name = "POWER5+",
+ .cpu_features = CPU_FTRS_POWER5,
+ .cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .oprofile_cpu_type = "ppc64/power6",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
+ .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
+ .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
+ POWER6_MMCRA_OTHER,
+ .platform = "power5+",
+ },
{ /* Power6 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003e0000,
- .cpu_name = "POWER6",
+ .cpu_name = "POWER6 (raw)",
+ .cpu_features = CPU_FTRS_POWER6,
+ .cpu_user_features = COMMON_USER_POWER6 |
+ PPC_FEATURE_POWER6_EXT,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .oprofile_cpu_type = "ppc64/power6",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
+ .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
+ .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
+ POWER6_MMCRA_OTHER,
+ .platform = "power6x",
+ },
+ { /* 2.05-compliant processor, i.e. Power6 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000002,
+ .cpu_name = "POWER6 (architected)",
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
- .num_pmcs = 8,
+ .num_pmcs = 6,
.oprofile_cpu_type = "ppc64/power6",
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
@@ -287,6 +339,9 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_SMT,
.icache_bsize = 128,
.dcache_bsize = 128,
+ .num_pmcs = 4,
+ .oprofile_cpu_type = "ppc64/cell-be",
+ .oprofile_type = PPC_OPROFILE_CELL,
.platform = "ppc-cell-be",
},
{ /* PA Semi PA6T */
@@ -778,13 +833,24 @@ struct cpu_spec cpu_specs[] = {
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00840000,
.cpu_name = "e300c2",
- .cpu_features = CPU_FTRS_E300,
+ .cpu_features = CPU_FTRS_E300C2,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
.platform = "ppc603",
},
+ { /* e300c3 on 83xx */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00850000,
+ .cpu_name = "e300c3",
+ .cpu_features = CPU_FTRS_E300,
+ .cpu_user_features = COMMON_USER,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .platform = "ppc603",
+ },
{ /* default match, we assume split I/D cache & TB (non-601)... */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
@@ -1070,8 +1136,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_mask = 0xff000fff,
.pvr_value = 0x53000890,
.cpu_name = "440SPe Rev. A",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
+ .cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
@@ -1152,3 +1217,67 @@ struct cpu_spec cpu_specs[] = {
#endif /* !CLASSIC_PPC */
#endif /* CONFIG_PPC32 */
};
+
+struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr)
+{
+ struct cpu_spec *s = cpu_specs;
+ struct cpu_spec **cur = &cur_cpu_spec;
+ int i;
+
+ s = PTRRELOC(s);
+ cur = PTRRELOC(cur);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
+ if ((pvr & s->pvr_mask) == s->pvr_value) {
+ *cur = cpu_specs + i;
+#ifdef CONFIG_PPC64
+ /* ppc64 expects identify_cpu to also call setup_cpu
+ * for that processor. I will consolidate that at a
+ * later time, for now, just use our friend #ifdef.
+ * we also don't need to PTRRELOC the function pointer
+ * on ppc64 as we are running at 0 in real mode.
+ */
+ if (s->cpu_setup) {
+ s->cpu_setup(offset, s);
+ }
+#endif /* CONFIG_PPC64 */
+ return s;
+ }
+ BUG();
+ return NULL;
+}
+
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+ struct fixup_entry {
+ unsigned long mask;
+ unsigned long value;
+ long start_off;
+ long end_off;
+ } *fcur, *fend;
+
+ fcur = fixup_start;
+ fend = fixup_end;
+
+ for (; fcur < fend; fcur++) {
+ unsigned int *pstart, *pend, *p;
+
+ if ((value & fcur->mask) == fcur->value)
+ continue;
+
+ /* These PTRRELOCs will disappear once the new scheme for
+ * modules and vdso is implemented
+ */
+ pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+ pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
+
+ for (p = pstart; p < pend; p++) {
+ *p = 0x60000000u;
+ asm volatile ("dcbst 0, %0" : : "r" (p));
+ }
+ asm volatile ("sync" : : : "memory");
+ for (p = pstart; p < pend; p++)
+ asm volatile ("icbi 0,%0" : : "r" (p));
+ asm volatile ("sync; isync" : : : "memory");
+ }
+}
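
Editor's sketch (not part of the patch): the new do_feature_fixups() walks a table of {mask, value, start_off, end_off} entries and nops out any instruction range whose feature bit is clear. A hedged guess at the caller side is shown below; the linker-provided section boundary symbols and the wrapper name are assumptions, not taken from this diff.

/*
 * Hedged illustration only.  The __start/__stop symbols are assumed to be
 * emitted by the vmlinux linker script around the feature-fixup table.
 */
extern u8 __start___ftr_fixup[], __stop___ftr_fixup[];	/* assumed names */

static void example_apply_cpu_feature_fixups(void)
{
	/* Nop out every feature section whose CPU feature bit is not set */
	do_feature_fixups(cur_cpu_spec->cpu_features,
			  __start___ftr_fixup, __stop___ftr_fixup);
}
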
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 1af41f7616d..d3f2080d2ee 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -46,61 +46,6 @@ int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
cpumask_t cpus_in_sr = CPU_MASK_NONE;
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
- size_t data_len)
-{
- struct elf_note note;
-
- note.n_namesz = strlen(name) + 1;
- note.n_descsz = data_len;
- note.n_type = type;
- memcpy(buf, &note, sizeof(note));
- buf += (sizeof(note) +3)/4;
- memcpy(buf, name, note.n_namesz);
- buf += (note.n_namesz + 3)/4;
- memcpy(buf, data, note.n_descsz);
- buf += (note.n_descsz + 3)/4;
-
- return buf;
-}
-
-static void final_note(u32 *buf)
-{
- struct elf_note note;
-
- note.n_namesz = 0;
- note.n_descsz = 0;
- note.n_type = 0;
- memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
- struct elf_prstatus prstatus;
- u32 *buf;
-
- if ((cpu < 0) || (cpu >= NR_CPUS))
- return;
-
- /* Using ELF notes here is opportunistic.
- * I need a well defined structure format
- * for the data I pass, and I need tags
- * on the data to indicate what information I have
- * squirrelled away. ELF notes happen to provide
- * all of that that no need to invent something new.
- */
- buf = (u32*)per_cpu_ptr(crash_notes, cpu);
- if (!buf)
- return;
-
- memset(&prstatus, 0, sizeof(prstatus));
- prstatus.pr_pid = current->pid;
- elf_core_copy_regs(&prstatus.pr_reg, regs);
- buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
- sizeof(prstatus));
- final_note(buf);
-}
-
#ifdef CONFIG_SMP
static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
@@ -111,9 +56,9 @@ void crash_ipi_callback(struct pt_regs *regs)
if (!cpu_online(cpu))
return;
- local_irq_disable();
+ hard_irq_disable();
if (!cpu_isset(cpu, cpus_in_crash))
- crash_save_this_cpu(regs, cpu);
+ crash_save_cpu(regs, cpu);
cpu_set(cpu, cpus_in_crash);
/*
@@ -289,7 +234,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
* an SMP system.
* The kernel is broken so disable interrupts.
*/
- local_irq_disable();
+ hard_irq_disable();
for_each_irq(irq) {
struct irq_desc *desc = irq_desc + irq;
@@ -306,7 +251,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
* such that another IPI will not be sent.
*/
crashing_cpu = smp_processor_id();
- crash_save_this_cpu(regs, crashing_cpu);
+ crash_save_cpu(regs, crashing_cpu);
crash_kexec_prepare_cpus(crashing_cpu);
cpu_set(crashing_cpu, cpus_in_crash);
if (ppc_md.kexec_cpu_down)
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 6c168f6ea14..7b0e754383c 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -1,151 +1,194 @@
/*
- * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
*
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses and busses using the iommu infrastructure
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
-/* Include the busses we support */
-#include <linux/pci.h>
-#include <asm/vio.h>
-#include <asm/ibmebus.h>
-#include <asm/scatterlist.h>
#include <asm/bug.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
-static struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return &pci_dma_ops;
-#endif
-#ifdef CONFIG_IBMVIO
- if (dev->bus == &vio_bus_type)
- return &vio_dma_ops;
-#endif
-#ifdef CONFIG_IBMEBUS
- if (dev->bus == &ibmebus_bus_type)
- return &ibmebus_dma_ops;
-#endif
- return NULL;
-}
+/*
+ * Generic iommu implementation
+ */
-int dma_supported(struct device *dev, u64 mask)
+static inline unsigned long device_to_mask(struct device *dev)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ if (dev->dma_mask && *dev->dma_mask)
+ return *dev->dma_mask;
+ /* Assume devices without mask can take 32 bit addresses */
+ return 0xfffffffful;
+}
- BUG_ON(!dma_ops);
- return dma_ops->dma_supported(dev, mask);
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
+ device_to_mask(dev), flag,
+ dev->archdata.numa_node);
}
-EXPORT_SYMBOL(dma_supported);
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static void dma_iommu_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-#ifdef CONFIG_IBMVIO
- if (dev->bus == &vio_bus_type)
- return -EIO;
-#endif /* CONFIG_IBMVIO */
-#ifdef CONFIG_IBMEBUS
- if (dev->bus == &ibmebus_bus_type)
- return -EIO;
-#endif
- BUG();
- return 0;
+ iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
}
-EXPORT_SYMBOL(dma_set_mask);
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+/* Creates TCEs for a user provided buffer. The user buffer must be
+ * contiguous real kernel storage (not vmalloc). The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer. The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
+ size_t size,
+ enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+ return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+ device_to_mask(dev), direction);
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+
+static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
+}
- BUG_ON(!dma_ops);
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction)
+{
+ return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
+ device_to_mask(dev), direction);
}
-EXPORT_SYMBOL(dma_free_coherent);
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction direction)
+static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- return dma_ops->map_single(dev, cpu_addr, size, direction);
+ iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
}
-EXPORT_SYMBOL(dma_map_single);
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
+/* We support DMA to/from any memory page via the iommu */
+static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- dma_ops->unmap_single(dev, dma_addr, size, direction);
+ struct iommu_table *tbl = dev->archdata.dma_data;
+
+ if (!tbl || tbl->it_offset > mask) {
+ printk(KERN_INFO
+ "Warning: IOMMU offset too big for device mask\n");
+ if (tbl)
+ printk(KERN_INFO
+ "mask: 0x%08lx, table offset: 0x%08lx\n",
+ mask, tbl->it_offset);
+ else
+ printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+ mask);
+ return 0;
+ } else
+ return 1;
}
-EXPORT_SYMBOL(dma_unmap_single);
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+struct dma_mapping_ops dma_iommu_ops = {
+ .alloc_coherent = dma_iommu_alloc_coherent,
+ .free_coherent = dma_iommu_free_coherent,
+ .map_single = dma_iommu_map_single,
+ .unmap_single = dma_iommu_unmap_single,
+ .map_sg = dma_iommu_map_sg,
+ .unmap_sg = dma_iommu_unmap_sg,
+ .dma_supported = dma_iommu_dma_supported,
+};
+EXPORT_SYMBOL(dma_iommu_ops);
- BUG_ON(!dma_ops);
+/*
+ * Generic direct DMA implementation
+ *
+ * This implementation supports a global offset that can be applied if
+ * the address at which memory is visible to devices is not 0.
+ */
+unsigned long dma_direct_offset;
- return dma_ops->map_single(dev, page_address(page) + offset, size,
- direction);
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ struct page *page;
+ void *ret;
+ int node = dev->archdata.numa_node;
+
+ /* TODO: Maybe use the numa node here too ? */
+ page = alloc_pages_node(node, flag, get_order(size));
+ if (page == NULL)
+ return NULL;
+ ret = page_address(page);
+ memset(ret, 0, size);
+ *dma_handle = virt_to_abs(ret) | dma_direct_offset;
+
+ return ret;
}
-EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ free_pages((unsigned long)vaddr, get_order(size));
+}
- BUG_ON(!dma_ops);
+static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return virt_to_abs(ptr) | dma_direct_offset;
+}
- dma_ops->unmap_single(dev, dma_address, size, direction);
+static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
}
-EXPORT_SYMBOL(dma_unmap_page);
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ int i;
- BUG_ON(!dma_ops);
+ for (i = 0; i < nents; i++, sg++) {
+ sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
+ dma_direct_offset;
+ sg->dma_length = sg->length;
+ }
- return dma_ops->map_sg(dev, sg, nents, direction);
+ return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
+}
- dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+ /* Could be improved to check for memory though it better be
+ * done via some global so platforms can set the limit in case
+ * they have limited DMA windows
+ */
+ return mask >= DMA_32BIT_MASK;
}
-EXPORT_SYMBOL(dma_unmap_sg);
+
+struct dma_mapping_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_single = dma_direct_map_single,
+ .unmap_single = dma_direct_unmap_single,
+ .map_sg = dma_direct_map_sg,
+ .unmap_sg = dma_direct_unmap_sg,
+ .dma_supported = dma_direct_dma_supported,
+};
+EXPORT_SYMBOL(dma_direct_ops);
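
Editor's sketch (not part of the patch): with the old bus-comparison get_dma_ops() removed above, callers are expected to dispatch through per-device operations instead. A minimal sketch of that dispatch is shown below; the archdata.dma_ops field is consistent with the ibmebus change later in this diff, but the wrapper itself is illustrative and hypothetical.

/*
 * Hedged illustration only: route a mapping request through the ops
 * installed by bus/platform code (e.g. dma_direct_ops or dma_iommu_ops).
 */
static inline struct dma_mapping_ops *example_get_dma_ops(struct device *dev)
{
	return dev->archdata.dma_ops;	/* set by the bus or platform code */
}

static inline dma_addr_t example_dma_map_single(struct device *dev, void *ptr,
						size_t size,
						enum dma_data_direction dir)
{
	struct dma_mapping_ops *ops = example_get_dma_ops(dev);

	BUG_ON(ops == NULL || ops->map_single == NULL);
	return ops->map_single(dev, ptr, size, dir);
}
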
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 748e74fcf54..1a3d4de197d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -87,15 +87,19 @@ system_call_common:
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r9) /* "regshere" marker */
+ li r10,1
+ stb r10,PACASOFTIRQEN(r13)
+ stb r10,PACAHARDIRQEN(r13)
+ std r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
/* Hack for handling interrupts when soft-enabling on iSeries */
cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
andi. r10,r12,MSR_PR /* from kernel */
crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
- beq hardware_interrupt_entry
- lbz r10,PACAPROCENABLED(r13)
- std r10,SOFTE(r1)
+ bne 2f
+ b hardware_interrupt_entry
+2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
mfmsr r11
@@ -460,9 +464,9 @@ _GLOBAL(ret_from_except_lite)
#endif
restore:
+ ld r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
- ld r5,SOFTE(r1)
cmpdi 0,r5,0
beq 4f
/* Check for pending interrupts (iSeries) */
@@ -472,21 +476,25 @@ BEGIN_FW_FTR_SECTION
beq+ 4f /* skip do_IRQ if no interrupts */
li r3,0
- stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
+ stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
ori r10,r10,MSR_EE
mtmsrd r10 /* hard-enable again */
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_IRQ
b .ret_from_except_lite /* loop back and handle more */
-
-4: stb r5,PACAPROCENABLED(r13)
+4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
+ stb r5,PACASOFTIRQEN(r13)
ld r3,_MSR(r1)
andi. r0,r3,MSR_RI
beq- unrecov_restore
+ /* extract EE bit and use it to restore paca->hard_enabled */
+ rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
+ stb r4,PACAHARDIRQEN(r13)
+
andi. r0,r3,MSR_PR
/*
@@ -538,25 +546,15 @@ do_work:
/* Check that preempt_count() == 0 and interrupts are enabled */
lwz r8,TI_PREEMPT(r9)
cmpwi cr1,r8,0
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
ld r0,SOFTE(r1)
cmpdi r0,0
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
- andi. r0,r3,MSR_EE
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
crandc eq,cr1*4+eq,eq
bne restore
/* here we are preempting the current task */
1:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
li r0,1
- stb r0,PACAPROCENABLED(r13)
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+ stb r0,PACASOFTIRQEN(r13)
+ stb r0,PACAHARDIRQEN(r13)
ori r10,r10,MSR_EE
mtmsrd r10,1 /* reenable interrupts */
bl .preempt_schedule
@@ -639,8 +637,7 @@ _GLOBAL(enter_rtas)
/* There is no way it is acceptable to get here with interrupts enabled,
* check it with the asm equivalent of WARN_ON
*/
- mfmsr r6
- andi. r0,r6,MSR_EE
+ lbz r0,PACASOFTIRQEN(r13)
1: tdnei r0,0
.section __bug_table,"a"
.llong 1b,__LINE__ + 0x1000000, 1f, 2f
@@ -649,7 +646,13 @@ _GLOBAL(enter_rtas)
1: .asciz __FILE__
2: .asciz "enter_rtas"
.previous
-
+
+ /* Hard-disable interrupts */
+ mfmsr r6
+ rldicl r7,r6,48,1
+ rotldi r7,r7,16
+ mtmsrd r7,1
+
/* Unfortunately, the stack pointer and the MSR are also clobbered,
* so they are saved in the PACA which allows us to restore
* our original state after RTAS returns.
@@ -735,8 +738,6 @@ _STATIC(rtas_restore_regs)
#endif /* CONFIG_PPC_RTAS */
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
_GLOBAL(enter_prom)
mflr r0
std r0,16(r1)
@@ -821,5 +822,3 @@ _GLOBAL(enter_prom)
ld r0,16(r1)
mtlr r0
blr
-
-#endif /* CONFIG_PPC_MULTIPLATFORM */
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index d88e182e40b..9417cf5b4b7 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -437,6 +437,13 @@ Alignment:
/* Floating-point unavailable */
. = 0x800
FPUnavailable:
+BEGIN_FTR_SECTION
+/*
+ * Certain Freescale cores don't have a FPU and treat fp instructions
+ * as a FP Unavailable exception. Redirect to illegal/emulation handling.
+ */
+ b ProgramCheck
+END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
EXCEPTION_PROLOG
bne load_up_fpu /* if from user, just load it up */
addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 645c7f10fb2..71b1fe58e9e 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -35,9 +35,7 @@
#include <asm/thread_info.h>
#include <asm/firmware.h>
-#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
-#endif
/*
* We layout physical memory as follows:
@@ -74,13 +72,11 @@
.text
.globl _stext
_stext:
-#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
/* NOP this out unconditionally */
BEGIN_FTR_SECTION
b .__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
-#endif /* CONFIG_PPC_MULTIPLATFORM */
/* Catch branch to 0 in real mode */
trap
@@ -308,7 +304,9 @@ exception_marker:
std r9,_LINK(r1); \
mfctr r10; /* save CTR in stackframe */ \
std r10,_CTR(r1); \
+ lbz r10,PACASOFTIRQEN(r13); \
mfspr r11,SPRN_XER; /* save XER in stackframe */ \
+ std r10,SOFTE(r1); \
std r11,_XER(r1); \
li r9,(n)+1; \
std r9,_TRAP(r1); /* set trap number */ \
@@ -343,6 +341,34 @@ label##_pSeries: \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+#define MASKABLE_EXCEPTION_PSERIES(n, label) \
+ . = n; \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ HMT_MEDIUM; \
+ mtspr SPRN_SPRG1,r13; /* save r13 */ \
+ mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
+ std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
+ std r10,PACA_EXGEN+EX_R10(r13); \
+ lbz r10,PACASOFTIRQEN(r13); \
+ mfcr r9; \
+ cmpwi r10,0; \
+ beq masked_interrupt; \
+ mfspr r10,SPRN_SPRG1; \
+ std r10,PACA_EXGEN+EX_R13(r13); \
+ std r11,PACA_EXGEN+EX_R11(r13); \
+ std r12,PACA_EXGEN+EX_R12(r13); \
+ clrrdi r12,r13,32; /* get high part of &label */ \
+ mfmsr r10; \
+ mfspr r11,SPRN_SRR0; /* save SRR0 */ \
+ LOAD_HANDLER(r12,label##_common) \
+ ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
+ mtspr SPRN_SRR0,r12; \
+ mfspr r12,SPRN_SRR1; /* and SRR1 */ \
+ mtspr SPRN_SRR1,r10; \
+ rfid; \
+ b . /* prevent speculative execution */
+
#define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_iSeries; \
label##_iSeries: \
@@ -358,40 +384,32 @@ label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
- lbz r10,PACAPROCENABLED(r13); \
+ lbz r10,PACASOFTIRQEN(r13); \
cmpwi 0,r10,0; \
beq- label##_iSeries_masked; \
EXCEPTION_PROLOG_ISERIES_2; \
b label##_common; \
-#ifdef DO_SOFT_DISABLE
+#ifdef CONFIG_PPC_ISERIES
#define DISABLE_INTS \
-BEGIN_FW_FTR_SECTION; \
- lbz r10,PACAPROCENABLED(r13); \
li r11,0; \
- std r10,SOFTE(r1); \
+ stb r11,PACASOFTIRQEN(r13); \
+BEGIN_FW_FTR_SECTION; \
+ stb r11,PACAHARDIRQEN(r13); \
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
+BEGIN_FW_FTR_SECTION; \
mfmsr r10; \
- stb r11,PACAPROCENABLED(r13); \
ori r10,r10,MSR_EE; \
mtmsrd r10,1; \
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#define ENABLE_INTS \
-BEGIN_FW_FTR_SECTION; \
- lbz r10,PACAPROCENABLED(r13); \
- mfmsr r11; \
- std r10,SOFTE(r1); \
- ori r11,r11,MSR_EE; \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES); \
-BEGIN_FW_FTR_SECTION; \
- ld r12,_MSR(r1); \
- mfmsr r11; \
- rlwimi r11,r12,0,MSR_EE; \
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
- mtmsrd r11,1
+#else
+#define DISABLE_INTS \
+ li r11,0; \
+ stb r11,PACASOFTIRQEN(r13); \
+ stb r11,PACAHARDIRQEN(r13)
-#else /* hard enable/disable interrupts */
-#define DISABLE_INTS
+#endif /* CONFIG_PPC_ISERIES */
#define ENABLE_INTS \
ld r12,_MSR(r1); \
@@ -399,8 +417,6 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
rlwimi r11,r12,0,MSR_EE; \
mtmsrd r11,1
-#endif
-
#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
.align 7; \
.globl label##_common; \
@@ -487,7 +503,7 @@ BEGIN_FTR_SECTION
rlwimi r13,r12,16,0x20
mfcr r12
cmpwi r13,0x2c
- beq .do_stab_bolted_pSeries
+ beq do_stab_bolted_pSeries
mtcrf 0x80,r12
mfspr r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
@@ -541,11 +557,11 @@ instruction_access_slb_pSeries:
mfspr r12,SPRN_SRR1 /* and SRR1 */
b .slb_miss_realmode /* Rel. branch works in real mode */
- STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+ MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
STD_EXCEPTION_PSERIES(0x600, alignment)
STD_EXCEPTION_PSERIES(0x700, program_check)
STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
- STD_EXCEPTION_PSERIES(0x900, decrementer)
+ MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
STD_EXCEPTION_PSERIES(0xa00, trap_0a)
STD_EXCEPTION_PSERIES(0xb00, trap_0b)
@@ -597,10 +613,27 @@ system_call_pSeries:
/*** pSeries interrupt support ***/
/* moved from 0xf00 */
- STD_EXCEPTION_PSERIES(., performance_monitor)
+ MASKABLE_EXCEPTION_PSERIES(., performance_monitor)
+
+/*
+ * An interrupt came in while soft-disabled; clear EE in SRR1,
+ * clear paca->hard_enabled and return.
+ */
+masked_interrupt:
+ stb r10,PACAHARDIRQEN(r13)
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ mfspr r10,SPRN_SRR1
+ rldicl r10,r10,48,1 /* clear MSR_EE */
+ rotldi r10,r10,16
+ mtspr SPRN_SRR1,r10
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ mfspr r13,SPRN_SPRG1
+ rfid
+ b .
.align 7
-_GLOBAL(do_stab_bolted_pSeries)
+do_stab_bolted_pSeries:
mtcrf 0x80,r12
mfspr r12,SPRN_SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
@@ -792,7 +825,7 @@ system_reset_iSeries:
cmpwi 0,r23,0
beq iSeries_secondary_smp_loop /* Loop until told to go */
- bne .__secondary_start /* Loop until told to go */
+ bne __secondary_start /* Loop until told to go */
iSeries_secondary_smp_loop:
/* Let the Hypervisor know we are alive */
/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
@@ -813,7 +846,6 @@ iSeries_secondary_smp_loop:
b 1b /* If SMP not configured, secondaries
* loop forever */
- .globl decrementer_iSeries_masked
decrementer_iSeries_masked:
/* We may not have a valid TOC pointer in here. */
li r11,1
@@ -824,7 +856,6 @@ decrementer_iSeries_masked:
mtspr SPRN_DEC,r12
/* fall through */
- .globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
mtcrf 0x80,r9 /* Restore regs */
ld r12,PACALPPACAPTR(r13)
@@ -926,10 +957,18 @@ bad_stack:
* any task or sent any task a signal, you should use
* ret_from_except or ret_from_except_lite instead of this.
*/
+fast_exc_return_irq: /* restores irq state too */
+ ld r3,SOFTE(r1)
+ ld r12,_MSR(r1)
+ stb r3,PACASOFTIRQEN(r13) /* restore paca->soft_enabled */
+ rldicl r4,r12,49,63 /* get MSR_EE to LSB */
+ stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
+ b 1f
+
.globl fast_exception_return
fast_exception_return:
ld r12,_MSR(r1)
- ld r11,_NIP(r1)
+1: ld r11,_NIP(r1)
andi. r3,r12,MSR_RI /* check if RI is set */
beq- unrecov_fer
@@ -952,7 +991,8 @@ fast_exception_return:
REST_8GPRS(2, r1)
mfmsr r10
- clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
+ rldicl r10,r10,48,1 /* clear EE */
+ rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
mtmsrd r10,1
mtspr SPRN_SRR1,r12
@@ -1046,7 +1086,7 @@ slb_miss_fault:
li r5,0
std r4,_DAR(r1)
std r5,_DSISR(r1)
- b .handle_page_fault
+ b handle_page_fault
unrecov_user_slb:
EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
@@ -1174,12 +1214,13 @@ program_check_common:
.globl fp_unavailable_common
fp_unavailable_common:
EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
- bne .load_up_fpu /* if from user, just load it up */
+ bne 1f /* if from user, just load it up */
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .kernel_fp_unavailable_exception
BUG_OPCODE
+1: b .load_up_fpu
.align 7
.globl altivec_unavailable_common
@@ -1279,10 +1320,10 @@ _GLOBAL(do_hash_page)
std r4,_DSISR(r1)
andis. r0,r4,0xa450 /* weird error? */
- bne- .handle_page_fault /* if not, try to insert a HPTE */
+ bne- handle_page_fault /* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
andis. r0,r4,0x0020 /* Is it a segment table fault? */
- bne- .do_ste_alloc /* If so handle it */
+ bne- do_ste_alloc /* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
/*
@@ -1324,7 +1365,17 @@ BEGIN_FW_FTR_SECTION
* because ret_from_except_lite will check for and handle pending
* interrupts if necessary.
*/
- beq .ret_from_except_lite
+ beq 13f
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif
+BEGIN_FW_FTR_SECTION
+ /*
+ * Here we have interrupts hard-disabled, so it is sufficient
+ * to restore paca->{soft,hard}_enable and get out.
+ */
+ beq fast_exc_return_irq /* Return from exception on success */
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
/* For a hash failure, we don't bother re-enabling interrupts */
ble- 12f
@@ -1336,24 +1387,16 @@ BEGIN_FW_FTR_SECTION
ld r3,SOFTE(r1)
bl .local_irq_restore
b 11f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
- beq fast_exception_return /* Return from exception on success */
- ble- 12f /* Failure return from hash_page */
-
- /* fall through */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
/* Here we have a page fault that hash_page can't handle. */
-_GLOBAL(handle_page_fault)
+handle_page_fault:
ENABLE_INTS
11: ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_page_fault
cmpdi r3,0
- beq+ .ret_from_except_lite
+ beq+ 13f
bl .save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1361,6 +1404,8 @@ _GLOBAL(handle_page_fault)
bl .bad_page_fault
b .ret_from_except
+13: b .ret_from_except_lite
+
/* We have a page fault that hash_page could handle but HV refused
* the PTE insertion
*/
@@ -1371,11 +1416,11 @@ _GLOBAL(handle_page_fault)
b .ret_from_except
/* here we have a segment miss */
-_GLOBAL(do_ste_alloc)
+do_ste_alloc:
bl .ste_allocate /* try to insert stab entry */
cmpdi r3,0
- beq+ fast_exception_return
- b .handle_page_fault
+ bne- handle_page_fault
+ b fast_exception_return
/*
* r13 points to the PACA, r9 contains the saved CR,
@@ -1557,7 +1602,7 @@ _GLOBAL(generic_secondary_smp_init)
ld r1,PACAEMERGSP(r13)
subi r1,r1,STACK_FRAME_OVERHEAD
- b .__secondary_start
+ b __secondary_start
#endif
#ifdef CONFIG_PPC_ISERIES
@@ -1580,11 +1625,6 @@ _STATIC(__start_initialization_iSeries)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
- LOAD_REG_IMMEDIATE(r3,cpu_specs)
- LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
- li r5,0
- bl .identify_cpu
-
LOAD_REG_IMMEDIATE(r2,__toc_start)
addi r2,r2,0x4000
addi r2,r2,0x4000
@@ -1597,7 +1637,6 @@ _STATIC(__start_initialization_iSeries)
b .start_here_common
#endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PPC_MULTIPLATFORM
_STATIC(__mmu_off)
mfmsr r3
@@ -1623,13 +1662,11 @@ _STATIC(__mmu_off)
*
*/
_GLOBAL(__start_initialization_multiplatform)
-#ifdef CONFIG_PPC_MULTIPLATFORM
/*
* Are we booted from a PROM Of-type client-interface ?
*/
cmpldi cr0,r5,0
bne .__boot_from_prom /* yes -> prom */
-#endif
/* Save parameters */
mr r31,r3
@@ -1646,6 +1683,8 @@ _GLOBAL(__start_initialization_multiplatform)
cmpwi r0,0x3c /* 970FX */
beq 1f
cmpwi r0,0x44 /* 970MP */
+ beq 1f
+ cmpwi r0,0x45 /* 970GX */
bne 2f
1: bl .__cpu_preinit_ppc970
2:
@@ -1656,7 +1695,6 @@ _GLOBAL(__start_initialization_multiplatform)
bl .__mmu_off
b .__after_prom_start
-#ifdef CONFIG_PPC_MULTIPLATFORM
_STATIC(__boot_from_prom)
/* Save parameters */
mr r31,r3
@@ -1696,7 +1734,6 @@ _STATIC(__boot_from_prom)
bl .prom_init
/* We never return */
trap
-#endif
/*
* At this point, r3 contains the physical address we are running at,
@@ -1752,8 +1789,6 @@ _STATIC(__after_prom_start)
bl .copy_and_flush /* copy the rest */
b .start_here_multiplatform
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
/*
* Copy routine used to copy the kernel to start at physical address 0
* and flush and invalidate the caches as needed.
@@ -1836,7 +1871,7 @@ _GLOBAL(pmac_secondary_start)
ld r1,PACAEMERGSP(r13)
subi r1,r1,STACK_FRAME_OVERHEAD
- b .__secondary_start
+ b __secondary_start
#endif /* CONFIG_PPC_PMAC */
@@ -1853,7 +1888,7 @@ _GLOBAL(pmac_secondary_start)
* r13 = paca virtual address
* SPRG3 = paca virtual address
*/
-_GLOBAL(__secondary_start)
+__secondary_start:
/* Set thread priority to MEDIUM */
HMT_MEDIUM
@@ -1877,11 +1912,16 @@ _GLOBAL(__secondary_start)
/* enable MMU and jump to start_secondary */
LOAD_REG_ADDR(r3, .start_secondary_prolog)
LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
-#ifdef DO_SOFT_DISABLE
+#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
ori r4,r4,MSR_EE
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
+BEGIN_FW_FTR_SECTION
+ stb r7,PACASOFTIRQEN(r13)
+ stb r7,PACAHARDIRQEN(r13)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
rfid
@@ -1913,7 +1953,6 @@ _GLOBAL(enable_64b_mode)
isync
blr
-#ifdef CONFIG_PPC_MULTIPLATFORM
/*
* This is where the main kernel code starts.
*/
@@ -1964,13 +2003,6 @@ _STATIC(start_here_multiplatform)
addi r2,r2,0x4000
add r2,r2,r26
- LOAD_REG_IMMEDIATE(r3, cpu_specs)
- add r3,r3,r26
- LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
- add r4,r4,r26
- mr r5,r26
- bl .identify_cpu
-
/* Do very early kernel initializations, including initial hash table,
* stab and slb setup before we turn on relocation. */
@@ -1984,7 +2016,6 @@ _STATIC(start_here_multiplatform)
mtspr SPRN_SRR1,r4
rfid
b . /* prevent speculative execution */
-#endif /* CONFIG_PPC_MULTIPLATFORM */
/* This is where all platforms converge execution */
_STATIC(start_here_common)
@@ -2000,13 +2031,6 @@ _STATIC(start_here_common)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
- /* Apply the CPUs-specific fixups (nop out sections not relevant
- * to this CPU
- */
- li r3,0
- bl .do_cpu_ftr_fixups
- bl .do_fw_ftr_fixups
-
/* ptr to current */
LOAD_REG_IMMEDIATE(r4, init_task)
std r4,PACACURRENT(r13)
@@ -2019,15 +2043,18 @@ _STATIC(start_here_common)
/* Load up the kernel context */
5:
-#ifdef DO_SOFT_DISABLE
-BEGIN_FW_FTR_SECTION
li r5,0
- stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
+ stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
mfmsr r5
ori r5,r5,MSR_EE /* Hard Enabled */
mtmsrd r5
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
+BEGIN_FW_FTR_SECTION
+ stb r5,PACAHARDIRQEN(r13)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
bl .start_kernel
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 39db7a3affe..82bd2f10770 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -112,7 +112,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
return 1;
}
-struct dma_mapping_ops ibmebus_dma_ops = {
+static struct dma_mapping_ops ibmebus_dma_ops = {
.alloc_coherent = ibmebus_alloc_coherent,
.free_coherent = ibmebus_free_coherent,
.map_single = ibmebus_map_single,
@@ -176,6 +176,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_common(
dev->ofdev.dev.bus = &ibmebus_bus_type;
dev->ofdev.dev.release = ibmebus_dev_release;
+ dev->ofdev.dev.archdata.of_node = dev->ofdev.node;
+ dev->ofdev.dev.archdata.dma_ops = &ibmebus_dma_ops;
+ dev->ofdev.dev.archdata.numa_node = of_node_to_nid(dev->ofdev.node);
+
/* An ibmebusdev is based on a of_device. We have to change the
* bus type to use our own DMA mapping operations.
*/
@@ -210,11 +214,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_node(
return NULL;
}
- dev = kmalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
if (!dev) {
return NULL;
}
- memset(dev, 0, sizeof(struct ibmebus_dev));
dev->ofdev.node = of_node_get(dn);
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 4180c3998b3..8994af327b4 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -39,6 +39,13 @@
#define cpu_should_die() 0
#endif
+static int __init powersave_off(char *arg)
+{
+ ppc_md.power_save = NULL;
+ return 0;
+}
+__setup("powersave=off", powersave_off);
+
/*
* The body of the idle task.
*/
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 30de81da7b4..ba319547860 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -30,6 +30,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
beqlr
/* Go to NAP now */
+ mfmsr r7
+ rldicl r0,r7,48,1
+ rotldi r0,r0,16
+ mtmsrd r0,1 /* hard-disable interrupts */
+ li r0,1
+ stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
+ stb r0,PACAHARDIRQEN(r13)
BEGIN_FTR_SECTION
DSSALL
sync
@@ -38,7 +45,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
ori r8,r8,_TLF_NAPPING /* so when we take an exception */
std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
- mfmsr r7
ori r7,r7,MSR_EE
oris r7,r7,MSR_POW@h
1: sync
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index e98180686b3..34ae11494dd 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -25,13 +25,11 @@
#include <asm/firmware.h>
#include <asm/bug.h>
-void _insb(volatile u8 __iomem *port, void *buf, long count)
+void _insb(const volatile u8 __iomem *port, void *buf, long count)
{
u8 *tbuf = buf;
u8 tmp;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -48,8 +46,6 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
{
const u8 *tbuf = buf;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -60,13 +56,11 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
}
EXPORT_SYMBOL(_outsb);
-void _insw_ns(volatile u16 __iomem *port, void *buf, long count)
+void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
{
u16 *tbuf = buf;
u16 tmp;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -83,8 +77,6 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
{
const u16 *tbuf = buf;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -95,13 +87,11 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
}
EXPORT_SYMBOL(_outsw_ns);
-void _insl_ns(volatile u32 __iomem *port, void *buf, long count)
+void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
{
u32 *tbuf = buf;
u32 tmp;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -118,8 +108,6 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
{
const u32 *tbuf = buf;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -129,3 +117,90 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
asm volatile("sync");
}
EXPORT_SYMBOL(_outsl_ns);
+
+#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
+
+void _memset_io(volatile void __iomem *addr, int c, unsigned long n)
+{
+ void *p = (void __force *)addr;
+ u32 lc = c;
+ lc |= lc << 8;
+ lc |= lc << 16;
+
+ __asm__ __volatile__ ("sync" : : : "memory");
+ while(n && !IO_CHECK_ALIGN(p, 4)) {
+ *((volatile u8 *)p) = c;
+ p++;
+ n--;
+ }
+ while(n >= 4) {
+ *((volatile u32 *)p) = lc;
+ p += 4;
+ n -= 4;
+ }
+ while(n) {
+ *((volatile u8 *)p) = c;
+ p++;
+ n--;
+ }
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memset_io);
+
+void _memcpy_fromio(void *dest, const volatile void __iomem *src,
+ unsigned long n)
+{
+ void *vsrc = (void __force *) src;
+
+ __asm__ __volatile__ ("sync" : : : "memory");
+ while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
+ *((u8 *)dest) = *((volatile u8 *)vsrc);
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ vsrc++;
+ dest++;
+ n--;
+ }
+ while(n > 4) {
+ *((u32 *)dest) = *((volatile u32 *)vsrc);
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ vsrc += 4;
+ dest += 4;
+ n -= 4;
+ }
+ while(n) {
+ *((u8 *)dest) = *((volatile u8 *)vsrc);
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ vsrc++;
+ dest++;
+ n--;
+ }
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memcpy_fromio);
+
+void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
+{
+ void *vdest = (void __force *) dest;
+
+ __asm__ __volatile__ ("sync" : : : "memory");
+ while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
+ *((volatile u8 *)vdest) = *((u8 *)src);
+ src++;
+ vdest++;
+ n--;
+ }
+ while(n > 4) {
+ *((volatile u32 *)vdest) = *((volatile u32 *)src);
+ src += 4;
+ vdest += 4;
+ n-=4;
+ }
+ while(n) {
+ *((volatile u8 *)vdest) = *((u8 *)src);
+ src++;
+ vdest++;
+ n--;
+ }
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memcpy_toio);
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index a13a93dfc65..c6811337105 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(iowrite32_rep);
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
- return (void __iomem *) (port+pci_io_base);
+ return (void __iomem *) (port + _IO_BASE);
}
void ioport_unmap(void __iomem *addr)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index f88a2a675d9..95edad4faf2 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -47,6 +47,17 @@ static int novmerge = 0;
static int novmerge = 1;
#endif
+static inline unsigned long iommu_num_pages(unsigned long vaddr,
+ unsigned long slen)
+{
+ unsigned long npages;
+
+ npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
+ npages >>= IOMMU_PAGE_SHIFT;
+
+ return npages;
+}
+
static int __init setup_iommu(char *str)
{
if (!strcmp(str, "novmerge"))
@@ -178,10 +189,10 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
}
entry += tbl->it_offset; /* Offset into real TCE table */
- ret = entry << PAGE_SHIFT; /* Set the return dma address */
+ ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */
/* Put the TCEs in the HW table */
- ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
+ ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
direction);
@@ -203,7 +214,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long entry, free_entry;
unsigned long i;
- entry = dma_addr >> PAGE_SHIFT;
+ entry = dma_addr >> IOMMU_PAGE_SHIFT;
free_entry = entry - tbl->it_offset;
if (((free_entry + npages) > tbl->it_size) ||
@@ -247,9 +258,9 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
-int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
- struct scatterlist *sglist, int nelems,
- unsigned long mask, enum dma_data_direction direction)
+int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+ int nelems, unsigned long mask,
+ enum dma_data_direction direction)
{
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
@@ -270,7 +281,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Init first segment length for backout at failure */
outs->dma_length = 0;
- DBG("mapping %d elements:\n", nelems);
+ DBG("sg mapping %d elements:\n", nelems);
spin_lock_irqsave(&(tbl->it_lock), flags);
@@ -285,9 +296,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
}
/* Allocate iommu entries for that segment */
vaddr = (unsigned long)page_address(s->page) + s->offset;
- npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
+ npages = iommu_num_pages(vaddr, slen);
+ entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -301,14 +311,14 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Convert entry to a dma_addr_t */
entry += tbl->it_offset;
- dma_addr = entry << PAGE_SHIFT;
- dma_addr |= s->offset;
+ dma_addr = entry << IOMMU_PAGE_SHIFT;
+ dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
- DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n",
+ DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
npages, entry, dma_addr);
/* Insert into HW table */
- ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
+ ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
/* If we are in an open segment, try merging */
if (segstart != s) {
@@ -323,7 +333,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG(" can't merge, new segment.\n");
} else {
outs->dma_length += s->length;
- DBG(" merged, new len: %lx\n", outs->dma_length);
+ DBG(" merged, new len: %ux\n", outs->dma_length);
}
}
@@ -367,9 +377,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
if (s->dma_length != 0) {
unsigned long vaddr, npages;
- vaddr = s->dma_address & PAGE_MASK;
- npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
- >> PAGE_SHIFT;
+ vaddr = s->dma_address & IOMMU_PAGE_MASK;
+ npages = iommu_num_pages(s->dma_address, s->dma_length);
__iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -398,8 +407,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (sglist->dma_length == 0)
break;
- npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
- - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
+ npages = iommu_num_pages(dma_handle,sglist->dma_length);
__iommu_free(tbl, dma_handle, npages);
sglist++;
}
@@ -532,12 +540,11 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
BUG_ON(direction == DMA_NONE);
uaddr = (unsigned long)vaddr;
- npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
+ npages = iommu_num_pages(uaddr, size);
if (tbl) {
dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
- mask >> PAGE_SHIFT, 0);
+ mask >> IOMMU_PAGE_SHIFT, 0);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, "
@@ -545,7 +552,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
tbl, vaddr, npages);
}
} else
- dma_handle |= (uaddr & ~PAGE_MASK);
+ dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
}
return dma_handle;
@@ -554,11 +561,14 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
+ unsigned int npages;
+
BUG_ON(direction == DMA_NONE);
- if (tbl)
- iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
- (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
+ if (tbl) {
+ npages = iommu_num_pages(dma_handle, size);
+ iommu_free(tbl, dma_handle, npages);
+ }
}
/* Allocates a contiguous real buffer and creates mappings over it.
@@ -570,11 +580,11 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
{
void *ret = NULL;
dma_addr_t mapping;
- unsigned int npages, order;
+ unsigned int order;
+ unsigned int nio_pages, io_order;
struct page *page;
size = PAGE_ALIGN(size);
- npages = size >> PAGE_SHIFT;
order = get_order(size);
/*
@@ -598,8 +608,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
- mask >> PAGE_SHIFT, order);
+ nio_pages = size >> IOMMU_PAGE_SHIFT;
+ io_order = get_iommu_order(size);
+ mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+ mask >> IOMMU_PAGE_SHIFT, io_order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
@@ -611,12 +623,13 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- unsigned int npages;
-
if (tbl) {
+ unsigned int nio_pages;
+
+ size = PAGE_ALIGN(size);
+ nio_pages = size >> IOMMU_PAGE_SHIFT;
+ iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
- npages = size >> PAGE_SHIFT;
- iommu_free(tbl, dma_handle, npages);
free_pages((unsigned long)vaddr, get_order(size));
}
}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5e37bf14ef2..0bd8c766583 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -64,8 +64,9 @@
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
-#ifdef CONFIG_PPC_ISERIES
+#ifdef CONFIG_PPC64
#include <asm/paca.h>
+#include <asm/firmware.h>
#endif
int __irq_offset_value;
@@ -95,6 +96,74 @@ extern atomic_t ipi_sent;
EXPORT_SYMBOL(irq_desc);
int distribute_irqs = 1;
+
+static inline unsigned long get_hard_enabled(void)
+{
+ unsigned long enabled;
+
+ __asm__ __volatile__("lbz %0,%1(13)"
+ : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
+
+ return enabled;
+}
+
+static inline void set_soft_enabled(unsigned long enable)
+{
+ __asm__ __volatile__("stb %0,%1(13)"
+ : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
+}
+
+void local_irq_restore(unsigned long en)
+{
+ /*
+ * get_paca()->soft_enabled = en;
+ * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
+ * That was allowed before, and in such a case we do need to take care
+ * that gcc will set soft_enabled directly via r13, not choose to use
+ * an intermediate register, lest we're preempted to a different cpu.
+ */
+ set_soft_enabled(en);
+ if (!en)
+ return;
+
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ /*
+ * Do we need to disable preemption here? Not really: in the
+ * unlikely event that we're preempted to a different cpu in
+ * between getting r13, loading its lppaca_ptr, and loading
+ * its any_int, we might call iseries_handle_interrupts without
+ * an interrupt pending on the new cpu, but that's no disaster,
+ * is it? And the business of preempting us off the old cpu
+ * would itself involve a local_irq_restore which handles the
+ * interrupt to that cpu.
+ *
+ * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
+ * to avoid any preemption checking added into get_paca().
+ */
+ if (local_paca->lppaca_ptr->int_dword.any_int)
+ iseries_handle_interrupts();
+ return;
+ }
+
+ /*
+ * if (get_paca()->hard_enabled) return;
+ * But again we need to take care that gcc gets hard_enabled directly
+ * via r13, not choose to use an intermediate register, lest we're
+ * preempted to a different cpu in between the two instructions.
+ */
+ if (get_hard_enabled())
+ return;
+
+ /*
+ * Need to hard-enable interrupts here. Since currently disabled,
+ * no need to take further asm precautions against preemption; but
+ * use local_paca instead of get_paca() to avoid preemption checking.
+ */
+ local_paca->hard_enabled = en;
+ if ((int)mfspr(SPRN_DEC) < 0)
+ mtspr(SPRN_DEC, 1);
+ hard_irq_enable();
+}
#endif /* CONFIG_PPC64 */
int show_interrupts(struct seq_file *p, void *v)
@@ -246,7 +315,8 @@ void do_IRQ(struct pt_regs *regs)
set_irq_regs(old_regs);
#ifdef CONFIG_PPC_ISERIES
- if (get_lppaca()->int_dword.fields.decr_int) {
+ if (firmware_has_feature(FW_FEATURE_ISERIES) &&
+ get_lppaca()->int_dword.fields.decr_int) {
get_lppaca()->int_dword.fields.decr_int = 0;
/* Signal a fake decrementer interrupt */
timer_interrupt(regs);
@@ -626,10 +696,14 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
void irq_dispose_mapping(unsigned int virq)
{
- struct irq_host *host = irq_map[virq].host;
+ struct irq_host *host;
irq_hw_number_t hwirq;
unsigned long flags;
+ if (virq == NO_IRQ)
+ return;
+
+ host = irq_map[virq].host;
WARN_ON (host == NULL);
if (host == NULL)
return;
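An aside on the local_irq_restore() rework above (not part of the patch): under this lazy-disable scheme, masking interrupts only clears the paca soft_enabled byte while MSR:EE normally stays set; hard_enabled is cleared only when an interrupt actually arrives while soft-disabled, and local_irq_restore() then replays or hard-enables as shown. A conceptual sketch of the disable side, using the accessors from the hunk:

/* Conceptual sketch only -- the real code must store through r13
 * directly (see the comments in local_irq_restore() above) rather
 * than via get_paca(), to stay safe against preemption.
 */
static inline unsigned long sketch_local_irq_disable(void)
{
	unsigned long flags = get_paca()->soft_enabled;	/* previous state */

	set_soft_enabled(0);	/* soft-disable; MSR:EE is left untouched */
	return flags;		/* handed back to local_irq_restore() later */
}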
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7b8d12b9026..4657563f881 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -85,7 +85,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
- free_insn_slot(p->ainsn.insn);
+ free_insn_slot(p->ainsn.insn, 0);
mutex_unlock(&kprobe_mutex);
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 88fd73fdf04..412bea3cf81 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -102,80 +102,6 @@ _GLOBAL(reloc_got2)
blr
/*
- * identify_cpu,
- * called with r3 = data offset and r4 = CPU number
- * doesn't change r3
- */
-_GLOBAL(identify_cpu)
- addis r8,r3,cpu_specs@ha
- addi r8,r8,cpu_specs@l
- mfpvr r7
-1:
- lwz r5,CPU_SPEC_PVR_MASK(r8)
- and r5,r5,r7
- lwz r6,CPU_SPEC_PVR_VALUE(r8)
- cmplw 0,r6,r5
- beq 1f
- addi r8,r8,CPU_SPEC_ENTRY_SIZE
- b 1b
-1:
- addis r6,r3,cur_cpu_spec@ha
- addi r6,r6,cur_cpu_spec@l
- sub r8,r8,r3
- stw r8,0(r6)
- blr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
- /* Get CPU 0 features */
- addis r6,r3,cur_cpu_spec@ha
- addi r6,r6,cur_cpu_spec@l
- lwz r4,0(r6)
- add r4,r4,r3
- lwz r4,CPU_SPEC_FEATURES(r4)
-
- /* Get the fixup table */
- addis r6,r3,__start___ftr_fixup@ha
- addi r6,r6,__start___ftr_fixup@l
- addis r7,r3,__stop___ftr_fixup@ha
- addi r7,r7,__stop___ftr_fixup@l
-
- /* Do the fixup */
-1: cmplw 0,r6,r7
- bgelr
- addi r6,r6,16
- lwz r8,-16(r6) /* mask */
- and r8,r8,r4
- lwz r9,-12(r6) /* value */
- cmplw 0,r8,r9
- beq 1b
- lwz r8,-8(r6) /* section begin */
- lwz r9,-4(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- add r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
- andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
- beq 2f
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-2: addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
-
-/*
* call_setup_cpu - call the setup_cpu function for this cpu
* r3 = data offset, r24 = cpu number
*
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c70e20708a1..21fd2c662a9 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -246,130 +246,6 @@ _GLOBAL(__flush_dcache_icache)
isync
blr
-/*
- * identify_cpu and calls setup_cpu
- * In: r3 = base of the cpu_specs array
- * r4 = address of cur_cpu_spec
- * r5 = relocation offset
- */
-_GLOBAL(identify_cpu)
- mfpvr r7
-1:
- lwz r8,CPU_SPEC_PVR_MASK(r3)
- and r8,r8,r7
- lwz r9,CPU_SPEC_PVR_VALUE(r3)
- cmplw 0,r9,r8
- beq 1f
- addi r3,r3,CPU_SPEC_ENTRY_SIZE
- b 1b
-1:
- sub r0,r3,r5
- std r0,0(r4)
- ld r4,CPU_SPEC_SETUP(r3)
- cmpdi 0,r4,0
- add r4,r4,r5
- beqlr
- ld r4,0(r4)
- add r4,r4,r5
- mtctr r4
- /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
- mr r4,r3
- mr r3,r5
- bctr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
- /* Get CPU 0 features */
- LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
- sub r6,r6,r3
- ld r4,0(r6)
- sub r4,r4,r3
- ld r4,CPU_SPEC_FEATURES(r4)
- /* Get the fixup table */
- LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
- sub r6,r6,r3
- LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
- sub r7,r7,r3
- /* Do the fixup */
-1: cmpld r6,r7
- bgelr
- addi r6,r6,32
- ld r8,-32(r6) /* mask */
- and r8,r8,r4
- ld r9,-24(r6) /* value */
- cmpld r8,r9
- beq 1b
- ld r8,-16(r6) /* section begin */
- ld r9,-8(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- sub r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
- andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
- beq 2f
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-2: addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
-
-/*
- * do_fw_ftr_fixups - goes through the list of firmware feature fixups
- * and writes nop's over sections of code that don't apply for this firmware.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_fw_ftr_fixups)
- /* Get firmware features */
- LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features)
- sub r6,r6,r3
- ld r4,0(r6)
- /* Get the fixup table */
- LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup)
- sub r6,r6,r3
- LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup)
- sub r7,r7,r3
- /* Do the fixup */
-1: cmpld r6,r7
- bgelr
- addi r6,r6,32
- ld r8,-32(r6) /* mask */
- and r8,r8,r4
- ld r9,-24(r6) /* value */
- cmpld r8,r9
- beq 1b
- ld r8,-16(r6) /* section begin */
- ld r9,-8(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- sub r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
-BEGIN_FTR_SECTION
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-END_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE)
- addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 92f4e5f64f0..8339fd609de 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -23,6 +23,9 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cache.h>
+#include <linux/bug.h>
+
+#include "setup.h"
#if 0
#define DEBUGP printk
@@ -269,39 +272,44 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return 0;
}
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
{
char *secstrings;
unsigned int i;
- me->arch.bug_table = NULL;
- me->arch.num_bugs = 0;
-
- /* Find the __bug_table section, if present */
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
- continue;
- me->arch.bug_table = (void *) sechdrs[i].sh_addr;
- me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
- break;
- }
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
+ return &sechdrs[i];
+ return NULL;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ const Elf_Shdr *sect;
+ int err;
+
+ err = module_bug_finalize(hdr, sechdrs, me);
+ if (err) /* never true, currently */
+ return err;
- /*
- * Strictly speaking this should have a spinlock to protect against
- * traversals, but since we only traverse on BUG()s, a spinlock
- * could potentially lead to deadlock and thus be counter-productive.
- */
- list_add(&me->arch.bug_list, &module_bug_list);
+ /* Apply feature fixups */
+ sect = find_section(hdr, sechdrs, "__ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
- list_del(&mod->arch.bug_list);
+ module_bug_cleanup(mod);
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ba34001fca8..75c7c4f1928 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -20,8 +20,12 @@
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
+#include <linux/bug.h>
#include <asm/module.h>
#include <asm/uaccess.h>
+#include <asm/firmware.h>
+
+#include "setup.h"
/* FIXME: We don't do .init separately. To do this, we'd need to have
a separate r2 value in the init and core section, and stub between
@@ -400,6 +404,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
| (value & 0x03fffffc);
break;
+ case R_PPC64_REL64:
+ /* 64 bits relative (used by features fixups) */
+ *location = value - (unsigned long)location;
+ break;
+
default:
printk("%s: Unknown ADD relocation: %lu\n",
me->name,
@@ -413,38 +422,49 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
LIST_HEAD(module_bug_list);
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs, struct module *me)
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
{
char *secstrings;
unsigned int i;
- me->arch.bug_table = NULL;
- me->arch.num_bugs = 0;
-
- /* Find the __bug_table section, if present */
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
- continue;
- me->arch.bug_table = (void *) sechdrs[i].sh_addr;
- me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
- break;
- }
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
+ return &sechdrs[i];
+ return NULL;
+}
- /*
- * Strictly speaking this should have a spinlock to protect against
- * traversals, but since we only traverse on BUG()s, a spinlock
- * could potentially lead to deadlock and thus be counter-productive.
- */
- list_add(&me->arch.bug_list, &module_bug_list);
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs, struct module *me)
+{
+ const Elf_Shdr *sect;
+ int err;
+
+ err = module_bug_finalize(hdr, sechdrs, me);
+ if (err)
+ return err;
+
+ /* Apply feature fixups */
+ sect = find_section(hdr, sechdrs, "__ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
+ sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(powerpc_firmware_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
- list_del(&mod->arch.bug_list);
+ module_bug_cleanup(mod);
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
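For reference (not part of the patch): the do_feature_fixups() calls added to module_finalize() above do in C what the removed do_cpu_ftr_fixups/do_fw_ftr_fixups assembly did, i.e. walk a table of fixup entries and overwrite non-applicable code sections with nops. A rough sketch, assuming an entry layout matching the removed assembly (mask, value, section begin, section end) with the section bounds stored as self-relative offsets, which is what the new R_PPC64_REL64 handling above suggests:

/* Sketch only; the exact entry layout is an assumption for illustration. */
struct fixup_entry {
	unsigned long	mask;		/* feature bits the section requires */
	unsigned long	value;		/* expected value of (features & mask) */
	long		start_off;	/* section start, relative to this field */
	long		end_off;	/* section end, relative to this field */
};

static void sketch_do_feature_fixups(unsigned long features,
				     void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur = fixup_start, *fend = fixup_end;

	for (; fcur < fend; fcur++) {
		unsigned int *p, *pend;

		if ((features & fcur->mask) == fcur->value)
			continue;		/* feature present: keep the code */

		p    = (unsigned int *)((char *)&fcur->start_off + fcur->start_off);
		pend = (unsigned int *)((char *)&fcur->end_off + fcur->end_off);
		while (p < pend)
			*p++ = 0x60000000;	/* ppc "nop", as in the old asm */

		/* a real implementation also flushes the i-cache here */
	}
}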
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c

index 397c83eda20..e921514e655 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -9,30 +9,26 @@
#include <asm/of_device.h>
/**
- * of_match_device - Tell if an of_device structure has a matching
- * of_match structure
+ * of_match_node - Tell if a device_node has a matching of_match structure
* @ids: array of of device match structures to search in
- * @dev: the of device structure to match against
+ * @node: the of device structure to match against
*
- * Used by a driver to check whether an of_device present in the
- * system is in its list of supported devices.
+ * Low level utility function used by device matching.
*/
-const struct of_device_id *of_match_device(const struct of_device_id *matches,
- const struct of_device *dev)
+const struct of_device_id *of_match_node(const struct of_device_id *matches,
+ const struct device_node *node)
{
- if (!dev->node)
- return NULL;
while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
int match = 1;
if (matches->name[0])
- match &= dev->node->name
- && !strcmp(matches->name, dev->node->name);
+ match &= node->name
+ && !strcmp(matches->name, node->name);
if (matches->type[0])
- match &= dev->node->type
- && !strcmp(matches->type, dev->node->type);
+ match &= node->type
+ && !strcmp(matches->type, node->type);
if (matches->compatible[0])
- match &= device_is_compatible(dev->node,
- matches->compatible);
+ match &= device_is_compatible(node,
+ matches->compatible);
if (match)
return matches;
matches++;
@@ -40,16 +36,21 @@ const struct of_device_id *of_match_device(const struct of_device_id *matches,
return NULL;
}
-static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
+/**
+ * of_match_device - Tell if an of_device structure has a matching
+ * of_match structure
+ * @ids: array of of device match structures to search in
+ * @dev: the of device structure to match against
+ *
+ * Used by a driver to check whether an of_device present in the
+ * system is in its list of supported devices.
+ */
+const struct of_device_id *of_match_device(const struct of_device_id *matches,
+ const struct of_device *dev)
{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * of_drv = to_of_platform_driver(drv);
- const struct of_device_id * matches = of_drv->match_table;
-
- if (!matches)
- return 0;
-
- return of_match_device(matches, of_dev) != NULL;
+ if (!dev->node)
+ return NULL;
+ return of_match_node(matches, dev->node);
}
struct of_device *of_dev_get(struct of_device *dev)
@@ -71,96 +72,8 @@ void of_dev_put(struct of_device *dev)
put_device(&dev->dev);
}
-
-static int of_device_probe(struct device *dev)
-{
- int error = -ENODEV;
- struct of_platform_driver *drv;
- struct of_device *of_dev;
- const struct of_device_id *match;
-
- drv = to_of_platform_driver(dev->driver);
- of_dev = to_of_device(dev);
-
- if (!drv->probe)
- return error;
-
- of_dev_get(of_dev);
-
- match = of_match_device(drv->match_table, of_dev);
- if (match)
- error = drv->probe(of_dev, match);
- if (error)
- of_dev_put(of_dev);
-
- return error;
-}
-
-static int of_device_remove(struct device *dev)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-
- if (dev->driver && drv->remove)
- drv->remove(of_dev);
- return 0;
-}
-
-static int of_device_suspend(struct device *dev, pm_message_t state)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
- int error = 0;
-
- if (dev->driver && drv->suspend)
- error = drv->suspend(of_dev, state);
- return error;
-}
-
-static int of_device_resume(struct device * dev)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
- int error = 0;
-
- if (dev->driver && drv->resume)
- error = drv->resume(of_dev);
- return error;
-}
-
-struct bus_type of_platform_bus_type = {
- .name = "of_platform",
- .match = of_platform_bus_match,
- .probe = of_device_probe,
- .remove = of_device_remove,
- .suspend = of_device_suspend,
- .resume = of_device_resume,
-};
-
-static int __init of_bus_driver_init(void)
-{
- return bus_register(&of_platform_bus_type);
-}
-
-postcore_initcall(of_bus_driver_init);
-
-int of_register_driver(struct of_platform_driver *drv)
-{
- /* initialize common driver fields */
- drv->driver.name = drv->name;
- drv->driver.bus = &of_platform_bus_type;
-
- /* register with core */
- return driver_register(&drv->driver);
-}
-
-void of_unregister_driver(struct of_platform_driver *drv)
-{
- driver_unregister(&drv->driver);
-}
-
-
-static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dev_show_devspec(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct of_device *ofdev;
@@ -196,9 +109,7 @@ int of_device_register(struct of_device *ofdev)
if (rc)
return rc;
- device_create_file(&ofdev->dev, &dev_attr_devspec);
-
- return 0;
+ return device_create_file(&ofdev->dev, &dev_attr_devspec);
}
void of_device_unregister(struct of_device *ofdev)
@@ -208,41 +119,11 @@ void of_device_unregister(struct of_device *ofdev)
device_unregister(&ofdev->dev);
}
-struct of_device* of_platform_device_create(struct device_node *np,
- const char *bus_id,
- struct device *parent)
-{
- struct of_device *dev;
-
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return NULL;
- memset(dev, 0, sizeof(*dev));
-
- dev->node = of_node_get(np);
- dev->dma_mask = 0xffffffffUL;
- dev->dev.dma_mask = &dev->dma_mask;
- dev->dev.parent = parent;
- dev->dev.bus = &of_platform_bus_type;
- dev->dev.release = of_release_dev;
-
- strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
-
- if (of_device_register(dev) != 0) {
- kfree(dev);
- return NULL;
- }
-
- return dev;
-}
+EXPORT_SYMBOL(of_match_node);
EXPORT_SYMBOL(of_match_device);
-EXPORT_SYMBOL(of_platform_bus_type);
-EXPORT_SYMBOL(of_register_driver);
-EXPORT_SYMBOL(of_unregister_driver);
EXPORT_SYMBOL(of_device_register);
EXPORT_SYMBOL(of_device_unregister);
EXPORT_SYMBOL(of_dev_get);
EXPORT_SYMBOL(of_dev_put);
-EXPORT_SYMBOL(of_platform_device_create);
EXPORT_SYMBOL(of_release_dev);
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
new file mode 100644
index 00000000000..3002ea3a61a
--- /dev/null
+++ b/arch/powerpc/kernel/of_platform.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ * and Arnd Bergmann, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include <asm/errno.h>
+#include <asm/dcr.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
+#include <asm/topology.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+#include <asm/atomic.h>
+
+
+/*
+ * The list of OF IDs below is used for matching bus types in the
+ * system whose devices are to be exposed as of_platform_devices.
+ *
+ * This is the default list valid for most platforms. This file provides
+ * functions who can take an explicit list if necessary though
+ *
+ * The search is always performed recursively looking for children of
+ * the provided device_node and recursively if such a children matches
+ * a bus type in the list
+ */
+
+static struct of_device_id of_default_bus_ids[] = {
+ { .type = "soc", },
+ { .compatible = "soc", },
+ { .type = "spider", },
+ { .type = "axon", },
+ { .type = "plb5", },
+ { .type = "plb4", },
+ { .type = "opb", },
+ {},
+};
+
+static atomic_t bus_no_reg_magic;
+
+/*
+ *
+ * OF platform device type definition & base infrastructure
+ *
+ */
+
+static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct of_device * of_dev = to_of_device(dev);
+ struct of_platform_driver * of_drv = to_of_platform_driver(drv);
+ const struct of_device_id * matches = of_drv->match_table;
+
+ if (!matches)
+ return 0;
+
+ return of_match_device(matches, of_dev) != NULL;
+}
+
+static int of_platform_device_probe(struct device *dev)
+{
+ int error = -ENODEV;
+ struct of_platform_driver *drv;
+ struct of_device *of_dev;
+ const struct of_device_id *match;
+
+ drv = to_of_platform_driver(dev->driver);
+ of_dev = to_of_device(dev);
+
+ if (!drv->probe)
+ return error;
+
+ of_dev_get(of_dev);
+
+ match = of_match_device(drv->match_table, of_dev);
+ if (match)
+ error = drv->probe(of_dev, match);
+ if (error)
+ of_dev_put(of_dev);
+
+ return error;
+}
+
+static int of_platform_device_remove(struct device *dev)
+{
+ struct of_device * of_dev = to_of_device(dev);
+ struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+
+ if (dev->driver && drv->remove)
+ drv->remove(of_dev);
+ return 0;
+}
+
+static int of_platform_device_suspend(struct device *dev, pm_message_t state)
+{
+ struct of_device * of_dev = to_of_device(dev);
+ struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+ int error = 0;
+
+ if (dev->driver && drv->suspend)
+ error = drv->suspend(of_dev, state);
+ return error;
+}
+
+static int of_platform_device_resume(struct device * dev)
+{
+ struct of_device * of_dev = to_of_device(dev);
+ struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+ int error = 0;
+
+ if (dev->driver && drv->resume)
+ error = drv->resume(of_dev);
+ return error;
+}
+
+struct bus_type of_platform_bus_type = {
+ .name = "of_platform",
+ .match = of_platform_bus_match,
+ .probe = of_platform_device_probe,
+ .remove = of_platform_device_remove,
+ .suspend = of_platform_device_suspend,
+ .resume = of_platform_device_resume,
+};
+EXPORT_SYMBOL(of_platform_bus_type);
+
+static int __init of_bus_driver_init(void)
+{
+ return bus_register(&of_platform_bus_type);
+}
+
+postcore_initcall(of_bus_driver_init);
+
+int of_register_platform_driver(struct of_platform_driver *drv)
+{
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &of_platform_bus_type;
+
+ /* register with core */
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(of_register_platform_driver);
+
+void of_unregister_platform_driver(struct of_platform_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(of_unregister_platform_driver);
+
+static void of_platform_make_bus_id(struct of_device *dev)
+{
+ struct device_node *node = dev->node;
+ char *name = dev->dev.bus_id;
+ const u32 *reg;
+ u64 addr;
+ int magic;
+
+ /*
+ * If it's a DCR based device, use 'd' for native DCRs
+ * and 'D' for MMIO DCRs.
+ */
+#ifdef CONFIG_PPC_DCR
+ reg = get_property(node, "dcr-reg", NULL);
+ if (reg) {
+#ifdef CONFIG_PPC_DCR_NATIVE
+ snprintf(name, BUS_ID_SIZE, "d%x.%s",
+ *reg, node->name);
+#else /* CONFIG_PPC_DCR_NATIVE */
+ addr = of_translate_dcr_address(node, *reg, NULL);
+ if (addr != OF_BAD_ADDR) {
+ snprintf(name, BUS_ID_SIZE,
+ "D%llx.%s", (unsigned long long)addr,
+ node->name);
+ return;
+ }
+#endif /* !CONFIG_PPC_DCR_NATIVE */
+ }
+#endif /* CONFIG_PPC_DCR */
+
+ /*
+ * For MMIO, get the physical address
+ */
+ reg = get_property(node, "reg", NULL);
+ if (reg) {
+ addr = of_translate_address(node, reg);
+ if (addr != OF_BAD_ADDR) {
+ snprintf(name, BUS_ID_SIZE,
+ "%llx.%s", (unsigned long long)addr,
+ node->name);
+ return;
+ }
+ }
+
+ /*
+ * No BusID, use the node name and add a globally incremented
+ * counter (and pray...)
+ */
+ magic = atomic_add_return(1, &bus_no_reg_magic);
+ snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
+}
+
+struct of_device* of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ struct of_device *dev;
+
+ dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ memset(dev, 0, sizeof(*dev));
+
+ dev->node = of_node_get(np);
+ dev->dma_mask = 0xffffffffUL;
+ dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.parent = parent;
+ dev->dev.bus = &of_platform_bus_type;
+ dev->dev.release = of_release_dev;
+ dev->dev.archdata.of_node = np;
+ dev->dev.archdata.numa_node = of_node_to_nid(np);
+
+ /* We do not fill the DMA ops for platform devices by default.
+ * It is currently the responsibility of the platform code to
+ * do so, possibly using a device notifier
+ */
+
+ if (bus_id)
+ strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
+ else
+ of_platform_make_bus_id(dev);
+
+ if (of_device_register(dev) != 0) {
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(of_platform_device_create);
+
+
+
+/**
+ * of_platform_bus_create - Create an OF device for a bus node and all its
+ * children. Optionally recursively instanciate matching busses.
+ * @bus: device node of the bus to instanciate
+ * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
+ * disallow recursive creation of child busses
+ */
+static int of_platform_bus_create(struct device_node *bus,
+ struct of_device_id *matches,
+ struct device *parent)
+{
+ struct device_node *child;
+ struct of_device *dev;
+ int rc = 0;
+
+ for (child = NULL; (child = of_get_next_child(bus, child)); ) {
+ pr_debug(" create child: %s\n", child->full_name);
+ dev = of_platform_device_create(child, NULL, parent);
+ if (dev == NULL)
+ rc = -ENOMEM;
+ else if (!of_match_node(matches, child))
+ continue;
+ if (rc == 0) {
+ pr_debug(" and sub busses\n");
+ rc = of_platform_bus_create(child, matches, &dev->dev);
+ } if (rc) {
+ of_node_put(child);
+ break;
+ }
+ }
+ return rc;
+}
+
+/**
+ * of_platform_bus_probe - Probe the device-tree for platform busses
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @matches: match table, NULL to use the default
+ * @parent: parent to hook devices from, NULL for toplevel
+ *
+ * Note that children of the provided root are not instantiated as devices
+ * unless the specified root itself matches the bus list and is not NULL.
+ */
+
+int of_platform_bus_probe(struct device_node *root,
+ struct of_device_id *matches,
+ struct device *parent)
+{
+ struct device_node *child;
+ struct of_device *dev;
+ int rc = 0;
+
+ if (matches == NULL)
+ matches = of_default_bus_ids;
+ if (matches == OF_NO_DEEP_PROBE)
+ return -EINVAL;
+ if (root == NULL)
+ root = of_find_node_by_path("/");
+ else
+ of_node_get(root);
+
+ pr_debug("of_platform_bus_probe()\n");
+ pr_debug(" starting at: %s\n", root->full_name);
+
+ /* Do a self check of bus type; if there's a match, create
+ * children
+ */
+ if (of_match_node(matches, root)) {
+ pr_debug(" root match, create all sub devices\n");
+ dev = of_platform_device_create(root, NULL, parent);
+ if (dev == NULL) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+ pr_debug(" create all sub busses\n");
+ rc = of_platform_bus_create(root, matches, &dev->dev);
+ goto bail;
+ }
+ for (child = NULL; (child = of_get_next_child(root, child)); ) {
+ if (!of_match_node(matches, child))
+ continue;
+
+ pr_debug(" match: %s\n", child->full_name);
+ dev = of_platform_device_create(child, NULL, parent);
+ if (dev == NULL)
+ rc = -ENOMEM;
+ else
+ rc = of_platform_bus_create(child, matches, &dev->dev);
+ if (rc) {
+ of_node_put(child);
+ break;
+ }
+ }
+ bail:
+ of_node_put(root);
+ return rc;
+}
+EXPORT_SYMBOL(of_platform_bus_probe);
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+ return to_of_device(dev)->node == data;
+}
+
+struct of_device *of_find_device_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&of_platform_bus_type,
+ NULL, np, of_dev_node_match);
+ if (dev)
+ return to_of_device(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
+
+static int of_dev_phandle_match(struct device *dev, void *data)
+{
+ phandle *ph = data;
+ return to_of_device(dev)->node->linux_phandle == *ph;
+}
+
+struct of_device *of_find_device_by_phandle(phandle ph)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&of_platform_bus_type,
+ NULL, &ph, of_dev_phandle_match);
+ if (dev)
+ return to_of_device(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_phandle);
+
+
+#ifdef CONFIG_PPC_OF_PLATFORM_PCI
+
+/* The probing of PCI controllers from of_platform is currently
+ * 64-bit only, mostly due to gratuitous differences between
+ * the 32-bit and 64-bit PCI code on PowerPC and to the 32-bit code
+ * lacking some bits needed here.
+ */
+
+static int __devinit of_pci_phb_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct pci_controller *phb;
+
+ /* Check if we can do that ... */
+ if (ppc_md.pci_setup_phb == NULL)
+ return -ENODEV;
+
+ printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name);
+
+ /* Alloc and setup PHB data structure */
+ phb = pcibios_alloc_controller(dev->node);
+ if (!phb)
+ return -ENODEV;
+
+ /* Setup parent in sysfs */
+ phb->parent = &dev->dev;
+
+ /* Setup the PHB using arch provided callback */
+ if (ppc_md.pci_setup_phb(phb)) {
+ pcibios_free_controller(phb);
+ return -ENODEV;
+ }
+
+ /* Process "ranges" property */
+ pci_process_bridge_OF_ranges(phb, dev->node, 0);
+
+ /* Setup IO space.
+ * This will not work properly for ISA IOs, something needs to be done
+ * about it if we ever generalize that way of probing PCI bridges
+ */
+ pci_setup_phb_io_dynamic(phb, 0);
+
+ /* Init pci_dn data structures */
+ pci_devs_phb_init_dynamic(phb);
+
+ /* Register devices with EEH */
+#ifdef CONFIG_EEH
+ if (dev->node->child)
+ eeh_add_device_tree_early(dev->node);
+#endif /* CONFIG_EEH */
+
+ /* Scan the bus */
+ scan_phb(phb);
+
+ /* Claim resources. This might need some rework as well depending
+ * on whether we are doing probe-only or not, like assigning unassigned
+ * resources etc...
+ */
+ pcibios_claim_one_bus(phb->bus);
+
+ /* Finish EEH setup */
+#ifdef CONFIG_EEH
+ eeh_add_device_tree_late(phb->bus);
+#endif
+
+ /* Add probed PCI devices to the device model */
+ pci_bus_add_devices(phb->bus);
+
+ return 0;
+}
+
+static struct of_device_id of_pci_phb_ids[] = {
+ { .type = "pci", },
+ { .type = "pcix", },
+ { .type = "pcie", },
+ { .type = "pciex", },
+ { .type = "ht", },
+ {}
+};
+
+static struct of_platform_driver of_pci_phb_driver = {
+ .name = "of-pci",
+ .match_table = of_pci_phb_ids,
+ .probe = of_pci_phb_probe,
+ .driver = {
+ .multithread_probe = 1,
+ },
+};
+
+static __init int of_pci_phb_init(void)
+{
+ return of_register_platform_driver(&of_pci_phb_driver);
+}
+
+device_initcall(of_pci_phb_init);
+
+#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
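A brief usage note (not part of the patch): a driver targeting this new of_platform bus declares a match table and registers through of_register_platform_driver(), exactly as the of_pci_phb_driver above does. A hypothetical minimal driver, with made-up names and compatible string, might look like:

/* Hypothetical example; "example,my-dev" and the my_* names are
 * placeholders for illustration, not real bindings.
 */
static struct of_device_id my_match_table[] = {
	{ .compatible = "example,my-dev", },
	{},
};

static int my_probe(struct of_device *dev, const struct of_device_id *match)
{
	/* dev->node is the matched device_node; map registers, etc. */
	return 0;
}

static int my_remove(struct of_device *dev)
{
	return 0;
}

static struct of_platform_driver my_driver = {
	.name		= "my-dev",
	.match_table	= my_match_table,
	.probe		= my_probe,
	.remove		= my_remove,
};

static int __init my_init(void)
{
	return of_register_platform_driver(&my_driver);
}
device_initcall(my_init);

The corresponding devices are created at boot by of_platform_bus_probe(), which walks the device tree against the default bus list above (or an explicit match table) and calls of_platform_device_create() for each match.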
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 9b49f8691d2..8336deafc62 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
+#include <linux/list.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -99,7 +100,7 @@ pcibios_fixup_resources(struct pci_dev *dev)
continue;
if (res->end == 0xffffffff) {
DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
- pci_name(dev), i, res->start, res->end);
+ pci_name(dev), i, (u64)res->start, (u64)res->end);
res->end -= res->start;
res->start = 0;
res->flags |= IORESOURCE_UNSET;
@@ -115,11 +116,9 @@ pcibios_fixup_resources(struct pci_dev *dev)
if (offset != 0) {
res->start += offset;
res->end += offset;
-#ifdef DEBUG
- printk("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
- i, res->flags, pci_name(dev),
- res->start - offset, res->start);
-#endif
+ DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
+ i, res->flags, pci_name(dev),
+ (u64)res->start - offset, (u64)res->start);
}
}
@@ -255,7 +254,7 @@ pcibios_allocate_bus_resources(struct list_head *bus_list)
}
DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
- res->start, res->end, res->flags, pr);
+ (u64)res->start, (u64)res->end, res->flags, pr);
if (pr) {
if (request_resource(pr, res) == 0)
continue;
@@ -306,7 +305,7 @@ reparent_resources(struct resource *parent, struct resource *res)
for (p = res->child; p != NULL; p = p->sibling) {
p->parent = res;
DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
- p->name, p->start, p->end, res->name);
+ p->name, (u64)p->start, (u64)p->end, res->name);
}
return 0;
}
@@ -362,7 +361,7 @@ pci_relocate_bridge_resource(struct pci_bus *bus, int i)
}
if (request_resource(pr, res)) {
DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
- res->start, res->end);
+ (u64)res->start, (u64)res->end);
return -1; /* "can't happen" */
}
update_bridge_base(bus, i);
@@ -441,14 +440,14 @@ update_bridge_base(struct pci_bus *bus, int i)
end = res->end - off;
io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
- if (end > 0xffff) {
- pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
- start >> 16);
- pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
- end >> 16);
+ if (end > 0xffff)
io_base_lo |= PCI_IO_RANGE_TYPE_32;
- } else
+ else
io_base_lo |= PCI_IO_RANGE_TYPE_16;
+ pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
+ start >> 16);
+ pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
+ end >> 16);
pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
@@ -480,14 +479,14 @@ static inline void alloc_resource(struct pci_dev *dev, int idx)
struct resource *pr, *r = &dev->resource[idx];
DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
- pci_name(dev), idx, r->start, r->end, r->flags);
+ pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
pr = pci_find_parent_resource(dev, r);
if (!pr || request_resource(pr, r) < 0) {
printk(KERN_ERR "PCI: Cannot allocate resource region %d"
" of device %s\n", idx, pci_name(dev));
if (pr)
DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
- pr, pr->start, pr->end, pr->flags);
+ pr, (u64)pr->start, (u64)pr->end, pr->flags);
/* We'll assign a new address later */
r->flags |= IORESOURCE_UNSET;
r->end -= r->start;
@@ -737,25 +736,51 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
return NULL;
}
-static int
-scan_OF_pci_childs_iterator(struct device_node* node, void* data)
+static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
+ unsigned int devfn)
{
- const unsigned int *reg;
- u8* fdata = (u8*)data;
-
- reg = get_property(node, "reg", NULL);
- if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
- && ((reg[0] >> 16) & 0xff) == fdata[0])
- return 1;
- return 0;
+ struct device_node *np = NULL;
+ const u32 *reg;
+ unsigned int psize;
+
+ while ((np = of_get_next_child(parent, np)) != NULL) {
+ reg = get_property(np, "reg", &psize);
+ if (reg == NULL || psize < 4)
+ continue;
+ if (((reg[0] >> 8) & 0xff) == devfn)
+ return np;
+ }
+ return NULL;
}
-static struct device_node*
-scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
+
+static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
{
- u8 filter_data[2] = {bus, dev_fn};
+ struct device_node *parent, *np;
+
+ /* Are we a root bus ? */
+ if (bus->self == NULL || bus->parent == NULL) {
+ struct pci_controller *hose = pci_bus_to_hose(bus->number);
+ if (hose == NULL)
+ return NULL;
+ return of_node_get(hose->arch_data);
+ }
- return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
+ /* not a root bus, we need to get our parent */
+ parent = scan_OF_for_pci_bus(bus->parent);
+ if (parent == NULL)
+ return NULL;
+
+ /* now iterate over the children for a match */
+ np = scan_OF_for_pci_dev(parent, bus->self->devfn);
+ of_node_put(parent);
+
+ /* sanity check */
+ if (strcmp(np->type, "pci") != 0)
+ printk(KERN_WARNING "pci: wrong type \"%s\" for bridge %s\n",
+ np->type, np->full_name);
+
+ return np;
}
/*
@@ -764,43 +789,25 @@ scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
struct device_node *
pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
{
- struct pci_controller *hose;
- struct device_node *node;
- int busnr;
+ struct device_node *parent, *np;
if (!have_of)
return NULL;
-
- /* Lookup the hose */
- busnr = bus->number;
- hose = pci_bus_to_hose(busnr);
- if (!hose)
- return NULL;
- /* Check it has an OF node associated */
- node = (struct device_node *) hose->arch_data;
- if (!node)
+ DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
+ parent = scan_OF_for_pci_bus(bus);
+ if (parent == NULL)
return NULL;
-
- /* Fixup bus number according to what OF think it is. */
-#ifdef CONFIG_PPC_PMAC
- /* The G5 need a special case here. Basically, we don't remap all
- * busses on it so we don't create the pci-OF-map. However, we do
- * remap the AGP bus and so have to deal with it. A future better
- * fix has to be done by making the remapping per-host and always
- * filling the pci_to_OF map. --BenH
+ DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
+ np = scan_OF_for_pci_dev(parent, devfn);
+ of_node_put(parent);
+ DBG(" result is %s\n", np ? np->full_name : "<NULL>");
+
+ /* XXX most callers don't release the returned node
+ * mostly because ppc64 doesn't increase the refcount,
+ * we need to fix that.
*/
- if (machine_is(powermac) && busnr >= 0xf0)
- busnr -= 0xf0;
- else
-#endif
- if (pci_to_OF_bus_map)
- busnr = pci_to_OF_bus_map[busnr];
- if (busnr == 0xff)
- return NULL;
-
- /* Now, lookup childs of the hose */
- return scan_OF_childs_for_device(node->child, busnr, devfn);
+ return np;
}
EXPORT_SYMBOL(pci_busdev_to_OF_node);
@@ -960,7 +967,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
res->flags = IORESOURCE_IO;
res->start = ranges[2];
DBG("PCI: IO 0x%llx -> 0x%llx\n",
- res->start, res->start + size - 1);
+ (u64)res->start, (u64)res->start + size - 1);
break;
case 2: /* memory space */
memno = 0;
@@ -982,7 +989,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
res->flags |= IORESOURCE_PREFETCH;
res->start = ranges[na+2];
DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
- res->start, res->start + size - 1);
+ (u64)res->start, (u64)res->start + size - 1);
}
break;
}
@@ -1268,7 +1275,10 @@ pcibios_init(void)
if (pci_assign_all_buses)
hose->first_busno = next_busno;
hose->last_busno = 0xff;
- bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
+ bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
+ hose->ops, hose);
+ if (bus)
+ pci_bus_add_devices(bus);
hose->last_busno = bus->subordinate;
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
@@ -1282,10 +1292,6 @@ pcibios_init(void)
if (pci_assign_all_buses && have_of)
pcibios_make_OF_bus_map();
- /* Do machine dependent PCI interrupt routing */
- if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
- pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);
-
/* Call machine dependent fixup */
if (ppc_md.pcibios_fixup)
ppc_md.pcibios_fixup();
@@ -1308,25 +1314,6 @@ pcibios_init(void)
subsys_initcall(pcibios_init);
-unsigned char __init
-common_swizzle(struct pci_dev *dev, unsigned char *pinp)
-{
- struct pci_controller *hose = dev->sysdata;
-
- if (dev->bus->number != hose->first_busno) {
- u8 pin = *pinp;
- do {
- pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
- /* Move up the chain of bridges. */
- dev = dev->bus->self;
- } while (dev->bus->self);
- *pinp = pin;
-
- /* The slot is the idsel of the last bridge. */
- }
- return PCI_SLOT(dev->devfn);
-}
-
unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
unsigned long start, unsigned long size)
{
@@ -1338,6 +1325,7 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
unsigned long io_offset;
struct resource *res;
+ struct pci_dev *dev;
int i;
io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -1390,8 +1378,16 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
}
}
+ /* Platform specific bus fixups */
if (ppc_md.pcibios_fixup_bus)
ppc_md.pcibios_fixup_bus(bus);
+
+ /* Read default IRQs and fixup if necessary */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_read_irq_line(dev);
+ if (ppc_md.pci_irq_fixup)
+ ppc_md.pci_irq_fixup(dev);
+ }
}
char __init *pcibios_setup(char *str)
@@ -1556,7 +1552,7 @@ pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
- unsigned long *offset,
+ resource_size_t *offset,
enum pci_mmap_state mmap_state)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
@@ -1568,10 +1564,12 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
/* If memory, add on the PCI bridge address offset */
if (mmap_state == pci_mmap_mem) {
+#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
*offset += hose->pci_mem_offset;
+#endif
res_bit = IORESOURCE_MEM;
} else {
- io_offset = hose->io_base_virt - ___IO_BASE;
+ io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
*offset += io_offset;
res_bit = IORESOURCE_IO;
}
@@ -1636,9 +1634,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
- printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
- (unsigned long long)rp->start, prot);
-
return __pgprot(prot);
}
@@ -1707,7 +1702,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine)
{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
struct resource *rp;
int ret;
@@ -1820,21 +1815,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
resource_size_t *start, resource_size_t *end)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
- unsigned long offset = 0;
+ resource_size_t offset = 0;
if (hose == NULL)
return;
if (rsrc->flags & IORESOURCE_IO)
- offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys;
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+
+ /* We pass a fully fixed up address to userland for MMIO instead of
+ * a BAR value because X is lame and expects to be able to use that
+ * to pass to /dev/mem !
+ *
+ * That means that we'll have potentially 64 bits values where some
+ * userland apps only expect 32 (like X itself since it thinks only
+ * Sparc has 64 bits MMIO) but if we don't do that, we break it on
+ * 32 bits CHRPs :-(
+ *
+ * Hopefully, the sysfs interface is immune to that gunk. Once X
+ * has been fixed (and the fix spread enough), we can re-enable the
+ * 2 lines below and pass down a BAR value to userland. In that case
+ * we'll also have to re-enable the matching code in
+ * __pci_mmap_make_offset().
+ *
+ * BenH.
+ */
+#if 0
+ else if (rsrc->flags & IORESOURCE_MEM)
+ offset = hose->pci_mem_offset;
+#endif
- *start = rsrc->start + offset;
- *end = rsrc->end + offset;
+ *start = rsrc->start - offset;
+ *end = rsrc->end - offset;
}
-void __init
-pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
- int flags, char *name)
+void __init pci_init_resource(struct resource *res, resource_size_t start,
+ resource_size_t end, int flags, char *name)
{
res->start = start;
res->end = end;
@@ -1845,35 +1861,6 @@ pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
res->child = NULL;
}
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
-{
- unsigned long start = pci_resource_start(dev, bar);
- unsigned long len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (!len)
- return NULL;
- if (max && len > max)
- len = max;
- if (flags & IORESOURCE_IO)
- return ioport_map(start, len);
- if (flags & IORESOURCE_MEM)
- /* Not checking IORESOURCE_CACHEABLE because PPC does
- * not currently distinguish between ioremap and
- * ioremap_nocache.
- */
- return ioremap(start, len);
- /* What? */
- return NULL;
-}
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
- /* Nothing to do */
-}
-EXPORT_SYMBOL(pci_iomap);
-EXPORT_SYMBOL(pci_iounmap);
-
unsigned long pci_address_to_pio(phys_addr_t address)
{
struct pci_controller* hose = hose_head;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 78d3c0fc8df..a6b7692c726 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -42,11 +42,9 @@
unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;
-#ifdef CONFIG_PPC_MULTIPLATFORM
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
static void phbs_remap_io(void);
-#endif
/* pci_io_base -- the base address from which io bars are offsets.
* This is the lowest I/O base address (so bar values are always positive),
@@ -63,7 +61,7 @@ void iSeries_pcibios_init(void);
LIST_HEAD(hose_list);
-struct dma_mapping_ops pci_dma_ops;
+struct dma_mapping_ops *pci_dma_ops;
EXPORT_SYMBOL(pci_dma_ops);
int global_phb_number; /* Global phb counter */
@@ -199,13 +197,23 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
pci_setup_pci_controller(phb);
phb->arch_data = dev;
phb->is_dynamic = mem_init_done;
- if (dev)
- PHB_SET_NODE(phb, of_node_to_nid(dev));
+ if (dev) {
+ int nid = of_node_to_nid(dev);
+
+ if (nid < 0 || !node_online(nid))
+ nid = -1;
+
+ PHB_SET_NODE(phb, nid);
+ }
return phb;
}
void pcibios_free_controller(struct pci_controller *phb)
{
+ spin_lock(&hose_spinlock);
+ list_del(&phb->list_node);
+ spin_unlock(&hose_spinlock);
+
if (phb->is_dynamic)
kfree(phb);
}
@@ -245,7 +253,6 @@ static void __init pcibios_claim_of_setup(void)
pcibios_claim_one_bus(b);
}
-#ifdef CONFIG_PPC_MULTIPLATFORM
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
const u32 *prop;
@@ -323,7 +330,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_dev *dev;
const char *type;
- dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
if (!dev)
return NULL;
type = get_property(node, "device_type", NULL);
@@ -332,7 +339,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
DBG(" create device, devfn: %x, type: %s\n", devfn, type);
- memset(dev, 0, sizeof(struct pci_dev));
dev->bus = bus;
dev->sysdata = node;
dev->dev.parent = bus->bridge;
@@ -500,7 +506,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
void __devinit scan_phb(struct pci_controller *hose)
{
@@ -511,7 +516,7 @@ void __devinit scan_phb(struct pci_controller *hose)
DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
- bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
+ bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
if (bus == NULL) {
printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
hose->global_number);
@@ -534,7 +539,7 @@ void __devinit scan_phb(struct pci_controller *hose)
}
mode = PCI_PROBE_NORMAL;
-#ifdef CONFIG_PPC_MULTIPLATFORM
+
if (node && ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
DBG(" probe mode: %d\n", mode);
@@ -542,7 +547,7 @@ void __devinit scan_phb(struct pci_controller *hose)
bus->subordinate = hose->last_busno;
of_scan_bus(node, bus);
}
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+
if (mode == PCI_PROBE_NORMAL)
hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}
@@ -586,11 +591,9 @@ static int __init pcibios_init(void)
if (ppc64_isabridge_dev != NULL)
printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
-#ifdef CONFIG_PPC_MULTIPLATFORM
if (!firmware_has_feature(FW_FEATURE_ISERIES))
/* map in PCI I/O space */
phbs_remap_io();
-#endif
printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
@@ -679,7 +682,7 @@ int pci_proc_domain(struct pci_bus *bus)
* Returns negative error code on failure, zero on success.
*/
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
- unsigned long *offset,
+ resource_size_t *offset,
enum pci_mmap_state mmap_state)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -691,7 +694,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
/* If memory, add on the PCI bridge address offset */
if (mmap_state == pci_mmap_mem) {
+#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
*offset += hose->pci_mem_offset;
+#endif
res_bit = IORESOURCE_MEM;
} else {
io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
@@ -759,9 +764,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
- printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
- prot);
-
return __pgprot(prot);
}
@@ -829,7 +831,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
struct resource *rp;
int ret;
@@ -867,8 +869,6 @@ void pcibios_add_platform_entries(struct pci_dev *pdev)
device_create_file(&pdev->dev, &dev_attr_devspec);
}
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
#define ISA_SPACE_MASK 0x1
#define ISA_SPACE_IO 0x1
@@ -969,11 +969,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
res = NULL;
pci_space = ranges[0];
pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
-
- cpu_phys_addr = ranges[3];
- if (na >= 2)
- cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
-
+ cpu_phys_addr = of_translate_address(dev, &ranges[3]);
size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
ranges += np;
if (size == 0)
@@ -1139,7 +1135,7 @@ int unmap_bus_range(struct pci_bus *bus)
if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
return 1;
- if (iounmap_explicit((void __iomem *) start_virt, size))
+ if (__iounmap_explicit((void __iomem *) start_virt, size))
return 1;
return 0;
@@ -1207,23 +1203,52 @@ void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);
+void __devinit pcibios_setup_new_device(struct pci_dev *dev)
+{
+ struct dev_archdata *sd = &dev->dev.archdata;
+
+ sd->of_node = pci_device_to_OF_node(dev);
+
+ DBG("PCI device %s OF node: %s\n", pci_name(dev),
+ sd->of_node ? sd->of_node->full_name : "<none>");
+
+ sd->dma_ops = pci_dma_ops;
+#ifdef CONFIG_NUMA
+ sd->numa_node = pcibus_to_node(dev->bus);
+#else
+ sd->numa_node = -1;
+#endif
+ if (ppc_md.pci_dma_dev_setup)
+ ppc_md.pci_dma_dev_setup(dev);
+}
+EXPORT_SYMBOL(pcibios_setup_new_device);
static void __devinit do_bus_setup(struct pci_bus *bus)
{
struct pci_dev *dev;
- ppc_md.iommu_bus_setup(bus);
+ if (ppc_md.pci_dma_bus_setup)
+ ppc_md.pci_dma_bus_setup(bus);
list_for_each_entry(dev, &bus->devices, bus_list)
- ppc_md.iommu_dev_setup(dev);
+ pcibios_setup_new_device(dev);
- if (ppc_md.irq_bus_setup)
- ppc_md.irq_bus_setup(bus);
+ /* Read default IRQs and fixup if necessary */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_read_irq_line(dev);
+ if (ppc_md.pci_irq_fixup)
+ ppc_md.pci_irq_fixup(dev);
+ }
}
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
+ struct device_node *np;
+
+ np = pci_bus_to_OF_node(bus);
+
+ DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
if (dev && pci_probe_only &&
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
@@ -1307,20 +1332,41 @@ EXPORT_SYMBOL(pci_read_irq_line);
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
- u64 *start, u64 *end)
+ resource_size_t *start, resource_size_t *end)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
- unsigned long offset = 0;
+ resource_size_t offset = 0;
if (hose == NULL)
return;
if (rsrc->flags & IORESOURCE_IO)
- offset = pci_io_base - (unsigned long)hose->io_base_virt +
- hose->io_base_phys;
+ offset = (unsigned long)hose->io_base_virt - pci_io_base;
+
+ /* We pass a fully fixed up address to userland for MMIO instead of
+ * a BAR value because X is lame and expects to be able to use that
+ * to pass to /dev/mem !
+ *
+ * That means that we'll have potentially 64 bits values where some
+ * userland apps only expect 32 (like X itself since it thinks only
+ * Sparc has 64 bits MMIO) but if we don't do that, we break it on
+ * 32 bits CHRPs :-(
+ *
+ * Hopefully, the sysfs interface is immune to that gunk. Once X
+ * has been fixed (and the fix spread enough), we can re-enable the
+ * 2 lines below and pass down a BAR value to userland. In that case
+ * we'll also have to re-enable the matching code in
+ * __pci_mmap_make_offset().
+ *
+ * BenH.
+ */
+#if 0
+ else if (rsrc->flags & IORESOURCE_MEM)
+ offset = hose->pci_mem_offset;
+#endif
- *start = rsrc->start + offset;
- *end = rsrc->end + offset;
+ *start = rsrc->start - offset;
+ *end = rsrc->end - offset;
}
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
@@ -1337,8 +1383,6 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
return NULL;
}
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
unsigned long pci_address_to_pio(phys_addr_t address)
{
struct pci_controller *hose, *tmp;
diff --git a/arch/powerpc/kernel/pci_direct_iommu.c b/arch/powerpc/kernel/pci_direct_iommu.c
deleted file mode 100644
index 72ce082ce73..00000000000
--- a/arch/powerpc/kernel/pci_direct_iommu.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Support for DMA from PCI devices to main memory on
- * machines without an iommu or with directly addressable
- * RAM (typically a pmac with 2Gb of RAM or less)
- *
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/sections.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/abs_addr.h>
-#include <asm/ppc-pci.h>
-
-static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- void *ret;
-
- ret = (void *)__get_free_pages(flag, get_order(size));
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = virt_to_abs(ret);
- }
- return ret;
-}
-
-static void pci_direct_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
- size_t size, enum dma_data_direction direction)
-{
- return virt_to_abs(ptr);
-}
-
-static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- int i;
-
- for (i = 0; i < nents; i++, sg++) {
- sg->dma_address = page_to_phys(sg->page) + sg->offset;
- sg->dma_length = sg->length;
- }
-
- return nents;
-}
-
-static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_dma_supported(struct device *dev, u64 mask)
-{
- return mask < 0x100000000ull;
-}
-
-static struct dma_mapping_ops pci_direct_ops = {
- .alloc_coherent = pci_direct_alloc_coherent,
- .free_coherent = pci_direct_free_coherent,
- .map_single = pci_direct_map_single,
- .unmap_single = pci_direct_unmap_single,
- .map_sg = pci_direct_map_sg,
- .unmap_sg = pci_direct_unmap_sg,
- .dma_supported = pci_direct_dma_supported,
-};
-
-void __init pci_direct_iommu_init(void)
-{
- pci_dma_ops = pci_direct_ops;
-}
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
deleted file mode 100644
index 0688b2534ac..00000000000
--- a/arch/powerpc/kernel/pci_iommu.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- *
- * Rewrite, cleanup, new allocation schemes:
- * Copyright (C) 2004 Olof Johansson, IBM Corporation
- *
- * Dynamic DMA mapping support, platform-independent parts.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/ppc-pci.h>
-
-/*
- * We can use ->sysdata directly and avoid the extra work in
- * pci_device_to_OF_node since ->sysdata will have been initialised
- * in the iommu init code for all devices.
- */
-#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
-
-static inline struct iommu_table *device_to_table(struct device *hwdev)
-{
- struct pci_dev *pdev;
-
- if (!hwdev) {
- pdev = ppc64_isabridge_dev;
- if (!pdev)
- return NULL;
- } else
- pdev = to_pci_dev(hwdev);
-
- return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
-}
-
-
-static inline unsigned long device_to_mask(struct device *hwdev)
-{
- struct pci_dev *pdev;
-
- if (!hwdev) {
- pdev = ppc64_isabridge_dev;
- if (!pdev) /* This is the best guess we can do */
- return 0xfffffffful;
- } else
- pdev = to_pci_dev(hwdev);
-
- if (pdev->dma_mask)
- return pdev->dma_mask;
-
- /* Assume devices without mask can take 32 bit addresses */
- return 0xfffffffful;
-}
-
-
-/* Allocates a contiguous real buffer and creates mappings over it.
- * Returns the virtual address of the buffer and sets dma_handle
- * to the dma address (mapping) of the first page.
- */
-static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
- device_to_mask(hwdev), flag,
- pcibus_to_node(to_pci_dev(hwdev)->bus));
-}
-
-static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
-}
-
-/* Creates TCEs for a user provided buffer. The user buffer must be
- * contiguous real kernel storage (not vmalloc). The address of the buffer
- * passed here is the kernel (virtual) address of the buffer. The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
- */
-static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
- size_t size, enum dma_data_direction direction)
-{
- return iommu_map_single(device_to_table(hwdev), vaddr, size,
- device_to_mask(hwdev), direction);
-}
-
-
-static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
-}
-
-
-static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- return iommu_map_sg(pdev, device_to_table(pdev), sglist,
- nelems, device_to_mask(pdev), direction);
-}
-
-static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
-}
-
-/* We support DMA to/from any memory page via the iommu */
-static int pci_iommu_dma_supported(struct device *dev, u64 mask)
-{
- struct iommu_table *tbl = device_to_table(dev);
-
- if (!tbl || tbl->it_offset > mask) {
- printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
- if (tbl)
- printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n",
- mask, tbl->it_offset);
- else
- printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
- mask);
- return 0;
- } else
- return 1;
-}
-
-struct dma_mapping_ops pci_iommu_ops = {
- .alloc_coherent = pci_iommu_alloc_coherent,
- .free_coherent = pci_iommu_free_coherent,
- .map_single = pci_iommu_map_single,
- .unmap_single = pci_iommu_unmap_single,
- .map_sg = pci_iommu_map_sg,
- .unmap_sg = pci_iommu_unmap_sg,
- .dma_supported = pci_iommu_dma_supported,
-};
-
-void pci_iommu_init(void)
-{
- pci_dma_ops = pci_iommu_ops;
-}
diff --git a/arch/powerpc/kernel/perfmon_fsl_booke.c b/arch/powerpc/kernel/perfmon_fsl_booke.c
deleted file mode 100644
index e0dcf2b41fb..00000000000
--- a/arch/powerpc/kernel/perfmon_fsl_booke.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/* arch/powerpc/kernel/perfmon_fsl_booke.c
- * Freescale Book-E Performance Monitor code
- *
- * Author: Andy Fleming
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/prctl.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/reg.h>
-#include <asm/xmon.h>
-#include <asm/pmc.h>
-
-static inline u32 get_pmlca(int ctr);
-static inline void set_pmlca(int ctr, u32 pmlca);
-
-static inline u32 get_pmlca(int ctr)
-{
- u32 pmlca;
-
- switch (ctr) {
- case 0:
- pmlca = mfpmr(PMRN_PMLCA0);
- break;
- case 1:
- pmlca = mfpmr(PMRN_PMLCA1);
- break;
- case 2:
- pmlca = mfpmr(PMRN_PMLCA2);
- break;
- case 3:
- pmlca = mfpmr(PMRN_PMLCA3);
- break;
- default:
- panic("Bad ctr number\n");
- }
-
- return pmlca;
-}
-
-static inline void set_pmlca(int ctr, u32 pmlca)
-{
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- break;
- default:
- panic("Bad ctr number\n");
- }
-}
-
-void init_pmc_stop(int ctr)
-{
- u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
- PMLCA_FCM1 | PMLCA_FCM0);
- u32 pmlcb = 0;
-
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- mtpmr(PMRN_PMLCB0, pmlcb);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- mtpmr(PMRN_PMLCB1, pmlcb);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- mtpmr(PMRN_PMLCB2, pmlcb);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- mtpmr(PMRN_PMLCB3, pmlcb);
- break;
- default:
- panic("Bad ctr number!\n");
- }
-}
-
-void set_pmc_event(int ctr, int event)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
- ((event << PMLCA_EVENT_SHIFT) &
- PMLCA_EVENT_MASK);
-
- set_pmlca(ctr, pmlca);
-}
-
-void set_pmc_user_kernel(int ctr, int user, int kernel)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- if(user)
- pmlca &= ~PMLCA_FCU;
- else
- pmlca |= PMLCA_FCU;
-
- if(kernel)
- pmlca &= ~PMLCA_FCS;
- else
- pmlca |= PMLCA_FCS;
-
- set_pmlca(ctr, pmlca);
-}
-
-void set_pmc_marked(int ctr, int mark0, int mark1)
-{
- u32 pmlca = get_pmlca(ctr);
-
- if(mark0)
- pmlca &= ~PMLCA_FCM0;
- else
- pmlca |= PMLCA_FCM0;
-
- if(mark1)
- pmlca &= ~PMLCA_FCM1;
- else
- pmlca |= PMLCA_FCM1;
-
- set_pmlca(ctr, pmlca);
-}
-
-void pmc_start_ctr(int ctr, int enable)
-{
- u32 pmlca = get_pmlca(ctr);
-
- pmlca &= ~PMLCA_FC;
-
- if (enable)
- pmlca |= PMLCA_CE;
- else
- pmlca &= ~PMLCA_CE;
-
- set_pmlca(ctr, pmlca);
-}
-
-void pmc_start_ctrs(int enable)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 &= ~PMGC0_FAC;
- pmgc0 |= PMGC0_FCECE;
-
- if (enable)
- pmgc0 |= PMGC0_PMIE;
- else
- pmgc0 &= ~PMGC0_PMIE;
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-void pmc_stop_ctrs(void)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 |= PMGC0_FAC;
-
- pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-void dump_pmcs(void)
-{
- printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
- printk("pmc\t\tpmlca\t\tpmlcb\n");
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
- mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
- mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
- mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
- mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
-}
-
-EXPORT_SYMBOL(init_pmc_stop);
-EXPORT_SYMBOL(set_pmc_event);
-EXPORT_SYMBOL(set_pmc_user_kernel);
-EXPORT_SYMBOL(set_pmc_marked);
-EXPORT_SYMBOL(pmc_start_ctr);
-EXPORT_SYMBOL(pmc_start_ctrs);
-EXPORT_SYMBOL(pmc_stop_ctrs);
-EXPORT_SYMBOL(dump_pmcs);
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index a0a2efadeab..3d8f6f44641 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -71,7 +71,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
}
pmc_owner_caller = __builtin_return_address(0);
- perf_irq = new_perf_irq ? : dummy_perf;
+ perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
out:
spin_unlock(&pmc_owner_lock);
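
The pmc.c hunk swaps GCC's `x ?: y` conditional extension for the portable ternary form. A minimal sketch of the equivalence, with illustrative types and names (not the kernel's actual definitions):

#include <stdio.h>

typedef void (*perf_irq_t)(void *);

static void dummy_perf(void *regs)
{
	(void)regs;
}

static perf_irq_t pick_handler(perf_irq_t new_perf_irq)
{
	/* GNU extension:  new_perf_irq ?: dummy_perf
	 * Portable form:  new_perf_irq ? new_perf_irq : dummy_perf
	 * Both fall back to dummy_perf only when new_perf_irq is NULL. */
	return new_perf_irq ? new_perf_irq : dummy_perf;
}

int main(void)
{
	printf("%s\n", pick_handler(NULL) == dummy_perf ? "fallback" : "caller's");
	return 0;
}
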
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 807193a3c78..95776b6af4e 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -49,6 +49,10 @@
#include <asm/commproc.h>
#endif
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(local_irq_restore);
+#endif
+
#ifdef CONFIG_PPC32
extern void transfer_to_handler(void);
extern void do_IRQ(struct pt_regs *regs);
@@ -204,7 +208,7 @@ EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
extern long *intercept_table;
EXPORT_SYMBOL(intercept_table);
#endif /* CONFIG_PPC_STD_MMU_32 */
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_DCR_NATIVE
EXPORT_SYMBOL(__mtdcr);
EXPORT_SYMBOL(__mfdcr);
#endif
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
index f598cb51953..dd7001cacf7 100644
--- a/arch/powerpc/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -83,7 +83,7 @@ __initcall(proc_ppc64_init);
static loff_t page_map_seek( struct file *file, loff_t off, int whence)
{
loff_t new;
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
switch(whence) {
case 0:
@@ -106,13 +106,13 @@ static loff_t page_map_seek( struct file *file, loff_t off, int whence)
static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size);
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
if ((vma->vm_end - vma->vm_start) > dp->size)
return -EINVAL;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7b2f6452ba7..f3d4dd580dd 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -341,13 +341,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
static int instructions_to_print = 16;
-#ifdef CONFIG_PPC64
-#define BAD_PC(pc) ((REGION_ID(pc) != KERNEL_REGION_ID) && \
- (REGION_ID(pc) != VMALLOC_REGION_ID))
-#else
-#define BAD_PC(pc) ((pc) < KERNELBASE)
-#endif
-
static void show_instructions(struct pt_regs *regs)
{
int i;
@@ -366,7 +359,8 @@ static void show_instructions(struct pt_regs *regs)
* bad address because the pc *should* only be a
* kernel address.
*/
- if (BAD_PC(pc) || __get_user(instr, (unsigned int __user *)pc)) {
+ if (!__kernel_text_address(pc) ||
+ __get_user(instr, (unsigned int __user *)pc)) {
printk("XXXXXXXX ");
} else {
if (regs->nip == pc)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 865b9648d0d..1fc732a552d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -538,35 +538,31 @@ static struct ibm_pa_feature {
{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};
-static void __init check_cpu_pa_features(unsigned long node)
+static void __init scan_features(unsigned long node, unsigned char *ftrs,
+ unsigned long tablelen,
+ struct ibm_pa_feature *fp,
+ unsigned long ft_size)
{
- unsigned char *pa_ftrs;
- unsigned long len, tablelen, i, bit;
-
- pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
- if (pa_ftrs == NULL)
- return;
+ unsigned long i, len, bit;
/* find descriptor with type == 0 */
for (;;) {
if (tablelen < 3)
return;
- len = 2 + pa_ftrs[0];
+ len = 2 + ftrs[0];
if (tablelen < len)
return; /* descriptor 0 not found */
- if (pa_ftrs[1] == 0)
+ if (ftrs[1] == 0)
break;
tablelen -= len;
- pa_ftrs += len;
+ ftrs += len;
}
/* loop over bits we know about */
- for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) {
- struct ibm_pa_feature *fp = &ibm_pa_features[i];
-
- if (fp->pabyte >= pa_ftrs[0])
+ for (i = 0; i < ft_size; ++i, ++fp) {
+ if (fp->pabyte >= ftrs[0])
continue;
- bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
+ bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
@@ -577,16 +573,59 @@ static void __init check_cpu_pa_features(unsigned long node)
}
}
+static void __init check_cpu_pa_features(unsigned long node)
+{
+ unsigned char *pa_ftrs;
+ unsigned long tablelen;
+
+ pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
+ if (pa_ftrs == NULL)
+ return;
+
+ scan_features(node, pa_ftrs, tablelen,
+ ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
+}
+
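
scan_features() above walks descriptors laid out as a length byte, a type byte, and then the feature bitmap, with bit 0 being the most significant bit of each bitmap byte. A standalone sketch of that bit extraction, using made-up bitmap contents and arbitrary bit positions, purely for illustration:

#include <stdio.h>

int main(void)
{
	/* Descriptor as laid out in the patch: bitmap length byte,
	 * descriptor type byte, then the feature bitmap itself. */
	unsigned char ftrs[] = { 0x02, 0x00, 0xa4, 0x10 };
	unsigned int pabyte = 0, pabit = 2;	/* arbitrary positions */
	unsigned int bit;

	/* Same extraction as scan_features(): bit 0 is the MSB of a byte. */
	bit = (ftrs[2 + pabyte] >> (7 - pabit)) & 1;
	printf("byte %u bit %u -> %u\n", pabyte, pabit, bit);	/* 0xa4 = 10100100b, so bit 2 = 1 */
	return 0;
}
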
+static struct feature_property {
+ const char *name;
+ u32 min_value;
+ unsigned long cpu_feature;
+ unsigned long cpu_user_ftr;
+} feature_properties[] __initdata = {
+#ifdef CONFIG_ALTIVEC
+ {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
+ {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_PPC64
+ {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
+ {"ibm,purr", 1, CPU_FTR_PURR, 0},
+ {"ibm,spurr", 1, CPU_FTR_SPURR, 0},
+#endif /* CONFIG_PPC64 */
+};
+
+static void __init check_cpu_feature_properties(unsigned long node)
+{
+ unsigned long i;
+ struct feature_property *fp = feature_properties;
+ const u32 *prop;
+
+ for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
+ prop = of_get_flat_dt_prop(node, fp->name, NULL);
+ if (prop && *prop >= fp->min_value) {
+ cur_cpu_spec->cpu_features |= fp->cpu_feature;
+ cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
+ }
+ }
+}
+
static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth,
void *data)
{
static int logical_cpuid = 0;
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-#ifdef CONFIG_ALTIVEC
- u32 *prop;
-#endif
- u32 *intserv;
+ const u32 *prop;
+ const u32 *intserv;
int i, nthreads;
unsigned long len;
int found = 0;
@@ -643,24 +682,27 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
intserv[i]);
boot_cpuid = logical_cpuid;
set_hard_smp_processor_id(boot_cpuid, intserv[i]);
- }
-#ifdef CONFIG_ALTIVEC
- /* Check if we have a VMX and eventually update CPU features */
- prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
- if (prop && (*prop) > 0) {
- cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
- cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
- }
-
- /* Same goes for Apple's "altivec" property */
- prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
- if (prop) {
- cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
- cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
+ /*
+ * PAPR defines "logical" PVR values for cpus that
+ * meet various levels of the architecture:
+ * 0x0f000001 Architecture version 2.04
+ * 0x0f000002 Architecture version 2.05
+ * If the cpu-version property in the cpu node contains
+ * such a value, we call identify_cpu again with the
+ * logical PVR value in order to use the cpu feature
+ * bits appropriate for the architecture level.
+ *
+ * A POWER6 partition in "POWER6 architected" mode
+ * uses the 0x0f000002 PVR value; in POWER5+ mode
+ * it uses 0x0f000001.
+ */
+ prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+ if (prop && (*prop & 0xff000000) == 0x0f000000)
+ identify_cpu(0, *prop);
}
-#endif /* CONFIG_ALTIVEC */
+ check_cpu_feature_properties(node);
check_cpu_pa_features(node);
#ifdef CONFIG_PPC_PSERIES
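
The early_init_dt_scan_cpus() change keys on the fact that PAPR logical PVR values all carry 0x0f in the top byte, which is exactly what the `(*prop & 0xff000000) == 0x0f000000` test checks. A small sketch of that check, using the values quoted in the comment plus the POWER6 PVR range listed later in this patch:

#include <stdio.h>

static int is_logical_pvr(unsigned int pvr)
{
	/* Same mask test as early_init_dt_scan_cpus(): logical PVRs
	 * defined by PAPR all have 0x0f in the top byte. */
	return (pvr & 0xff000000) == 0x0f000000;
}

int main(void)
{
	printf("%d\n", is_logical_pvr(0x0f000001));	/* 1: Architecture 2.04 */
	printf("%d\n", is_logical_pvr(0x0f000002));	/* 1: Architecture 2.05 */
	printf("%d\n", is_logical_pvr(0x003e0000));	/* 0: a real POWER6 PVR range */
	return 0;
}
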
@@ -762,6 +804,56 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
return of_read_ulong(p, s);
}
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Interpret the ibm,dynamic-memory property in the
+ * /ibm,dynamic-reconfiguration-memory node.
+ * This contains a list of memory blocks along with NUMA affinity
+ * information.
+ */
+static int __init early_init_dt_scan_drconf_memory(unsigned long node)
+{
+ cell_t *dm, *ls;
+ unsigned long l, n;
+ unsigned long base, size, lmb_size, flags;
+
+ ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+ if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
+ return 0;
+ lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
+
+ dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
+ if (dm == NULL || l < sizeof(cell_t))
+ return 0;
+
+ n = *dm++; /* number of entries */
+ if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
+ return 0;
+
+ for (; n != 0; --n) {
+ base = dt_mem_next_cell(dt_root_addr_cells, &dm);
+ flags = dm[3];
+ /* skip DRC index, pad, assoc. list index, flags */
+ dm += 4;
+ /* skip this block if the reserved bit is set in flags (0x80)
+ or if the block is not assigned to this partition (0x8) */
+ if ((flags & 0x80) || !(flags & 0x8))
+ continue;
+ size = lmb_size;
+ if (iommu_is_off) {
+ if (base >= 0x80000000ul)
+ continue;
+ if ((base + size) > 0x80000000ul)
+ size = 0x80000000ul - base;
+ }
+ lmb_add(base, size);
+ }
+ lmb_dump_all();
+ return 0;
+}
+#else
+#define early_init_dt_scan_drconf_memory(node) 0
+#endif /* CONFIG_PPC_PSERIES */
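
A user-space sketch of the ibm,dynamic-memory walk performed by early_init_dt_scan_drconf_memory() above: one cell of entry count, then per entry the base-address cells followed by DRC index, pad, associativity-list index and flags, with flag bits 0x80 (reserved) and 0x8 (assigned) treated as in the kernel code. Cell widths and the sample data are invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define DRCONF_RESERVED	0x80
#define DRCONF_ASSIGNED	0x08

int main(void)
{
	/* Pretend dt_root_addr_cells == 1 for simplicity; real trees may use 2. */
	uint32_t prop[] = {
		2,					/* number of entries */
		0x10000000, 1, 0, 0, DRCONF_ASSIGNED,	/* base, drc, pad, aa, flags */
		0x20000000, 2, 0, 0, DRCONF_RESERVED,
	};
	uint32_t *dm = prop;
	uint32_t n = *dm++;

	for (; n != 0; --n) {
		uint32_t base = *dm++;
		uint32_t flags = dm[3];

		dm += 4;	/* skip DRC index, pad, assoc. list index, flags */
		if ((flags & DRCONF_RESERVED) || !(flags & DRCONF_ASSIGNED))
			continue;
		printf("would lmb_add(0x%x, lmb_size)\n", base);
	}
	return 0;
}
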
static int __init early_init_dt_scan_memory(unsigned long node,
const char *uname, int depth, void *data)
@@ -770,6 +862,11 @@ static int __init early_init_dt_scan_memory(unsigned long node,
cell_t *reg, *endp;
unsigned long l;
+ /* Look for the ibm,dynamic-reconfiguration-memory node */
+ if (depth == 1 &&
+ strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
+ return early_init_dt_scan_drconf_memory(node);
+
/* We are scanning "memory" nodes only */
if (type == NULL) {
/*
@@ -1014,7 +1111,7 @@ EXPORT_SYMBOL(find_all_nodes);
/** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
-int device_is_compatible(struct device_node *device, const char *compat)
+int device_is_compatible(const struct device_node *device, const char *compat)
{
const char* cp;
int cplen, l;
@@ -1491,7 +1588,8 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
-struct property *of_find_property(struct device_node *np, const char *name,
+struct property *of_find_property(const struct device_node *np,
+ const char *name,
int *lenp)
{
struct property *pp;
@@ -1512,7 +1610,8 @@ struct property *of_find_property(struct device_node *np, const char *name,
* Find a property with a given name for a given node
* and return the value.
*/
-const void *get_property(struct device_node *np, const char *name, int *lenp)
+const void *get_property(const struct device_node *np, const char *name,
+ int *lenp)
{
struct property *pp = of_find_property(np,name,lenp);
return pp ? pp->value : NULL;
@@ -1672,6 +1771,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
}
return NULL;
}
+EXPORT_SYMBOL(of_get_cpu_node);
#ifdef DEBUG
static struct debugfs_blob_wrapper flat_dt_blob;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b91761639d9..520ef42f642 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -173,8 +173,8 @@ static unsigned long __initdata dt_string_start, dt_string_end;
static unsigned long __initdata prom_initrd_start, prom_initrd_end;
#ifdef CONFIG_PPC64
-static int __initdata iommu_force_on;
-static int __initdata ppc64_iommu_off;
+static int __initdata prom_iommu_force_on;
+static int __initdata prom_iommu_off;
static unsigned long __initdata prom_tce_alloc_start;
static unsigned long __initdata prom_tce_alloc_end;
#endif
@@ -582,9 +582,9 @@ static void __init early_cmdline_parse(void)
while (*opt && *opt == ' ')
opt++;
if (!strncmp(opt, RELOC("off"), 3))
- RELOC(ppc64_iommu_off) = 1;
+ RELOC(prom_iommu_off) = 1;
else if (!strncmp(opt, RELOC("force"), 5))
- RELOC(iommu_force_on) = 1;
+ RELOC(prom_iommu_force_on) = 1;
}
#endif
}
@@ -627,6 +627,7 @@ static void __init early_cmdline_parse(void)
/* Option vector 3: processor options supported */
#define OV3_FP 0x80 /* floating point */
#define OV3_VMX 0x40 /* VMX/Altivec */
+#define OV3_DFP 0x20 /* decimal FP */
/* Option vector 5: PAPR/OF options supported */
#define OV5_LPAR 0x80 /* logical partitioning supported */
@@ -642,6 +643,7 @@ static void __init early_cmdline_parse(void)
static unsigned char ibm_architecture_vec[] = {
W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
W(0xffff0000), W(0x003e0000), /* POWER6 */
+ W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
5 - 1, /* 5 option vectors */
@@ -668,7 +670,7 @@ static unsigned char ibm_architecture_vec[] = {
/* option vector 3: processor options supported */
3 - 2, /* length */
0, /* don't ignore, don't halt */
- OV3_FP | OV3_VMX,
+ OV3_FP | OV3_VMX | OV3_DFP,
/* option vector 4: IBM PAPR implementation */
2 - 2, /* length */
@@ -677,7 +679,7 @@ static unsigned char ibm_architecture_vec[] = {
/* option vector 5: PAPR/OF options */
3 - 2, /* length */
0, /* don't ignore, don't halt */
- OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES,
+ OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY,
};
/* Old method - ELF header with PT_NOTE sections */
@@ -1167,7 +1169,7 @@ static void __init prom_initialize_tce_table(void)
u64 local_alloc_top, local_alloc_bottom;
u64 i;
- if (RELOC(ppc64_iommu_off))
+ if (RELOC(prom_iommu_off))
return;
prom_debug("starting prom_initialize_tce_table\n");
@@ -2283,11 +2285,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* Fill in some infos for use by the kernel later on
*/
#ifdef CONFIG_PPC64
- if (RELOC(ppc64_iommu_off))
+ if (RELOC(prom_iommu_off))
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
NULL, 0);
- if (RELOC(iommu_force_on))
+ if (RELOC(prom_iommu_force_on))
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
NULL, 0);
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 603dff3ad62..0dfbe1cd28e 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -25,6 +25,12 @@
#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
(ns) > 0)
+static struct of_bus *of_match_bus(struct device_node *np);
+static int __of_address_to_resource(struct device_node *dev,
+ const u32 *addrp, u64 size, unsigned int flags,
+ struct resource *r);
+
+
/* Debug utility */
#ifdef DEBUG
static void of_dump_addr(const char *s, const u32 *addr, int na)
@@ -101,6 +107,7 @@ static unsigned int of_bus_default_get_flags(const u32 *addr)
}
+#ifdef CONFIG_PCI
/*
* PCI bus specific translator
*/
@@ -153,15 +160,156 @@ static unsigned int of_bus_pci_get_flags(const u32 *addr)
switch((w >> 24) & 0x03) {
case 0x01:
flags |= IORESOURCE_IO;
+ break;
case 0x02: /* 32 bits */
case 0x03: /* 64 bits */
flags |= IORESOURCE_MEM;
+ break;
}
if (w & 0x40000000)
flags |= IORESOURCE_PREFETCH;
return flags;
}
+const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
+ unsigned int *flags)
+{
+ const u32 *prop;
+ unsigned int psize;
+ struct device_node *parent;
+ struct of_bus *bus;
+ int onesize, i, na, ns;
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ return NULL;
+ bus = of_match_bus(parent);
+ if (strcmp(bus->name, "pci")) {
+ of_node_put(parent);
+ return NULL;
+ }
+ bus->count_cells(dev, &na, &ns);
+ of_node_put(parent);
+ if (!OF_CHECK_COUNTS(na, ns))
+ return NULL;
+
+ /* Get "reg" or "assigned-addresses" property */
+ prop = get_property(dev, bus->addresses, &psize);
+ if (prop == NULL)
+ return NULL;
+ psize /= 4;
+
+ onesize = na + ns;
+ for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
+ if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
+ if (size)
+ *size = of_read_number(prop + na, ns);
+ if (flags)
+ *flags = bus->get_flags(prop);
+ return prop;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_pci_address);
+
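
of_get_pci_address() matches a BAR by the config-space offset encoded in the low byte of the first address cell: `(prop[0] & 0xff) == (bar_no * 4) + PCI_BASE_ADDRESS_0`. A quick illustration of which offsets that yields, using the standard 0x10 value of PCI_BASE_ADDRESS_0:

#include <stdio.h>

#define PCI_BASE_ADDRESS_0 0x10	/* standard config-space offset of BAR0 */

int main(void)
{
	int bar_no;

	/* Which "assigned-addresses" entry corresponds to which BAR. */
	for (bar_no = 0; bar_no < 6; bar_no++)
		printf("BAR%d -> phys.hi low byte 0x%02x\n",
		       bar_no, (bar_no * 4) + PCI_BASE_ADDRESS_0);
	return 0;
}
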
+int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ const u32 *addrp;
+ u64 size;
+ unsigned int flags;
+
+ addrp = of_get_pci_address(dev, bar, &size, &flags);
+ if (addrp == NULL)
+ return -EINVAL;
+ return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
+{
+ return (((pin - 1) + slot) % 4) + 1;
+}
+
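
of_irq_pci_swizzle() is the standard interrupt swizzle applied when crossing a P2P bridge that has no device node. A worked example of the formula for a device in slot 3 (pins 1-4 are INTA-INTD):

#include <stdio.h>

static unsigned char swizzle(unsigned char slot, unsigned char pin)
{
	return (((pin - 1) + slot) % 4) + 1;	/* same formula as of_irq_pci_swizzle() */
}

int main(void)
{
	unsigned char pin;

	for (pin = 1; pin <= 4; pin++)
		printf("slot 3, INT%c -> parent INT%c\n",
		       'A' + pin - 1, 'A' + swizzle(3, pin) - 1);
	return 0;
}
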
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+{
+ struct device_node *dn, *ppnode;
+ struct pci_dev *ppdev;
+ u32 lspec;
+ u32 laddr[3];
+ u8 pin;
+ int rc;
+
+	/* Check if we have a device node, if yes, fall back to standard OF
+ * parsing
+ */
+ dn = pci_device_to_OF_node(pdev);
+ if (dn)
+ return of_irq_map_one(dn, 0, out_irq);
+
+ /* Ok, we don't, time to have fun. Let's start by building up an
+	 * interrupt spec. We assume #interrupt-cells is 1, which is standard
+	 * for PCI. If yours differs, don't use this routine.
+ */
+ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (rc != 0)
+ return rc;
+ /* No pin, exit */
+ if (pin == 0)
+ return -ENODEV;
+
+ /* Now we walk up the PCI tree */
+ lspec = pin;
+ for (;;) {
+ /* Get the pci_dev of our parent */
+ ppdev = pdev->bus->self;
+
+ /* Ouch, it's a host bridge... */
+ if (ppdev == NULL) {
+#ifdef CONFIG_PPC64
+ ppnode = pci_bus_to_OF_node(pdev->bus);
+#else
+ struct pci_controller *host;
+ host = pci_bus_to_host(pdev->bus);
+ ppnode = host ? host->arch_data : NULL;
+#endif
+ /* No node for host bridge ? give up */
+ if (ppnode == NULL)
+ return -EINVAL;
+ } else
+ /* We found a P2P bridge, check if it has a node */
+ ppnode = pci_device_to_OF_node(ppdev);
+
+ /* Ok, we have found a parent with a device-node, hand over to
+ * the OF parsing code.
+ * We build a unit address from the linux device to be used for
+ * resolution. Note that we use the linux bus number which may
+ * not match your firmware bus numbering.
+ * Fortunately, in most cases, interrupt-map-mask doesn't include
+ * the bus number as part of the matching.
+		 * You should still be careful about that though if you intend
+		 * to rely on this function (i.e. if you ship firmware that doesn't
+		 * create device nodes for all PCI devices).
+ */
+ if (ppnode)
+ break;
+
+ /* We can only get here if we hit a P2P bridge with no node,
+ * let's do standard swizzling and try again
+ */
+ lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+ pdev = ppdev;
+ }
+
+ laddr[0] = (pdev->bus->number << 16)
+ | (pdev->devfn << 8);
+ laddr[1] = laddr[2] = 0;
+ return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
+}
+EXPORT_SYMBOL_GPL(of_irq_map_pci);
+#endif /* CONFIG_PCI */
+
/*
* ISA bus specific translator
*/
@@ -223,6 +371,7 @@ static unsigned int of_bus_isa_get_flags(const u32 *addr)
*/
static struct of_bus of_busses[] = {
+#ifdef CONFIG_PCI
/* PCI */
{
.name = "pci",
@@ -233,6 +382,7 @@ static struct of_bus of_busses[] = {
.translate = of_bus_pci_translate,
.get_flags = of_bus_pci_get_flags,
},
+#endif /* CONFIG_PCI */
/* ISA */
{
.name = "isa",
@@ -445,48 +595,6 @@ const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
-const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
- unsigned int *flags)
-{
- const u32 *prop;
- unsigned int psize;
- struct device_node *parent;
- struct of_bus *bus;
- int onesize, i, na, ns;
-
- /* Get parent & match bus type */
- parent = of_get_parent(dev);
- if (parent == NULL)
- return NULL;
- bus = of_match_bus(parent);
- if (strcmp(bus->name, "pci")) {
- of_node_put(parent);
- return NULL;
- }
- bus->count_cells(dev, &na, &ns);
- of_node_put(parent);
- if (!OF_CHECK_COUNTS(na, ns))
- return NULL;
-
- /* Get "reg" or "assigned-addresses" property */
- prop = get_property(dev, bus->addresses, &psize);
- if (prop == NULL)
- return NULL;
- psize /= 4;
-
- onesize = na + ns;
- for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
- if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
- if (size)
- *size = of_read_number(prop + na, ns);
- if (flags)
- *flags = bus->get_flags(prop);
- return prop;
- }
- return NULL;
-}
-EXPORT_SYMBOL(of_get_pci_address);
-
static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
u64 size, unsigned int flags,
struct resource *r)
@@ -529,20 +637,6 @@ int of_address_to_resource(struct device_node *dev, int index,
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
-int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r)
-{
- const u32 *addrp;
- u64 size;
- unsigned int flags;
-
- addrp = of_get_pci_address(dev, bar, &size, &flags);
- if (addrp == NULL)
- return -EINVAL;
- return __of_address_to_resource(dev, addrp, size, flags, r);
-}
-EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
-
void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
unsigned long *busno, unsigned long *phys, unsigned long *size)
{
@@ -898,87 +992,3 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
return res;
}
EXPORT_SYMBOL_GPL(of_irq_map_one);
-
-#ifdef CONFIG_PCI
-static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
-{
- return (((pin - 1) + slot) % 4) + 1;
-}
-
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
-{
- struct device_node *dn, *ppnode;
- struct pci_dev *ppdev;
- u32 lspec;
- u32 laddr[3];
- u8 pin;
- int rc;
-
- /* Check if we have a device node, if yes, fallback to standard OF
- * parsing
- */
- dn = pci_device_to_OF_node(pdev);
- if (dn)
- return of_irq_map_one(dn, 0, out_irq);
-
- /* Ok, we don't, time to have fun. Let's start by building up an
- * interrupt spec. we assume #interrupt-cells is 1, which is standard
- * for PCI. If you do different, then don't use that routine.
- */
- rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
- if (rc != 0)
- return rc;
- /* No pin, exit */
- if (pin == 0)
- return -ENODEV;
-
- /* Now we walk up the PCI tree */
- lspec = pin;
- for (;;) {
- /* Get the pci_dev of our parent */
- ppdev = pdev->bus->self;
-
- /* Ouch, it's a host bridge... */
- if (ppdev == NULL) {
-#ifdef CONFIG_PPC64
- ppnode = pci_bus_to_OF_node(pdev->bus);
-#else
- struct pci_controller *host;
- host = pci_bus_to_host(pdev->bus);
- ppnode = host ? host->arch_data : NULL;
-#endif
- /* No node for host bridge ? give up */
- if (ppnode == NULL)
- return -EINVAL;
- } else
- /* We found a P2P bridge, check if it has a node */
- ppnode = pci_device_to_OF_node(ppdev);
-
- /* Ok, we have found a parent with a device-node, hand over to
- * the OF parsing code.
- * We build a unit address from the linux device to be used for
- * resolution. Note that we use the linux bus number which may
- * not match your firmware bus numbering.
- * Fortunately, in most cases, interrupt-map-mask doesn't include
- * the bus number as part of the matching.
- * You should still be careful about that though if you intend
- * to rely on this function (you ship a firmware that doesn't
- * create device nodes for all PCI devices).
- */
- if (ppnode)
- break;
-
- /* We can only get here if we hit a P2P bridge with no node,
- * let's do standard swizzling and try again
- */
- lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
- pdev = ppdev;
- }
-
- laddr[0] = (pdev->bus->number << 16)
- | (pdev->devfn << 8);
- laddr[1] = laddr[2] = 0;
- return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
-}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
-#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 6ef80d4e38d..76b5d7ebdcc 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -303,6 +303,12 @@ int rtas_token(const char *service)
}
EXPORT_SYMBOL(rtas_token);
+int rtas_service_present(const char *service)
+{
+ return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
+}
+EXPORT_SYMBOL(rtas_service_present);
+
#ifdef CONFIG_RTAS_ERROR_LOGGING
/*
* Return the firmware-specified size of the error log buffer
@@ -810,31 +816,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
return 0;
}
-/* This version can't take the spinlock, because it never returns */
-
-struct rtas_args rtas_stop_self_args = {
- /* The token is initialized for real in setup_system() */
- .token = RTAS_UNKNOWN_SERVICE,
- .nargs = 0,
- .nret = 1,
- .rets = &rtas_stop_self_args.args[0],
-};
-
-void rtas_stop_self(void)
-{
- struct rtas_args *rtas_args = &rtas_stop_self_args;
-
- local_irq_disable();
-
- BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
-
- printk("cpu %u (hwid %u) Ready to die...\n",
- smp_processor_id(), hard_smp_processor_id());
- enter_rtas(__pa(rtas_args));
-
- panic("Alas, I survived.\n");
-}
-
/*
* Call early during boot, before mem init or bootmem, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
@@ -879,9 +860,6 @@ void __init rtas_initialize(void)
#endif
rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
-#ifdef CONFIG_HOTPLUG_CPU
- rtas_stop_self_args.token = rtas_token("stop-self");
-#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_RTAS_ERROR_LOGGING
rtas_last_error_token = rtas_token("rtas-last-error");
#endif
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 1442b63a75d..0c4fcd34bfe 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -72,6 +72,10 @@
#define VALIDATE_BUF_SIZE 4096
#define RTAS_MSG_MAXLEN 64
+/* Quirk - RTAS requires 4k list length and block size */
+#define RTAS_BLKLIST_LENGTH 4096
+#define RTAS_BLK_SIZE 4096
+
struct flash_block {
char *data;
unsigned long length;
@@ -83,7 +87,7 @@ struct flash_block {
* into a version/length and translate the pointers
* to absolute.
*/
-#define FLASH_BLOCKS_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct flash_block))
+#define FLASH_BLOCKS_PER_NODE ((RTAS_BLKLIST_LENGTH - 16) / sizeof(struct flash_block))
struct flash_block_list {
unsigned long num_blocks;
struct flash_block_list *next;
@@ -96,6 +100,9 @@ struct flash_block_list_header { /* just the header of flash_block_list */
static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
+/* Use slab cache to guarantee 4k alignment */
+static struct kmem_cache *flash_block_cache = NULL;
+
#define FLASH_BLOCK_LIST_VERSION (1UL)
/* Local copy of the flash block list.
@@ -153,7 +160,7 @@ static int flash_list_valid(struct flash_block_list *flist)
return FLASH_IMG_NULL_DATA;
}
block_size = f->blocks[i].length;
- if (block_size <= 0 || block_size > PAGE_SIZE) {
+ if (block_size <= 0 || block_size > RTAS_BLK_SIZE) {
return FLASH_IMG_BAD_LEN;
}
image_size += block_size;
@@ -177,16 +184,16 @@ static void free_flash_list(struct flash_block_list *f)
while (f) {
for (i = 0; i < f->num_blocks; i++)
- free_page((unsigned long)(f->blocks[i].data));
+ kmem_cache_free(flash_block_cache, f->blocks[i].data);
next = f->next;
- free_page((unsigned long)f);
+ kmem_cache_free(flash_block_cache, f);
f = next;
}
}
static int rtas_flash_release(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_update_flash_t *uf;
uf = (struct rtas_update_flash_t *) dp->data;
@@ -248,7 +255,7 @@ static void get_flash_status_msg(int status, char *buf)
static ssize_t rtas_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_update_flash_t *uf;
char msg[RTAS_MSG_MAXLEN];
int msglen;
@@ -278,6 +285,12 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
return msglen;
}
+/* constructor for flash_block_cache */
+void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags)
+{
+ memset(ptr, 0, RTAS_BLK_SIZE);
+}
+
/* We could be much more efficient here. But to keep this function
* simple we allocate a page to the block list no matter how small the
* count is. If the system is low on memory it will be just as well
@@ -286,7 +299,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_update_flash_t *uf;
char *p;
int next_free;
@@ -302,7 +315,7 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
* proc file
*/
if (uf->flist == NULL) {
- uf->flist = (struct flash_block_list *) get_zeroed_page(GFP_KERNEL);
+ uf->flist = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
if (!uf->flist)
return -ENOMEM;
}
@@ -313,21 +326,21 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
next_free = fl->num_blocks;
if (next_free == FLASH_BLOCKS_PER_NODE) {
/* Need to allocate another block_list */
- fl->next = (struct flash_block_list *)get_zeroed_page(GFP_KERNEL);
+ fl->next = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
if (!fl->next)
return -ENOMEM;
fl = fl->next;
next_free = 0;
}
- if (count > PAGE_SIZE)
- count = PAGE_SIZE;
- p = (char *)get_zeroed_page(GFP_KERNEL);
+ if (count > RTAS_BLK_SIZE)
+ count = RTAS_BLK_SIZE;
+ p = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
if (!p)
return -ENOMEM;
if(copy_from_user(p, buffer, count)) {
- free_page((unsigned long)p);
+ kmem_cache_free(flash_block_cache, p);
return -EFAULT;
}
fl->blocks[next_free].data = p;
@@ -378,7 +391,7 @@ static void manage_flash(struct rtas_manage_flash_t *args_buf)
static ssize_t manage_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_manage_flash_t *args_buf;
char msg[RTAS_MSG_MAXLEN];
int msglen;
@@ -408,7 +421,7 @@ static ssize_t manage_flash_read(struct file *file, char __user *buf,
static ssize_t manage_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_manage_flash_t *args_buf;
const char reject_str[] = "0";
const char commit_str[] = "1";
@@ -479,7 +492,7 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
static ssize_t validate_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_validate_flash_t *args_buf;
char msg[RTAS_MSG_MAXLEN];
int msglen;
@@ -507,7 +520,7 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
static ssize_t validate_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_validate_flash_t *args_buf;
int rc;
@@ -556,7 +569,7 @@ done:
static int validate_flash_release(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+ struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_validate_flash_t *args_buf;
args_buf = (struct rtas_validate_flash_t *) dp->data;
@@ -668,14 +681,12 @@ static int initialize_flash_pde_data(const char *rtas_call_name,
int *status;
int token;
- dp->data = kmalloc(buf_size, GFP_KERNEL);
+ dp->data = kzalloc(buf_size, GFP_KERNEL);
if (dp->data == NULL) {
remove_flash_pde(dp);
return -ENOMEM;
}
- memset(dp->data, 0, buf_size);
-
/*
* This code assumes that the status int is the first member of the
* struct
@@ -791,6 +802,16 @@ int __init rtas_flash_init(void)
goto cleanup;
rtas_flash_term_hook = rtas_flash_firmware;
+
+ flash_block_cache = kmem_cache_create("rtas_flash_cache",
+ RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
+ rtas_block_ctor, NULL);
+ if (!flash_block_cache) {
+ printk(KERN_ERR "%s: failed to create block cache\n",
+ __FUNCTION__);
+ rc = -ENOMEM;
+ goto cleanup;
+ }
return 0;
cleanup:
@@ -805,6 +826,10 @@ cleanup:
void __exit rtas_flash_cleanup(void)
{
rtas_flash_term_hook = NULL;
+
+ if (flash_block_cache)
+ kmem_cache_destroy(flash_block_cache);
+
remove_flash_pde(firmware_flash_pde);
remove_flash_pde(firmware_update_pde);
remove_flash_pde(validate_pde);
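
The flash code now takes its blocks from a dedicated slab cache sized and aligned to RTAS_BLK_SIZE, presumably so the blocks stay exactly 4 KiB even on configurations whose page size is larger. A user-space sketch of the same idea, with posix_memalign standing in for the slab cache and its zeroing constructor:

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define RTAS_BLK_SIZE 4096	/* RTAS wants 4 KiB blocks and lists */

int main(void)
{
	void *blk;

	if (posix_memalign(&blk, RTAS_BLK_SIZE, RTAS_BLK_SIZE))
		return 1;
	memset(blk, 0, RTAS_BLK_SIZE);	/* the kernel cache zeroes blocks in its constructor */

	printf("block at %p, 4k aligned: %s\n", blk,
	       ((uintptr_t)blk % RTAS_BLK_SIZE) == 0 ? "yes" : "no");
	free(blk);
	return 0;
}
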
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index b4a0de79c06..ace9f4c86e6 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -38,6 +38,7 @@
#include <asm/rtas.h>
#include <asm/mpic.h>
#include <asm/ppc-pci.h>
+#include <asm/eeh.h>
/* RTAS tokens */
static int read_pci_config;
@@ -231,32 +232,13 @@ void __init init_pci_config_tokens (void)
unsigned long __devinit get_phb_buid (struct device_node *phb)
{
- int addr_cells;
- const unsigned int *buid_vals;
- unsigned int len;
- unsigned long buid;
-
- if (ibm_read_pci_config == -1) return 0;
+ struct resource r;
- /* PHB's will always be children of the root node,
- * or so it is promised by the current firmware. */
- if (phb->parent == NULL)
+ if (ibm_read_pci_config == -1)
return 0;
- if (phb->parent->parent)
- return 0;
-
- buid_vals = get_property(phb, "reg", &len);
- if (buid_vals == NULL)
+ if (of_address_to_resource(phb, 0, &r))
return 0;
-
- addr_cells = prom_n_addr_cells(phb);
- if (addr_cells == 1) {
- buid = (unsigned long) buid_vals[0];
- } else {
- buid = (((unsigned long)buid_vals[0]) << 32UL) |
- (((unsigned long)buid_vals[1]) & 0xffffffff);
- }
- return buid;
+ return r.start;
}
static int phb_set_bus_ranges(struct device_node *dev,
@@ -276,8 +258,10 @@ static int phb_set_bus_ranges(struct device_node *dev,
return 0;
}
-int __devinit setup_phb(struct device_node *dev, struct pci_controller *phb)
+int __devinit rtas_setup_phb(struct pci_controller *phb)
{
+ struct device_node *dev = phb->arch_data;
+
if (is_python(dev))
python_countermeasures(dev);
@@ -309,7 +293,7 @@ unsigned long __init find_and_init_phbs(void)
phb = pcibios_alloc_controller(node);
if (!phb)
continue;
- setup_phb(node, phb);
+ rtas_setup_phb(phb);
pci_process_bridge_OF_ranges(phb, node, 0);
pci_setup_phb_io(phb, index == 0);
index++;
@@ -381,7 +365,6 @@ int pcibios_remove_root_bus(struct pci_controller *phb)
}
}
- list_del(&phb->list_node);
pcibios_free_controller(phb);
return 0;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 191d0ab0922..61c65d19ef0 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -63,10 +63,6 @@ unsigned int DMA_MODE_WRITE;
int have_of = 1;
-#ifdef CONFIG_PPC_MULTIPLATFORM
-dev_t boot_dev;
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
#ifdef CONFIG_VGA_CONSOLE
unsigned long vgacon_remap_base;
#endif
@@ -91,6 +87,7 @@ int ucache_bsize;
unsigned long __init early_init(unsigned long dt_ptr)
{
unsigned long offset = reloc_offset();
+ struct cpu_spec *spec;
/* First zero the BSS -- use memset_io, some platforms don't have
* caches on yet */
@@ -100,8 +97,11 @@ unsigned long __init early_init(unsigned long dt_ptr)
* Identify the CPU type and fix up code sections
* that depend on which cpu we have.
*/
- identify_cpu(offset, 0);
- do_cpu_ftr_fixups(offset);
+ spec = identify_cpu(offset, mfspr(SPRN_PVR));
+
+ do_feature_fixups(spec->cpu_features,
+ PTRRELOC(&__start___ftr_fixup),
+ PTRRELOC(&__stop___ftr_fixup));
return KERNELBASE + offset;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4b2e32eab9d..3733de30e84 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
+#include <linux/pci.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
@@ -71,7 +72,6 @@
int have_of = 1;
int boot_cpuid = 0;
-dev_t boot_dev;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
@@ -170,6 +170,9 @@ void __init setup_paca(int cpu)
void __init early_setup(unsigned long dt_ptr)
{
+ /* Identify CPU type */
+ identify_cpu(0, mfspr(SPRN_PVR));
+
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
setup_paca(0);
@@ -223,8 +226,8 @@ void early_setup_secondary(void)
{
struct paca_struct *lpaca = get_paca();
- /* Mark enabled in PACA */
- lpaca->proc_enabled = 0;
+ /* Mark interrupts enabled in PACA */
+ lpaca->soft_enabled = 0;
/* Initialize hash table for that CPU */
htab_initialize_secondary();
@@ -348,6 +351,14 @@ void __init setup_system(void)
{
DBG(" -> setup_system()\n");
+ /* Apply the CPUs-specific and firmware specific fixups to kernel
+ * text (nop out sections not relevant to this CPU or this firmware)
+ */
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ &__start___ftr_fixup, &__stop___ftr_fixup);
+ do_feature_fixups(powerpc_firmware_features,
+ &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+
/*
* Unflatten the device-tree passed by prom_init or kexec
*/
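
The two do_feature_fixups() calls added above patch kernel text according to the CPU and firmware feature masks. A schematic of what such a fixup pass does; the data layout and names here are invented for illustration, the real entries presumably coming from the __ftr_fixup/__fw_ftr_fixup sections referenced in this patch:

#include <stdio.h>

#define PPC_NOP	0x60000000u	/* ori r0,r0,0 - the canonical PowerPC no-op */

struct fixup {
	unsigned long mask;		/* feature bits this section cares about */
	unsigned long value;		/* required value of those bits */
	unsigned int *start, *end;	/* instruction range to patch out */
};

/* Schematic only: nop out every range whose feature test fails. */
static void apply_fixups(unsigned long features, struct fixup *f, int n)
{
	int i;
	unsigned int *p;

	for (i = 0; i < n; i++, f++) {
		if ((features & f->mask) == f->value)
			continue;	/* feature present: leave the code alone */
		for (p = f->start; p < f->end; p++)
			*p = PPC_NOP;
	}
}

int main(void)
{
	unsigned int code[4] = { 1, 2, 3, 4 };
	struct fixup f = { 0x1, 0x1, code, code + 4 };

	apply_fixups(0x0, &f, 1);	/* feature bit clear -> range becomes nops */
	printf("%08x %08x\n", code[0], code[3]);
	return 0;
}
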
@@ -381,7 +392,8 @@ void __init setup_system(void)
* setting up the hash table pointers. It also sets up some interrupt-mapping
* related options that will be used by finish_device_tree()
*/
- ppc_md.init_early();
+ if (ppc_md.init_early)
+ ppc_md.init_early();
/*
* We can discover serial ports now since the above did setup the
@@ -587,3 +599,10 @@ void __init setup_per_cpu_areas(void)
}
}
#endif
+
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+struct ppc_pci_io ppc_pci_io;
+EXPORT_SYMBOL(ppc_pci_io);
+#endif /* CONFIG_PPC_INDIRECT_IO */
+
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 320353f0926..e4ebe1a6228 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -36,7 +36,7 @@
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
#endif
#include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index de59c6c31a5..bc892e69b4f 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -78,7 +78,7 @@ static int __devinit start_contest(int cmd, long offset, int num)
{
int i, score=0;
u64 tb;
- long mark;
+ u64 mark;
tbsync->cmd = cmd;
@@ -116,8 +116,7 @@ void __devinit smp_generic_give_timebase(void)
printk("Synchronizing timebase\n");
/* if this fails then this kernel won't work anyway... */
- tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL );
- memset( tbsync, 0, sizeof(*tbsync) );
+ tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
mb();
running = 1;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 35c6309bdb7..9b28c238b6c 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -65,6 +65,7 @@ cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(cpu_sibling_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index d15c33e9595..03a2a2f30d6 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -51,6 +51,7 @@
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/ppc-pci.h>
+#include <asm/syscalls.h>
/* readdir & getdents */
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index d45a168bdac..400ab2b946e 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -181,6 +181,8 @@ SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(purr, SPRN_PURR);
+SYSFS_PMCSETUP(spurr, SPRN_SPURR);
+SYSFS_PMCSETUP(dscr, SPRN_DSCR);
static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
@@ -194,16 +196,17 @@ static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
static SYSDEV_ATTR(purr, 0600, show_purr, NULL);
+static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
+static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
static void register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
-#ifndef CONFIG_PPC_ISERIES
- if (cpu_has_feature(CPU_FTR_SMT))
+ if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
+ cpu_has_feature(CPU_FTR_SMT))
sysdev_create_file(s, &attr_smt_snooze_delay);
-#endif
/* PMC stuff */
@@ -232,6 +235,12 @@ static void register_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_PURR))
sysdev_create_file(s, &attr_purr);
+
+ if (cpu_has_feature(CPU_FTR_SPURR))
+ sysdev_create_file(s, &attr_spurr);
+
+ if (cpu_has_feature(CPU_FTR_DSCR))
+ sysdev_create_file(s, &attr_dscr);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -240,12 +249,11 @@ static void unregister_cpu_online(unsigned int cpu)
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
- BUG_ON(c->no_control);
+ BUG_ON(!c->hotpluggable);
-#ifndef CONFIG_PPC_ISERIES
- if (cpu_has_feature(CPU_FTR_SMT))
+ if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
+ cpu_has_feature(CPU_FTR_SMT))
sysdev_remove_file(s, &attr_smt_snooze_delay);
-#endif
/* PMC stuff */
@@ -274,6 +282,12 @@ static void unregister_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_PURR))
sysdev_remove_file(s, &attr_purr);
+
+ if (cpu_has_feature(CPU_FTR_SPURR))
+ sysdev_remove_file(s, &attr_spurr);
+
+ if (cpu_has_feature(CPU_FTR_DSCR))
+ sysdev_remove_file(s, &attr_dscr);
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -299,6 +313,72 @@ static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
.notifier_call = sysfs_cpu_notify,
};
+static DEFINE_MUTEX(cpu_mutex);
+
+int cpu_add_sysdev_attr(struct sysdev_attribute *attr)
+{
+ int cpu;
+
+ mutex_lock(&cpu_mutex);
+
+ for_each_possible_cpu(cpu) {
+ sysdev_create_file(get_cpu_sysdev(cpu), attr);
+ }
+
+ mutex_unlock(&cpu_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr);
+
+int cpu_add_sysdev_attr_group(struct attribute_group *attrs)
+{
+ int cpu;
+ struct sys_device *sysdev;
+
+ mutex_lock(&cpu_mutex);
+
+ for_each_possible_cpu(cpu) {
+ sysdev = get_cpu_sysdev(cpu);
+ sysfs_create_group(&sysdev->kobj, attrs);
+ }
+
+ mutex_unlock(&cpu_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group);
+
+
+void cpu_remove_sysdev_attr(struct sysdev_attribute *attr)
+{
+ int cpu;
+
+ mutex_lock(&cpu_mutex);
+
+ for_each_possible_cpu(cpu) {
+ sysdev_remove_file(get_cpu_sysdev(cpu), attr);
+ }
+
+ mutex_unlock(&cpu_mutex);
+}
+EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr);
+
+void cpu_remove_sysdev_attr_group(struct attribute_group *attrs)
+{
+ int cpu;
+ struct sys_device *sysdev;
+
+ mutex_lock(&cpu_mutex);
+
+ for_each_possible_cpu(cpu) {
+ sysdev = get_cpu_sysdev(cpu);
+ sysfs_remove_group(&sysdev->kobj, attrs);
+ }
+
+ mutex_unlock(&cpu_mutex);
+}
+EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group);
+
+
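
cpu_add_sysdev_attr() and its group variant let callers attach a sysdev attribute to every possible CPU without open-coding the loop. A hypothetical caller, following the SYSDEV_ATTR pattern already used in this file; the attribute, its show routine, and the assumption that the new prototype is visible from a header are all illustrative:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>

/* Sketch only: "frobnicate" is an invented attribute. */
static ssize_t show_frobnicate(struct sys_device *dev, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static SYSDEV_ATTR(frobnicate, 0400, show_frobnicate, NULL);

static int __init frobnicate_init(void)
{
	return cpu_add_sysdev_attr(&attr_frobnicate);
}
device_initcall(frobnicate_init);
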
/* NUMA stuff */
#ifdef CONFIG_NUMA
@@ -360,10 +440,10 @@ static int __init topology_init(void)
* CPU. For instance, the boot cpu might never be valid
* for hotplugging.
*/
- if (!ppc_md.cpu_die)
- c->no_control = 1;
+ if (ppc_md.cpu_die)
+ c->hotpluggable = 1;
- if (cpu_online(cpu) || (c->no_control == 0)) {
+ if (cpu_online(cpu) || c->hotpluggable) {
register_cpu(c, cpu);
sysdev_create_file(&c->sysdev, &attr_physical_id);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5b59bc18dfe..f6f0c6b07c4 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -220,11 +220,8 @@ static void account_process_time(struct pt_regs *regs)
*/
struct cpu_purr_data {
int initialized; /* thread is running */
- u64 tb0; /* timebase at origin time */
- u64 purr0; /* PURR at origin time */
u64 tb; /* last TB value read */
u64 purr; /* last PURR value read */
- u64 stolen; /* stolen time so far */
spinlock_t lock;
};
@@ -234,10 +231,8 @@ static void snapshot_tb_and_purr(void *data)
{
struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
- p->tb0 = mftb();
- p->purr0 = mfspr(SPRN_PURR);
- p->tb = p->tb0;
- p->purr = 0;
+ p->tb = mftb();
+ p->purr = mfspr(SPRN_PURR);
wmb();
p->initialized = 1;
}
@@ -258,37 +253,24 @@ void snapshot_timebases(void)
void calculate_steal_time(void)
{
- u64 tb, purr, t0;
+ u64 tb, purr;
s64 stolen;
- struct cpu_purr_data *p0, *pme, *phim;
- int cpu;
+ struct cpu_purr_data *pme;
if (!cpu_has_feature(CPU_FTR_PURR))
return;
- cpu = smp_processor_id();
- pme = &per_cpu(cpu_purr_data, cpu);
+ pme = &per_cpu(cpu_purr_data, smp_processor_id());
if (!pme->initialized)
return; /* this can happen in early boot */
- p0 = &per_cpu(cpu_purr_data, cpu & ~1);
- phim = &per_cpu(cpu_purr_data, cpu ^ 1);
- spin_lock(&p0->lock);
+ spin_lock(&pme->lock);
tb = mftb();
- purr = mfspr(SPRN_PURR) - pme->purr0;
- if (!phim->initialized || !cpu_online(cpu ^ 1)) {
- stolen = (tb - pme->tb) - (purr - pme->purr);
- } else {
- t0 = pme->tb0;
- if (phim->tb0 < t0)
- t0 = phim->tb0;
- stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
- }
- if (stolen > 0) {
+ purr = mfspr(SPRN_PURR);
+ stolen = (tb - pme->tb) - (purr - pme->purr);
+ if (stolen > 0)
account_steal_time(current, stolen);
- p0->stolen += stolen;
- }
pme->tb = tb;
pme->purr = purr;
- spin_unlock(&p0->lock);
+ spin_unlock(&pme->lock);
}
/*
@@ -297,30 +279,17 @@ void calculate_steal_time(void)
*/
static void snapshot_purr(void)
{
- int cpu;
- u64 purr;
- struct cpu_purr_data *p0, *pme, *phim;
+ struct cpu_purr_data *pme;
unsigned long flags;
if (!cpu_has_feature(CPU_FTR_PURR))
return;
- cpu = smp_processor_id();
- pme = &per_cpu(cpu_purr_data, cpu);
- p0 = &per_cpu(cpu_purr_data, cpu & ~1);
- phim = &per_cpu(cpu_purr_data, cpu ^ 1);
- spin_lock_irqsave(&p0->lock, flags);
- pme->tb = pme->tb0 = mftb();
- purr = mfspr(SPRN_PURR);
- if (!phim->initialized) {
- pme->purr = 0;
- pme->purr0 = purr;
- } else {
- /* set p->purr and p->purr0 for no change in p0->stolen */
- pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
- pme->purr0 = purr - pme->purr;
- }
+ pme = &per_cpu(cpu_purr_data, smp_processor_id());
+ spin_lock_irqsave(&pme->lock, flags);
+ pme->tb = mftb();
+ pme->purr = mfspr(SPRN_PURR);
pme->initialized = 1;
- spin_unlock_irqrestore(&p0->lock, flags);
+ spin_unlock_irqrestore(&pme->lock, flags);
}
#endif /* CONFIG_PPC_SPLPAR */
@@ -662,7 +631,8 @@ void timer_interrupt(struct pt_regs * regs)
calculate_steal_time();
#ifdef CONFIG_PPC_ISERIES
- get_lppaca()->int_dword.fields.decr_int = 0;
+ if (firmware_has_feature(FW_FEATURE_ISERIES))
+ get_lppaca()->int_dword.fields.decr_int = 0;
#endif
while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
@@ -705,7 +675,7 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(next_dec);
#ifdef CONFIG_PPC_ISERIES
- if (hvlpevent_is_pending())
+ if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
process_hvlpevents();
#endif
@@ -805,7 +775,7 @@ int do_settimeofday(struct timespec *tv)
* settimeofday to perform this operation.
*/
#ifdef CONFIG_PPC_ISERIES
- if (first_settimeofday) {
+ if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) {
iSeries_tb_recal();
first_settimeofday = 0;
}
@@ -1045,48 +1015,6 @@ void __init time_init(void)
set_dec(tb_ticks_per_jiffy);
}
-#ifdef CONFIG_RTC_CLASS
-static int set_rtc_class_time(struct rtc_time *tm)
-{
- int err;
- struct class_device *class_dev =
- rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
-
- if (class_dev == NULL)
- return -ENODEV;
-
- err = rtc_set_time(class_dev, tm);
-
- rtc_class_close(class_dev);
-
- return 0;
-}
-
-static void get_rtc_class_time(struct rtc_time *tm)
-{
- int err;
- struct class_device *class_dev =
- rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
-
- if (class_dev == NULL)
- return;
-
- err = rtc_read_time(class_dev, tm);
-
- rtc_class_close(class_dev);
-
- return;
-}
-
-int __init rtc_class_hookup(void)
-{
- ppc_md.get_rtc_time = get_rtc_class_time;
- ppc_md.set_rtc_time = set_rtc_class_time;
-
- return 0;
-}
-#endif /* CONFIG_RTC_CLASS */
-
#define FEBRUARY 2
#define STARTOFTIME 1970
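The steal-time rework above drops the cross-thread bookkeeping and keeps one (tb, purr) snapshot per CPU: stolen time over an interval is simply the elapsed timebase ticks minus the PURR ticks the thread actually ran. A stand-alone sketch of that arithmetic, with names and types that are illustrative rather than taken from the patch:

	struct purr_sample {
		unsigned long long tb;		/* last timebase value seen */
		unsigned long long purr;	/* last PURR value seen */
	};

	/* Returns the ticks stolen since the previous sample and updates it. */
	static long long steal_ticks(struct purr_sample *prev,
				     unsigned long long tb_now,
				     unsigned long long purr_now)
	{
		long long stolen = (long long)(tb_now - prev->tb) -
				   (long long)(purr_now - prev->purr);

		prev->tb = tb_now;
		prev->purr = purr_now;

		return stolen > 0 ? stolen : 0;	/* only positive deltas are accounted */
	}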
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d9f10f2fc37..535f5066564 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -32,6 +32,7 @@
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
+#include <linux/bug.h>
#include <asm/kdebug.h>
#include <asm/pgtable.h>
@@ -53,10 +54,6 @@
#endif
#include <asm/kexec.h>
-#ifdef CONFIG_PPC64 /* XXX */
-#define _IO_BASE pci_io_base
-#endif
-
#ifdef CONFIG_DEBUGGER
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
@@ -241,7 +238,7 @@ void system_reset_exception(struct pt_regs *regs)
*/
static inline int check_io_access(struct pt_regs *regs)
{
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+#ifdef CONFIG_PPC32
unsigned long msr = regs->msr;
const struct exception_table_entry *entry;
unsigned int *nip = (unsigned int *)regs->nip;
@@ -274,7 +271,7 @@ static inline int check_io_access(struct pt_regs *regs)
return 1;
}
}
-#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
+#endif /* CONFIG_PPC32 */
return 0;
}
@@ -731,54 +728,9 @@ static int emulate_instruction(struct pt_regs *regs)
return -EINVAL;
}
-/*
- * Look through the list of trap instructions that are used for BUG(),
- * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
- * that the exception was caused by a trap instruction of some kind.
- * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
- * otherwise.
- */
-extern struct bug_entry __start___bug_table[], __stop___bug_table[];
-
-#ifndef CONFIG_MODULES
-#define module_find_bug(x) NULL
-#endif
-
-struct bug_entry *find_bug(unsigned long bugaddr)
-{
- struct bug_entry *bug;
-
- for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
- if (bugaddr == bug->bug_addr)
- return bug;
- return module_find_bug(bugaddr);
-}
-
-static int check_bug_trap(struct pt_regs *regs)
+int is_valid_bugaddr(unsigned long addr)
{
- struct bug_entry *bug;
- unsigned long addr;
-
- if (regs->msr & MSR_PR)
- return 0; /* not in kernel */
- addr = regs->nip; /* address of trap instruction */
- if (addr < PAGE_OFFSET)
- return 0;
- bug = find_bug(regs->nip);
- if (bug == NULL)
- return 0;
- if (bug->line & BUG_WARNING_TRAP) {
- /* this is a WARN_ON rather than BUG/BUG_ON */
- printk(KERN_ERR "Badness in %s at %s:%ld\n",
- bug->function, bug->file,
- bug->line & ~BUG_WARNING_TRAP);
- dump_stack();
- return 1;
- }
- printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
- bug->function, bug->file, bug->line);
-
- return 0;
+ return is_kernel_addr(addr);
}
void __kprobes program_check_exception(struct pt_regs *regs)
@@ -786,6 +738,8 @@ void __kprobes program_check_exception(struct pt_regs *regs)
unsigned int reason = get_reason(regs);
extern int do_mathemu(struct pt_regs *regs);
+ /* We can now get here via a FP Unavailable exception if the core
+ * has no FPU, in which case no reason flags will be set. */
#ifdef CONFIG_MATH_EMULATION
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
* but there seems to be a hardware bug on the 405GP (RevD)
@@ -812,7 +766,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
if (debugger_bpt(regs))
return;
- if (check_bug_trap(regs)) {
+
+ if (!(regs->msr & MSR_PR) && /* not user-mode */
+ report_bug(regs->nip) == BUG_TRAP_TYPE_WARN) {
regs->nip += 4;
return;
}
@@ -843,7 +799,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
void alignment_exception(struct pt_regs *regs)
{
- int fixed = 0;
+ int sig, code, fixed = 0;
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
@@ -857,14 +813,16 @@ void alignment_exception(struct pt_regs *regs)
/* Operand address was bad */
if (fixed == -EFAULT) {
- if (user_mode(regs))
- _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
- else
- /* Search exception table */
- bad_page_fault(regs, regs->dar, SIGSEGV);
- return;
+ sig = SIGSEGV;
+ code = SEGV_ACCERR;
+ } else {
+ sig = SIGBUS;
+ code = BUS_ADRALN;
}
- _exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
+ if (user_mode(regs))
+ _exception(sig, regs, code, regs->dar);
+ else
+ bad_page_fault(regs, regs->dar, sig);
}
void StackOverflow(struct pt_regs *regs)
@@ -900,14 +858,13 @@ void kernel_fp_unavailable_exception(struct pt_regs *regs)
void altivec_unavailable_exception(struct pt_regs *regs)
{
-#if !defined(CONFIG_ALTIVEC)
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return;
}
-#endif
+
printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1a7e19cdab3..a4b28c73bba 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -36,6 +36,8 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
+#include "setup.h"
+
#undef DEBUG
#ifdef DEBUG
@@ -262,7 +264,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
/* Allocate a VMA structure and fill it up */
- vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma == NULL) {
rc = -ENOMEM;
goto fail_mmapsem;
@@ -586,6 +588,43 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
return 0;
}
+
+static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
+ struct lib64_elfinfo *v64)
+{
+ void *start32;
+ unsigned long size32;
+
+#ifdef CONFIG_PPC64
+ void *start64;
+ unsigned long size64;
+
+ start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
+ if (start64)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ start64, start64 + size64);
+
+ start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
+ if (start64)
+ do_feature_fixups(powerpc_firmware_features,
+ start64, start64 + size64);
+#endif /* CONFIG_PPC64 */
+
+ start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
+ if (start32)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ start32, start32 + size32);
+
+#ifdef CONFIG_PPC64
+ start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
+ if (start32)
+ do_feature_fixups(powerpc_firmware_features,
+ start32, start32 + size32);
+#endif /* CONFIG_PPC64 */
+
+ return 0;
+}
+
static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
@@ -634,6 +673,9 @@ static __init int vdso_setup(void)
if (vdso_fixup_datapage(&v32, &v64))
return -1;
+ if (vdso_fixup_features(&v32, &v64))
+ return -1;
+
if (vdso_fixup_alt_funcs(&v32, &v64))
return -1;
@@ -714,6 +756,7 @@ void __init vdso_init(void)
 * Setup the syscall map in the vDSO
*/
vdso_setup_syscall_map();
+
/*
* Initialize the vDSO images in memory, that is do necessary
* fixups of vDSO symbols, locate trampolines, etc...
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 6187af2d54c..26e138c4ce1 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -32,6 +32,18 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
+ . = ALIGN(8);
+ __ftr_fixup : {
+ *(__ftr_fixup)
+ }
+
+#ifdef CONFIG_PPC64
+ . = ALIGN(8);
+ __fw_ftr_fixup : {
+ *(__fw_ftr_fixup)
+ }
+#endif
+
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index 56e76ff5498..40ffd9b6cef 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -229,8 +229,10 @@ V_FUNCTION_BEGIN(__do_get_xsec)
xor r0,r8,r8 /* create dependency */
add r3,r3,r0
- /* Get TB & offset it */
- mftb r7
+ /* Get TB & offset it. We use the MFTB macro which will generate
+ * workaround code for Cell.
+ */
+ MFTB(r7)
ld r9,CFG_TB_ORIG_STAMP(r3)
subf r7,r9,r7
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4a2b6dc0960..2d70f35d50b 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -31,6 +31,16 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
+ . = ALIGN(8);
+ __ftr_fixup : {
+ *(__ftr_fixup)
+ }
+
+ . = ALIGN(8);
+ __fw_ftr_fixup : {
+ *(__fw_ftr_fixup)
+ }
+
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index cb87e71eec6..a80f8f1d2e5 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -81,20 +81,20 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
struct iommu_table *tbl;
unsigned long offset, size;
- dma_window = get_property(dev->dev.platform_data,
- "ibm,my-dma-window", NULL);
+ dma_window = get_property(dev->dev.archdata.of_node,
+ "ibm,my-dma-window", NULL);
if (!dma_window)
return NULL;
tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
- of_parse_dma_window(dev->dev.platform_data, dma_window,
- &tbl->it_index, &offset, &size);
+ of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
+ &tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */
- tbl->it_size = size >> PAGE_SHIFT;
+ tbl->it_size = size >> IOMMU_PAGE_SHIFT;
/* offset for VIO should always be 0 */
- tbl->it_offset = offset >> PAGE_SHIFT;
+ tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
@@ -117,7 +117,8 @@ static const struct vio_device_id *vio_match_device(
{
while (ids->type[0] != '\0') {
if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
- device_is_compatible(dev->dev.platform_data, ids->compat))
+ device_is_compatible(dev->dev.archdata.of_node,
+ ids->compat))
return ids;
ids++;
}
@@ -198,9 +199,9 @@ EXPORT_SYMBOL(vio_unregister_driver);
/* vio_dev refcount hit 0 */
static void __devinit vio_dev_release(struct device *dev)
{
- if (dev->platform_data) {
- /* XXX free TCE table */
- of_node_put(dev->platform_data);
+ if (dev->archdata.of_node) {
+ /* XXX should free TCE table */
+ of_node_put(dev->archdata.of_node);
}
kfree(to_vio_dev(dev));
}
@@ -210,7 +211,7 @@ static void __devinit vio_dev_release(struct device *dev)
* @of_node: The OF node for this device.
*
* Creates and initializes a vio_dev structure from the data in
- * of_node (dev.platform_data) and adds it to the list of virtual devices.
+ * of_node and adds it to the list of virtual devices.
* Returns a pointer to the created vio_dev or NULL if node has
* NULL device_type or compatible fields.
*/
@@ -240,8 +241,6 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
if (viodev == NULL)
return NULL;
- viodev->dev.platform_data = of_node_get(of_node);
-
viodev->irq = irq_of_parse_and_map(of_node, 0);
snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
@@ -254,7 +253,10 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
if (unit_address != NULL)
viodev->unit_address = *unit_address;
}
- viodev->iommu_table = vio_build_iommu_table(viodev);
+ viodev->dev.archdata.of_node = of_node_get(of_node);
+ viodev->dev.archdata.dma_ops = &dma_iommu_ops;
+ viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
+ viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
/* init generic 'struct device' fields: */
viodev->dev.parent = &vio_bus_device.dev;
@@ -285,10 +287,11 @@ static int __init vio_bus_init(void)
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
iommu_vio_init();
- vio_bus_device.iommu_table = &vio_iommu_table;
+ vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops;
+ vio_bus_device.dev.archdata.dma_data = &vio_iommu_table;
iSeries_vio_dev = &vio_bus_device.dev;
}
-#endif
+#endif /* CONFIG_PPC_ISERIES */
err = bus_register(&vio_bus_type);
if (err) {
@@ -336,7 +339,7 @@ static ssize_t name_show(struct device *dev,
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct device_node *of_node = dev->platform_data;
+ struct device_node *of_node = dev->archdata.of_node;
return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}
@@ -353,62 +356,6 @@ void __devinit vio_unregister_device(struct vio_dev *viodev)
}
EXPORT_SYMBOL(vio_unregister_device);
-static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
- size_t size, enum dma_data_direction direction)
-{
- return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
- ~0ul, direction);
-}
-
-static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size,
- direction);
-}
-
-static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
- nelems, ~0ul, direction);
-}
-
-static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction);
-}
-
-static void *vio_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
- dma_handle, ~0ul, flag, -1);
-}
-
-static void vio_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr,
- dma_handle);
-}
-
-static int vio_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-struct dma_mapping_ops vio_dma_ops = {
- .alloc_coherent = vio_alloc_coherent,
- .free_coherent = vio_free_coherent,
- .map_single = vio_map_single,
- .unmap_single = vio_unmap_single,
- .map_sg = vio_map_sg,
- .unmap_sg = vio_unmap_sg,
- .dma_supported = vio_dma_supported,
-};
-
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -422,13 +369,14 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
- struct device_node *dn = dev->platform_data;
+ struct device_node *dn;
const char *cp;
int length;
if (!num_envp)
return -ENOMEM;
+ dn = dev->archdata.of_node;
if (!dn)
return -ENODEV;
cp = get_property(dn, "compatible", &length);
@@ -465,7 +413,7 @@ struct bus_type vio_bus_type = {
*/
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
- return get_property(vdev->dev.platform_data, which, length);
+ return get_property(vdev->dev.archdata.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
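The per-bus vio_dma_ops wrappers removed above become unnecessary because the device now carries everything the shared IOMMU DMA code needs in dev->archdata (of_node, dma_ops, dma_data, numa_node), filled in by vio_register_device_node(). Roughly, and assuming the dma_64.c side of this patch, the generic ops fetch the table from archdata instead of a VIO-specific field; an illustrative sketch:

	/* Illustrative only: how a generic dma_iommu operation can find its
	 * table once archdata is populated as in the hunk above. */
	static inline struct iommu_table *dev_iommu_table(struct device *dev)
	{
		return (struct iommu_table *)dev->archdata.dma_data;
	}

	static dma_addr_t example_map_single(struct device *dev, void *vaddr,
					     size_t size,
					     enum dma_data_direction direction)
	{
		/* the real code may also derive a proper DMA mask from the device */
		return iommu_map_single(dev_iommu_table(dev), vaddr, size,
					~0ul, direction);
	}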
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index cb0e8d46c3e..04b8e71bf5b 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -33,6 +33,7 @@ SECTIONS
/* Text and gots */
.text : {
+ _text = .;
*(.text .text.*)
SCHED_TEXT
LOCK_TEXT
@@ -61,11 +62,7 @@ SECTIONS
__stop___ex_table = .;
}
- __bug_table : {
- __start___bug_table = .;
- *(__bug_table)
- __stop___bug_table = .;
- }
+ BUG_TABLE
/*
* Init sections discarded at runtime
@@ -108,13 +105,7 @@ SECTIONS
.initcall.init : {
__initcall_start = .;
- *(.initcall1.init)
- *(.initcall2.init)
- *(.initcall3.init)
- *(.initcall4.init)
- *(.initcall5.init)
- *(.initcall6.init)
- *(.initcall7.init)
+ INITCALLS
__initcall_end = .;
}