author    Linus Torvalds <torvalds@linux-foundation.org>  2018-08-17 11:32:50 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-08-17 11:32:50 -0700
commit    5e2d059b52e397d9ac42f4c4d9d9a841887b5818
tree      c8cd8fd7187113be33e29fcc75f45a8bbc27e6b2 /arch/powerpc
parent    d190775206d06397a9309421cac5ba2f2c243521
parent    a2dc009afa9ae8b92305be7728676562a104cb40
Merge tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Notable changes:

   - A fix for a bug in our page table fragment allocator, where a page
     table page could be freed and reallocated for something else while
     still in use, leading to memory corruption etc. The fix reuses
     pt_mm in struct page (x86 only) for a powerpc only refcount.

   - Fixes to our pkey support. Several are user-visible changes, but
     bring us in to line with x86 behaviour and/or fix outright bugs.
     Thanks to Florian Weimer for reporting many of these.

   - A series to improve the hvc driver & related OPAL console code,
     which have been seen to cause hardlockups at times. The hvc driver
     changes in particular have been in linux-next for ~month.

   - Increase our MAX_PHYSMEM_BITS to 128TB when SPARSEMEM_VMEMMAP=y.

   - Remove Power8 DD1 and Power9 DD1 support, neither chip should be
     in use anywhere other than as a paper weight.

   - An optimised memcmp implementation using Power7-or-later VMX
     instructions

   - Support for barrier_nospec on some NXP CPUs.

   - Support for flushing the count cache on context switch on some IBM
     CPUs (controlled by firmware), as a Spectre v2 mitigation.

   - A series to enhance the information we print on unhandled signals
     to bring it into line with other arches, including showing the
     offending VMA and dumping the instructions around the fault.

  Thanks to: Aaro Koskinen, Akshay Adiga, Alastair D'Silva, Alexey
  Kardashevskiy, Alexey Spirkov, Alistair Popple, Andrew Donnellan,
  Aneesh Kumar K.V, Anju T Sudhakar, Arnd Bergmann, Bartosz Golaszewski,
  Benjamin Herrenschmidt, Bharat Bhushan, Bjoern Noetel, Boqun Feng,
  Breno Leitao, Bryant G. Ly, Camelia Groza, Christophe Leroy, Christoph
  Hellwig, Cyril Bur, Dan Carpenter, Daniel Klamt, Darren Stevens, Dave
  Young, David Gibson, Diana Craciun, Finn Thain, Florian Weimer,
  Frederic Barrat, Gautham R. Shenoy, Geert Uytterhoeven, Geoff Levand,
  Guenter Roeck, Gustavo Romero, Haren Myneni, Hari Bathini, Joel
  Stanley, Jonathan Neuschäfer, Kees Cook, Madhavan Srinivasan, Mahesh
  Salgaonkar, Markus Elfring, Mathieu Malaterre, Mauro S. M. Rodrigues,
  Michael Hanselmann, Michael Neuling, Michael Schmitz, Mukesh Ojha,
  Murilo Opsfelder Araujo, Nicholas Piggin, Parth Y Shah, Paul
  Mackerras, Paul Menzel, Ram Pai, Randy Dunlap, Rashmica Gupta, Reza
  Arbab, Rodrigo R. Galvao, Russell Currey, Sam Bobroff, Scott Wood,
  Shilpasri G Bhat, Simon Guo, Souptick Joarder, Stan Johnson, Thiago
  Jung Bauermann, Tyrel Datwyler, Vaibhav Jain, Vasant Hegde, Venkat
  Rao, zhong jiang"

* tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (234 commits)
  powerpc/mm/book3s/radix: Add mapping statistics
  powerpc/uaccess: Enable get_user(u64, *p) on 32-bit
  powerpc/mm/hash: Remove unnecessary do { } while(0) loop
  powerpc/64s: move machine check SLB flushing to mm/slb.c
  powerpc/powernv/idle: Fix build error
  powerpc/mm/tlbflush: update the mmu_gather page size while iterating address range
  powerpc/mm: remove warning about ‘type’ being set
  powerpc/32: Include setup.h header file to fix warnings
  powerpc: Move `path` variable inside DEBUG_PROM
  powerpc/powermac: Make some functions static
  powerpc/powermac: Remove variable x that's never read
  cxl: remove a dead branch
  powerpc/powermac: Add missing include of header pmac.h
  powerpc/kexec: Use common error handling code in setup_new_fdt()
  powerpc/xmon: Add address lookup for percpu symbols
  powerpc/mm: remove huge_pte_offset_and_shift() prototype
  powerpc/lib: Use patch_site to patch copy_32 functions once cache is enabled
  powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
  powerpc/fadump: merge adjacent memory ranges to reduce PT_LOAD segements
  powerpc/fadump: handle crash memory ranges array index overflow
  ...
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- arch/powerpc/Kconfig | 12
-rw-r--r-- arch/powerpc/Makefile | 49
-rw-r--r-- arch/powerpc/boot/Makefile | 2
-rw-r--r-- arch/powerpc/boot/dts/ac14xx.dts | 20
-rw-r--r-- arch/powerpc/boot/dts/fsl/kmcent2.dts | 2
-rw-r--r-- arch/powerpc/boot/dts/fsl/t2080rdb.dts | 4
-rw-r--r-- arch/powerpc/boot/dts/fsl/t4240rdb.dts | 8
-rw-r--r-- arch/powerpc/boot/dts/pdm360ng.dts | 2
-rw-r--r-- arch/powerpc/configs/book3s_32.config | 2
-rw-r--r-- arch/powerpc/configs/dpaa.config | 1
-rw-r--r-- arch/powerpc/crypto/md5-asm.S | 1
-rw-r--r-- arch/powerpc/crypto/sha1-powerpc-asm.S | 1
-rw-r--r-- arch/powerpc/include/asm/asm-405.h | 19
-rw-r--r-- arch/powerpc/include/asm/asm-compat.h | 26
-rw-r--r-- arch/powerpc/include/asm/asm-const.h | 14
-rw-r--r-- arch/powerpc/include/asm/asm-prototypes.h | 11
-rw-r--r-- arch/powerpc/include/asm/atomic.h | 1
-rw-r--r-- arch/powerpc/include/asm/barrier.h | 14
-rw-r--r-- arch/powerpc/include/asm/bitops.h | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgtable.h | 7
-rw-r--r-- arch/powerpc/include/asm/book3s/32/tlbflush.h | 25
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-64k.h | 5
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash.h | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hugetlb.h | 20
-rw-r--r-- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 15
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgalloc.h | 30
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 5
-rw-r--r-- arch/powerpc/include/asm/book3s/64/radix.h | 40
-rw-r--r-- arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/tlbflush.h | 11
-rw-r--r-- arch/powerpc/include/asm/cacheflush.h | 1
-rw-r--r-- arch/powerpc/include/asm/cmpxchg.h | 2
-rw-r--r-- arch/powerpc/include/asm/code-patching-asm.h | 18
-rw-r--r-- arch/powerpc/include/asm/code-patching.h | 3
-rw-r--r-- arch/powerpc/include/asm/cpuidle.h | 13
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 29
-rw-r--r-- arch/powerpc/include/asm/cputime.h | 1
-rw-r--r-- arch/powerpc/include/asm/dbell.h | 2
-rw-r--r-- arch/powerpc/include/asm/dcr-native.h | 1
-rw-r--r-- arch/powerpc/include/asm/debug.h | 1
-rw-r--r-- arch/powerpc/include/asm/dt_cpu_ftrs.h | 2
-rw-r--r-- arch/powerpc/include/asm/eeh.h | 15
-rw-r--r-- arch/powerpc/include/asm/exception-64s.h | 118
-rw-r--r-- arch/powerpc/include/asm/fadump.h | 3
-rw-r--r-- arch/powerpc/include/asm/feature-fixups.h | 2
-rw-r--r-- arch/powerpc/include/asm/firmware.h | 3
-rw-r--r-- arch/powerpc/include/asm/fixmap.h | 1
-rw-r--r-- arch/powerpc/include/asm/futex.h | 2
-rw-r--r-- arch/powerpc/include/asm/head-64.h | 16
-rw-r--r-- arch/powerpc/include/asm/highmem.h | 2
-rw-r--r-- arch/powerpc/include/asm/hugetlb.h | 3
-rw-r--r-- arch/powerpc/include/asm/hvcall.h | 2
-rw-r--r-- arch/powerpc/include/asm/hw_breakpoint.h | 1
-rw-r--r-- arch/powerpc/include/asm/hw_irq.h | 10
-rw-r--r-- arch/powerpc/include/asm/iommu.h | 12
-rw-r--r-- arch/powerpc/include/asm/jump_label.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_booke_hv_asm.h | 2
-rw-r--r-- arch/powerpc/include/asm/mmu-44x.h | 10
-rw-r--r-- arch/powerpc/include/asm/mmu.h | 3
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pgtable.h | 7
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable.h | 19
-rw-r--r-- arch/powerpc/include/asm/nohash/tlbflush.h | 53
-rw-r--r-- arch/powerpc/include/asm/opal-api.h | 4
-rw-r--r-- arch/powerpc/include/asm/opal.h | 5
-rw-r--r-- arch/powerpc/include/asm/paca.h | 8
-rw-r--r-- arch/powerpc/include/asm/page.h | 3
-rw-r--r-- arch/powerpc/include/asm/page_64.h | 2
-rw-r--r-- arch/powerpc/include/asm/pkeys.h | 38
-rw-r--r-- arch/powerpc/include/asm/pnv-pci.h | 7
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h | 14
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 1
-rw-r--r-- arch/powerpc/include/asm/processor.h | 5
-rw-r--r-- arch/powerpc/include/asm/ptrace.h | 1
-rw-r--r-- arch/powerpc/include/asm/reg.h | 2
-rw-r--r-- arch/powerpc/include/asm/reg_a2.h | 2
-rw-r--r-- arch/powerpc/include/asm/reg_fsl_emb.h | 2
-rw-r--r-- arch/powerpc/include/asm/security_features.h | 7
-rw-r--r-- arch/powerpc/include/asm/setup.h | 6
-rw-r--r-- arch/powerpc/include/asm/smp.h | 1
-rw-r--r-- arch/powerpc/include/asm/sparsemem.h | 13
-rw-r--r-- arch/powerpc/include/asm/spinlock.h | 2
-rw-r--r-- arch/powerpc/include/asm/stacktrace.h | 13
-rw-r--r-- arch/powerpc/include/asm/string.h | 2
-rw-r--r-- arch/powerpc/include/asm/synch.h | 2
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 3
-rw-r--r-- arch/powerpc/include/asm/tlb.h | 4
-rw-r--r-- arch/powerpc/include/asm/tlbflush.h | 86
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 14
-rw-r--r-- arch/powerpc/include/asm/xive.h | 1
-rw-r--r-- arch/powerpc/kernel/Makefile | 17
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r-- arch/powerpc/kernel/cpu_setup_6xx.S | 1
-rw-r--r-- arch/powerpc/kernel/cputable.c | 38
-rw-r--r-- arch/powerpc/kernel/crash.c | 1
-rw-r--r-- arch/powerpc/kernel/dt_cpu_ftrs.c | 4
-rw-r--r-- arch/powerpc/kernel/eeh.c | 2
-rw-r--r-- arch/powerpc/kernel/entry_32.S | 12
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 64
-rw-r--r-- arch/powerpc/kernel/exceptions-64e.S | 5
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 48
-rw-r--r-- arch/powerpc/kernel/fadump.c | 126
-rw-r--r-- arch/powerpc/kernel/fpu.S | 2
-rw-r--r-- arch/powerpc/kernel/head_32.S | 1
-rw-r--r-- arch/powerpc/kernel/head_40x.S | 1
-rw-r--r-- arch/powerpc/kernel/head_64.S | 1
-rw-r--r-- arch/powerpc/kernel/head_8xx.S | 8
-rw-r--r-- arch/powerpc/kernel/head_fsl_booke.S | 1
-rw-r--r-- arch/powerpc/kernel/idle_6xx.S | 1
-rw-r--r-- arch/powerpc/kernel/idle_book3e.S | 7
-rw-r--r-- arch/powerpc/kernel/idle_book3s.S | 52
-rw-r--r-- arch/powerpc/kernel/idle_e500.S | 1
-rw-r--r-- arch/powerpc/kernel/idle_power4.S | 1
-rw-r--r-- arch/powerpc/kernel/irq.c | 41
-rw-r--r-- arch/powerpc/kernel/kvm_emul.S | 1
-rw-r--r-- arch/powerpc/kernel/l2cr_6xx.S | 1
-rw-r--r-- arch/powerpc/kernel/machine_kexec.c | 8
-rw-r--r-- arch/powerpc/kernel/machine_kexec_file_64.c | 28
-rw-r--r-- arch/powerpc/kernel/mce_power.c | 44
-rw-r--r-- arch/powerpc/kernel/misc_32.S | 1
-rw-r--r-- arch/powerpc/kernel/misc_64.S | 1
-rw-r--r-- arch/powerpc/kernel/module.c | 4
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 3
-rw-r--r-- arch/powerpc/kernel/ppc_save_regs.S | 1
-rw-r--r-- arch/powerpc/kernel/process.c | 64
-rw-r--r-- arch/powerpc/kernel/prom.c | 33
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 12
-rw-r--r-- arch/powerpc/kernel/security.c | 141
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 2
-rw-r--r-- arch/powerpc/kernel/setup_32.c | 11
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 1
-rw-r--r-- arch/powerpc/kernel/smp.c | 50
-rw-r--r-- arch/powerpc/kernel/swsusp_32.S | 1
-rw-r--r-- arch/powerpc/kernel/swsusp_asm64.S | 1
-rw-r--r-- arch/powerpc/kernel/tm.S | 5
-rw-r--r-- arch/powerpc/kernel/traps.c | 52
-rw-r--r-- arch/powerpc/kernel/vdso.c | 1
-rw-r--r-- arch/powerpc/kernel/vector.S | 1
-rw-r--r-- arch/powerpc/kernel/vmlinux.lds.S | 4
-rw-r--r-- arch/powerpc/kernel/watchdog.c | 1
-rw-r--r-- arch/powerpc/kvm/book3s.c | 1
-rw-r--r-- arch/powerpc/kvm/book3s_32_mmu.c | 1
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu.c | 1
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 1
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_radix.c | 15
-rw-r--r-- arch/powerpc/kvm/book3s_64_slb.S | 3
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio.c | 12
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio_hv.c | 20
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 11
-rw-r--r-- arch/powerpc/kvm/book3s_hv_interrupts.S | 2
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 1
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 18
-rw-r--r-- arch/powerpc/kvm/book3s_interrupts.S | 1
-rw-r--r-- arch/powerpc/kvm/book3s_pr.c | 1
-rw-r--r-- arch/powerpc/kvm/book3s_rmhandlers.S | 1
-rw-r--r-- arch/powerpc/kvm/book3s_segment.S | 3
-rw-r--r-- arch/powerpc/kvm/book3s_xive_template.c | 39
-rw-r--r-- arch/powerpc/kvm/e500.c | 1
-rw-r--r-- arch/powerpc/kvm/e500mc.c | 1
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 1
-rw-r--r-- arch/powerpc/lib/Makefile | 2
-rw-r--r-- arch/powerpc/lib/code-patching.c | 16
-rw-r--r-- arch/powerpc/lib/copy_32.S | 9
-rw-r--r-- arch/powerpc/lib/copypage_64.S | 1
-rw-r--r-- arch/powerpc/lib/copypage_power7.S | 4
-rw-r--r-- arch/powerpc/lib/copyuser_64.S | 587
-rw-r--r-- arch/powerpc/lib/copyuser_power7.S | 21
-rw-r--r-- arch/powerpc/lib/feature-fixups-test.S | 1
-rw-r--r-- arch/powerpc/lib/feature-fixups.c | 35
-rw-r--r-- arch/powerpc/lib/hweight_64.S | 1
-rw-r--r-- arch/powerpc/lib/ldstfp.S | 1
-rw-r--r-- arch/powerpc/lib/locks.c | 1
-rw-r--r-- arch/powerpc/lib/memcmp_64.S | 414
-rw-r--r-- arch/powerpc/lib/memcpy_64.S | 11
-rw-r--r-- arch/powerpc/lib/memcpy_power7.S | 28
-rw-r--r-- arch/powerpc/lib/strlen_32.S | 78
-rw-r--r-- arch/powerpc/lib/vmx-helper.c | 4
-rw-r--r-- arch/powerpc/mm/44x_mmu.c | 2
-rw-r--r-- arch/powerpc/mm/Makefile | 4
-rw-r--r-- arch/powerpc/mm/dump_hashpagetable.c | 3
-rw-r--r-- arch/powerpc/mm/fault.c | 1
-rw-r--r-- arch/powerpc/mm/hash64_4k.c | 8
-rw-r--r-- arch/powerpc/mm/hash64_64k.c | 15
-rw-r--r-- arch/powerpc/mm/hash_low_32.S | 1
-rw-r--r-- arch/powerpc/mm/hash_native_64.c | 78
-rw-r--r-- arch/powerpc/mm/hash_utils_64.c | 41
-rw-r--r-- arch/powerpc/mm/highmem.c | 2
-rw-r--r-- arch/powerpc/mm/hugepage-hash64.c | 9
-rw-r--r-- arch/powerpc/mm/hugetlbpage.c | 26
-rw-r--r-- arch/powerpc/mm/mmu_context_book3s64.c | 25
-rw-r--r-- arch/powerpc/mm/mmu_context_hash32.c | 1
-rw-r--r-- arch/powerpc/mm/mmu_decl.h | 1
-rw-r--r-- arch/powerpc/mm/pgtable-book3s64.c | 39
-rw-r--r-- arch/powerpc/mm/pgtable-radix.c | 79
-rw-r--r-- arch/powerpc/mm/pkeys.c | 141
-rw-r--r-- arch/powerpc/mm/slb.c | 39
-rw-r--r-- arch/powerpc/mm/slb_low.S | 1
-rw-r--r-- arch/powerpc/mm/subpage-prot.c | 1
-rw-r--r-- arch/powerpc/mm/tlb-radix.c | 18
-rw-r--r-- arch/powerpc/mm/tlb_low_64e.S | 1
-rw-r--r-- arch/powerpc/mm/tlb_nohash_low.S | 2
-rw-r--r-- arch/powerpc/net/Makefile | 2
-rw-r--r-- arch/powerpc/net/bpf_jit32.h | 1
-rw-r--r-- arch/powerpc/net/bpf_jit_asm.S | 1
-rw-r--r-- arch/powerpc/net/bpf_jit_comp.c | 1
-rw-r--r-- arch/powerpc/net/bpf_jit_comp64.c | 1
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 34
-rw-r--r-- arch/powerpc/perf/imc-pmu.c | 108
-rw-r--r-- arch/powerpc/perf/isa207-common.c | 12
-rw-r--r-- arch/powerpc/perf/isa207-common.h | 5
-rw-r--r-- arch/powerpc/perf/power9-pmu.c | 54
-rw-r--r-- arch/powerpc/perf/req-gen/_begin.h | 2
-rw-r--r-- arch/powerpc/perf/req-gen/perf.h | 1
-rw-r--r-- arch/powerpc/platforms/4xx/msi.c | 51
-rw-r--r-- arch/powerpc/platforms/52xx/Makefile | 2
-rw-r--r-- arch/powerpc/platforms/52xx/mpc52xx_pm.c | 5
-rw-r--r-- arch/powerpc/platforms/85xx/t1042rdb_diu.c | 4
-rw-r--r-- arch/powerpc/platforms/Kconfig.cputype | 39
-rw-r--r-- arch/powerpc/platforms/cell/Makefile | 2
-rw-r--r-- arch/powerpc/platforms/cell/cbe_thermal.c | 1
-rw-r--r-- arch/powerpc/platforms/cell/spufs/sputrace.h | 1
-rw-r--r-- arch/powerpc/platforms/chrp/nvram.c | 3
-rw-r--r-- arch/powerpc/platforms/embedded6xx/wii.c | 10
-rw-r--r-- arch/powerpc/platforms/pasemi/dma_lib.c | 6
-rw-r--r-- arch/powerpc/platforms/pasemi/gpio_mdio.c | 2
-rw-r--r-- arch/powerpc/platforms/pasemi/idle.c | 4
-rw-r--r-- arch/powerpc/platforms/pasemi/iommu.c | 2
-rw-r--r-- arch/powerpc/platforms/pasemi/misc.c | 4
-rw-r--r-- arch/powerpc/platforms/pasemi/pci.c | 16
-rw-r--r-- arch/powerpc/platforms/pasemi/setup.c | 53
-rw-r--r-- arch/powerpc/platforms/powermac/cache.S | 1
-rw-r--r-- arch/powerpc/platforms/powermac/feature.c | 2
-rw-r--r-- arch/powerpc/platforms/powermac/pci.c | 12
-rw-r--r-- arch/powerpc/platforms/powermac/sleep.S | 1
-rw-r--r-- arch/powerpc/platforms/powermac/time.c | 4
-rw-r--r-- arch/powerpc/platforms/powermac/udbg_scc.c | 4
-rw-r--r-- arch/powerpc/platforms/powernv/Makefile | 2
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-powernv.c | 12
-rw-r--r-- arch/powerpc/platforms/powernv/idle.c | 245
-rw-r--r-- arch/powerpc/platforms/powernv/memtrace.c | 121
-rw-r--r-- arch/powerpc/platforms/powernv/npu-dma.c | 17
-rw-r--r-- arch/powerpc/platforms/powernv/opal-dump.c | 18
-rw-r--r-- arch/powerpc/platforms/powernv/opal-irqchip.c | 126
-rw-r--r-- arch/powerpc/platforms/powernv/opal-kmsg.c | 30
-rw-r--r-- arch/powerpc/platforms/powernv/opal-sensor-groups.c | 28
-rw-r--r-- arch/powerpc/platforms/powernv/opal-wrappers.S | 4
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 154
-rw-r--r-- arch/powerpc/platforms/powernv/pci-cxl.c | 199
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda-tce.c | 399
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 208
-rw-r--r-- arch/powerpc/platforms/powernv/pci.c | 158
-rw-r--r-- arch/powerpc/platforms/powernv/pci.h | 53
-rw-r--r-- arch/powerpc/platforms/powernv/setup.c | 10
-rw-r--r-- arch/powerpc/platforms/powernv/smp.c | 27
-rw-r--r-- arch/powerpc/platforms/powernv/vas.h | 1
-rw-r--r-- arch/powerpc/platforms/pseries/Makefile | 2
-rw-r--r-- arch/powerpc/platforms/pseries/hvCall.S | 1
-rw-r--r-- arch/powerpc/platforms/pseries/kexec.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 51
-rw-r--r-- arch/powerpc/platforms/pseries/mobility.c | 1
-rw-r--r-- arch/powerpc/platforms/pseries/ras.c | 51
-rw-r--r-- arch/powerpc/platforms/pseries/setup.c | 50
-rw-r--r-- arch/powerpc/purgatory/trampoline.S | 10
-rw-r--r-- arch/powerpc/sysdev/Makefile | 2
-rw-r--r-- arch/powerpc/sysdev/cpm1.c | 1
-rw-r--r-- arch/powerpc/sysdev/fsl_mpic_err.c | 2
-rw-r--r-- arch/powerpc/sysdev/mpic.c | 6
-rw-r--r-- arch/powerpc/sysdev/mpic_msgr.c | 2
-rw-r--r-- arch/powerpc/sysdev/msi_bitmap.c | 15
-rw-r--r-- arch/powerpc/sysdev/xive/common.c | 30
-rw-r--r-- arch/powerpc/sysdev/xive/native.c | 34
-rwxr-xr-x arch/powerpc/tools/checkpatch.sh | 22
-rw-r--r-- arch/powerpc/xmon/spr_access.S | 1
-rw-r--r-- arch/powerpc/xmon/xmon.c | 38
273 files changed, 3575 insertions, 2762 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1c10ff0406f2..a80669209155 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -165,7 +165,7 @@ config PPC
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
- select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64
+ select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_SMP_IDLE_THREAD
@@ -197,6 +197,7 @@ config PPC
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_XZ if PPC_BOOK3S
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
@@ -225,6 +226,7 @@ config PPC
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA
+ select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE
select NEED_SG_DMA_LENGTH
select NO_BOOTMEM
select OF
@@ -240,6 +242,11 @@ config PPC
# Please keep this list sorted alphabetically.
#
+config PPC_BARRIER_NOSPEC
+ bool
+ default y
+ depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
+
config GENERIC_CSUM
def_bool n
@@ -876,9 +883,6 @@ config ZONE_DMA
bool
default y
-config NEED_DMA_MAP_STATE
- def_bool (PPC64 || NOT_COHERENT_CACHE)
-
config GENERIC_ISA_DMA
bool
depends on ISA_DMA_API
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index fb96206de317..8397c7bd5880 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -36,7 +36,7 @@ else
KBUILD_DEFCONFIG := ppc64_defconfig
endif
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
ifeq ($(new_nm),y)
@@ -74,7 +74,7 @@ KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
endif
endif
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+ifdef CONFIG_CPU_LITTLE_ENDIAN
KBUILD_CFLAGS += -mlittle-endian
LDFLAGS += -EL
LDEMULATION := lppc
@@ -117,7 +117,7 @@ LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
LDFLAGS_vmlinux += $(call ld-option,--orphan-handling=warn)
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
ifeq ($(call cc-option-yn,-mcmodel=medium),y)
# -mcmodel=medium breaks modules because it uses 32bit offsets from
# the TOC pointer to create pointers where possible. Pointers into the
@@ -134,7 +134,7 @@ endif
endif
CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+ifdef CONFIG_CPU_LITTLE_ENDIAN
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
else
@@ -148,8 +148,8 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD)
CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata)
-ifeq ($(CONFIG_PPC_BOOK3S_64),y)
-ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+ifdef CONFIG_PPC_BOOK3S_64
+ifdef CONFIG_CPU_LITTLE_ENDIAN
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8)
else
@@ -164,16 +164,10 @@ ifdef CONFIG_MPROFILE_KERNEL
CC_FLAGS_FTRACE := -pg -mprofile-kernel
endif
-CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell)
-CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
-CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
-CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
-CFLAGS-$(CONFIG_POWER8_CPU) += $(call cc-option,-mcpu=power8)
-CFLAGS-$(CONFIG_POWER9_CPU) += $(call cc-option,-mcpu=power9)
-CFLAGS-$(CONFIG_PPC_8xx) += $(call cc-option,-mcpu=860)
+CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
# Altivec option not allowed with e500mc64 in GCC.
-ifeq ($(CONFIG_ALTIVEC),y)
+ifdef CONFIG_ALTIVEC
E5500_CPU := -mcpu=powerpc64
else
E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
@@ -181,8 +175,8 @@ endif
CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
-ifeq ($(CONFIG_PPC32),y)
-ifeq ($(CONFIG_PPC_E500MC),y)
+ifdef CONFIG_PPC32
+ifdef CONFIG_PPC_E500MC
CFLAGS-y += $(call cc-option,-mcpu=e500mc,-mcpu=powerpc)
else
CFLAGS-$(CONFIG_E500) += $(call cc-option,-mcpu=8540 -msoft-float,-mcpu=powerpc)
@@ -204,7 +198,7 @@ else
CHECKFLAGS += -D__LITTLE_ENDIAN__
endif
-ifeq ($(CONFIG_476FPE_ERR46),y)
+ifdef CONFIG_476FPE_ERR46
KBUILD_LDFLAGS_MODULE += --ppc476-workaround \
-T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds
endif
@@ -231,12 +225,12 @@ KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
# often slow when they are implemented at all
KBUILD_CFLAGS += $(call cc-option,-mno-string)
-ifeq ($(CONFIG_6xx),y)
+ifdef CONFIG_6xx
KBUILD_CFLAGS += -mcpu=powerpc
endif
# Work around a gcc code-gen bug with -fno-omit-frame-pointer.
-ifeq ($(CONFIG_FUNCTION_TRACER),y)
+ifdef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS += -mno-sched-epilog
endif
@@ -355,6 +349,21 @@ mpc86xx_smp_defconfig:
$(call merge_into_defconfig,mpc86xx_basic_defconfig,\
86xx-smp 86xx-hw fsl-emb-nonhw)
+PHONY += ppc32_allmodconfig
+ppc32_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/book3s_32.config \
+ -f $(srctree)/Makefile allmodconfig
+
+PHONY += ppc64le_allmodconfig
+ppc64le_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/le.config \
+ -f $(srctree)/Makefile allmodconfig
+
+PHONY += ppc64_book3e_allmodconfig
+ppc64_book3e_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/85xx-64bit.config \
+ -f $(srctree)/Makefile allmodconfig
+
define archhelp
@echo '* zImage - Build default images selected by kernel config'
@echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
@@ -381,7 +390,7 @@ install:
$(Q)$(MAKE) $(build)=$(boot) install
vdso_install:
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
endif
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
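[Note: the three allmodconfig convenience targets added earlier in this file (ppc32_allmodconfig, ppc64le_allmodconfig, ppc64_book3e_allmodconfig) each feed a KCONFIG_ALLCONFIG fragment into a normal allmodconfig run. They are invoked like any other config target, e.g. "make ARCH=powerpc ppc64le_allmodconfig", with the usual cross-compiler setup for the chosen platform.]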
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index deea20c334df..0fb96c26136f 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -354,7 +354,7 @@ image-$(CONFIG_AMIGAONE) += cuImage.amigaone
# For 32-bit powermacs, build the COFF and miboot images
# as well as the ELF images.
-ifeq ($(CONFIG_PPC32),y)
+ifdef CONFIG_PPC32
image-$(CONFIG_PPC_PMAC) += zImage.coff zImage.miboot
endif
diff --git a/arch/powerpc/boot/dts/ac14xx.dts b/arch/powerpc/boot/dts/ac14xx.dts
index 83bcfd865167..0be5c4f3265d 100644
--- a/arch/powerpc/boot/dts/ac14xx.dts
+++ b/arch/powerpc/boot/dts/ac14xx.dts
@@ -176,12 +176,12 @@
clock-frequency = <400000>;
at24@30 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x30>;
};
at24@31 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x31>;
};
@@ -191,42 +191,42 @@
};
at24@50 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x50>;
};
at24@51 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x51>;
};
at24@52 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x52>;
};
at24@53 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x53>;
};
at24@54 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x54>;
};
at24@55 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x55>;
};
at24@56 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x56>;
};
at24@57 {
- compatible = "at24,24c01";
+ compatible = "atmel,24c01";
reg = <0x57>;
};
diff --git a/arch/powerpc/boot/dts/fsl/kmcent2.dts b/arch/powerpc/boot/dts/fsl/kmcent2.dts
index 5922c1ea0e96..3094df05f5ea 100644
--- a/arch/powerpc/boot/dts/fsl/kmcent2.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcent2.dts
@@ -130,7 +130,7 @@
#size-cells = <0>;
eeprom@54 {
- compatible = "24c02";
+ compatible = "atmel,24c02";
reg = <0x54>;
pagesize = <2>;
read-only;
diff --git a/arch/powerpc/boot/dts/fsl/t2080rdb.dts b/arch/powerpc/boot/dts/fsl/t2080rdb.dts
index 836e4c965b22..55c0210a771d 100644
--- a/arch/powerpc/boot/dts/fsl/t2080rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t2080rdb.dts
@@ -97,12 +97,12 @@
mdio@fd000 {
xg_cs4315_phy1: ethernet-phy@c {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0xc>;
};
xg_cs4315_phy2: ethernet-phy@d {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0xd>;
};
diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
index 15eb0a3f7290..a56a705d41f7 100644
--- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
@@ -267,22 +267,22 @@
mdio@fd000 {
xfiphy1: ethernet-phy@10 {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0x10>;
};
xfiphy2: ethernet-phy@11 {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0x11>;
};
xfiphy3: ethernet-phy@13 {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0x13>;
};
xfiphy4: ethernet-phy@12 {
- compatible = "ethernet-phy-ieee802.3-c45";
+ compatible = "ethernet-phy-id13e5.1002";
reg = <0x12>;
};
};
diff --git a/arch/powerpc/boot/dts/pdm360ng.dts b/arch/powerpc/boot/dts/pdm360ng.dts
index 445b88114009..df1283b63d9b 100644
--- a/arch/powerpc/boot/dts/pdm360ng.dts
+++ b/arch/powerpc/boot/dts/pdm360ng.dts
@@ -98,7 +98,7 @@
fsl,preserve-clocking;
eeprom@50 {
- compatible = "at,24c01";
+ compatible = "atmel,24c01";
reg = <0x50>;
};
diff --git a/arch/powerpc/configs/book3s_32.config b/arch/powerpc/configs/book3s_32.config
new file mode 100644
index 000000000000..8721eb7b1294
--- /dev/null
+++ b/arch/powerpc/configs/book3s_32.config
@@ -0,0 +1,2 @@
+CONFIG_PPC64=n
+CONFIG_PPC_BOOK3S_32=y
diff --git a/arch/powerpc/configs/dpaa.config b/arch/powerpc/configs/dpaa.config
index 2fe76f5e938a..4ffacafe4036 100644
--- a/arch/powerpc/configs/dpaa.config
+++ b/arch/powerpc/configs/dpaa.config
@@ -2,3 +2,4 @@ CONFIG_FSL_DPAA=y
CONFIG_FSL_PAMU=y
CONFIG_FSL_FMAN=y
CONFIG_FSL_DPAA_ETH=y
+CONFIG_CORTINA_PHY=y
diff --git a/arch/powerpc/crypto/md5-asm.S b/arch/powerpc/crypto/md5-asm.S
index 10cdf5bceebb..1834065362c7 100644
--- a/arch/powerpc/crypto/md5-asm.S
+++ b/arch/powerpc/crypto/md5-asm.S
@@ -11,6 +11,7 @@
*/
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#define rHP r3
#define rWP r4
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index c8951ce0dcc4..23e248beff71 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -7,6 +7,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#ifdef __BIG_ENDIAN__
#define LWZ(rt, d, ra) \
diff --git a/arch/powerpc/include/asm/asm-405.h b/arch/powerpc/include/asm/asm-405.h
new file mode 100644
index 000000000000..7270d3ae7c8e
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-405.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_POWERPC_ASM_405_H
+#define _ASM_POWERPC_ASM_405_H
+
+#include <asm/asm-const.h>
+
+#ifdef __KERNEL__
+#ifdef CONFIG_IBM405_ERR77
+/* Erratum #77 on the 405 means we need a sync or dcbt before every
+ * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
+#define PPC405_ERR77_SYNC stringify_in_c(sync;)
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+#endif
+
+#endif /* _ASM_POWERPC_ASM_405_H */
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 7f2a7702596c..19b70c5b5f18 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -1,21 +1,10 @@
#ifndef _ASM_POWERPC_ASM_COMPAT_H
#define _ASM_POWERPC_ASM_COMPAT_H
+#include <asm/asm-const.h>
#include <asm/types.h>
#include <asm/ppc-opcode.h>
-#ifdef __ASSEMBLY__
-# define stringify_in_c(...) __VA_ARGS__
-# define ASM_CONST(x) x
-#else
-/* This version of stringify will deal with commas... */
-# define __stringify_in_c(...) #__VA_ARGS__
-# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
-# define __ASM_CONST(x) x##UL
-# define ASM_CONST(x) __ASM_CONST(x)
-#endif
-
-
#ifdef __powerpc64__
/* operations for longs and pointers */
@@ -70,17 +59,4 @@
#endif
-#ifdef __KERNEL__
-#ifdef CONFIG_IBM405_ERR77
-/* Erratum #77 on the 405 means we need a sync or dcbt before every
- * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
- */
-#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
-#define PPC405_ERR77_SYNC stringify_in_c(sync;)
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-#endif
-
#endif /* _ASM_POWERPC_ASM_COMPAT_H */
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
new file mode 100644
index 000000000000..082c1538c562
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-const.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_POWERPC_ASM_CONST_H
+#define _ASM_POWERPC_ASM_CONST_H
+
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+# define ASM_CONST(x) x
+#else
+/* This version of stringify will deal with commas... */
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+# define __ASM_CONST(x) x##UL
+# define ASM_CONST(x) __ASM_CONST(x)
+#endif
+#endif /* _ASM_POWERPC_ASM_CONST_H */
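[Note: the split-out asm-const.h carries only the ASM_CONST()/stringify_in_c() helpers, so low-level headers no longer need all of asm-compat.h. A minimal sketch of why the dual definition exists; the flag name is hypothetical, not from this patch:

    #include <asm/asm-const.h>

    /* Hypothetical 64-bit feature flag shared between C and assembly. */
    #define MY_FTR_FOO      ASM_CONST(0x0000040000000000)

    /* In C, ASM_CONST() appends UL, so this is a well-typed 64-bit value:
     *     unsigned long v = MY_FTR_FOO;
     * In .S files the same macro expands to the bare literal, since the
     * assembler does not accept the UL suffix.
     */
]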
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 7841b8a60657..1f4691ce4126 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -48,8 +48,8 @@ void __trace_opal_exit(long opcode, unsigned long retval);
/* VMX copying */
int enter_vmx_usercopy(void);
int exit_vmx_usercopy(void);
-int enter_vmx_copy(void);
-void * exit_vmx_copy(void *dest);
+int enter_vmx_ops(void);
+void *exit_vmx_ops(void *dest);
/* Traps */
long machine_check_early(struct pt_regs *regs);
@@ -143,4 +143,11 @@ struct kvm_vcpu;
void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+/* Patch sites */
+extern s32 patch__call_flush_count_cache;
+extern s32 patch__flush_count_cache_return;
+extern s32 patch__memset_nocache, patch__memcpy_nocache;
+
+extern long flush_count_cache;
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 963abf8bf1c0..52eafaf74054 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
+#include <asm/asm-405.h>
#define ATOMIC_INIT(i) { (i) }
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index f67b3f6e36be..fbe8df433019 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -5,6 +5,8 @@
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H
+#include <asm/asm-const.h>
+
/*
* Memory barrier.
* The sync instruction guarantees that all memory accesses initiated
@@ -77,19 +79,25 @@ do { \
})
#ifdef CONFIG_PPC_BOOK3S_64
+#define NOSPEC_BARRIER_SLOT nop
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#define NOSPEC_BARRIER_SLOT nop; nop
+#endif
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
/*
* Prevent execution of subsequent instructions until preceding branches have
* been fully resolved and are no longer executing speculatively.
*/
-#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
// This also acts as a compiler barrier due to the memory clobber.
#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
-#else /* !CONFIG_PPC_BOOK3S_64 */
+#else /* !CONFIG_PPC_BARRIER_NOSPEC */
#define barrier_nospec_asm
#define barrier_nospec()
-#endif
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
#include <asm-generic/barrier.h>
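[Note: a minimal sketch of how barrier_nospec() is meant to be used (hypothetical function, not from this diff): it sits between a bounds check on an untrusted index and the dependent load, so the load cannot execute speculatively with an out-of-bounds index.

    /* Hypothetical Spectre-v1 style pattern using the barrier. */
    static inline unsigned long table_lookup(unsigned long *table,
                                             unsigned long idx,
                                             unsigned long size)
    {
            if (idx >= size)
                    return 0;
            barrier_nospec();       /* no speculation past the bounds check */
            return table[idx];
    }
]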
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index b750ffef83c7..ff71566dadee 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -45,6 +45,7 @@
#include <linux/compiler.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>
+#include <asm/asm-405.h>
/* PPC bit number conversion */
#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 02f5acd7ccc4..751cf931bb3f 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -84,17 +84,12 @@
* of RAM. -- Cort
*/
#define VMALLOC_OFFSET (0x1000000) /* 16M */
-#ifdef PPC_PIN_SIZE
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#endif
#define VMALLOC_END ioremap_bot
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
-#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
extern unsigned long ioremap_bot;
@@ -164,7 +159,6 @@ static inline unsigned long pte_update(pte_t *p,
1: lwarx %0,0,%3\n\
andc %1,%0,%4\n\
or %1,%1,%5\n"
- PPC405_ERR77(0,%3)
" stwcx. %1,0,%3\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
@@ -186,7 +180,6 @@ static inline unsigned long long pte_update(pte_t *p,
lwzx %0,0,%3\n\
andc %1,%L0,%5\n\
or %1,%1,%6\n"
- PPC405_ERR77(0,%3)
" stwcx. %1,0,%4\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
new file mode 100644
index 000000000000..068085b709fb
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+
+#define MMU_NO_CONTEXT (0)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+}
+
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index c81793d47af9..f82ee8a3b561 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -137,10 +137,9 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
shift = mmu_psize_defs[psize].shift; \
for (index = 0; vpn < __end; index++, \
vpn += (1L << (shift - VPN_SHIFT))) { \
- if (!__split || __rpte_sub_valid(rpte, index)) \
- do {
+ if (!__split || __rpte_sub_valid(rpte, index))
-#define pte_iterate_hashed_end() } while(0); } } while(0)
+#define pte_iterate_hashed_end() } } while(0)
#define pte_pagesize_index(mm, addr, pte) \
(((pte) & H_PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0387b155f13d..d52a51b2ce7b 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -3,6 +3,8 @@
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__
+#include <asm/asm-const.h>
+
/*
* Common bits between 4K and 64K pages in a linux-style PTE.
* Additional bits may be defined in pgtable-hash64-*.h
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index c459f937d484..50888388a359 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -32,26 +32,6 @@ static inline int hstate_get_psize(struct hstate *hstate)
}
}
-#define arch_make_huge_pte arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
-{
- unsigned long page_shift;
-
- if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
- return entry;
-
- page_shift = huge_page_shift(hstate_vma(vma));
- /*
- * We don't support 1G hugetlb pages yet.
- */
- VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
- if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
- return __pte(pte_val(entry) | R_PAGE_LARGE);
- else
- return entry;
-}
-
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 50ed64fba4ae..b3520b549cba 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -12,9 +12,9 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>
+#include <asm/asm-const.h>
/*
* This is necessary to get the definition of PGTABLE_RANGE which we
@@ -364,6 +364,16 @@ static inline unsigned long hpte_new_to_old_r(unsigned long r)
return r & ~HPTE_R_3_0_SSIZE_MASK;
}
+static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
+{
+ unsigned long hpte_v;
+
+ hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ return hpte_v;
+}
+
/*
* This function sets the AVPN and L fields of the HPTE appropriately
* using the base page size and actual page size.
@@ -487,6 +497,9 @@ extern void hpte_init_native(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
+void slb_flush_all_realmode(void);
+void __slb_restore_bolted_realmode(void);
+void slb_restore_bolted_realmode(void);
extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
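[Note: the new hpte_get_old_v() folds the POWER9 (CPU_FTR_ARCH_300) format conversion into a single read helper. A condensed sketch of the HPTE-group search it simplifies, modeled on the hash_native_64.c callers in this series (variable setup omitted):

    /* Illustrative scan of one HPTE group for a valid, matching entry. */
    for (i = 0; i < HPTES_PER_GROUP; i++, hptep++) {
            unsigned long hpte_v = hpte_get_old_v(hptep);

            if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                    return hptep;   /* found it */
    }
]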
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 01ee40f11f3a..391ed2c3b697 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/cpumask.h>
+#include <linux/kmemleak.h>
#include <linux/percpu.h>
struct vmemmap_backing {
@@ -83,6 +84,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgtable_gfp_flags(mm, GFP_KERNEL));
/*
+ * Don't scan the PGD for pointers, it contains references to PUDs but
+ * those references are not full pointers and so can't be recognised by
+ * kmemleak.
+ */
+ kmemleak_no_scan(pgd);
+
+ /*
* With hugetlb, we don't clear the second half of the page table.
* If we share the same slab cache with the pmd or pud level table,
* we need to make sure we zero out the full table on alloc.
@@ -110,8 +118,19 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
- pgtable_gfp_flags(mm, GFP_KERNEL));
+ pud_t *pud;
+
+ pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+ /*
+ * Tell kmemleak to ignore the PUD, that means don't scan it for
+ * pointers and don't consider it a leak. PUDs are typically only
+ * referred to by their PGD, but kmemleak is not able to recognise those
+ * as pointers, leading to false leak reports.
+ */
+ kmemleak_ignore(pud);
+
+ return pud;
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -208,4 +227,11 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
#define check_pgt_cache() do { } while (0)
+extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+static inline void update_page_count(int psize, long count)
+{
+ if (IS_ENABLED(CONFIG_PROC_FS))
+ atomic_long_add(count, &direct_pages_count[psize]);
+}
+
#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
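[Note: update_page_count() backs the new mapping statistics ("powerpc/mm/book3s/radix: Add mapping statistics" in the shortlog). A sketch of a call site; the placement is hypothetical, the real callers live in the radix/hash mapping code:

    /* Account one newly created 2M direct mapping; with CONFIG_PROC_FS=n
     * the IS_ENABLED() check lets the atomic add compile away.
     */
    update_page_count(MMU_PAGE_2M, 1);
]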
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 42aafba7a308..676118743a06 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -479,9 +479,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
{
if (full && radix_enabled()) {
/*
- * Let's skip the DD1 style pte update here. We know that
- * this is a full mm pte clear and hence can be sure there is
- * no parallel set_pte.
+ * We know that this is a full mm pte clear and
+ * hence can be sure there is no parallel set_pte.
*/
return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
}
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index ef9f96742ce1..7d1a3d1543fc 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H
+#include <asm/asm-const.h>
+
#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif
@@ -12,12 +14,6 @@
#include <asm/book3s/64/radix-4k.h>
#endif
-/*
- * For P9 DD1 only, we need to track whether the pte's huge.
- */
-#define R_PAGE_LARGE _RPAGE_RSV1
-
-
#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
@@ -36,6 +32,9 @@
#define RADIX_PUD_BAD_BITS 0x60000000000000e0UL
#define RADIX_PGD_BAD_BITS 0x60000000000000e0UL
+#define RADIX_PMD_SHIFT (PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
+#define RADIX_PUD_SHIFT (RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
+#define RADIX_PGD_SHIFT (RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
/*
* Size of EA range mapped by our pagetables.
*/
@@ -154,20 +153,7 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
{
unsigned long old_pte;
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-
- unsigned long new_pte;
-
- old_pte = __radix_pte_update(ptep, ~0ul, 0);
- /*
- * new value of pte
- */
- new_pte = (old_pte | set) & ~clr;
- radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
- if (new_pte)
- __radix_pte_update(ptep, 0, new_pte);
- } else
- old_pte = __radix_pte_update(ptep, clr, set);
+ old_pte = __radix_pte_update(ptep, clr, set);
if (!huge)
assert_pte_locked(mm, addr);
@@ -253,8 +239,6 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- return __pmd(pmd_val(pmd) | _PAGE_PTE | R_PAGE_LARGE);
return __pmd(pmd_val(pmd) | _PAGE_PTE);
}
@@ -285,18 +269,14 @@ static inline unsigned long radix__get_tree_size(void)
unsigned long rts_field;
/*
* We support 52 bits, hence:
- * DD1 52-28 = 24, 0b11000
- * Others 52-31 = 21, 0b10101
+ * bits 52 - 31 = 21, 0b10101
* RTS encoding details
* bits 0 - 3 of rts -> bits 6 - 8 unsigned long
* bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
*/
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- rts_field = (0x3UL << 61);
- else {
- rts_field = (0x5UL << 5); /* 6 - 8 bits */
- rts_field |= (0x2UL << 61);
- }
+ rts_field = (0x5UL << 5); /* 6 - 8 bits */
+ rts_field |= (0x2UL << 61);
+
return rts_field;
}
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index ef5c3f2994c9..1154a6dc6d26 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -48,8 +48,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
extern void radix__flush_tlb_all(void);
-extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
- unsigned long address);
extern void radix__flush_tlb_lpid_page(unsigned int lpid,
unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/tlbflush.h b/arch/powerpc/include/asm/book3s/tlbflush.h
new file mode 100644
index 000000000000..dec11de41055
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/tlbflush.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/tlbflush.h>
+#else
+#include <asm/book3s/32/tlbflush.h>
+#endif
+
+#endif /* _ASM_POWERPC_BOOK3S_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 0d72ec75da63..d5a8d7bf0759 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -11,7 +11,6 @@
#include <linux/mm.h>
#include <asm/cputable.h>
-#include <asm/cpu_has_feature.h>
/*
* No cache flushing is required when address mappings are changed,
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index 9b001f1f6b32..27183871eb3b 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -5,8 +5,8 @@
#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
-#include <asm/asm-compat.h>
#include <linux/bug.h>
+#include <asm/asm-405.h>
#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
new file mode 100644
index 000000000000..ed7b1448493a
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching-asm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
+#define _ASM_POWERPC_CODE_PATCHING_ASM_H
+
+/* Define a "site" that can be patched */
+.macro patch_site label name
+ .pushsection ".rodata"
+ .balign 4
+ .global \name
+\name:
+ .4byte \label - .
+ .popsection
+.endm
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
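[Note: patch_site records a label's address as a self-relative s32 in .rodata, which the C side resolves and patches via patch_instruction_site()/patch_branch_site() (declared in the code-patching.h hunk below). A sketch of the pattern, modeled on the copy_32.S conversion in this series; the instruction choice and wrapper are illustrative:

    /* Assembly side: mark the instruction to be patched:
     *
     *     1:      b       2f              # runtime-patched to a nop
     *             patch_site 1b, patch__memset_nocache
     *
     * C side, once the cache is known to be enabled:
     */
    extern s32 patch__memset_nocache;       /* emitted by the patch_site above */

    static void __init fixup_memset_nocache(void)   /* hypothetical wrapper */
    {
            patch_instruction_site(&patch__memset_nocache, PPC_INST_NOP);
    }
]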
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 812535f40124..31733a95bbd0 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -14,6 +14,7 @@
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
+#include <asm/asm-compat.h>
/* Flags for create_branch:
* "b" == create_branch(addr, target, 0);
@@ -32,6 +33,8 @@ unsigned int create_cond_branch(const unsigned int *addr,
int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
int raw_patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_instruction_site(s32 *addr, unsigned int instr);
+int patch_branch_site(s32 *site, unsigned long target, int flags);
int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index e210a83eb196..43e5f31fe64d 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -79,6 +79,19 @@ struct stop_sprs {
u64 mmcra;
};
+#define PNV_IDLE_NAME_LEN 16
+struct pnv_idle_states_t {
+ char name[PNV_IDLE_NAME_LEN];
+ u32 latency_ns;
+ u32 residency_ns;
+ u64 psscr_val;
+ u64 psscr_mask;
+ u32 flags;
+ bool valid;
+};
+
+extern struct pnv_idle_states_t *pnv_idle_states;
+extern int nr_pnv_idle_states;
extern u32 pnv_fastsleep_workaround_at_entry[];
extern u32 pnv_fastsleep_workaround_at_exit[];
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 9c0a3083571b..29f49a35d6ee 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -4,9 +4,8 @@
#include <linux/types.h>
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
#include <uapi/asm/cputable.h>
+#include <asm/asm-const.h>
#ifndef __ASSEMBLY__
@@ -210,7 +209,6 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0000008000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0000010000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x0000020000000000)
-#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x0000040000000000)
#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
#define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
#define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
@@ -452,7 +450,6 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_PKEY)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
-#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_POWER9 (CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -464,8 +461,6 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
-#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
- (~CPU_FTR_SAO))
#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
@@ -488,17 +483,15 @@ static inline void cpu_feature_keys_init(void) { }
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
- CPU_FTRS_POWER8_DD1 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | \
- CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \
- CPU_FTRS_POWER9_DD2_2)
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
- CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \
- CPU_FTRS_PA6T | CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTRS_POWER9 | CPU_FTRS_POWER9_DD1 | CPU_FTRS_POWER9_DD2_1 | \
- CPU_FTRS_POWER9_DD2_2)
+ CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
+ CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
@@ -566,17 +559,15 @@ enum {
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
- CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER8_DD1 & \
- CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
- CPU_FTRS_DT_CPU_BASE)
+ CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER9 & \
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
- CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \
- CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
- CPU_FTRS_DT_CPU_BASE)
+ ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & CPU_FTRS_POWER9 & \
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index bc4903badb3f..133672744b2e 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -23,7 +23,6 @@
#include <asm/div64.h>
#include <asm/time.h>
#include <asm/param.h>
-#include <asm/cpu_has_feature.h>
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 9f2ae0d25e15..99b84db23e8c 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -16,7 +16,7 @@
#include <linux/threads.h>
#include <asm/ppc-opcode.h>
-#include <asm/cpu_has_feature.h>
+#include <asm/feature-fixups.h>
#define PPC_DBELL_MSG_BRDCAST (0x04000000)
#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 4a2beef74277..151dff555f50 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>
+#include <linux/stringify.h>
typedef struct {
unsigned int base;
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index ce5da214ffe5..7756026b95ca 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -45,7 +45,6 @@ static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
-void set_breakpoint(struct arch_hw_breakpoint *brk);
void __set_breakpoint(struct arch_hw_breakpoint *brk);
bool ppc_breakpoint_available(void);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h
index 71515d909ed1..0c729e2d0e8a 100644
--- a/arch/powerpc/include/asm/dt_cpu_ftrs.h
+++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h
@@ -10,8 +10,6 @@
*/
#include <linux/types.h>
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
#include <uapi/asm/cputable.h>
#ifdef CONFIG_PPC_DT_CPU_FTRS
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 677102baf3cd..219637ea69a1 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -36,13 +36,14 @@ struct pci_dn;
#ifdef CONFIG_EEH
/* EEH subsystem flags */
-#define EEH_ENABLED 0x01 /* EEH enabled */
-#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
-#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
-#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
-#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */
-#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
-#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
+#define EEH_ENABLED 0x01 /* EEH enabled */
+#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
+#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
+#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
+#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */
+#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
+#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
+#define EEH_POSTPONED_PROBE 0x80 /* Powernv may postpone device probe */
/*
* Delay for PE reset, all in ms
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index c40b4380951c..a86feddddad0 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -35,6 +35,7 @@
* implementations as possible.
*/
#include <asm/head-64.h>
+#include <asm/feature-fixups.h>
/* PACA save area offsets (exgen, exmc, etc) */
#define EX_R9 0
@@ -156,7 +157,7 @@
b hrfi_flush_fallback
#ifdef CONFIG_RELOCATABLE
-#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_PROLOG_2_RELON(label, h) \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label); \
mtctr r12; \
@@ -166,25 +167,26 @@
bctr;
#else
/* If not relocatable, we can jump directly -- and save messing with LR */
-#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_PROLOG_2_RELON(label, h) \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
li r10,MSR_RI; \
mtmsrd r10,1; /* Set RI (EE=0) */ \
b label;
#endif
-#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
- __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define EXCEPTION_PROLOG_2_RELON(label, h) \
+ __EXCEPTION_PROLOG_2_RELON(label, h)
/*
- * As EXCEPTION_PROLOG_PSERIES(), except we've already got relocation on
- * so no need to rfid. Save lr in case we're CONFIG_RELOCATABLE, in which
- * case EXCEPTION_RELON_PROLOG_PSERIES_1 will be using lr.
+ * As EXCEPTION_PROLOG(), except we've already got relocation on so no need to
+ * rfid. Save LR in case we're CONFIG_RELOCATABLE, in which case
+ * EXCEPTION_PROLOG_2_RELON will be using LR.
*/
-#define EXCEPTION_RELON_PROLOG_PSERIES(area, label, h, extra, vec) \
+#define EXCEPTION_RELON_PROLOG(area, label, h, extra, vec) \
+ SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
+ EXCEPTION_PROLOG_2_RELON(label, h)
/*
* We're short on space and time in the exception prolog, so we can't
@@ -315,7 +317,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define EXCEPTION_PROLOG_1(area, extra, vec) \
_EXCEPTION_PROLOG_1(area, extra, vec)
-#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_PROLOG_2(label, h) \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label) \
@@ -324,11 +326,11 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mtspr SPRN_##h##SRR1,r10; \
h##RFI_TO_KERNEL; \
b . /* prevent speculative execution */
-#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
- __EXCEPTION_PROLOG_PSERIES_1(label, h)
+#define EXCEPTION_PROLOG_2(label, h) \
+ __EXCEPTION_PROLOG_2(label, h)
/* _NORI variant keeps MSR_RI clear */
-#define __EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
+#define __EXCEPTION_PROLOG_2_NORI(label, h) \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
xori r10,r10,MSR_RI; /* Clear MSR_RI */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
@@ -339,13 +341,14 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
h##RFI_TO_KERNEL; \
b . /* prevent speculative execution */
-#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
- __EXCEPTION_PROLOG_PSERIES_1_NORI(label, h)
+#define EXCEPTION_PROLOG_2_NORI(label, h) \
+ __EXCEPTION_PROLOG_2_NORI(label, h)
-#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
+#define EXCEPTION_PROLOG(area, label, h, extra, vec) \
+ SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, h);
+ EXCEPTION_PROLOG_2(label, h);
#define __KVMTEST(h, n) \
lbz r10,HSTATE_IN_GUEST(r13); \
@@ -416,10 +419,10 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#endif
/* Do not enable RI */
-#define EXCEPTION_PROLOG_PSERIES_NORI(area, label, h, extra, vec) \
+#define EXCEPTION_PROLOG_NORI(area, label, h, extra, vec) \
EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_PSERIES_1_NORI(label, h);
+ EXCEPTION_PROLOG_2_NORI(label, h);
#define __KVM_HANDLER(area, h, n) \
@@ -550,10 +553,8 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
/*
* Exception vectors.
*/
-#define STD_EXCEPTION_PSERIES(vec, label) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label, \
- EXC_STD, KVMTEST_PR, vec); \
+#define STD_EXCEPTION(vec, label) \
+ EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_STD, KVMTEST_PR, vec);
/* Version of above for when we have to branch out-of-line */
#define __OOL_EXCEPTION(vec, label, hdlr) \
@@ -561,36 +562,31 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
EXCEPTION_PROLOG_0(PACA_EXGEN) \
b hdlr;
-#define STD_EXCEPTION_PSERIES_OOL(vec, label) \
+#define STD_EXCEPTION_OOL(vec, label) \
EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD)
+ EXCEPTION_PROLOG_2(label, EXC_STD)
#define STD_EXCEPTION_HV(loc, vec, label) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label, \
- EXC_HV, KVMTEST_HV, vec);
+ EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
#define STD_EXCEPTION_HV_OOL(vec, label) \
EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2(label, EXC_HV)
-#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label) \
+#define STD_RELON_EXCEPTION(loc, vec, label) \
/* No guest interrupts come through here */ \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
+ EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_STD, NOTEST, vec);
-#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
+#define STD_RELON_EXCEPTION_OOL(vec, label) \
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_STD)
+ EXCEPTION_PROLOG_2_RELON(label, EXC_STD)
#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label, \
- EXC_HV, KVMTEST_HV, vec);
+ EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);
#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
/* This associates vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
@@ -627,55 +623,45 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define SOFTEN_NOTEST_PR(vec, bitmask) _SOFTEN_TEST(EXC_STD, vec, bitmask)
#define SOFTEN_NOTEST_HV(vec, bitmask) _SOFTEN_TEST(EXC_HV, vec, bitmask)
-#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
+#define __MASKABLE_EXCEPTION(vec, label, h, extra, bitmask) \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
- EXCEPTION_PROLOG_PSERIES_1(label, h);
+ EXCEPTION_PROLOG_2(label, h);
-#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
- __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)
+#define MASKABLE_EXCEPTION(vec, label, bitmask) \
+ __MASKABLE_EXCEPTION(vec, label, EXC_STD, SOFTEN_TEST_PR, bitmask)
-#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label, bitmask) \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, \
- EXC_STD, SOFTEN_TEST_PR, bitmask)
-
-#define MASKABLE_EXCEPTION_PSERIES_OOL(vec, label, bitmask) \
+#define MASKABLE_EXCEPTION_OOL(vec, label, bitmask) \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD)
+ EXCEPTION_PROLOG_2(label, EXC_STD)
-#define MASKABLE_EXCEPTION_HV(loc, vec, label, bitmask) \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, \
- EXC_HV, SOFTEN_TEST_HV, bitmask)
+#define MASKABLE_EXCEPTION_HV(vec, label, bitmask) \
+ __MASKABLE_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
#define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask) \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2(label, EXC_HV)
-#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
+#define __MASKABLE_RELON_EXCEPTION(vec, label, h, extra, bitmask) \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
-
-#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)\
- __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)
+ EXCEPTION_PROLOG_2_RELON(label, h)
-#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label, bitmask) \
- _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
- EXC_STD, SOFTEN_NOTEST_PR, bitmask)
+#define MASKABLE_RELON_EXCEPTION(vec, label, bitmask) \
+ __MASKABLE_RELON_EXCEPTION(vec, label, EXC_STD, SOFTEN_NOTEST_PR, bitmask)
-#define MASKABLE_RELON_EXCEPTION_PSERIES_OOL(vec, label, bitmask) \
+#define MASKABLE_RELON_EXCEPTION_OOL(vec, label, bitmask) \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD);
+ EXCEPTION_PROLOG_2(label, EXC_STD);
-#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label, bitmask) \
- _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
- EXC_HV, SOFTEN_TEST_HV, bitmask)
+#define MASKABLE_RELON_EXCEPTION_HV(vec, label, bitmask) \
+ __MASKABLE_RELON_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)
#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask) \
MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
- EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
/*
* Our exception common code can be passed various "additions"
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 5a23010af600..1e7a33592e29 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -195,9 +195,6 @@ struct fadump_crash_info_header {
struct cpumask online_mask;
};
-/* Crash memory ranges */
-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
-
struct fad_crash_memory_ranges {
unsigned long long base;
unsigned long long size;
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index fcfd05672b1b..33b6f9c892c8 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -1,6 +1,8 @@
#ifndef __ASM_POWERPC_FEATURE_FIXUPS_H
#define __ASM_POWERPC_FEATURE_FIXUPS_H
+#include <asm/asm-const.h>
+
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 535add3f7791..7a051bd21f87 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -14,8 +14,7 @@
#ifdef __KERNEL__
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
/* firmware feature bitmask values */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 6c40dfda5912..41cc15c14eee 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -15,7 +15,6 @@
#define _ASM_FIXMAP_H
#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 1a944c18c539..94542776a62d 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -8,7 +8,7 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
#include <asm/synch.h>
-#include <asm/asm-compat.h>
+#include <asm/asm-405.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 7e0e93f24cb7..a4f947888744 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -260,22 +260,22 @@ name:
#define EXC_REAL(name, start, size) \
EXC_REAL_BEGIN(name, start, size); \
- STD_EXCEPTION_PSERIES(start, name##_common); \
+ STD_EXCEPTION(start, name##_common); \
EXC_REAL_END(name, start, size);
#define EXC_VIRT(name, start, size, realvec) \
EXC_VIRT_BEGIN(name, start, size); \
- STD_RELON_EXCEPTION_PSERIES(start, realvec, name##_common); \
+ STD_RELON_EXCEPTION(start, realvec, name##_common); \
EXC_VIRT_END(name, start, size);
#define EXC_REAL_MASKABLE(name, start, size, bitmask) \
EXC_REAL_BEGIN(name, start, size); \
- MASKABLE_EXCEPTION_PSERIES(start, start, name##_common, bitmask);\
+ MASKABLE_EXCEPTION(start, name##_common, bitmask); \
EXC_REAL_END(name, start, size);
#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \
EXC_VIRT_BEGIN(name, start, size); \
- MASKABLE_RELON_EXCEPTION_PSERIES(start, realvec, name##_common, bitmask);\
+ MASKABLE_RELON_EXCEPTION(realvec, name##_common, bitmask); \
EXC_VIRT_END(name, start, size);
#define EXC_REAL_HV(name, start, size) \
@@ -295,7 +295,7 @@ name:
#define __TRAMP_REAL_OOL(name, vec) \
TRAMP_REAL_BEGIN(tramp_real_##name); \
- STD_EXCEPTION_PSERIES_OOL(vec, name##_common); \
+ STD_EXCEPTION_OOL(vec, name##_common);
#define EXC_REAL_OOL(name, start, size) \
__EXC_REAL_OOL(name, start, size); \
@@ -306,7 +306,7 @@ name:
#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \
TRAMP_REAL_BEGIN(tramp_real_##name); \
- MASKABLE_EXCEPTION_PSERIES_OOL(vec, name##_common, bitmask); \
+ MASKABLE_EXCEPTION_OOL(vec, name##_common, bitmask);
#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask) \
__EXC_REAL_OOL_MASKABLE(name, start, size); \
@@ -346,7 +346,7 @@ name:
#define __TRAMP_VIRT_OOL(name, realvec) \
TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- STD_RELON_EXCEPTION_PSERIES_OOL(realvec, name##_common); \
+ STD_RELON_EXCEPTION_OOL(realvec, name##_common);
#define EXC_VIRT_OOL(name, start, size, realvec) \
__EXC_VIRT_OOL(name, start, size); \
@@ -357,7 +357,7 @@ name:
#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \
TRAMP_VIRT_BEGIN(tramp_virt_##name); \
- MASKABLE_RELON_EXCEPTION_PSERIES_OOL(realvec, name##_common, bitmask);\
+ MASKABLE_RELON_EXCEPTION_OOL(realvec, name##_common, bitmask);
#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
__EXC_VIRT_OOL_MASKABLE(name, start, size); \
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index cec820f961da..a4b65b186ec6 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -25,7 +25,7 @@
#include <linux/interrupt.h>
#include <asm/kmap_types.h>
-#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 3225eb6402cc..2d00cc530083 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -84,9 +84,6 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
return dir + idx;
}
-pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
- unsigned long addr, unsigned *shift);
-
void flush_dcache_icache_hugepage(struct page *page);
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 662c8347d699..a0b17f9f1ea4 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -342,10 +342,12 @@
#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
+#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 27d6e3c8fde9..ece4dc89c90b 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -56,6 +56,7 @@ struct perf_event_attr;
struct perf_event;
struct pmu;
struct perf_sample_data;
+struct task_struct;
#define HW_BREAKPOINT_ALIGN 0x7
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e151774cb577..32a18f2f49bc 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -253,14 +253,16 @@ static inline bool lazy_irq_pending(void)
/*
* This is called by asynchronous interrupts to conditionally
- * re-enable hard interrupts when soft-disabled after having
- * cleared the source of the interrupt
+ * re-enable hard interrupts after having cleared the source
+ * of the interrupt. They are kept disabled if there is a different
+ * soft-masked interrupt pending that requires hard masking.
*/
static inline void may_hard_irq_enable(void)
{
- get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
- if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
+ if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
+ get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
+ }
}
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
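The ordering change in may_hard_irq_enable() above is the point of the
hunk: the old code cleared PACA_IRQ_HARD_DIS before testing
PACA_IRQ_MUST_HARD_MASK, so a pending source that still required hard
masking left the PACA claiming hard interrupts were enabled while they
stayed off. A stand-alone C model of the fixed logic (the flag values
and the enable stub are illustrative, not the kernel's):

#include <stdio.h>

#define PACA_IRQ_HARD_DIS       0x01
#define PACA_IRQ_MUST_HARD_MASK 0x02

static unsigned int irq_happened = PACA_IRQ_HARD_DIS | PACA_IRQ_MUST_HARD_MASK;

static void hard_irq_enable(void)
{
	printf("hard interrupts enabled\n");
}

static void may_hard_irq_enable(void)
{
	/* Only drop HARD_DIS once we really re-enable; if a soft-masked
	 * source still needs hard masking, leave both flags alone. */
	if (!(irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		irq_happened &= ~PACA_IRQ_HARD_DIS;
		hard_irq_enable();
	}
}

int main(void)
{
	may_hard_irq_enable();	/* pending hard-mask source: nothing happens */
	irq_happened &= ~PACA_IRQ_MUST_HARD_MASK;
	may_hard_irq_enable();	/* now clears HARD_DIS and re-enables */
	return 0;
}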
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 20febe0b7f32..ab3a4fba38e3 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -30,6 +30,7 @@
#include <asm/machdep.h>
#include <asm/types.h>
#include <asm/pci-bridge.h>
+#include <asm/asm-const.h>
#define IOMMU_PAGE_SHIFT_4K 12
#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
@@ -69,6 +70,8 @@ struct iommu_table_ops {
long index,
unsigned long *hpa,
enum dma_data_direction *direction);
+
+ __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
void (*clear)(struct iommu_table *tbl,
long index, long npages);
@@ -117,15 +120,16 @@ struct iommu_table {
unsigned long *it_map; /* A simple allocation bitmap for now */
unsigned long it_page_shift;/* table iommu page size */
struct list_head it_group_list;/* List of iommu_table_group_link */
- unsigned long *it_userspace; /* userspace view of the table */
+ __be64 *it_userspace; /* userspace view of the table */
struct iommu_table_ops *it_ops;
struct kref it_kref;
+ int it_nid;
};
+#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \
+ ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
- ((tbl)->it_userspace ? \
- &((tbl)->it_userspace[(entry) - (tbl)->it_offset]) : \
- NULL)
+ ((tbl)->it_ops->useraddrptr((tbl), (entry), true))
/* Pure 2^n version of get_order */
static inline __attribute_const__
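The IOMMU_TABLE_USERSPACE_ENTRY rework above swaps a direct
it_userspace[] dereference for the new useraddrptr() table op, so the
backing store can be allocated lazily while the real-mode variant
(alloc == false) is guaranteed never to allocate. A toy user-space
model of that indirection (names mirror the kernel's, but the demo
implementation and sizes are invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long __be64;	/* endianness ignored in the demo */

struct iommu_table;

struct iommu_table_ops {
	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
};

struct iommu_table {
	__be64 *it_userspace;
	long it_size;
	struct iommu_table_ops *it_ops;
};

static __be64 *demo_useraddrptr(struct iommu_table *tbl, long index, bool alloc)
{
	if (!tbl->it_userspace) {
		if (!alloc)			/* real mode must not allocate */
			return NULL;
		tbl->it_userspace = calloc(tbl->it_size, sizeof(__be64));
		if (!tbl->it_userspace)
			return NULL;
	}
	return &tbl->it_userspace[index];
}

int main(void)
{
	struct iommu_table_ops ops = { .useraddrptr = demo_useraddrptr };
	struct iommu_table tbl = { .it_size = 16, .it_ops = &ops };

	printf("RM lookup, no backing yet: %p\n",
	       (void *)tbl.it_ops->useraddrptr(&tbl, 3, false));
	printf("VM lookup, allocates:      %p\n",
	       (void *)tbl.it_ops->useraddrptr(&tbl, 3, true));
	free(tbl.it_userspace);
	return 0;
}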
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 9a287e0ac8b1..a3b2cf940b4e 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -14,7 +14,7 @@
#include <linux/types.h>
#include <asm/feature-fixups.h>
-#include <asm/asm-compat.h>
+#include <asm/asm-const.h>
#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
#define JUMP_LABEL_NOP_SIZE 4
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
index e5f048bbcb7c..931260b59ac6 100644
--- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -9,6 +9,8 @@
#ifndef ASM_KVM_BOOKE_HV_ASM_H
#define ASM_KVM_BOOKE_HV_ASM_H
+#include <asm/feature-fixups.h>
+
#ifdef __ASSEMBLY__
/*
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index cb57f29f531d..295b3dbb2698 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -5,7 +5,7 @@
* PPC440 support
*/
-#include <asm/page.h>
+#include <asm/asm-const.h>
#define PPC44x_MMUCR_TID 0x000000ff
#define PPC44x_MMUCR_STS 0x00010000
@@ -124,19 +124,19 @@ typedef struct {
/* Size of the TLBs used for pinning in lowmem */
#define PPC_PIN_SIZE (1 << 28) /* 256M */
-#if (PAGE_SHIFT == 12)
+#if defined(CONFIG_PPC_4K_PAGES)
#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
#define PPC47x_TLBE_SIZE PPC47x_TLB0_4K
#define mmu_virtual_psize MMU_PAGE_4K
-#elif (PAGE_SHIFT == 14)
+#elif defined(CONFIG_PPC_16K_PAGES)
#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
#define PPC47x_TLBE_SIZE PPC47x_TLB0_16K
#define mmu_virtual_psize MMU_PAGE_16K
-#elif (PAGE_SHIFT == 16)
+#elif defined(CONFIG_PPC_64K_PAGES)
#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
#define PPC47x_TLBE_SIZE PPC47x_TLB0_64K
#define mmu_virtual_psize MMU_PAGE_64K
-#elif (PAGE_SHIFT == 18)
+#elif defined(CONFIG_PPC_256K_PAGES)
#define PPC44x_TLBE_SIZE PPC44x_TLB_256K
#define mmu_virtual_psize MMU_PAGE_256K
#else
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 61d15ce92278..13ea441ac531 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -5,8 +5,7 @@
#include <linux/types.h>
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
/*
* MMU features bit definitions
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 7c46a98cc7f4..a507a65b0866 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -8,7 +8,8 @@
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
-#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
+#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
+#include <asm/asm-405.h>
extern unsigned long ioremap_bot;
@@ -222,10 +223,6 @@ static inline unsigned long long pte_update(pte_t *p,
}
#endif /* CONFIG_PTE_64BIT */
-/*
- * 2.6 calls this without flushing the TLB entry; this is wrong
- * for our hash-based implementation, we fix that up here.
- */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index dd0c7236208f..7cd6809f4d33 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -3,11 +3,12 @@
#define _ASM_POWERPC_NOHASH_64_PGTABLE_H
/*
* This file contains the functions and defines necessary to modify and use
- * the ppc64 hashed page table.
+ * the ppc64 non-hashed page table.
*/
#include <asm/nohash/64/pgtable-4k.h>
#include <asm/barrier.h>
+#include <asm/asm-const.h>
#ifdef CONFIG_PPC_64K_PAGES
#error "Page size not supported"
@@ -37,7 +38,7 @@
/*
* The vmalloc space starts at the beginning of that region, and
- * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * occupies a quarter of it on Book3E
* (we keep a quarter for the virtual memmap)
*/
#define VMALLOC_START KERN_VIRT_START
@@ -77,7 +78,7 @@
/*
* Defines the address of the vmemmap area, in its own region
- * hash table CPUs and after the vmalloc space on Book3E
+ * after the vmalloc space on Book3E
*/
#define VMEMMAP_BASE VMALLOC_END
#define VMEMMAP_END KERN_IO_START
@@ -247,14 +248,6 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}
-/*
- * We currently remove entries from the hashtable regardless of whether
- * the entry was young or dirty. The generic routines only flush if the
- * entry was young or dirty which is not good enough.
- *
- * We should be more intelligent about this but for the moment we override
- * these functions and force a tlb flush unconditionally
- */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
@@ -278,9 +271,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
}
-/* Set the dirty and/or accessed bits atomically in a linux PTE, this
- * function doesn't need to flush the hash entry
- */
+/* Set the dirty and/or accessed bits atomically in a linux PTE */
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t entry,
unsigned long address,
diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
new file mode 100644
index 000000000000..b1d8fec29169
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_TLBFLUSH_H
+#define _ASM_POWERPC_NOHASH_TLBFLUSH_H
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - local_flush_tlb_mm(mm, full) flushes the specified mm context on
+ * the local processor
+ * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
+ * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *
+ */
+
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+struct vm_area_struct;
+struct mm_struct;
+
+#define MMU_NO_CONTEXT ((unsigned int)-1)
+
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+ int tsize, int ind);
+
+#ifdef CONFIG_SMP
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+ int tsize, int ind);
+#else
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
+#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
+#endif
+
+#endif /* _ASM_POWERPC_NOHASH_TLBFLUSH_H */
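As a plain-C illustration of the CONFIG_SMP dispatch at the bottom of
the new header: on UP builds the flush_* entry points are simply
aliased to their local_* counterparts, so callers never care whether
other CPUs must be notified. The function body below is a stand-in,
not the kernel implementation:

#include <stdio.h>

struct mm_struct { int id; };

static void local_flush_tlb_mm(struct mm_struct *mm)
{
	printf("flushing TLB for mm %d on this CPU\n", mm->id);
}

#ifdef CONFIG_SMP
void flush_tlb_mm(struct mm_struct *mm);	/* would IPI other CPUs too */
#else
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#endif

int main(void)
{
	struct mm_struct mm = { .id = 1 };

	flush_tlb_mm(&mm);	/* UP: expands to the local flush */
	return 0;
}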
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 3bab299eda49..8365353330b4 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -206,9 +206,11 @@
#define OPAL_NPU_SPA_CLEAR_CACHE 160
#define OPAL_NPU_TL_SET 161
#define OPAL_SENSOR_READ_U64 162
+#define OPAL_SENSOR_GROUP_ENABLE 163
#define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164
#define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165
-#define OPAL_LAST 165
+#define OPAL_NX_COPROC_INIT 167
+#define OPAL_LAST 167
#define QUIESCE_HOLD 1 /* Spin all calls at entry */
#define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index e1b2910c6e81..834e7e29f1e4 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -292,6 +292,8 @@ int opal_set_powercap(u32 handle, int token, u32 pcap);
int opal_get_power_shift_ratio(u32 handle, int token, u32 *psr);
int opal_set_power_shift_ratio(u32 handle, int token, u32 psr);
int opal_sensor_group_clear(u32 group_hndl, int token);
+int opal_sensor_group_enable(u32 group_hndl, int token, bool enable);
+int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct);
s64 opal_signal_system_reset(s32 cpu);
s64 opal_quiesce(u64 shutdown_type, s32 cpu);
@@ -305,6 +307,8 @@ extern void opal_configure_cores(void);
extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_put_chars_atomic(uint32_t vtermno, const char *buf, int total_len);
+extern int opal_flush_console(uint32_t vtermno);
extern void hvc_opal_init_early(void);
@@ -326,6 +330,7 @@ extern int opal_async_wait_response_interruptible(uint64_t token,
struct opal_msg *msg);
extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
extern int opal_get_sensor_data_u64(u32 sensor_hndl, u64 *sensor_data);
+extern int sensor_group_enable(u32 grp_hndl, bool enable);
struct rtc_time;
extern time64_t opal_get_boot_time(void);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 6d34bd71139d..ad4f16164619 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -187,11 +187,6 @@ struct paca_struct {
u8 subcore_sibling_mask;
/* Flag to request this thread not to stop */
atomic_t dont_stop;
- /*
- * Pointer to an array which contains pointer
- * to the sibling threads' paca.
- */
- struct paca_struct **thread_sibling_pacas;
/* The PSSCR value that the kernel requested before going to stop */
u64 requested_psscr;
@@ -252,6 +247,9 @@ struct paca_struct {
void *rfi_flush_fallback_area;
u64 l1d_flush_size;
#endif
+#ifdef CONFIG_PPC_PSERIES
+ u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */
+#endif /* CONFIG_PPC_PSERIES */
} ____cacheline_aligned;
extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index db7be0779d55..f6a1265face2 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -16,8 +16,7 @@
#else
#include <asm/types.h>
#endif
-#include <asm/asm-compat.h>
-#include <asm/kdump.h>
+#include <asm/asm-const.h>
/*
* On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index af04acdb873f..c0ce17e909ef 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -10,6 +10,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <asm/asm-const.h>
+
/*
* We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
* specific, every notion of page number shared with the firmware, TCEs,
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 5ba80cffb505..20ebf153c871 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -13,7 +13,8 @@
DECLARE_STATIC_KEY_TRUE(pkey_disabled);
extern int pkeys_total; /* total pkeys as per device tree */
-extern u32 initial_allocation_mask; /* bits set for reserved keys */
+extern u32 initial_allocation_mask; /* bits set for the initially allocated keys */
+extern u32 reserved_allocation_mask; /* bits set for reserved keys */
#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
VM_PKEY_BIT3 | VM_PKEY_BIT4)
@@ -83,19 +84,21 @@ static inline u16 pte_to_pkey_bits(u64 pteflags)
#define __mm_pkey_is_allocated(mm, pkey) \
(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))
-#define __mm_pkey_is_reserved(pkey) (initial_allocation_mask & \
+#define __mm_pkey_is_reserved(pkey) (reserved_allocation_mask & \
pkey_alloc_mask(pkey))
static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
- /* A reserved key is never considered as 'explicitly allocated' */
- return ((pkey < arch_max_pkey()) &&
- !__mm_pkey_is_reserved(pkey) &&
- __mm_pkey_is_allocated(mm, pkey));
+ if (pkey < 0 || pkey >= arch_max_pkey())
+ return false;
+
+ /* Reserved keys are never allocated. */
+ if (__mm_pkey_is_reserved(pkey))
+ return false;
+
+ return __mm_pkey_is_allocated(mm, pkey);
}
-extern void __arch_activate_pkey(int pkey);
-extern void __arch_deactivate_pkey(int pkey);
/*
* Returns a positive, 5-bit key on success, or -1 on failure.
* Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
@@ -124,11 +127,6 @@ static inline int mm_pkey_alloc(struct mm_struct *mm)
ret = ffz((u32)mm_pkey_allocation_map(mm));
__mm_pkey_allocated(mm, ret);
- /*
- * Enable the key in the hardware
- */
- if (ret > 0)
- __arch_activate_pkey(ret);
return ret;
}
@@ -140,10 +138,6 @@ static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
if (!mm_pkey_is_allocated(mm, pkey))
return -EINVAL;
- /*
- * Disable the key in the hardware
- */
- __arch_deactivate_pkey(pkey);
__mm_pkey_free(mm, pkey);
return 0;
@@ -187,6 +181,16 @@ static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
{
if (static_branch_likely(&pkey_disabled))
return -EINVAL;
+
+ /*
+ * userspace should not change pkey-0 permissions.
+ * pkey-0 is associated with every page in the kernel.
+ * If userspace denies any permission on pkey-0, the
+ * kernel cannot operate.
+ */
+ if (pkey == 0)
+ return init_val ? -EINVAL : 0;
+
return __arch_set_user_pkey_access(tsk, pkey, init_val);
}
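Two user-visible rules fall out of the pkeys changes above: out-of-range
and reserved keys are never reported as allocated, and userspace may not
restrict pkey-0, since denying it would make every kernel-accessible
page unusable. A stand-alone sketch of the check order (the masks and
key count are made up for the demo):

#include <stdbool.h>
#include <stdio.h>

#define NUM_PKEYS 32

static unsigned int reserved_allocation_mask = 0x1;	/* pkey 0 reserved */
static unsigned int allocation_map = 0x3;		/* pkeys 0 and 1 set */

static bool mm_pkey_is_allocated(int pkey)
{
	if (pkey < 0 || pkey >= NUM_PKEYS)
		return false;

	/* Reserved keys are never considered allocated. */
	if (reserved_allocation_mask & (1u << pkey))
		return false;

	return allocation_map & (1u << pkey);
}

int main(void)
{
	printf("pkey 0:  %d (reserved)\n", mm_pkey_is_allocated(0));
	printf("pkey 1:  %d (allocated)\n", mm_pkey_is_allocated(1));
	printf("pkey 33: %d (out of range)\n", mm_pkey_is_allocated(33));
	return 0;
}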
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index d2d8c28db336..7f627e3f4da4 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -50,13 +50,6 @@ int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pci_dev *dev, int num);
void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pci_dev *dev);
-
-/* Support for the cxl kernel api on the real PHB (instead of vPHB) */
-int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable);
-bool pnv_pci_on_cxl_phb(struct pci_dev *dev);
-struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose);
-void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu);
-
#endif
struct pnv_php_slot {
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 4436887bc415..665af14850e4 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -12,8 +12,7 @@
#ifndef _ASM_POWERPC_PPC_OPCODE_H
#define _ASM_POWERPC_PPC_OPCODE_H
-#include <linux/stringify.h>
-#include <asm/asm-compat.h>
+#include <asm/asm-const.h>
#define __REG_R0 0
#define __REG_R1 1
@@ -367,6 +366,8 @@
#define PPC_INST_STFDX 0x7c0005ae
#define PPC_INST_LVX 0x7c0000ce
#define PPC_INST_STVX 0x7c0001ce
+#define PPC_INST_VCMPEQUD 0x100000c7
+#define PPC_INST_VCMPEQUB 0x10000006
/* macros to insert fields into opcodes */
#define ___PPC_RA(a) (((a) & 0x1f) << 16)
@@ -397,6 +398,7 @@
#define __PPC_BI(s) (((s) & 0x1f) << 16)
#define __PPC_CT(t) (((t) & 0x0f) << 21)
#define __PPC_SPR(r) ((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
+#define __PPC_RC21 (0x1 << 10)
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
@@ -568,4 +570,12 @@
((IH & 0x7) << 21))
#define PPC_INVALIDATE_ERAT PPC_SLBIA(7)
+#define VCMPEQUD_RC(vrt, vra, vrb) stringify_in_c(.long PPC_INST_VCMPEQUD | \
+ ___PPC_RT(vrt) | ___PPC_RA(vra) | \
+ ___PPC_RB(vrb) | __PPC_RC21)
+
+#define VCMPEQUB_RC(vrt, vra, vrb) stringify_in_c(.long PPC_INST_VCMPEQUB | \
+ ___PPC_RT(vrt) | ___PPC_RA(vra) | \
+ ___PPC_RB(vrb) | __PPC_RC21)
+
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
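The new VCMPEQUD_RC/VCMPEQUB_RC macros compose record-form (Rc=1)
vector compares by OR-ing the field encodings into the base opcode;
setting __PPC_RC21 makes the instruction also update CR6. A quick
user-space check of how the encoding composes (the ___PPC_R* field
helpers are reproduced from ppc-opcode.h; the register numbers are
arbitrary):

#include <stdio.h>

#define ___PPC_RT(t)	(((t) & 0x1f) << 21)
#define ___PPC_RA(a)	(((a) & 0x1f) << 16)
#define ___PPC_RB(b)	(((b) & 0x1f) << 11)
#define __PPC_RC21	(0x1 << 10)
#define PPC_INST_VCMPEQUD 0x100000c7

int main(void)
{
	/* vcmpequd. v0,v0,v1 - the trailing dot means Rc=1 */
	unsigned int insn = PPC_INST_VCMPEQUD | ___PPC_RT(0) |
			    ___PPC_RA(0) | ___PPC_RB(1) | __PPC_RC21;

	printf("0x%08x\n", insn);	/* prints 0x10000cc7 */
	return 0;
}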
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 75ece56dcd62..b5d023680801 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -9,6 +9,7 @@
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>
+#include <asm/feature-fixups.h>
#ifdef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 5debe337ea9d..52fadded5c1e 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -39,10 +39,9 @@
#endif /* CONFIG_PPC64 */
#ifndef __ASSEMBLY__
-#include <linux/compiler.h>
-#include <linux/cache.h>
+#include <linux/types.h>
+#include <asm/thread_info.h>
#include <asm/ptrace.h>
-#include <asm/types.h>
#include <asm/hw_breakpoint.h>
/* We do _not_ want to define new machine types at all, those must die
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index e4923686e43a..447cbd1bee99 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -24,6 +24,7 @@
#define _ASM_POWERPC_PTRACE_H
#include <uapi/asm/ptrace.h>
+#include <asm/asm-const.h>
#ifdef __powerpc64__
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 562568414cf4..486b7c83b8c5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -13,6 +13,8 @@
#include <linux/stringify.h>
#include <asm/cputable.h>
+#include <asm/asm-const.h>
+#include <asm/feature-fixups.h>
/* Pickup Book E specific registers. */
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
index 3ba9c6f096fc..74c2c57c492a 100644
--- a/arch/powerpc/include/asm/reg_a2.h
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -12,6 +12,8 @@
#ifndef __ASM_POWERPC_REG_A2_H__
#define __ASM_POWERPC_REG_A2_H__
+#include <asm/asm-const.h>
+
#define SPRN_TENSR 0x1b5
#define SPRN_TENS 0x1b6 /* Thread ENable Set */
#define SPRN_TENC 0x1b7 /* Thread ENable Clear */
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index d7ccf93e6279..a21f529c43d9 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -7,6 +7,8 @@
#ifndef __ASM_POWERPC_REG_FSL_EMB_H__
#define __ASM_POWERPC_REG_FSL_EMB_H__
+#include <linux/stringify.h>
+
#ifndef __ASSEMBLY__
/* Performance Monitor Registers */
#define mfpmr(rn) ({unsigned int rval; \
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 44989b22383c..759597bf0fd8 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -22,6 +22,7 @@ enum stf_barrier_type {
void setup_stf_barrier(void);
void do_stf_barrier_fixups(enum stf_barrier_type types);
+void setup_count_cache_flush(void);
static inline void security_ftr_set(unsigned long feature)
{
@@ -59,6 +60,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Indirect branch prediction cache disabled
#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
+// bcctr 2,0,0 triggers a hardware assisted count cache flush
+#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull
+
// Features indicating need for Spectre/Meltdown mitigations
@@ -74,6 +78,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Firmware configuration indicates user favours security over performance
#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
+// Software required to flush count cache on context switch
+#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
+
// Features enabled by default
#define SEC_FTR_DEFAULT \
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 8721fd004291..1a951b00465d 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,11 +52,15 @@ enum l1d_flush_type {
void setup_rfi_flush(enum l1d_flush_type, bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
void setup_barrier_nospec(void);
+#else
+static inline void setup_barrier_nospec(void) { };
+#endif
void do_barrier_nospec_fixups(bool enable);
extern bool barrier_nospec_enabled;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
#else
static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 29ffaabdf75b..95b66a0c639b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -56,7 +56,6 @@ struct smp_ops_t {
int (*cpu_bootable)(unsigned int nr);
};
-extern void smp_flush_nmi_ipi(u64 delay_us);
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index bc66712bdc3c..28f5dae25db6 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -6,13 +6,20 @@
#ifdef CONFIG_SPARSEMEM
/*
* SECTION_SIZE_BITS 2^N: how big each section will be
- * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
*/
#define SECTION_SIZE_BITS 24
-
-#define MAX_PHYSADDR_BITS 46
+/*
+ * If we store section details in page->flags, we can't increase
+ * MAX_PHYSMEM_BITS: if we increase SECTIONS_WIDTH we no longer store
+ * node details in page->flags, and page_to_nid() must do a
+ * page->section->node lookup. Hence only increase it for VMEMMAP.
+ */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define MAX_PHYSMEM_BITS 47
+#else
#define MAX_PHYSMEM_BITS 46
+#endif
#endif /* CONFIG_SPARSEMEM */
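The arithmetic behind the new limits: with SECTION_SIZE_BITS = 24 each
section covers 16MB, so MAX_PHYSMEM_BITS = 47 gives 2^47 bytes (128TB)
of address space split into 2^(47-24) = 2^23 sections. A quick sanity
check in plain C (illustrative only):

#include <stdio.h>

#define SECTION_SIZE_BITS 24
#define MAX_PHYSMEM_BITS  47

int main(void)
{
	unsigned long long sections = 1ULL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS);

	printf("addressable memory: %llu TB\n", (1ULL << MAX_PHYSMEM_BITS) >> 40);
	printf("%u MB sections:     %llu\n", 1u << (SECTION_SIZE_BITS - 20), sections);
	return 0;
}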
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 72dc4ddc2972..685c72310f5d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -24,9 +24,9 @@
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
-#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
+#include <asm/asm-405.h>
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
diff --git a/arch/powerpc/include/asm/stacktrace.h b/arch/powerpc/include/asm/stacktrace.h
new file mode 100644
index 000000000000..6149b53b3bc8
--- /dev/null
+++ b/arch/powerpc/include/asm/stacktrace.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Stack trace functions.
+ *
+ * Copyright 2018, Murilo Opsfelder Araujo, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_STACKTRACE_H
+#define _ASM_POWERPC_STACKTRACE_H
+
+void show_user_instructions(struct pt_regs *regs);
+
+#endif /* _ASM_POWERPC_STACKTRACE_H */
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index 9b8cedf618f4..1647de15a31e 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -50,6 +50,8 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
return __memset64(p, v, n * 8);
}
#else
+#define __HAVE_ARCH_STRLEN
+
extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 6ec546090ba1..aca70fb43147 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -3,8 +3,8 @@
#define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__
-#include <linux/stringify.h>
#include <asm/feature-fixups.h>
+#include <asm/asm-const.h>
#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index f308dfeb2746..3c0002044bc9 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -9,6 +9,8 @@
#ifndef _ASM_POWERPC_THREAD_INFO_H
#define _ASM_POWERPC_THREAD_INFO_H
+#include <asm/asm-const.h>
+
#ifdef __KERNEL__
#define THREAD_SHIFT CONFIG_THREAD_SHIFT
@@ -25,7 +27,6 @@
#include <linux/cache.h>
#include <asm/processor.h>
#include <asm/page.h>
-#include <linux/stringify.h>
#include <asm/accounting.h>
/*
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 9138baccebb0..f0e571b2dc7c 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -17,7 +17,6 @@
#include <asm/pgtable.h>
#endif
#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
@@ -53,7 +52,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
if (!tlb->page_size)
tlb->page_size = page_size;
else if (tlb->page_size != page_size) {
- tlb_flush_mmu(tlb);
+ if (!tlb->fullmm)
+ tlb_flush_mmu(tlb);
/*
* update the page size after flush for the new
* mmu_gather.
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 7d5a157c7832..61fba43bf8b2 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -1,87 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
-/*
- * TLB flushing:
- *
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - local_flush_tlb_mm(mm, full) flushes the specified mm context on
- * the local processor
- * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
- * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifdef __KERNEL__
-
-#ifdef CONFIG_PPC_MMU_NOHASH
-/*
- * TLB flushing for software loaded TLB chips
- *
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
- * flush_tlb_kernel_range are best implemented as tlbia vs
- * specific tlbie's
- */
-
-struct vm_area_struct;
-struct mm_struct;
-
-#define MMU_NO_CONTEXT ((unsigned int)-1)
-
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
-extern void local_flush_tlb_mm(struct mm_struct *mm);
-extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-
-extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
- int tsize, int ind);
-
-#ifdef CONFIG_SMP
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
- int tsize, int ind);
-#else
-#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
-#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
-#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
-#endif
-
-#elif defined(CONFIG_PPC_STD_MMU_32)
-
-#define MMU_NO_CONTEXT (0)
-/*
- * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
- */
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
- flush_tlb_page(vma, vmaddr);
-}
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
-{
- flush_tlb_mm(mm);
-}
-
-#elif defined(CONFIG_PPC_BOOK3S_64)
-#include <asm/book3s/64/tlbflush.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/tlbflush.h>
#else
-#error Unsupported MMU type
-#endif
+#include <asm/nohash/tlbflush.h>
+#endif /* !CONFIG_PPC_BOOK3S */
-#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 468653ce844c..bac225bb7f64 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -2,7 +2,6 @@
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H
-#include <asm/asm-compat.h>
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -250,10 +249,17 @@ do { \
} \
} while (0)
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err; \
- unsigned long __gu_val; \
+ __long_type(*(ptr)) __gu_val; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
@@ -267,7 +273,7 @@ do { \
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT; \
- unsigned long __gu_val = 0; \
+ __long_type(*(ptr)) __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
@@ -281,7 +287,7 @@ do { \
#define __get_user_nosleep(x, ptr, size) \
({ \
long __gu_err; \
- unsigned long __gu_val; \
+ __long_type(*(ptr)) __gu_val; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
barrier_nospec(); \
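The __long_type() macro above leans on __builtin_choose_expr being
resolved at compile time: __typeof__ of the chosen constant yields
unsigned long when the access fits in a register and unsigned long long
otherwise, which is what lets a 64-bit get_user() work on 32-bit. A
minimal user-space demonstration (GCC/Clang; macro reproduced from the
hunk above):

#include <stdio.h>

#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
	unsigned int small = 0;
	unsigned long long big = 0;

	/* Prints "4 8" on 32-bit targets and "8 8" on 64-bit ones. */
	printf("%zu %zu\n", sizeof(__long_type(small)), sizeof(__long_type(big)));
	return 0;
}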
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 8d1a2792484f..3c704f5dd3ae 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -87,7 +87,6 @@ extern int xive_smp_prepare_cpu(unsigned int cpu);
extern void xive_smp_setup_cpu(void);
extern void xive_smp_disable_cpu(void);
extern void xive_teardown_cpu(void);
-extern void xive_kexec_teardown_cpu(int secondary);
extern void xive_shutdown(void);
extern void xive_flush_interrupt(void);
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 2b4c40b255e4..3b66f2c19c84 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -7,10 +7,10 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
endif
-ifeq ($(CONFIG_PPC32),y)
+ifdef CONFIG_PPC32
CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
@@ -42,9 +42,10 @@ obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
+obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
@@ -62,13 +63,13 @@ obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_FA_DUMP) += fadump.o
-ifeq ($(CONFIG_PPC32),y)
+ifdef CONFIG_PPC32
obj-$(CONFIG_E500) += idle_e500.o
endif
obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
-ifeq ($(CONFIG_FSL_BOOKE),y)
+ifdef CONFIG_FSL_BOOKE
obj-$(CONFIG_HIBERNATION) += swsusp_booke.o
else
obj-$(CONFIG_HIBERNATION) += swsusp_$(BITS).o
@@ -109,9 +110,11 @@ obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o crash.o \
machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file_$(BITS).o kexec_elf_$(BITS).o
-ifeq ($(CONFIG_HAVE_IMA_KEXEC)$(CONFIG_IMA),yy)
+ifdef CONFIG_HAVE_IMA_KEXEC
+ifdef CONFIG_IMA
obj-y += ima_kexec.o
endif
+endif
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
@@ -164,7 +167,7 @@ PHONY += systbl_chk
systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
$(call cmd,systbl_chk)
-ifeq ($(CONFIG_PPC_OF_BOOT_TRAMPOLINE),y)
+ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
$(obj)/built-in.a: prom_init_check
quiet_cmd_prom_init_check = CALL $<
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0a0544335950..89cf15566c4e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -766,7 +766,6 @@ int main(void)
OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state);
OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask);
OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
- OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas);
OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
OFFSET(PACA_DONT_STOP, paca_struct, dont_stop);
#define STOP_SPR(x, f) OFFSET(x, paca_struct, stop_sprs.f)
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index a9f3970693e1..fa3c2c91290c 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -16,6 +16,7 @@
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/mmu.h>
+#include <asm/feature-fixups.h>
_GLOBAL(__setup_cpu_603)
mflr r5
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c8fc9691f8c7..2da01340c84c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -447,25 +447,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
- { /* Power8 DD1: Does not support doorbell IPIs */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x004d0100,
- .cpu_name = "POWER8 (raw)",
- .cpu_features = CPU_FTRS_POWER8_DD1,
- .cpu_user_features = COMMON_USER_POWER8,
- .cpu_user_features2 = COMMON_USER2_POWER8,
- .mmu_features = MMU_FTRS_POWER8,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power8,
- .cpu_restore = __restore_cpu_power8,
- .machine_check_early = __machine_check_early_realmode_p8,
- .platform = "power8",
- },
{ /* Power8 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004d0000,
@@ -485,25 +466,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
- { /* Power9 DD1*/
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x004e0100,
- .cpu_name = "POWER9 (raw)",
- .cpu_features = CPU_FTRS_POWER9_DD1,
- .cpu_user_features = COMMON_USER_POWER9,
- .cpu_user_features2 = COMMON_USER2_POWER9,
- .mmu_features = MMU_FTRS_POWER9,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power9,
- .cpu_restore = __restore_cpu_power9,
- .machine_check_early = __machine_check_early_realmode_p9,
- .platform = "power9",
- },
{ /* Power9 DD2.0 */
.pvr_mask = 0xffffefff,
.pvr_value = 0x004e0200,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 17c8b99680f2..43a3ce2301e8 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -23,7 +23,6 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
-#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 96dd3d871986..f432054234a4 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -701,9 +701,7 @@ static __init void cpufeatures_cpu_quirks(void)
/*
* Not all quirks can be derived from the cpufeatures device tree.
*/
- if ((version & 0xffffff00) == 0x004e0100)
- cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
- else if ((version & 0xffffefff) == 0x004e0200)
+ if ((version & 0xffffefff) == 0x004e0200)
; /* DD2.0 has no feature flag */
else if ((version & 0xffffefff) == 0x004e0201)
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 5746809cfaad..6ebba3e48b01 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1087,7 +1087,7 @@ static int eeh_init(void)
if (eeh_enabled())
pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
- else
+ else if (!eeh_has_flag(EEH_POSTPONED_PROBE))
pr_info("EEH: No capable adapters found\n");
return ret;
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 973577f2141c..e58c3f467db5 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -33,6 +33,9 @@
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/asm-405.h>
+#include <asm/feature-fixups.h>
+#include <asm/barrier.h>
/*
* MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
@@ -358,6 +361,15 @@ syscall_dotrace_cont:
ori r10,r10,sys_call_table@l
slwi r0,r0,2
bge- 66f
+
+ barrier_nospec_asm
+ /*
+ * Prevent the load of the handler below (based on the user-passed
+ * system call number) from being speculatively executed until the
+ * test against NR_syscalls and the branch to 66f above have
+ * committed.
+ */
+
lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
mtlr r10
addi r9,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 729e9ef4d3bb..2206912ea4f0 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -25,6 +25,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
+#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
@@ -38,11 +39,13 @@
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
+#include <asm/feature-fixups.h>
/*
* System calls.
@@ -504,6 +507,57 @@ _GLOBAL(ret_from_kernel_thread)
li r3,0
b .Lsyscall_exit
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#define FLUSH_COUNT_CACHE \
+1: nop; \
+ patch_site 1b, patch__call_flush_count_cache
+
+
+#define BCCTR_FLUSH .long 0x4c400420
+
+.macro nops number
+ .rept \number
+ nop
+ .endr
+.endm
+
+.balign 32
+.global flush_count_cache
+flush_count_cache:
+ /* Save LR into r9 */
+ mflr r9
+
+ .rept 64
+ bl .+4
+ .endr
+ b 1f
+ nops 6
+
+ .balign 32
+ /* Restore LR */
+1: mtlr r9
+ li r9,0x7fff
+ mtctr r9
+
+ BCCTR_FLUSH
+
+2: nop
+ patch_site 2b, patch__flush_count_cache_return
+
+ nops 3
+
+ .rept 278
+ .balign 32
+ BCCTR_FLUSH
+ nops 7
+ .endr
+
+ blr
+#else
+#define FLUSH_COUNT_CACHE
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -535,6 +589,8 @@ _GLOBAL(_switch)
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
+ FLUSH_COUNT_CACHE
+
/*
* On SMP kernels, care must be taken because a task may be
* scheduled off CPUx and on to CPUy. Memory ordering must be
@@ -993,6 +1049,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r4,_TRAP(r1)
/*
+ * PACA_IRQ_HARD_DIS won't always be set here, so set it now
+ * to reconcile the IRQ state. Tracing is already accounted for.
+ */
+ lbz r4,PACAIRQHAPPENED(r13)
+ ori r4,r4,PACA_IRQ_HARD_DIS
+ stb r4,PACAIRQHAPPENED(r13)
+
+ /*
* Then find the right handler and call it. Interrupts are
* still soft-disabled and we keep them that way.
*/
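
The flush_count_cache sequence above works by displacement: it performs a long run of harmless branch-and-links so every stale entry in the link stack and count cache is overwritten before returning. A loose C analogue of that idea (illustrative only; a real flush must stay in asm so the compiler cannot collapse the calls, and dummy_target is an invented name):

static void __attribute__((noinline)) dummy_target(void)
{
	__asm__ __volatile__("" ::: "memory");	/* keep each call from being elided */
}

static void displace_link_stack(void)
{
	int i;

	for (i = 0; i < 64; i++)	/* 64 matches the .rept count above */
		dummy_target();		/* each call overwrites one predictor entry */
}
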
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9b6e653e501a..6d6e144a28ce 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -27,6 +27,7 @@
#include <asm/hw_irq.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
+#include <asm/feature-fixups.h>
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -949,7 +950,11 @@ kernel_dbg_exc:
.macro masked_interrupt_book3e paca_irq full_mask
lbz r10,PACAIRQHAPPENED(r13)
+ .if \full_mask == 1
+ ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
+ .else
ori r10,r10,\paca_irq
+ .endif
stb r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 285c6465324a..ea04dfb8c092 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -18,6 +18,7 @@
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
+#include <asm/feature-fixups.h>
/*
* There are a few constraints to be concerned with.
@@ -126,8 +127,8 @@ EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
* MSR_RI is not enabled, because PACA_EXNMI and the NMI stack are
* being used, so a nested NMI exception would corrupt it.
*/
- EXCEPTION_PROLOG_PSERIES_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
- IDLETEST, 0x100)
+ EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
+ IDLETEST, 0x100)
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)
@@ -230,8 +231,8 @@ EXC_COMMON_BEGIN(system_reset_common)
TRAMP_REAL_BEGIN(system_reset_fwnmi)
SET_SCRATCH0(r13) /* save r13 */
/* See comment at system_reset exception */
- EXCEPTION_PROLOG_PSERIES_NORI(PACA_EXNMI, system_reset_common,
- EXC_STD, NOTEST, 0x100)
+ EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
+ NOTEST, 0x100)
#endif /* CONFIG_PPC_PSERIES */
@@ -276,9 +277,7 @@ BEGIN_FTR_SECTION
*
* This interrupt can wake directly from idle. If that is the case,
* the machine check is handled then the idle wakeup code is called
- * to restore state. In that case, the POWER9 DD1 idle PACA workaround
- * is not applied in the early machine check code, which will cause
- * bugs.
+ * to restore state.
*/
mr r11,r1 /* Save r1 */
lhz r10,PACA_IN_MCE(r13)
@@ -339,7 +338,7 @@ machine_check_pSeries_0:
* nested machine check corrupts it. machine_check_common enables
* MSR_RI.
*/
- EXCEPTION_PROLOG_PSERIES_1_NORI(machine_check_common, EXC_STD)
+ EXCEPTION_PROLOG_2_NORI(machine_check_common, EXC_STD)
TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
@@ -769,13 +768,9 @@ EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
.globl hardware_interrupt_hv;
hardware_interrupt_hv:
BEGIN_FTR_SECTION
- _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
- EXC_HV, SOFTEN_TEST_HV,
- IRQS_DISABLED)
+ MASKABLE_EXCEPTION_HV(0x500, hardware_interrupt_common, IRQS_DISABLED)
FTR_SECTION_ELSE
- _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
- EXC_STD, SOFTEN_TEST_PR,
- IRQS_DISABLED)
+ MASKABLE_EXCEPTION(0x500, hardware_interrupt_common, IRQS_DISABLED)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
@@ -783,13 +778,11 @@ EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_hv:
BEGIN_FTR_SECTION
- _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
- EXC_HV, SOFTEN_TEST_HV,
- IRQS_DISABLED)
+ MASKABLE_RELON_EXCEPTION_HV(0x500, hardware_interrupt_common,
+ IRQS_DISABLED)
FTR_SECTION_ELSE
- _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
- EXC_STD, SOFTEN_TEST_PR,
- IRQS_DISABLED)
+ __MASKABLE_RELON_EXCEPTION(0x500, hardware_interrupt_common,
+ EXC_STD, SOFTEN_TEST_PR, IRQS_DISABLED)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
@@ -1328,7 +1321,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
#endif
KVMTEST_HV(0x1500)
- EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
+ EXCEPTION_PROLOG_2(denorm_common, EXC_HV)
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1448,7 +1441,7 @@ EXC_VIRT_NONE(0x5800, 0x100)
std r12,PACA_EXGEN+EX_R12(r13); \
GET_SCRATCH0(r10); \
std r10,PACA_EXGEN+EX_R13(r13); \
- EXCEPTION_PROLOG_PSERIES_1(soft_nmi_common, _H)
+ EXCEPTION_PROLOG_2(soft_nmi_common, _H)
/*
* Branch to soft_nmi_interrupt using the emergency stack. The emergency
@@ -1500,7 +1493,10 @@ masked_##_H##interrupt: \
mfspr r10,SPRN_##_H##SRR1; \
xori r10,r10,MSR_EE; /* clear MSR_EE */ \
mtspr SPRN_##_H##SRR1,r10; \
-2: mtcrf 0x80,r9; \
+ ori r11,r11,PACA_IRQ_HARD_DIS; \
+ stb r11,PACAIRQHAPPENED(r13); \
+2: /* done */ \
+ mtcrf 0x80,r9; \
std r1,PACAR1(r13); \
ld r9,PACA_EXGEN+EX_R9(r13); \
ld r10,PACA_EXGEN+EX_R10(r13); \
@@ -1526,6 +1522,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
TRAMP_REAL_BEGIN(rfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
+ std r1,PACA_EXRFI+EX_R12(r13)
+ ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
@@ -1560,12 +1558,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
+ ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
rfid
TRAMP_REAL_BEGIN(hrfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
+ std r1,PACA_EXRFI+EX_R12(r13)
+ ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
@@ -1600,6 +1601,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
+ ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
hrfid
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 07e8396d472b..986ec476fd5d 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
static const struct fadump_mem_struct *fdm_active;
static DEFINE_MUTEX(fadump_mutex);
-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
+struct fad_crash_memory_ranges *crash_memory_ranges;
+int crash_memory_ranges_size;
int crash_mem_ranges;
+int max_crash_mem_ranges;
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node,
@@ -868,38 +870,107 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
return 0;
}
-static inline void fadump_add_crash_memory(unsigned long long base,
- unsigned long long end)
+static void free_crash_memory_ranges(void)
+{
+ kfree(crash_memory_ranges);
+ crash_memory_ranges = NULL;
+ crash_memory_ranges_size = 0;
+ max_crash_mem_ranges = 0;
+}
+
+/*
+ * Allocate or reallocate crash memory ranges array in incremental units
+ * of PAGE_SIZE.
+ */
+static int allocate_crash_memory_ranges(void)
{
+ struct fad_crash_memory_ranges *new_array;
+ u64 new_size;
+
+ new_size = crash_memory_ranges_size + PAGE_SIZE;
+ pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
+ new_size);
+
+ new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
+ if (new_array == NULL) {
+ pr_err("Insufficient memory for setting up crash memory ranges\n");
+ free_crash_memory_ranges();
+ return -ENOMEM;
+ }
+
+ crash_memory_ranges = new_array;
+ crash_memory_ranges_size = new_size;
+ max_crash_mem_ranges = (new_size /
+ sizeof(struct fad_crash_memory_ranges));
+ return 0;
+}
+
+static inline int fadump_add_crash_memory(unsigned long long base,
+ unsigned long long end)
+{
+ u64 start, size;
+ bool is_adjacent = false;
+
if (base == end)
- return;
+ return 0;
+
+ /*
+ * Fold adjacent memory ranges to reduce the number of memory ranges
+ * and hence the PT_LOAD segment count.
+ */
+ if (crash_mem_ranges) {
+ start = crash_memory_ranges[crash_mem_ranges - 1].base;
+ size = crash_memory_ranges[crash_mem_ranges - 1].size;
+
+ if ((start + size) == base)
+ is_adjacent = true;
+ }
+ if (!is_adjacent) {
+ /* resize the array on reaching the limit */
+ if (crash_mem_ranges == max_crash_mem_ranges) {
+ int ret;
+
+ ret = allocate_crash_memory_ranges();
+ if (ret)
+ return ret;
+ }
+ start = base;
+ crash_memory_ranges[crash_mem_ranges].base = start;
+ crash_mem_ranges++;
+ }
+
+ crash_memory_ranges[crash_mem_ranges - 1].size = (end - start);
pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
- crash_mem_ranges, base, end - 1, (end - base));
- crash_memory_ranges[crash_mem_ranges].base = base;
- crash_memory_ranges[crash_mem_ranges].size = end - base;
- crash_mem_ranges++;
+ (crash_mem_ranges - 1), start, end - 1, (end - start));
+ return 0;
}
-static void fadump_exclude_reserved_area(unsigned long long start,
+static int fadump_exclude_reserved_area(unsigned long long start,
unsigned long long end)
{
unsigned long long ra_start, ra_end;
+ int ret = 0;
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
if ((ra_start < end) && (ra_end > start)) {
if ((start < ra_start) && (end > ra_end)) {
- fadump_add_crash_memory(start, ra_start);
- fadump_add_crash_memory(ra_end, end);
+ ret = fadump_add_crash_memory(start, ra_start);
+ if (ret)
+ return ret;
+
+ ret = fadump_add_crash_memory(ra_end, end);
} else if (start < ra_start) {
- fadump_add_crash_memory(start, ra_start);
+ ret = fadump_add_crash_memory(start, ra_start);
} else if (ra_end < end) {
- fadump_add_crash_memory(ra_end, end);
+ ret = fadump_add_crash_memory(ra_end, end);
}
} else
- fadump_add_crash_memory(start, end);
+ ret = fadump_add_crash_memory(start, end);
+
+ return ret;
}
static int fadump_init_elfcore_header(char *bufp)
@@ -939,13 +1010,22 @@ static int fadump_init_elfcore_header(char *bufp)
* Traverse through memblock structure and setup crash memory ranges. These
* ranges will be used to create PT_LOAD program headers in the elfcore header.
*/
-static void fadump_setup_crash_memory_ranges(void)
+static int fadump_setup_crash_memory_ranges(void)
{
struct memblock_region *reg;
unsigned long long start, end;
+ int ret;
pr_debug("Setup crash memory ranges.\n");
crash_mem_ranges = 0;
+
+ /* allocate memory for crash memory ranges for the first time */
+ if (!max_crash_mem_ranges) {
+ ret = allocate_crash_memory_ranges();
+ if (ret)
+ return ret;
+ }
+
/*
* add the first memory chunk (RMA_START through boot_memory_size) as
* a separate memory chunk. The reason is, at the time crash firmware
@@ -953,7 +1033,9 @@ static void fadump_setup_crash_memory_ranges(void)
* specified during fadump registration. We need to create a separate
* program header for this chunk with the correct offset.
*/
- fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+ ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+ if (ret)
+ return ret;
for_each_memblock(memory, reg) {
start = (unsigned long long)reg->base;
@@ -973,8 +1055,12 @@ static void fadump_setup_crash_memory_ranges(void)
}
/* add this range excluding the reserved dump area. */
- fadump_exclude_reserved_area(start, end);
+ ret = fadump_exclude_reserved_area(start, end);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
/*
@@ -1097,6 +1183,7 @@ static int register_fadump(void)
{
unsigned long addr;
void *vaddr;
+ int ret;
/*
* If no memory is reserved then we can not register for firmware-
@@ -1105,7 +1192,9 @@ static int register_fadump(void)
if (!fw_dump.reserve_dump_area_size)
return -ENODEV;
- fadump_setup_crash_memory_ranges();
+ ret = fadump_setup_crash_memory_ranges();
+ if (ret)
+ return ret;
addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
/* Initialize fadump crash info header. */
@@ -1183,6 +1272,7 @@ void fadump_cleanup(void)
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fadump_unregister_dump(&fdm);
+ free_crash_memory_ranges();
}
}
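
The allocation scheme introduced above grows the crash ranges array one PAGE_SIZE at a time with krealloc, recomputing how many entries fit after each growth. A standalone userspace sketch of the same pattern (realloc stands in for krealloc, 4096 for PAGE_SIZE; all names are invented):

#include <stdlib.h>

struct range { unsigned long long base, size; };

static struct range *ranges;
static size_t ranges_bytes;	/* bytes currently allocated */
static size_t max_ranges;	/* entries that fit in the allocation */

static int grow_ranges(void)
{
	size_t new_bytes = ranges_bytes + 4096;
	struct range *tmp = realloc(ranges, new_bytes);

	if (!tmp)
		return -1;	/* the kernel version frees and returns -ENOMEM */
	ranges = tmp;
	ranges_bytes = new_bytes;
	max_ranges = new_bytes / sizeof(*ranges);
	return 0;
}
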
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 6c509f39bbde..529dcc21c3f9 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -25,6 +25,8 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
#ifdef CONFIG_VSX
#define __REST_32FPVSRS(n,c,base) \
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 29b2fed93289..61ca27929355 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -35,6 +35,7 @@
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 41374a468d1c..b19d78410511 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -42,6 +42,7 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/asm-405.h>
/* As with the other PowerPC ports, it is expected that when code
* execution begins here, the following registers contain valid, yet
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 6eca15f25c73..4898e9491a1c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -44,6 +44,7 @@
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
/* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 6cab07e76732..6582f824d620 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -30,7 +30,6 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
-#include <asm/fixmap.h>
#include <asm/export.h>
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
@@ -873,6 +872,10 @@ start_here:
li r0,0
stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+ lis r6, swapper_pg_dir@ha
+ tophys(r6,r6)
+ mtspr SPRN_M_TW, r6
+
bl early_init /* We have to do this with MMU on */
/*
@@ -893,9 +896,6 @@ start_here:
* init's THREAD like the context switch code does, but this is
* easier......until someone changes init's static structures.
*/
- lis r6, swapper_pg_dir@ha
- tophys(r6,r6)
- mtspr SPRN_M_TW, r6
lis r4,2f@h
ori r4,r4,2f@l
tophys(r4,r4)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index bf4c6021515f..e2750b856c8f 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -43,6 +43,7 @@
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 1686916cc7f0..ff026c9d3cab 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -20,6 +20,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/feature-fixups.h>
.text
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index 2b269315d377..4e0d94d02030 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -36,7 +36,7 @@ _GLOBAL(\name)
*/
lbz r3,PACAIRQHAPPENED(r13)
cmpwi cr0,r3,0
- bnelr
+ bne 2f
/* Now we are going to mark ourselves as soft and hard enabled in
* order to be able to take interrupts while asleep. We inform lockdep
@@ -72,6 +72,11 @@ _GLOBAL(\name)
wrteei 1
\loop
+2:
+ lbz r10,PACAIRQHAPPENED(r13)
+ ori r10,r10,PACA_IRQ_HARD_DIS
+ stb r10,PACAIRQHAPPENED(r13)
+ blr
.endm
.macro BOOK3E_IDLE_LOOP
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 689306118b48..7f5ac2e8581b 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -23,6 +23,8 @@
#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
#undef DEBUG
@@ -469,43 +471,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
#endif
/*
- * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
- * HSPRG0 will be set to the HSPRG0 value of one of the
- * threads in this core. Thus the value we have in r13
- * may not be this thread's paca pointer.
- *
- * Fortunately, the TIR remains invariant. Since this thread's
- * paca pointer is recorded in all its sibling's paca, we can
- * correctly recover this thread's paca pointer if we
- * know the index of this thread in the core.
- *
- * This index can be obtained from the TIR.
- *
- * i.e, thread's position in the core = TIR.
- * If this value is i, then this thread's paca is
- * paca->thread_sibling_pacas[i].
- */
-power9_dd1_recover_paca:
- mfspr r4, SPRN_TIR
- /*
- * Since each entry in thread_sibling_pacas is 8 bytes
- * we need to left-shift by 3 bits. Thus r4 = i * 8
- */
- sldi r4, r4, 3
- /* Get &paca->thread_sibling_pacas[0] in r5 */
- ld r5, PACA_SIBLING_PACA_PTRS(r13)
- /* Load paca->thread_sibling_pacas[i] into r13 */
- ldx r13, r4, r5
- SET_PACA(r13)
- /*
- * Indicate that we have lost NVGPR state
- * which needs to be restored from the stack.
- */
- li r3, 1
- stb r3,PACA_NAPSTATELOST(r13)
- blr
-
-/*
* Called from machine check handler for powersave wakeups.
* Low level machine check processing has already been done. Now just
* go through the wake up path to get everything in order.
@@ -539,9 +504,6 @@ pnv_powersave_wakeup:
ld r2, PACATOC(r13)
BEGIN_FTR_SECTION
-BEGIN_FTR_SECTION_NESTED(70)
- bl power9_dd1_recover_paca
-END_FTR_SECTION_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
bl pnv_restore_hyp_resource_arch300
FTR_SECTION_ELSE
bl pnv_restore_hyp_resource_arch207
@@ -604,22 +566,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
-BEGIN_FTR_SECTION_NESTED(71)
- /*
- * Assume that we are waking up from the state
- * same as the Requested Level (RL) in the PSSCR
- * which are Bits 60-63
- */
- ld r5,PACA_REQ_PSSCR(r13)
- rldicl r5,r5,0,60
-FTR_SECTION_ELSE_NESTED(71)
/*
* 0-3 bits correspond to Power-Saving Level Status
* which indicates the idle state we are waking up from
*/
mfspr r5, SPRN_PSSCR
rldicl r5,r5,4,60
-ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
li r0, 0 /* clear requested_psscr to say we're awake */
std r0, PACA_REQ_PSSCR(r13)
cmpd cr4,r5,r4
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index b9b6ef510be1..583e55ac7d26 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -17,6 +17,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/feature-fixups.h>
.text
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 08faa93755f9..dd7471fe20bd 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -16,6 +16,7 @@
#include <asm/asm-offsets.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
+#include <asm/feature-fixups.h>
#undef DEBUG
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0682fef1f385..916ddc4aac44 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -145,8 +145,20 @@ notrace unsigned int __check_irq_replay(void)
trace_hardirqs_on();
trace_hardirqs_off();
+ /*
+ * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
+ * not be set, which means interrupts have only just been hard
+ * disabled as part of the local_irq_restore or interrupt return
+ * code. In that case, skip the decrementer check because it's
+ * expensive to read the TB.
+ *
+ * HARD_DIS then gets cleared here, but it's reconciled later.
+ * Either local_irq_disable will replay the interrupt and that
+ * will reconcile state like other hard interrupts. Or interrupt
+ * return will replay the interrupt and in that case it sets
+ * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
+ */
if (happened & PACA_IRQ_HARD_DIS) {
- /* Clear bit 0 which we wouldn't clear otherwise */
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
/*
@@ -248,24 +260,33 @@ notrace void arch_local_irq_restore(unsigned long mask)
* cannot have preempted.
*/
irq_happened = get_irq_happened();
- if (!irq_happened)
+ if (!irq_happened) {
+ /*
+ * FIXME. Here we'd like to be able to do:
+ *
+ * #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ * WARN_ON(!(mfmsr() & MSR_EE));
+ * #endif
+ *
+ * But currently it hits in a few paths, we should fix those and
+ * enable the warning.
+ */
return;
+ }
/*
* We need to hard disable to get a trusted value from
* __check_irq_replay(). We also need to soft-disable
* again to avoid warnings in there due to the use of
* per-cpu variables.
- *
- * We know that if the value in irq_happened is exactly 0x01
- * then we are already hard disabled (there are other less
- * common cases that we'll ignore for now), so we skip the
- * (expensive) mtmsrd.
*/
- if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ WARN_ON(!(mfmsr() & MSR_EE));
+#endif
__hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- else {
+ } else {
/*
* We should already be hard disabled here. We had bugs
* where that wasn't the case so let's dbl check it and
@@ -274,8 +295,8 @@ notrace void arch_local_irq_restore(unsigned long mask)
*/
if (WARN_ON(mfmsr() & MSR_EE))
__hard_irq_disable();
- }
#endif
+ }
irq_soft_mask_set(IRQS_ALL_DISABLED);
trace_hardirqs_off();
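
The arch_local_irq_restore() change above replaces an exact-value test with a bit test: the old code only skipped the expensive mtmsrd when irq_happened was exactly PACA_IRQ_HARD_DIS, while the new code skips it whenever the HARD_DIS bit is set at all. A self-contained sketch of the difference (constants invented for illustration):

#define IRQ_HARD_DIS	0x01	/* invented values, not the kernel's */
#define IRQ_DEC		0x08	/* example of another pending-interrupt bit */

static int must_hard_disable_old(unsigned char happened)
{
	/* IRQ_DEC|IRQ_HARD_DIS != IRQ_HARD_DIS: redundant hard disable */
	return happened != IRQ_HARD_DIS;
}

static int must_hard_disable_new(unsigned char happened)
{
	/* IRQ_DEC|IRQ_HARD_DIS has the bit set: already hard disabled, skip */
	return !(happened & IRQ_HARD_DIS);
}
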
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index e100ff324a85..c005088f6c9c 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -23,6 +23,7 @@
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#define KVM_MAGIC_PAGE (-4096)
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 6408f09dbbd9..6e7dbb7d527c 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -45,6 +45,7 @@
#include <asm/ppc_asm.h>
#include <asm/cache.h>
#include <asm/page.h>
+#include <asm/feature-fixups.h>
/* Usage:
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 936c7e2d421e..63f5a9311a29 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/ftrace.h>
+#include <asm/kdump.h>
#include <asm/machdep.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
@@ -188,7 +189,12 @@ void __init reserve_crashkernel(void)
(unsigned long)(crashk_res.start >> 20),
(unsigned long)(memblock_phys_mem_size() >> 20));
- memblock_reserve(crashk_res.start, crash_size);
+ if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
+ memblock_reserve(crashk_res.start, crash_size)) {
+ pr_err("Failed to reserve memory for crashkernel!\n");
+ crashk_res.start = crashk_res.end = 0;
+ return;
+ }
}
int overlaps_crashkernel(unsigned long start, unsigned long size)
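
The hunk above turns an unconditional memblock_reserve() into a check-then-reserve with full rollback. A minimal sketch of the shape, with invented stand-ins for the memblock calls passed in as function pointers:

struct res { unsigned long start, end; };

static int reserve_crash_window(struct res *r, unsigned long size,
				int (*is_memory)(unsigned long, unsigned long),
				int (*reserve)(unsigned long, unsigned long))
{
	if (!is_memory(r->start, size) || reserve(r->start, size)) {
		r->start = r->end = 0;	/* roll back: leave no stale window */
		return -1;		/* the kernel logs an error here */
	}
	return 0;
}
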
diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
index 0bd23dc789a4..c77e95e9b384 100644
--- a/arch/powerpc/kernel/machine_kexec_file_64.c
+++ b/arch/powerpc/kernel/machine_kexec_file_64.c
@@ -269,18 +269,14 @@ int setup_new_fdt(const struct kimage *image, void *fdt,
ret = fdt_setprop_u64(fdt, chosen_node,
"linux,initrd-start",
initrd_load_addr);
- if (ret < 0) {
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ goto err;
/* initrd-end is the first address after the initrd image. */
ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end",
initrd_load_addr + initrd_len);
- if (ret < 0) {
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ goto err;
ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len);
if (ret) {
@@ -292,10 +288,8 @@ int setup_new_fdt(const struct kimage *image, void *fdt,
if (cmdline != NULL) {
ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline);
- if (ret < 0) {
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ goto err;
} else {
ret = fdt_delprop(fdt, chosen_node, "bootargs");
if (ret && ret != -FDT_ERR_NOTFOUND) {
@@ -311,10 +305,12 @@ int setup_new_fdt(const struct kimage *image, void *fdt,
}
ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
- if (ret) {
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
- }
+ if (ret)
+ goto err;
return 0;
+
+err:
+ pr_err("Error setting up the new device tree.\n");
+ return -EINVAL;
}
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 38c5b4764bfe..3497c8329c1d 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -62,11 +62,8 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
#ifdef CONFIG_PPC_BOOK3S_64
static void flush_and_reload_slb(void)
{
- struct slb_shadow *slb;
- unsigned long i, n;
-
/* Invalidate all SLBs */
- asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+ slb_flush_all_realmode();
#ifdef CONFIG_KVM_BOOK3S_HANDLER
/*
@@ -76,22 +73,17 @@ static void flush_and_reload_slb(void)
if (get_paca()->kvm_hstate.in_guest)
return;
#endif
-
- /* For host kernel, reload the SLBs from shadow SLB buffer. */
- slb = get_slb_shadow();
- if (!slb)
+ if (early_radix_enabled())
return;
- n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
-
- /* Load up the SLB entries from shadow SLB */
- for (i = 0; i < n; i++) {
- unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
- unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
+ /*
+ * This probably shouldn't happen, but it may be possible that this is
+ * called in early boot before the SLB shadows are allocated.
+ */
+ if (!get_slb_shadow())
+ return;
- rb = (rb & ~0xFFFul) | i;
- asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
- }
+ slb_restore_bolted_realmode();
}
#endif
@@ -257,12 +249,12 @@ static const struct mce_derror_table mce_p7_derror_table[] = {
{ 0x00000400, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0x00000080, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000040, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
@@ -290,12 +282,12 @@ static const struct mce_derror_table mce_p8_derror_table[] = {
{ 0x00000200, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, /* SECONDARY ERAT */
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0x00000080, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0, false, 0, 0, 0, 0 } };
static const struct mce_derror_table mce_p9_derror_table[] = {
@@ -320,12 +312,12 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
{ 0x00000200, false,
MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0x00000080, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000040, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
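
All three table edits above rely on the same property: the derror tables are scanned top to bottom and the first matching DSISR bit wins, so the MULTIHIT row must precede the PARITY row when both bits can be set for the same error. A standalone sketch of that first-match scan (types and names invented):

struct err_row { unsigned int mask; int type; };

static int classify(unsigned int dsisr, const struct err_row *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (dsisr & tbl[i].mask)
			return tbl[i].type;	/* row order decides ties */
	return -1;				/* no bit matched */
}
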
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 3f7a9a2d2435..695b24a2d954 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -34,6 +34,7 @@
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
.text
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index fa267e94090a..262ba9481781 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -28,6 +28,7 @@
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
.text
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 1b3c6835e730..77371c9ef3d8 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -72,13 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr,
do_feature_fixups(powerpc_firmware_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
if (sect != NULL)
do_barrier_nospec_fixups_range(barrier_nospec_enabled,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
-#endif
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
sect = find_section(hdr, sechdrs, "__lwsync_fixup");
if (sect != NULL)
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 471aac313b89..88e4f69a09e5 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -368,9 +368,6 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
-#ifdef DEBUG
- memset(&oirq, 0xff, sizeof(oirq));
-#endif
/* Try to get a mapping from the device-tree */
virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
if (virq <= 0) {
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index 8afbe213d729..6d1b42ee797c 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -12,6 +12,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/asm-compat.h>
/*
* Grab the register values as they are now.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9ef4aea9fffe..913c5725cdb2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -583,6 +583,7 @@ static void save_all(struct task_struct *tsk)
__giveup_spe(tsk);
msr_check_and_clear(msr_all_available);
+ thread_pkey_regs_save(&tsk->thread);
}
void flush_all_to_thread(struct task_struct *tsk)
@@ -716,6 +717,13 @@ void switch_booke_debug_regs(struct debug_reg *new_debug)
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
+static void set_breakpoint(struct arch_hw_breakpoint *brk)
+{
+ preempt_disable();
+ __set_breakpoint(brk);
+ preempt_enable();
+}
+
static void set_debug_reg_defaults(struct thread_struct *thread)
{
thread->hw_brk.address = 0;
@@ -828,13 +836,6 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
WARN_ON_ONCE(1);
}
-void set_breakpoint(struct arch_hw_breakpoint *brk)
-{
- preempt_disable();
- __set_breakpoint(brk);
- preempt_enable();
-}
-
/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
@@ -866,8 +867,7 @@ static inline bool tm_enabled(struct task_struct *tsk)
return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}
-static void tm_reclaim_thread(struct thread_struct *thr,
- struct thread_info *ti, uint8_t cause)
+static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
/*
* Use the current MSR TM suspended bit to track if we have
@@ -914,7 +914,7 @@ static void tm_reclaim_thread(struct thread_struct *thr,
void tm_reclaim_current(uint8_t cause)
{
tm_enable();
- tm_reclaim_thread(&current->thread, current_thread_info(), cause);
+ tm_reclaim_thread(&current->thread, cause);
}
static inline void tm_reclaim_task(struct task_struct *tsk)
@@ -945,7 +945,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
thr->regs->ccr, thr->regs->msr,
thr->regs->trap);
- tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
+ tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
TM_DEBUG("--- tm_reclaim on pid %d complete\n",
tsk->pid);
@@ -1250,17 +1250,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
* mappings. If the new process has the foreign real address
* mappings, we must issue a cp_abort to clear any state and
* prevent snooping, corruption or a covert channel.
- *
- * DD1 allows paste into normal system memory so we do an
- * unpaired copy, rather than cp_abort, to clear the buffer,
- * since cp_abort is quite expensive.
*/
- if (current_thread_info()->task->thread.used_vas) {
+ if (current_thread_info()->task->thread.used_vas)
asm volatile(PPC_CP_ABORT);
- } else if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- asm volatile(PPC_COPY(%0, %1)
- : : "r"(dummy_copy_buffer), "r"(0));
- }
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -1307,6 +1299,38 @@ static void show_instructions(struct pt_regs *regs)
pr_cont("\n");
}
+void show_user_instructions(struct pt_regs *regs)
+{
+ unsigned long pc;
+ int i;
+
+ pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
+
+ pr_info("%s[%d]: code: ", current->comm, current->pid);
+
+ for (i = 0; i < instructions_to_print; i++) {
+ int instr;
+
+ if (!(i % 8) && (i > 0)) {
+ pr_cont("\n");
+ pr_info("%s[%d]: code: ", current->comm, current->pid);
+ }
+
+ if (probe_kernel_address((unsigned int __user *)pc, instr)) {
+ pr_cont("XXXXXXXX ");
+ } else {
+ if (regs->nip == pc)
+ pr_cont("<%08x> ", instr);
+ else
+ pr_cont("%08x ", instr);
+ }
+
+ pc += sizeof(int);
+ }
+
+ pr_cont("\n");
+}
+
struct regbit {
unsigned long bit;
const char *name;
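
show_user_instructions() above centres a window of instructions on the faulting NIP, starting three quarters of the window before it so the faulting instruction lands inside the dump. A standalone sketch of just the windowing arithmetic (assuming a window of 16 four-byte instructions; show_window is an invented name and prints addresses where the real code prints the fetched instruction words):

#include <stdio.h>

static void show_window(unsigned long nip)
{
	const int n = 16;	/* assumed instructions_to_print */
	unsigned long pc = nip - (n * 3 / 4) * sizeof(int);
	int i;

	for (i = 0; i < n; i++, pc += sizeof(int))
		/* address stands in for the instruction word here */
		printf(pc == nip ? "<%08lx> " : "%08lx ", pc);
	printf("\n");
}
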
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05e7fb47a7a4..c4d7078e5295 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
-#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
@@ -440,6 +439,29 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
return 1;
}
+/*
+ * Compare the range against the max mem limit and update the
+ * size if it crosses the limit.
+ */
+
+#ifdef CONFIG_SPARSEMEM
+static bool validate_mem_limit(u64 base, u64 *size)
+{
+ u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);
+
+ if (base >= max_mem)
+ return false;
+ if ((base + *size) > max_mem)
+ *size = max_mem - base;
+ return true;
+}
+#else
+static bool validate_mem_limit(u64 base, u64 *size)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_PPC_PSERIES
/*
* Interpret the ibm dynamic reconfiguration memory LMBs.
@@ -494,7 +516,8 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
}
DBG("Adding: %llx -> %llx\n", base, size);
- memblock_add(base, size);
+ if (validate_mem_limit(base, &size))
+ memblock_add(base, size);
} while (--rngs);
}
#endif /* CONFIG_PPC_PSERIES */
@@ -548,8 +571,10 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
}
/* Add the chunk to the MEMBLOCK list */
- if (add_mem_to_memblock)
- memblock_add(base, size);
+ if (add_mem_to_memblock) {
+ if (validate_mem_limit(base, &size))
+ memblock_add(base, size);
+ }
}
static void __init early_reserve_mem_dt(void)
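
validate_mem_limit() above either rejects a range that starts beyond the physical-memory limit or trims its size so it ends exactly at the limit. A self-contained sketch of the clamp (46 bits is an arbitrary example value standing in for MAX_PHYSMEM_BITS):

static int clamp_to_limit(unsigned long long base, unsigned long long *size)
{
	const unsigned long long max_mem = 1ULL << 46;	/* example limit */

	if (base >= max_mem)
		return 0;			/* whole range is above the limit */
	if (base + *size > max_mem)
		*size = max_mem - base;		/* trim the tail */
	return 1;
}
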
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5425dd3d6a9f..9b38a2e5dd35 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
-#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
@@ -629,7 +628,7 @@ static void __init early_cmdline_parse(void)
const char *opt;
char *p;
- int l = 0;
+ int l __maybe_unused = 0;
prom_cmd_line[0] = 0;
p = prom_cmd_line;
@@ -1422,7 +1421,10 @@ static void __init reserve_mem(u64 base, u64 size)
static void __init prom_init_mem(void)
{
phandle node;
- char *path, type[64];
+#ifdef DEBUG_PROM
+ char *path;
+#endif
+ char type[64];
unsigned int plen;
cell_t *p, *endp;
__be32 val;
@@ -1443,7 +1445,9 @@ static void __init prom_init_mem(void)
prom_debug("root_size_cells: %x\n", rsc);
prom_debug("scanning memory:\n");
+#ifdef DEBUG_PROM
path = prom_scratch;
+#endif
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
@@ -2102,8 +2106,6 @@ static void __init prom_init_stdout(void)
stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
if (stdout_node != PROM_ERROR) {
val = cpu_to_be32(stdout_node);
- prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
- &val, sizeof(val));
/* If it's a display, note it */
memset(type, 0, sizeof(type));
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index a8b277362931..f6f469fc4073 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -8,6 +8,8 @@
#include <linux/device.h>
#include <linux/seq_buf.h>
+#include <asm/asm-prototypes.h>
+#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>
@@ -15,7 +17,15 @@
unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+enum count_cache_flush_type {
+ COUNT_CACHE_FLUSH_NONE = 0x1,
+ COUNT_CACHE_FLUSH_SW = 0x2,
+ COUNT_CACHE_FLUSH_HW = 0x4,
+};
+static enum count_cache_flush_type count_cache_flush_type;
+
bool barrier_nospec_enabled;
+static bool no_nospec;
static void enable_barrier_nospec(bool enable)
{
@@ -42,8 +52,17 @@ void setup_barrier_nospec(void)
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
- enable_barrier_nospec(enable);
+ if (!no_nospec)
+ enable_barrier_nospec(enable);
+}
+
+static int __init handle_nospectre_v1(char *p)
+{
+ no_nospec = true;
+
+ return 0;
}
+early_param("nospectre_v1", handle_nospectre_v1);
#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
@@ -82,6 +101,7 @@ static __init int barrier_nospec_debugfs_init(void)
device_initcall(barrier_nospec_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
+#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
bool thread_priv;
@@ -114,51 +134,72 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
return sprintf(buf, "Vulnerable\n");
}
+#endif
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
- if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
- return sprintf(buf, "Not affected\n");
+ struct seq_buf s;
- if (barrier_nospec_enabled)
- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ seq_buf_init(&s, buf, PAGE_SIZE - 1);
- return sprintf(buf, "Vulnerable\n");
+ if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
+ if (barrier_nospec_enabled)
+ seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
+ else
+ seq_buf_printf(&s, "Vulnerable");
+
+ if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
+ seq_buf_printf(&s, ", ori31 speculation barrier enabled");
+
+ seq_buf_printf(&s, "\n");
+ } else
+ seq_buf_printf(&s, "Not affected\n");
+
+ return s.len;
}
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
- bool bcs, ccd, ori;
struct seq_buf s;
+ bool bcs, ccd;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
- ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
- if (bcs || ccd) {
+ if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+ bool comma = false;
seq_buf_printf(&s, "Mitigation: ");
- if (bcs)
+ if (bcs) {
seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
+ comma = true;
+ }
+
+ if (ccd) {
+ if (comma)
+ seq_buf_printf(&s, ", ");
+ seq_buf_printf(&s, "Indirect branch cache disabled");
+ comma = true;
+ }
- if (bcs && ccd)
+ if (comma)
seq_buf_printf(&s, ", ");
- if (ccd)
- seq_buf_printf(&s, "Indirect branch cache disabled");
+ seq_buf_printf(&s, "Software count cache flush");
+
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+ seq_buf_printf(&s, "(hardware accelerated)");
} else
seq_buf_printf(&s, "Vulnerable");
- if (ori)
- seq_buf_printf(&s, ", ori31 speculation barrier enabled");
-
seq_buf_printf(&s, "\n");
return s.len;
}
+#ifdef CONFIG_PPC_BOOK3S_64
/*
* Store-forwarding barrier support.
*/
@@ -306,3 +347,71 @@ static __init int stf_barrier_debugfs_init(void)
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
+
+static void toggle_count_cache_flush(bool enable)
+{
+ if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+ patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+ pr_info("count-cache-flush: software flush disabled.\n");
+ return;
+ }
+
+ patch_branch_site(&patch__call_flush_count_cache,
+ (u64)&flush_count_cache, BRANCH_SET_LINK);
+
+ if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
+ count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
+ pr_info("count-cache-flush: full software flush sequence enabled.\n");
+ return;
+ }
+
+ patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
+ count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
+ pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
+}
+
+void setup_count_cache_flush(void)
+{
+ toggle_count_cache_flush(true);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int count_cache_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ toggle_count_cache_flush(enable);
+
+ return 0;
+}
+
+static int count_cache_flush_get(void *data, u64 *val)
+{
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
+ *val = 0;
+ else
+ *val = 1;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
+ count_cache_flush_set, "%llu\n");
+
+static __init int count_cache_flush_debugfs_init(void)
+{
+ debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
+ NULL, &fops_count_cache_flush);
+ return 0;
+}
+device_initcall(count_cache_flush_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+#endif /* CONFIG_PPC_BOOK3S_64 */
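
toggle_count_cache_flush() above picks one of three states: leave the patched call site as a nop, enable the full software flush sequence, or additionally patch in the early blr so only the hardware-assisted prefix runs. A compact sketch of just that decision (names invented, standalone):

enum flush_type { FLUSH_NONE, FLUSH_SW, FLUSH_HW };

static enum flush_type pick_flush(int enable, int fw_wants_flush, int hw_assist)
{
	if (!enable || !fw_wants_flush)
		return FLUSH_NONE;	/* call site stays a nop */
	if (!hw_assist)
		return FLUSH_SW;	/* full software displacement sequence */
	return FLUSH_HW;		/* early return patched in: short prefix only */
}
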
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 40b44bb53a4e..93fa0c99681e 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -972,6 +972,8 @@ void __init setup_arch(char **cmdline_p)
if (ppc_md.setup_arch)
ppc_md.setup_arch();
+ setup_barrier_nospec();
+
paging_init();
/* Initialize the MMU context management stuff. */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 74457485574b..8c507be12c3c 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -40,6 +40,10 @@
#include <asm/code-patching.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
+#include <asm/kdump.h>
+#include <asm/feature-fixups.h>
+
+#include "setup.h"
#define DBG(fmt...)
@@ -95,11 +99,10 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
* We do the initial parsing of the flat device-tree and prepare
* for the MMU to be fully initialized.
*/
-extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
-
notrace void __init machine_init(u64 dt_ptr)
{
- unsigned int *addr = &memset_nocache_branch;
+ unsigned int *addr = (unsigned int *)((unsigned long)&patch__memset_nocache +
+ patch__memset_nocache);
unsigned long insn;
/* Configure static keys first, now that we're relocated. */
@@ -108,7 +111,7 @@ notrace void __init machine_init(u64 dt_ptr)
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
+ patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP);
insn = create_cond_branch(addr, branch_target(addr), 0x820000);
patch_instruction(addr, insn); /* replace b by bne cr0 */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 225bc5f91049..6a501b25dd85 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -68,6 +68,7 @@
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
+#include <asm/feature-fixups.h>
#include "setup.h"
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 4794d6b4f4d2..b19d832ef386 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -423,7 +423,8 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
fn(regs);
nmi_ipi_lock();
- nmi_ipi_busy_count--;
+ if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
+ nmi_ipi_busy_count--;
out:
nmi_ipi_unlock_end(&flags);
@@ -448,29 +449,11 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
}
}
-void smp_flush_nmi_ipi(u64 delay_us)
-{
- unsigned long flags;
-
- nmi_ipi_lock_start(&flags);
- while (nmi_ipi_busy_count) {
- nmi_ipi_unlock_end(&flags);
- udelay(1);
- if (delay_us) {
- delay_us--;
- if (!delay_us)
- return;
- }
- nmi_ipi_lock_start(&flags);
- }
- nmi_ipi_unlock_end(&flags);
-}
-
/*
* - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
* - fn is the target callback function.
* - delay_us > 0 is the delay before giving up waiting for targets to
- * enter the handler, == 0 specifies indefinite delay.
+ * complete executing the handler, == 0 specifies indefinite delay.
*/
int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
{
@@ -507,8 +490,23 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
do_smp_send_nmi_ipi(cpu, safe);
+ nmi_ipi_lock();
+ /* nmi_ipi_busy_count is held here, so unlock/lock is okay */
while (!cpumask_empty(&nmi_ipi_pending_mask)) {
+ nmi_ipi_unlock();
udelay(1);
+ nmi_ipi_lock();
+ if (delay_us) {
+ delay_us--;
+ if (!delay_us)
+ break;
+ }
+ }
+
+ while (nmi_ipi_busy_count > 1) {
+ nmi_ipi_unlock();
+ udelay(1);
+ nmi_ipi_lock();
if (delay_us) {
delay_us--;
if (!delay_us)
@@ -516,12 +514,17 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
}
}
- nmi_ipi_lock();
if (!cpumask_empty(&nmi_ipi_pending_mask)) {
- /* Could not gather all CPUs */
+ /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
ret = 0;
cpumask_clear(&nmi_ipi_pending_mask);
}
+ if (nmi_ipi_busy_count > 1) {
+ /* Timeout waiting for CPUs to execute fn */
+ ret = 0;
+ nmi_ipi_busy_count = 1;
+ }
+
nmi_ipi_busy_count--;
nmi_ipi_unlock_end(&flags);
@@ -597,7 +600,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
* IRQs are already hard disabled by the smp_handle_nmi_ipi.
*/
nmi_ipi_lock();
- nmi_ipi_busy_count--;
+ if (nmi_ipi_busy_count > 1)
+ nmi_ipi_busy_count--;
nmi_ipi_unlock();
spin_begin();
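
The smp.c changes above make the sender and the handlers share nmi_ipi_busy_count: the sender holds one reference, each running handler holds another, and on timeout the sender clamps the count back to its own reference, so a late handler must re-check before decrementing. A compact sketch of that protocol (locking elided; in the kernel every access happens under the NMI IPI lock):

static unsigned long busy_count;	/* sender's ref + one per running handler */

static void handler_done(void)
{
	if (busy_count > 1)	/* may race with the sender's timeout clamp */
		busy_count--;
}

static void sender_timeout(void)
{
	if (busy_count > 1)
		busy_count = 1;	/* reclaim stragglers' references; keep our own */
}
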
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index 34b73a262709..7a919e9a3400 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -7,6 +7,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
+#include <asm/feature-fixups.h>
/*
* Structure for storing CPU registers on the save area.
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 82d8aae81c6a..f83bf6f72cb0 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -13,6 +13,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/feature-fixups.h>
/*
* Structure for storing CPU registers on the save area.
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index ff12f47a96b6..6bffbc5affe7 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -13,6 +13,7 @@
#include <asm/reg.h>
#include <asm/bug.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
#ifdef CONFIG_VSX
/* See fpu.S, this is borrowed from there */
@@ -312,8 +313,8 @@ _GLOBAL(tm_reclaim)
blr
- /* void __tm_recheckpoint(struct thread_struct *thread,
- * unsigned long orig_msr)
+ /*
+ * void __tm_recheckpoint(struct thread_struct *thread)
* - Restore the checkpointed register state saved by tm_reclaim
* when we switch_to a process.
*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0e17dcb48720..070e96f1773a 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -70,6 +70,7 @@
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
+#include <asm/stacktrace.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -96,6 +97,19 @@ EXPORT_SYMBOL(__debugger_fault_handler);
#define TM_DEBUG(x...) do { } while(0)
#endif
+static const char *signame(int signr)
+{
+ switch (signr) {
+ case SIGBUS: return "bus error";
+ case SIGFPE: return "floating point exception";
+ case SIGILL: return "illegal instruction";
+ case SIGSEGV: return "segfault";
+ case SIGTRAP: return "unhandled trap";
+ }
+
+ return "unknown signal";
+}
+
/*
* Trap & Exception support
*/
@@ -301,26 +315,44 @@ void user_single_step_siginfo(struct task_struct *tsk,
info->si_addr = (void __user *)regs->nip;
}
+static bool show_unhandled_signals_ratelimited(void)
+{
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ return show_unhandled_signals && __ratelimit(&rs);
+}
+
+static void show_signal_msg(int signr, struct pt_regs *regs, int code,
+ unsigned long addr)
+{
+ if (!show_unhandled_signals_ratelimited())
+ return;
+
+ if (!unhandled_signal(current, signr))
+ return;
+
+ pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
+ current->comm, current->pid, signame(signr), signr,
+ addr, regs->nip, regs->link, code);
+
+ print_vma_addr(KERN_CONT " in ", regs->nip);
+
+ pr_cont("\n");
+
+ show_user_instructions(regs);
+}
void _exception_pkey(int signr, struct pt_regs *regs, int code,
- unsigned long addr, int key)
+ unsigned long addr, int key)
{
siginfo_t info;
- const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
- "at %08lx nip %08lx lr %08lx code %x\n";
- const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
- "at %016lx nip %016lx lr %016lx code %x\n";
if (!user_mode(regs)) {
die("Exception in kernel mode", regs, signr);
return;
}
- if (show_unhandled_signals && unhandled_signal(current, signr)) {
- printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
- current->comm, current->pid, signr,
- addr, regs->nip, regs->link, code);
- }
+ show_signal_msg(signr, regs, code, addr);
if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
local_irq_enable();
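
show_signal_msg() above prints only when the global show_unhandled_signals switch is on, the rate limiter still has budget, and the signal is genuinely unhandled. A standalone sketch of that gating order (the token counter is a crude stand-in for the kernel's __ratelimit()):

static int show_unhandled = 1;	/* mirrors show_unhandled_signals */
static int tokens = 10;		/* stand-in for the ratelimit burst budget */

static int should_print(int signal_is_unhandled)
{
	if (!show_unhandled || tokens <= 0)
		return 0;	/* cheap global gates first */
	if (!signal_is_unhandled)
		return 0;	/* a handler exists: stay quiet */
	tokens--;
	return 1;
}
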
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index d2205b97628c..65b3bdb99f0b 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -22,7 +22,6 @@
#include <linux/security.h>
#include <linux/memblock.h>
-#include <asm/cpu_has_feature.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index f314fd475491..21165da0052d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -8,6 +8,7 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
/*
* Load state from memory into VMX registers including VSCR.
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 5baac79df97e..07ae018e550e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -153,14 +153,16 @@ SECTIONS
*(__rfi_flush_fixup)
__stop___rfi_flush_fixup = .;
}
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
. = ALIGN(8);
__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
__start___barrier_nospec_fixup = .;
*(__barrier_nospec_fixup)
__stop___barrier_nospec_fixup = .;
}
-#endif
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
EXCEPTION_TABLE(0)
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 1d82274f7e9f..3c6ab22a0c4e 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -174,7 +174,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)
continue;
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
}
- smp_flush_nmi_ipi(1000000);
}
/* Take the stuck CPUs out of the watch group */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index edaf4720d156..87348e498c89 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -28,7 +28,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 45c8ea4a0487..612169988a3d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -23,7 +23,6 @@
#include <linux/kvm_host.h>
#include <linux/highmem.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index cf9d686e8162..c92dd25bed23 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -23,7 +23,6 @@
#include <linux/kvm_host.h>
#include <linux/highmem.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7f3a8cf5d66f..3c0e8fb2b773 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -29,7 +29,6 @@
#include <linux/file.h>
#include <linux/debugfs.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 176f911ee983..0af1c0aea1fe 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -66,10 +66,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
bits = root & RPDS_MASK;
root = root & RPDB_MASK;
- /* P9 DD1 interprets RTS (radix tree size) differently */
offset = rts + 31;
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- offset -= 3;
/* current implementations only support 52-bit space */
if (offset != 52)
@@ -160,17 +157,7 @@ static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
unsigned long clr, unsigned long set,
unsigned long addr, unsigned int shift)
{
- unsigned long old = 0;
-
- if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
- pte_present(*ptep)) {
- /* have to invalidate it first */
- old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
- kvmppc_radix_tlbie_page(kvm, addr, shift);
- set |= _PAGE_PRESENT;
- old &= _PAGE_PRESENT;
- }
- return __radix_pte_update(ptep, clr, set) | old;
+ return __radix_pte_update(ptep, clr, set);
}
void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 688722acd692..066c665dc86f 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,6 +17,9 @@
* Authors: Alexander Graf <agraf@suse.de>
*/
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
#define SHADOW_SLB_ENTRY_LEN 0x10
#define OFFSET_ESID(x) (SHADOW_SLB_ENTRY_LEN * x)
#define OFFSET_VSID(x) ((SHADOW_SLB_ENTRY_LEN * x) + 8)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8c456fa691a5..e8afdd4f4814 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -31,7 +31,6 @@
#include <linux/iommu.h>
#include <linux/file.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
@@ -378,19 +377,19 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
if (!pua)
/* it_userspace allocation might be delayed */
return H_TOO_HARD;
- mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
+ mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
if (!mem)
return H_TOO_HARD;
mm_iommu_mapped_dec(mem);
- *pua = 0;
+ *pua = cpu_to_be64(0);
return H_SUCCESS;
}
@@ -437,7 +436,8 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
enum dma_data_direction dir)
{
long ret;
- unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ unsigned long hpa;
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
struct mm_iommu_table_group_mem_t *mem;
if (!pua)
@@ -464,7 +464,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (dir != DMA_NONE)
kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
- *pua = ua;
+ *pua = cpu_to_be64(ua);
return 0;
}
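
The hunks above switch the it_userspace slots from unsigned long to __be64, so the table keeps a fixed big-endian layout regardless of host endianness. A minimal userspace sketch of the round-trip; the helper definitions below are illustrative stand-ins for the kernel's byte-swap API, not the kernel code itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-ins for the kernel's byte-swap helpers; the names
     * mirror the kernel API but these definitions are illustrative. */
    static uint64_t cpu_to_be64(uint64_t x)
    {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return __builtin_bswap64(x);
    #else
        return x;
    #endif
    }

    static uint64_t be64_to_cpu(uint64_t x)
    {
        return cpu_to_be64(x); /* the byte swap is its own inverse */
    }

    int main(void)
    {
        uint64_t table[1];                   /* one it_userspace-style slot */
        uint64_t ua = 0x00003fff12340000ULL; /* hypothetical guest address */

        table[0] = cpu_to_be64(ua);          /* store: fixed big-endian layout */
        printf("%d\n", be64_to_cpu(table[0]) == ua); /* prints 1 */
        return 0;
    }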
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 5b298f5a1a14..506a4d400458 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -26,8 +26,8 @@
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
+#include <linux/stringify.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
@@ -200,23 +200,19 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
if (!pua)
/* it_userspace allocation might be delayed */
return H_TOO_HARD;
- pua = (void *) vmalloc_to_phys(pua);
- if (WARN_ON_ONCE_RM(!pua))
- return H_HARDWARE;
-
- mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
+ mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
if (!mem)
return H_TOO_HARD;
mm_iommu_mapped_dec(mem);
- *pua = 0;
+ *pua = cpu_to_be64(0);
return H_SUCCESS;
}
@@ -268,7 +264,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
{
long ret;
unsigned long hpa = 0;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
struct mm_iommu_table_group_mem_t *mem;
if (!pua)
@@ -283,10 +279,6 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
&hpa)))
return H_HARDWARE;
- pua = (void *) vmalloc_to_phys(pua);
- if (WARN_ON_ONCE_RM(!pua))
- return H_HARDWARE;
-
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
return H_CLOSED;
@@ -303,7 +295,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (dir != DMA_NONE)
kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
- *pua = ua;
+ *pua = cpu_to_be64(ua);
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ee4a8854985e..96f1cc6c97b7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -53,7 +53,6 @@
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
@@ -1693,14 +1692,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
case KVM_REG_PPC_TB_OFFSET:
- /*
- * POWER9 DD1 has an erratum where writing TBU40 causes
- * the timebase to lose ticks. So we don't let the
- * timebase offset be changed on P9 DD1. (It is
- * initialized to zero.)
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- break;
/* round up to multiple of 2^24 */
vcpu->arch.vcore->tb_offset =
ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2026,8 +2017,6 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
/*
* Set the default HFSCR for the guest from the host value.
* This value is only used on POWER9.
- * On POWER9 DD1, TM doesn't work, so we make sure to
- * prevent the guest from using it.
* On POWER9, we want to virtualize the doorbell facility, so we
* turn off the HFSCR bit, which causes those instructions to trap.
*/
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 82f2ff9410b6..666b91c79eb4 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -27,6 +27,8 @@
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/ppc-opcode.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
/*****************************************************************************
* *
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 1f22d9e977d4..a67cf1cdeda4 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/log2.h>
-#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 153988d878e8..1d14046124a0 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,6 +32,8 @@
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg) \
@@ -917,9 +919,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
mtspr SPRN_PID, r7
mtspr SPRN_WORT, r8
BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-BEGIN_FTR_SECTION
/* POWER8-only registers */
ld r5, VCPU_TCSCR(r4)
ld r6, VCPU_ACOP(r4)
@@ -1912,7 +1911,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
cmpwi cr2, r0, 0
- beq cr2, 4f
+ beq cr2, 2f
/*
* Radix: do eieio; tlbsync; ptesync sequence in case we
@@ -1952,11 +1951,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
bdnz 1b
ptesync
-2: /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-4:
+2:
#endif /* CONFIG_PPC_RADIX_MMU */
/*
@@ -3367,11 +3362,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_CIABR, r0
mtspr SPRN_DAWRX, r0
- /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-
BEGIN_MMU_FTR_SECTION
b 4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index c18e845019ec..d71dab16dc6f 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -23,6 +23,7 @@
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
+#include <asm/asm-compat.h>
#if defined(CONFIG_PPC_BOOK3S_64)
#ifdef PPC64_ELF_ABI_v2
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c3b8006f0eac..47ee43bbd696 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -27,7 +27,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 34a5adeff084..b0089e04c8c8 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -23,6 +23,7 @@
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 98ccc7ec5d48..e5c542a7c5ac 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -19,6 +19,9 @@
/* Real mode helpers */
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
#if defined(CONFIG_PPC_BOOK3S_64)
#define GET_SHADOW_VCPU(reg) \
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 6e41ba7ec8f4..4171ede8722b 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -25,18 +25,6 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
*/
eieio();
- /*
- * DD1 bug workaround: If PIPR is less favored than CPPR
- * ignore the interrupt or we might incorrectly lose an IPB
- * bit.
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
- u8 pipr = be64_to_cpu(qw1) & 0xff;
- if (pipr >= xc->hw_cppr)
- return;
- }
-
/* Perform the acknowledge OS to register cycle. */
ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@@ -89,8 +77,15 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
/* If the XIVE supports the new "store EOI" facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
- else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
+ else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
opal_int_eoi(hw_irq);
+ else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
+ /*
+ * For LSIs the HW EOI cycle is used rather than PQ bits,
+ * as they are automatically re-triggered in HW when still
+ * pending.
+ */
+ __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
} else {
uint64_t eoi_val;
@@ -102,20 +97,12 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
*
* This allows us to then do a re-trigger if Q was set
* rather than synthesizing an interrupt in software
- *
- * For LSIs, using the HW EOI cycle works around a problem
- * on P9 DD1 PHBs where the other ESB accesses don't work
- * properly.
*/
- if (xd->flags & XIVE_IRQ_FLAG_LSI)
- __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
- else {
- eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
-
- /* Re-trigger if needed */
- if ((eoi_val & 1) && __x_trig_page(xd))
- __x_writeq(0, __x_trig_page(xd));
- }
+ eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & 1) && __x_trig_page(xd))
+ __x_writeq(0, __x_trig_page(xd));
}
}
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index f9f6468f4171..afd3c255a427 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -21,7 +21,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include "../mm/mmu_decl.h"
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index d0b6b5788afc..d31645491a93 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -21,7 +21,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0e8c20c5eaac..3ccc386b380d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -33,7 +33,6 @@
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
-#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index d0ca13ad8231..670286808928 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -12,7 +12,7 @@ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
obj-y += string.o alloc.o code-patching.o feature-fixups.o
-obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o
+obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o strlen_32.o
# See corresponding test in arch/powerpc/Makefile
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index e0d881ab304e..850f3b8f4da5 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -195,6 +195,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
return patch_instruction(addr, create_branch(addr, target, flags));
}
+int patch_branch_site(s32 *site, unsigned long target, int flags)
+{
+ unsigned int *addr;
+
+ addr = (unsigned int *)((unsigned long)site + *site);
+ return patch_instruction(addr, create_branch(addr, target, flags));
+}
+
+int patch_instruction_site(s32 *site, unsigned int instr)
+{
+ unsigned int *addr;
+
+ addr = (unsigned int *)((unsigned long)site + *site);
+ return patch_instruction(addr, instr);
+}
+
bool is_offset_in_branch_range(long offset)
{
/*
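
The new patch_*_site() helpers resolve a site by adding the signed 32-bit offset stored at the site to the site's own address. A userspace model of that arithmetic, with hypothetical names; the real sites are emitted by an asm macro, not C:

    #include <stdint.h>
    #include <stdio.h>

    /* A patch site stores the signed offset from itself to the
     * instruction to patch; resolving it is the addition done by
     * patch_branch_site()/patch_instruction_site(). */
    static uint32_t *site_to_addr(int32_t *site)
    {
        return (uint32_t *)((uintptr_t)site + *site);
    }

    static uint32_t insns[4];  /* stands in for .text */
    static int32_t site;       /* stands in for a __patch_site entry */

    int main(void)
    {
        /* What the patch_site asm macro records at build time. */
        site = (int32_t)((intptr_t)&insns[2] - (intptr_t)&site);

        *site_to_addr(&site) = 0x60000000; /* a PPC nop, for example */
        printf("%08x\n", insns[2]);        /* prints 60000000 */
        return 0;
    }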
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index da425bb6b369..ba66846fe973 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -13,6 +13,7 @@
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/code-patching-asm.h>
#define COPY_16_BYTES \
lwz r7,4(r4); \
@@ -107,8 +108,8 @@ _GLOBAL(memset)
* Skip the optimised block until the cache is enabled. Will be replaced
* by 'bne' during boot to use normal procedure if r4 is not zero
*/
-_GLOBAL(memset_nocache_branch)
- b 2f
+5: b 2f
+ patch_site 5b, patch__memset_nocache
clrlwi r7,r6,32-LG_CACHELINE_BYTES
add r8,r7,r5
@@ -168,7 +169,9 @@ _GLOBAL(memmove)
/* fall through */
_GLOBAL(memcpy)
- b generic_memcpy
+1: b generic_memcpy
+ patch_site 1b, patch__memcpy_nocache
+
add r7,r3,r5 /* test if the src & dst overlap */
add r8,r4,r5
cmplw 0,r4,r7
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 8d5034f645f3..694390357667 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -11,6 +11,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
.section ".toc","aw"
PPC64_CACHES:
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 8fa73b7ab20e..e38f956f7d9f 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -57,7 +57,7 @@ _GLOBAL(copypage_power7)
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_copy
+ bl enter_vmx_ops
cmpwi r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
@@ -100,7 +100,7 @@ _GLOBAL(copypage_power7)
addi r3,r3,128
bdnz 1b
- b exit_vmx_copy /* tail call optimise */
+ b exit_vmx_ops /* tail call optimise */
#else
li r0,(PAGE_SIZE/128)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 506677395681..96c514bee66b 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -9,6 +9,13 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+#ifndef SELFTEST_CASE
+/* 0 == most CPUs, 1 == POWER6, 2 == Cell */
+#define SELFTEST_CASE 0
+#endif
#ifdef __BIG_ENDIAN__
#define sLd sld /* Shift towards low-numbered address. */
@@ -18,6 +25,28 @@
#define sHd sld /* Shift towards high-numbered address. */
#endif
+/*
+ * These macros are used to generate exception table entries.
+ * The exception handlers below use the original arguments
+ * (stored on the stack) and the point where we're up to in
+ * the destination buffer, i.e. the address of the first
+ * unmodified byte. Generally r3 points into the destination
+ * buffer, but the first unmodified byte is at a variable
+ * offset from r3. In the code below, the symbol r3_offset
+ * is set to indicate the current offset at each point in
+ * the code. This offset is then used as a negative offset
+ * from the exception handler code, and those instructions
+ * before the exception handlers are addi instructions that
+ * adjust r3 to point to the correct place.
+ */
+ .macro lex /* exception handler for load */
+100: EX_TABLE(100b, .Lld_exc - r3_offset)
+ .endm
+
+ .macro stex /* exception handler for store */
+100: EX_TABLE(100b, .Lst_exc - r3_offset)
+ .endm
+
.align 7
_GLOBAL_TOC(__copy_tofrom_user)
#ifdef CONFIG_PPC_BOOK3S_64
@@ -28,7 +57,7 @@ FTR_SECTION_ELSE
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
#endif
_GLOBAL(__copy_tofrom_user_base)
- /* first check for a whole page copy on a page boundary */
+ /* first check for a 4kB copy on a 4kB boundary */
cmpldi cr1,r5,16
cmpdi cr6,r5,4096
or r0,r3,r4
@@ -49,6 +78,7 @@ _GLOBAL(__copy_tofrom_user_base)
* At the time of writing the only CPU that has this combination of bits
* set is Power6.
*/
+test_feature = (SELFTEST_CASE == 1)
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
@@ -57,6 +87,8 @@ ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
addi r3,r3,-16
+r3_offset = 16
+test_feature = (SELFTEST_CASE == 0)
BEGIN_FTR_SECTION
andi. r0,r4,7
bne .Lsrc_unaligned
@@ -64,57 +96,69 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blt cr1,.Ldo_tail /* if < 16 bytes to copy */
srdi r0,r5,5
cmpdi cr1,r0,0
-20: ld r7,0(r4)
-220: ld r6,8(r4)
+lex; ld r7,0(r4)
+lex; ld r6,8(r4)
addi r4,r4,16
mtctr r0
andi. r0,r5,0x10
beq 22f
addi r3,r3,16
+r3_offset = 0
addi r4,r4,-16
mr r9,r7
mr r8,r6
beq cr1,72f
-21: ld r7,16(r4)
-221: ld r6,24(r4)
+21:
+lex; ld r7,16(r4)
+lex; ld r6,24(r4)
addi r4,r4,32
-70: std r9,0(r3)
-270: std r8,8(r3)
-22: ld r9,0(r4)
-222: ld r8,8(r4)
-71: std r7,16(r3)
-271: std r6,24(r3)
+stex; std r9,0(r3)
+r3_offset = 8
+stex; std r8,8(r3)
+r3_offset = 16
+22:
+lex; ld r9,0(r4)
+lex; ld r8,8(r4)
+stex; std r7,16(r3)
+r3_offset = 24
+stex; std r6,24(r3)
addi r3,r3,32
+r3_offset = 0
bdnz 21b
-72: std r9,0(r3)
-272: std r8,8(r3)
+72:
+stex; std r9,0(r3)
+r3_offset = 8
+stex; std r8,8(r3)
+r3_offset = 16
andi. r5,r5,0xf
beq+ 3f
addi r4,r4,16
.Ldo_tail:
addi r3,r3,16
+r3_offset = 0
bf cr7*4+0,246f
-244: ld r9,0(r4)
+lex; ld r9,0(r4)
addi r4,r4,8
-245: std r9,0(r3)
+stex; std r9,0(r3)
addi r3,r3,8
246: bf cr7*4+1,1f
-23: lwz r9,0(r4)
+lex; lwz r9,0(r4)
addi r4,r4,4
-73: stw r9,0(r3)
+stex; stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
-44: lhz r9,0(r4)
+lex; lhz r9,0(r4)
addi r4,r4,2
-74: sth r9,0(r3)
+stex; sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
-45: lbz r9,0(r4)
-75: stb r9,0(r3)
+lex; lbz r9,0(r4)
+stex; stb r9,0(r3)
3: li r3,0
blr
.Lsrc_unaligned:
+r3_offset = 16
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
@@ -127,58 +171,69 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
add r5,r5,r0
bt cr7*4+0,28f
-24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
-25: ld r0,8(r4)
+lex; ld r9,0(r4) /* 3+2n loads, 2+2n stores */
+lex; ld r0,8(r4)
sLd r6,r9,r10
-26: ldu r9,16(r4)
+lex; ldu r9,16(r4)
sHd r7,r0,r11
sLd r8,r0,r10
or r7,r7,r6
blt cr6,79f
-27: ld r0,8(r4)
+lex; ld r0,8(r4)
b 2f
-28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
-29: ldu r9,8(r4)
+28:
+lex; ld r0,0(r4) /* 4+2n loads, 3+2n stores */
+lex; ldu r9,8(r4)
sLd r8,r0,r10
addi r3,r3,-8
+r3_offset = 24
blt cr6,5f
-30: ld r0,8(r4)
+lex; ld r0,8(r4)
sHd r12,r9,r11
sLd r6,r9,r10
-31: ldu r9,16(r4)
+lex; ldu r9,16(r4)
or r12,r8,r12
sHd r7,r0,r11
sLd r8,r0,r10
addi r3,r3,16
+r3_offset = 8
beq cr6,78f
1: or r7,r7,r6
-32: ld r0,8(r4)
-76: std r12,8(r3)
+lex; ld r0,8(r4)
+stex; std r12,8(r3)
+r3_offset = 16
2: sHd r12,r9,r11
sLd r6,r9,r10
-33: ldu r9,16(r4)
+lex; ldu r9,16(r4)
or r12,r8,r12
-77: stdu r7,16(r3)
+stex; stdu r7,16(r3)
+r3_offset = 8
sHd r7,r0,r11
sLd r8,r0,r10
bdnz 1b
-78: std r12,8(r3)
+78:
+stex; std r12,8(r3)
+r3_offset = 16
or r7,r7,r6
-79: std r7,16(r3)
+79:
+stex; std r7,16(r3)
+r3_offset = 24
5: sHd r12,r9,r11
or r12,r8,r12
-80: std r12,24(r3)
+stex; std r12,24(r3)
+r3_offset = 32
bne 6f
li r3,0
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
+r3_offset = 0
sLd r9,r9,r10
ble cr1,7f
-34: ld r0,8(r4)
+lex; ld r0,8(r4)
sHd r7,r0,r11
or r9,r7,r9
7:
@@ -186,7 +241,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
#ifdef __BIG_ENDIAN__
rotldi r9,r9,32
#endif
-94: stw r9,0(r3)
+stex; stw r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,32
#endif
@@ -195,7 +250,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
#ifdef __BIG_ENDIAN__
rotldi r9,r9,16
#endif
-95: sth r9,0(r3)
+stex; sth r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,16
#endif
@@ -204,7 +259,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
#ifdef __BIG_ENDIAN__
rotldi r9,r9,8
#endif
-96: stb r9,0(r3)
+stex; stb r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,8
#endif
@@ -212,47 +267,55 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
.Ldst_unaligned:
+r3_offset = 0
PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
bf cr7*4+3,1f
-35: lbz r0,0(r4)
-81: stb r0,0(r3)
+100: EX_TABLE(100b, .Lld_exc_r7)
+ lbz r0,0(r4)
+100: EX_TABLE(100b, .Lst_exc_r7)
+ stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
-36: lhzx r0,r7,r4
-82: sthx r0,r7,r3
+100: EX_TABLE(100b, .Lld_exc_r7)
+ lhzx r0,r7,r4
+100: EX_TABLE(100b, .Lst_exc_r7)
+ sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
-37: lwzx r0,r7,r4
-83: stwx r0,r7,r3
+100: EX_TABLE(100b, .Lld_exc_r7)
+ lwzx r0,r7,r4
+100: EX_TABLE(100b, .Lst_exc_r7)
+ stwx r0,r7,r3
3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
+r3_offset = 0
bf cr7*4+0,1f
-38: lwz r0,0(r4)
-39: lwz r9,4(r4)
+lex; lwz r0,0(r4)
+lex; lwz r9,4(r4)
addi r4,r4,8
-84: stw r0,0(r3)
-85: stw r9,4(r3)
+stex; stw r0,0(r3)
+stex; stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
-40: lwz r0,0(r4)
+lex; lwz r0,0(r4)
addi r4,r4,4
-86: stw r0,0(r3)
+stex; stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
-41: lhz r0,0(r4)
+lex; lhz r0,0(r4)
addi r4,r4,2
-87: sth r0,0(r3)
+stex; sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
-42: lbz r0,0(r4)
-88: stb r0,0(r3)
+lex; lbz r0,0(r4)
+stex; stb r0,0(r3)
4: li r3,0
blr
@@ -260,48 +323,34 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
* exception handlers follow
* we have to return the number of bytes not copied
* for an exception on a load, we set the rest of the destination to 0
+ * Note that the number of bytes of instructions for adjusting r3 needs
+ * to equal the amount of the adjustment, due to the trick of using
+ * .Lld_exc - r3_offset as the handler address.
*/
-136:
-137:
+.Lld_exc_r7:
add r3,r3,r7
- b 1f
-130:
-131:
+ b .Lld_exc
+
+ /* adjust by 24 */
addi r3,r3,8
-120:
-320:
-122:
-322:
-124:
-125:
-126:
-127:
-128:
-129:
-133:
+ nop
+ /* adjust by 16 */
addi r3,r3,8
-132:
+ nop
+ /* adjust by 8 */
addi r3,r3,8
-121:
-321:
-344:
-134:
-135:
-138:
-139:
-140:
-141:
-142:
-123:
-144:
-145:
+ nop
/*
- * here we have had a fault on a load and r3 points to the first
- * unmodified byte of the destination
+ * Here we have had a fault on a load and r3 points to the first
+ * unmodified byte of the destination. We use the original arguments
+ * and r3 to work out how much wasn't copied. Since we load some
+ * distance ahead of the stores, we continue copying byte-by-byte until
+ * we hit the load fault again in order to copy as much as possible.
*/
-1: ld r6,-24(r1)
+.Lld_exc:
+ ld r6,-24(r1)
ld r4,-16(r1)
ld r5,-8(r1)
subf r6,r6,r3
@@ -312,9 +361,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
* first see if we can copy any more bytes before hitting another exception
*/
mtctr r5
+r3_offset = 0
+100: EX_TABLE(100b, .Ldone)
43: lbz r0,0(r4)
addi r4,r4,1
-89: stb r0,0(r3)
+stex; stb r0,0(r3)
addi r3,r3,1
bdnz 43b
li r3,0 /* huh? all copied successfully this time? */
@@ -323,116 +374,63 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
/*
* here we have trapped again, amount remaining is in ctr.
*/
-143: mfctr r3
+.Ldone:
+ mfctr r3
blr
/*
- * exception handlers for stores: we just need to work
- * out how many bytes weren't copied
+ * exception handlers for stores: we need to work out how many bytes
+ * weren't copied, and we may need to copy some more.
+ * Note that the number of bytes of instructions for adjusting r3 needs
+ * to equal the amount of the adjustment, due to the trick of using
+ * .Lst_exc - r3_offset as the handler address.
*/
-182:
-183:
+.Lst_exc_r7:
add r3,r3,r7
- b 1f
-371:
-180:
+ b .Lst_exc
+
+ /* adjust by 24 */
addi r3,r3,8
-171:
-177:
-179:
+ nop
+ /* adjust by 16 */
addi r3,r3,8
-370:
-372:
-176:
-178:
+ nop
+ /* adjust by 8 */
addi r3,r3,4
-185:
+ /* adjust by 4 */
addi r3,r3,4
-170:
-172:
-345:
-173:
-174:
-175:
-181:
-184:
-186:
-187:
-188:
-189:
-194:
-195:
-196:
-1:
- ld r6,-24(r1)
- ld r5,-8(r1)
- add r6,r6,r5
- subf r3,r3,r6 /* #bytes not copied */
+.Lst_exc:
+ ld r6,-24(r1) /* original destination pointer */
+ ld r4,-16(r1) /* original source pointer */
+ ld r5,-8(r1) /* original number of bytes */
+ add r7,r6,r5
+ /*
+ * If the destination pointer isn't 8-byte aligned,
+ * we may have got the exception as a result of a
+ * store that overlapped a page boundary, so we may be
+ * able to copy a few more bytes.
+ */
+17: andi. r0,r3,7
+ beq 19f
+ subf r8,r6,r3 /* #bytes copied */
+100: EX_TABLE(100b,19f)
+ lbzx r0,r8,r4
+100: EX_TABLE(100b,19f)
+ stb r0,0(r3)
+ addi r3,r3,1
+ cmpld r3,r7
+ blt 17b
+19: subf r3,r3,r7 /* #bytes not copied in r3 */
blr
- EX_TABLE(20b,120b)
- EX_TABLE(220b,320b)
- EX_TABLE(21b,121b)
- EX_TABLE(221b,321b)
- EX_TABLE(70b,170b)
- EX_TABLE(270b,370b)
- EX_TABLE(22b,122b)
- EX_TABLE(222b,322b)
- EX_TABLE(71b,171b)
- EX_TABLE(271b,371b)
- EX_TABLE(72b,172b)
- EX_TABLE(272b,372b)
- EX_TABLE(244b,344b)
- EX_TABLE(245b,345b)
- EX_TABLE(23b,123b)
- EX_TABLE(73b,173b)
- EX_TABLE(44b,144b)
- EX_TABLE(74b,174b)
- EX_TABLE(45b,145b)
- EX_TABLE(75b,175b)
- EX_TABLE(24b,124b)
- EX_TABLE(25b,125b)
- EX_TABLE(26b,126b)
- EX_TABLE(27b,127b)
- EX_TABLE(28b,128b)
- EX_TABLE(29b,129b)
- EX_TABLE(30b,130b)
- EX_TABLE(31b,131b)
- EX_TABLE(32b,132b)
- EX_TABLE(76b,176b)
- EX_TABLE(33b,133b)
- EX_TABLE(77b,177b)
- EX_TABLE(78b,178b)
- EX_TABLE(79b,179b)
- EX_TABLE(80b,180b)
- EX_TABLE(34b,134b)
- EX_TABLE(94b,194b)
- EX_TABLE(95b,195b)
- EX_TABLE(96b,196b)
- EX_TABLE(35b,135b)
- EX_TABLE(81b,181b)
- EX_TABLE(36b,136b)
- EX_TABLE(82b,182b)
- EX_TABLE(37b,137b)
- EX_TABLE(83b,183b)
- EX_TABLE(38b,138b)
- EX_TABLE(39b,139b)
- EX_TABLE(84b,184b)
- EX_TABLE(85b,185b)
- EX_TABLE(40b,140b)
- EX_TABLE(86b,186b)
- EX_TABLE(41b,141b)
- EX_TABLE(87b,187b)
- EX_TABLE(42b,142b)
- EX_TABLE(88b,188b)
- EX_TABLE(43b,143b)
- EX_TABLE(89b,189b)
-
/*
* Routine to copy a whole page of data, optimized for POWER4.
* On POWER4 it is more than 50% faster than the simple loop
* above (following the .Ldst_aligned label).
*/
+ .macro exc
+100: EX_TABLE(100b, .Labort)
+ .endm
.Lcopy_page_4K:
std r31,-32(1)
std r30,-40(1)
@@ -451,86 +449,86 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
li r0,5
0: addi r5,r5,-24
mtctr r0
-20: ld r22,640(4)
-21: ld r21,512(4)
-22: ld r20,384(4)
-23: ld r11,256(4)
-24: ld r9,128(4)
-25: ld r7,0(4)
-26: ld r25,648(4)
-27: ld r24,520(4)
-28: ld r23,392(4)
-29: ld r10,264(4)
-30: ld r8,136(4)
-31: ldu r6,8(4)
+exc; ld r22,640(4)
+exc; ld r21,512(4)
+exc; ld r20,384(4)
+exc; ld r11,256(4)
+exc; ld r9,128(4)
+exc; ld r7,0(4)
+exc; ld r25,648(4)
+exc; ld r24,520(4)
+exc; ld r23,392(4)
+exc; ld r10,264(4)
+exc; ld r8,136(4)
+exc; ldu r6,8(4)
cmpwi r5,24
1:
-32: std r22,648(3)
-33: std r21,520(3)
-34: std r20,392(3)
-35: std r11,264(3)
-36: std r9,136(3)
-37: std r7,8(3)
-38: ld r28,648(4)
-39: ld r27,520(4)
-40: ld r26,392(4)
-41: ld r31,264(4)
-42: ld r30,136(4)
-43: ld r29,8(4)
-44: std r25,656(3)
-45: std r24,528(3)
-46: std r23,400(3)
-47: std r10,272(3)
-48: std r8,144(3)
-49: std r6,16(3)
-50: ld r22,656(4)
-51: ld r21,528(4)
-52: ld r20,400(4)
-53: ld r11,272(4)
-54: ld r9,144(4)
-55: ld r7,16(4)
-56: std r28,664(3)
-57: std r27,536(3)
-58: std r26,408(3)
-59: std r31,280(3)
-60: std r30,152(3)
-61: stdu r29,24(3)
-62: ld r25,664(4)
-63: ld r24,536(4)
-64: ld r23,408(4)
-65: ld r10,280(4)
-66: ld r8,152(4)
-67: ldu r6,24(4)
+exc; std r22,648(3)
+exc; std r21,520(3)
+exc; std r20,392(3)
+exc; std r11,264(3)
+exc; std r9,136(3)
+exc; std r7,8(3)
+exc; ld r28,648(4)
+exc; ld r27,520(4)
+exc; ld r26,392(4)
+exc; ld r31,264(4)
+exc; ld r30,136(4)
+exc; ld r29,8(4)
+exc; std r25,656(3)
+exc; std r24,528(3)
+exc; std r23,400(3)
+exc; std r10,272(3)
+exc; std r8,144(3)
+exc; std r6,16(3)
+exc; ld r22,656(4)
+exc; ld r21,528(4)
+exc; ld r20,400(4)
+exc; ld r11,272(4)
+exc; ld r9,144(4)
+exc; ld r7,16(4)
+exc; std r28,664(3)
+exc; std r27,536(3)
+exc; std r26,408(3)
+exc; std r31,280(3)
+exc; std r30,152(3)
+exc; stdu r29,24(3)
+exc; ld r25,664(4)
+exc; ld r24,536(4)
+exc; ld r23,408(4)
+exc; ld r10,280(4)
+exc; ld r8,152(4)
+exc; ldu r6,24(4)
bdnz 1b
-68: std r22,648(3)
-69: std r21,520(3)
-70: std r20,392(3)
-71: std r11,264(3)
-72: std r9,136(3)
-73: std r7,8(3)
-74: addi r4,r4,640
-75: addi r3,r3,648
+exc; std r22,648(3)
+exc; std r21,520(3)
+exc; std r20,392(3)
+exc; std r11,264(3)
+exc; std r9,136(3)
+exc; std r7,8(3)
+ addi r4,r4,640
+ addi r3,r3,648
bge 0b
mtctr r5
-76: ld r7,0(4)
-77: ld r8,8(4)
-78: ldu r9,16(4)
+exc; ld r7,0(4)
+exc; ld r8,8(4)
+exc; ldu r9,16(4)
3:
-79: ld r10,8(4)
-80: std r7,8(3)
-81: ld r7,16(4)
-82: std r8,16(3)
-83: ld r8,24(4)
-84: std r9,24(3)
-85: ldu r9,32(4)
-86: stdu r10,32(3)
+exc; ld r10,8(4)
+exc; std r7,8(3)
+exc; ld r7,16(4)
+exc; std r8,16(3)
+exc; ld r8,24(4)
+exc; std r9,24(3)
+exc; ldu r9,32(4)
+exc; stdu r10,32(3)
bdnz 3b
4:
-87: ld r10,8(4)
-88: std r7,8(3)
-89: std r8,16(3)
-90: std r9,24(3)
-91: std r10,32(3)
+exc; ld r10,8(4)
+exc; std r7,8(3)
+exc; std r8,16(3)
+exc; std r9,24(3)
+exc; std r10,32(3)
9: ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
@@ -550,7 +548,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
* on an exception, reset to the beginning and jump back into the
* standard __copy_tofrom_user
*/
-100: ld r20,-120(1)
+.Labort:
+ ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
@@ -566,78 +565,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
ld r4,-16(r1)
li r5,4096
b .Ldst_aligned
-
- EX_TABLE(20b,100b)
- EX_TABLE(21b,100b)
- EX_TABLE(22b,100b)
- EX_TABLE(23b,100b)
- EX_TABLE(24b,100b)
- EX_TABLE(25b,100b)
- EX_TABLE(26b,100b)
- EX_TABLE(27b,100b)
- EX_TABLE(28b,100b)
- EX_TABLE(29b,100b)
- EX_TABLE(30b,100b)
- EX_TABLE(31b,100b)
- EX_TABLE(32b,100b)
- EX_TABLE(33b,100b)
- EX_TABLE(34b,100b)
- EX_TABLE(35b,100b)
- EX_TABLE(36b,100b)
- EX_TABLE(37b,100b)
- EX_TABLE(38b,100b)
- EX_TABLE(39b,100b)
- EX_TABLE(40b,100b)
- EX_TABLE(41b,100b)
- EX_TABLE(42b,100b)
- EX_TABLE(43b,100b)
- EX_TABLE(44b,100b)
- EX_TABLE(45b,100b)
- EX_TABLE(46b,100b)
- EX_TABLE(47b,100b)
- EX_TABLE(48b,100b)
- EX_TABLE(49b,100b)
- EX_TABLE(50b,100b)
- EX_TABLE(51b,100b)
- EX_TABLE(52b,100b)
- EX_TABLE(53b,100b)
- EX_TABLE(54b,100b)
- EX_TABLE(55b,100b)
- EX_TABLE(56b,100b)
- EX_TABLE(57b,100b)
- EX_TABLE(58b,100b)
- EX_TABLE(59b,100b)
- EX_TABLE(60b,100b)
- EX_TABLE(61b,100b)
- EX_TABLE(62b,100b)
- EX_TABLE(63b,100b)
- EX_TABLE(64b,100b)
- EX_TABLE(65b,100b)
- EX_TABLE(66b,100b)
- EX_TABLE(67b,100b)
- EX_TABLE(68b,100b)
- EX_TABLE(69b,100b)
- EX_TABLE(70b,100b)
- EX_TABLE(71b,100b)
- EX_TABLE(72b,100b)
- EX_TABLE(73b,100b)
- EX_TABLE(74b,100b)
- EX_TABLE(75b,100b)
- EX_TABLE(76b,100b)
- EX_TABLE(77b,100b)
- EX_TABLE(78b,100b)
- EX_TABLE(79b,100b)
- EX_TABLE(80b,100b)
- EX_TABLE(81b,100b)
- EX_TABLE(82b,100b)
- EX_TABLE(83b,100b)
- EX_TABLE(84b,100b)
- EX_TABLE(85b,100b)
- EX_TABLE(86b,100b)
- EX_TABLE(87b,100b)
- EX_TABLE(88b,100b)
- EX_TABLE(89b,100b)
- EX_TABLE(90b,100b)
- EX_TABLE(91b,100b)
-
EXPORT_SYMBOL(__copy_tofrom_user)
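
The rewritten exception handlers return the byte count not copied, derived from the saved original arguments and the fault-time destination pointer. A sketch of that computation in C, assuming (as the comments above state) that r3 points at the first unmodified destination byte:

    #include <stddef.h>
    #include <stdio.h>

    /*
     * Model of the .Lst_exc return-value computation: the original
     * destination (r6), length (r5) and fault-time destination pointer
     * (r3) give the number of bytes NOT copied as (r6 + r5) - r3.
     */
    static size_t bytes_not_copied(char *orig_dst, size_t len, char *fault_dst)
    {
        return (size_t)((orig_dst + len) - fault_dst);
    }

    int main(void)
    {
        char buf[64];

        /* Faulted after copying 10 of 64 bytes. */
        printf("%zu\n", bytes_not_copied(buf, sizeof(buf), buf + 10)); /* 54 */
        return 0;
    }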
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 215e4760c09f..1a1fe180af62 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -19,6 +19,11 @@
*/
#include <asm/ppc_asm.h>
+#ifndef SELFTEST_CASE
+/* 0 == don't use VMX, 1 == use VMX */
+#define SELFTEST_CASE 0
+#endif
+
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
@@ -80,7 +85,6 @@
_GLOBAL(__copy_tofrom_user_power7)
-#ifdef CONFIG_ALTIVEC
cmpldi r5,16
cmpldi cr1,r5,3328
@@ -89,15 +93,12 @@ _GLOBAL(__copy_tofrom_user_power7)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
blt .Lshort_copy
- bge cr1,.Lvmx_copy
-#else
- cmpldi r5,16
- std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
- std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
-
- blt .Lshort_copy
+#ifdef CONFIG_ALTIVEC
+test_feature = SELFTEST_CASE
+BEGIN_FTR_SECTION
+ bgt cr1,.Lvmx_copy
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
.Lnonvmx_copy:
@@ -278,8 +279,8 @@ err1; stb r0,0(r3)
addi r1,r1,STACKFRAMESIZE
b .Lnonvmx_copy
-#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
+#ifdef CONFIG_ALTIVEC
mflr r0
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index f16cec989506..ee7c5fd5fc64 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -11,6 +11,7 @@
#include <asm/feature-fixups.h>
#include <asm/ppc_asm.h>
#include <asm/synch.h>
+#include <asm/asm-compat.h>
.text
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 8b69f868298c..e613b02bb2f0 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -304,6 +304,9 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups(bool enable)
{
void *start, *end;
@@ -313,8 +316,38 @@ void do_barrier_nospec_fixups(bool enable)
do_barrier_nospec_fixups_range(enable, start, end);
}
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
+{
+ unsigned int instr[2], *dest;
+ long *start, *end;
+ int i;
+
+ start = fixup_start;
+ end = fixup_end;
+
+ instr[0] = PPC_INST_NOP;
+ instr[1] = PPC_INST_NOP;
+
+ if (enable) {
+ pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
+ instr[0] = PPC_INST_ISYNC;
+ instr[1] = PPC_INST_SYNC;
+ }
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+ patch_instruction(dest, instr[0]);
+ patch_instruction(dest + 1, instr[1]);
+ }
+
+ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+}
+#endif /* CONFIG_PPC_FSL_BOOK3E */
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
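
The FSL variant added above walks the same kind of self-relative fixup table as the Book3S code, writing a two-instruction isync; sync pair at each site when the barrier is enabled. A runnable userspace model of the table walk (the table here is hand-built; in the kernel it is delimited by the __start__/__stop__ section symbols from the linker-script hunk earlier):

    #include <stdint.h>
    #include <stdio.h>

    #define PPC_INST_NOP   0x60000000u
    #define PPC_INST_ISYNC 0x4c00012cu
    #define PPC_INST_SYNC  0x7c0004acu

    /* Each table entry is the offset from the entry itself to the
     * first of two patchable instruction slots. */
    static void patch_pair(intptr_t *start, intptr_t *stop, int enable)
    {
        intptr_t *entry;

        for (entry = start; entry < stop; entry++) {
            uint32_t *dest = (uint32_t *)((intptr_t)entry + *entry);

            dest[0] = enable ? PPC_INST_ISYNC : PPC_INST_NOP;
            dest[1] = enable ? PPC_INST_SYNC : PPC_INST_NOP;
        }
    }

    static uint32_t text[4];  /* stands in for patchable code */
    static intptr_t table[1]; /* stands in for the fixup section */

    int main(void)
    {
        table[0] = (intptr_t)&text[1] - (intptr_t)&table[0];

        patch_pair(table, table + 1, 1);
        printf("%08x %08x\n", text[1], text[2]); /* 4c00012c 7c0004ac */
        return 0;
    }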
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
index 3de7ac154f24..0526b2225260 100644
--- a/arch/powerpc/lib/hweight_64.S
+++ b/arch/powerpc/lib/hweight_64.S
@@ -20,6 +20,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
/* Note: This code relies on -mminimal-toc */
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index ae15eba49c1f..32e91994b6b2 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -15,6 +15,7 @@
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#include <linux/errno.h>
#ifdef CONFIG_PPC_FPU
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index b7b1237d4aa6..35a0ef932e1a 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
-#include <linux/stringify.h>
#include <linux/smp.h>
/* waiting for a spinlock... */
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index d75d18b7bd55..844d8e774492 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -9,6 +9,7 @@
*/
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/ppc-opcode.h>
#define off8 r6
#define off16 r7
@@ -24,28 +25,102 @@
#define rH r31
#ifdef __LITTLE_ENDIAN__
+#define LH lhbrx
+#define LW lwbrx
#define LD ldbrx
+#define LVS lvsr
+#define VPERM(_VRT,_VRA,_VRB,_VRC) \
+ vperm _VRT,_VRB,_VRA,_VRC
#else
+#define LH lhzx
+#define LW lwzx
#define LD ldx
+#define LVS lvsl
+#define VPERM(_VRT,_VRA,_VRB,_VRC) \
+ vperm _VRT,_VRA,_VRB,_VRC
#endif
-_GLOBAL(memcmp)
+#define VMX_THRESH 4096
+#define ENTER_VMX_OPS \
+ mflr r0; \
+ std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
+ std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
+ std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
+ std r0,16(r1); \
+ stdu r1,-STACKFRAMESIZE(r1); \
+ bl enter_vmx_ops; \
+ cmpwi cr1,r3,0; \
+ ld r0,STACKFRAMESIZE+16(r1); \
+ ld r3,STK_REG(R31)(r1); \
+ ld r4,STK_REG(R30)(r1); \
+ ld r5,STK_REG(R29)(r1); \
+ addi r1,r1,STACKFRAMESIZE; \
+ mtlr r0
+
+#define EXIT_VMX_OPS \
+ mflr r0; \
+ std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
+ std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
+ std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
+ std r0,16(r1); \
+ stdu r1,-STACKFRAMESIZE(r1); \
+ bl exit_vmx_ops; \
+ ld r0,STACKFRAMESIZE+16(r1); \
+ ld r3,STK_REG(R31)(r1); \
+ ld r4,STK_REG(R30)(r1); \
+ ld r5,STK_REG(R29)(r1); \
+ addi r1,r1,STACKFRAMESIZE; \
+ mtlr r0
+
+/*
+ * LD_VSR_CROSS16B loads the 2nd 16 bytes for _vaddr, which is not aligned
+ * to a 16-byte boundary, and permutes the result with the 1st 16 bytes.
+ *
+ * | y y y y y y y y y y y y y 0 1 2 | 3 4 5 6 7 8 9 a b c d e f z z z |
+ * ^ ^ ^
+ * 0xbbbb10 0xbbbb20 0xbbbb30
+ * ^
+ * _vaddr
+ *
+ *
+ * _vmask is the mask generated by LVS
+ * _v1st_qw is the 1st aligned QW of current addr which is already loaded.
+ * for example: 0xyyyyyyyyyyyyy012 for big endian
+ * _v2nd_qw is the 2nd aligned QW of cur _vaddr to be loaded.
+ * for example: 0x3456789abcdefzzz for big endian
+ * The permute result is saved in _v_res.
+ * for example: 0x0123456789abcdef for big endian.
+ */
+#define LD_VSR_CROSS16B(_vaddr,_vmask,_v1st_qw,_v2nd_qw,_v_res) \
+ lvx _v2nd_qw,_vaddr,off16; \
+ VPERM(_v_res,_v1st_qw,_v2nd_qw,_vmask)
+
+/*
+ * There are 2 categories for memcmp:
+ * 1) src/dst has the same offset to the 8 bytes boundary. The handlers
+ * are named like .Lsameoffset_xxxx
+ * 2) src/dst has different offset to the 8 bytes boundary. The handlers
+ * are named like .Ldiffoffset_xxxx
+ */
+_GLOBAL_TOC(memcmp)
cmpdi cr1,r5,0
- /* Use the short loop if both strings are not 8B aligned */
- or r6,r3,r4
+ /* Use the short loop if the src/dst addresses do not have
+ * the same offset relative to an 8-byte boundary.
+ */
+ xor r6,r3,r4
andi. r6,r6,7
- /* Use the short loop if length is less than 32B */
- cmpdi cr6,r5,31
+ /* Fall back to the short loop when comparing fewer than
+ * 8 bytes at aligned addresses.
+ */
+ cmpdi cr6,r5,7
beq cr1,.Lzero
- bne .Lshort
- bgt cr6,.Llong
+ bgt cr6,.Lno_short
.Lshort:
mtctr r5
-
1: lbz rA,0(r3)
lbz rB,0(r4)
subf. rC,rB,rA
@@ -78,11 +153,98 @@ _GLOBAL(memcmp)
li r3,0
blr
+.Lno_short:
+ dcbt 0,r3
+ dcbt 0,r4
+ bne .Ldiffoffset_8bytes_make_align_start
+
+
+.Lsameoffset_8bytes_make_align_start:
+ /* Compare the leading bytes that are not 8-byte aligned so that
+ * the rest of the comparison can run on an 8-byte alignment.
+ */
+ andi. r6,r3,7
+
+ /* Try to compare the first double word, which is not 8-byte aligned:
+ * load the first double word at (src & ~7UL) and shift left the
+ * appropriate bits before the comparison.
+ */
+ rlwinm r6,r3,3,26,28
+ beq .Lsameoffset_8bytes_aligned
+ clrrdi r3,r3,3
+ clrrdi r4,r4,3
+ LD rA,0,r3
+ LD rB,0,r4
+ sld rA,rA,r6
+ sld rB,rB,r6
+ cmpld cr0,rA,rB
+ srwi r6,r6,3
+ bne cr0,.LcmpAB_lightweight
+ subfic r6,r6,8
+ subf. r5,r6,r5
+ addi r3,r3,8
+ addi r4,r4,8
+ beq .Lzero
+
+.Lsameoffset_8bytes_aligned:
+ /* Now we are 8-byte aligned.
+ * Use the .Llong loop if 32 bytes or more remain to compare.
+ */
+ cmpdi cr6,r5,31
+ bgt cr6,.Llong
+
+.Lcmp_lt32bytes:
+ /* compare 1 ~ 31 bytes; at least the r3 address is now 8-byte aligned */
+ cmpdi cr5,r5,7
+ srdi r0,r5,3
+ ble cr5,.Lcmp_rest_lt8bytes
+
+ /* handle 8 ~ 31 bytes */
+ clrldi r5,r5,61
+ mtctr r0
+2:
+ LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ addi r3,r3,8
+ addi r4,r4,8
+ bne cr0,.LcmpAB_lightweight
+ bdnz 2b
+
+ cmpwi r5,0
+ beq .Lzero
+
+.Lcmp_rest_lt8bytes:
+ /* Here fewer than 8 bytes remain to compare; at least the s1
+ * address is 8-byte aligned.
+ * The next double words are loaded and shifted right by the
+ * appropriate number of bits.
+ */
+ subfic r6,r5,8
+ slwi r6,r6,3
+ LD rA,0,r3
+ LD rB,0,r4
+ srd rA,rA,r6
+ srd rB,rB,r6
+ cmpld cr0,rA,rB
+ bne cr0,.LcmpAB_lightweight
+ b .Lzero
+
.Lnon_zero:
mr r3,rC
blr
.Llong:
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ /* Try to use the vmx loop if the length is 4K or greater */
+ cmpldi cr6,r5,VMX_THRESH
+ bge cr6,.Lsameoffset_vmx_cmp
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
+.Llong_novmx_cmp:
+#endif
+ /* At least s1 addr is aligned with 8 bytes */
li off8,8
li off16,16
li off24,24
@@ -232,4 +394,240 @@ _GLOBAL(memcmp)
ld r28,-32(r1)
ld r27,-40(r1)
blr
+
+.LcmpAB_lightweight: /* skip NV GPRS restore */
+ li r3,1
+ bgtlr
+ li r3,-1
+ blr
+
+#ifdef CONFIG_ALTIVEC
+.Lsameoffset_vmx_cmp:
+ /* Enter with src/dst addrs has the same offset with 8 bytes
+ * align boundary.
+ *
+ * There is an optimization based on following fact: memcmp()
+ * prones to fail early at the first 32 bytes.
+ * Before applying VMX instructions which will lead to 32x128bits
+ * VMX regs load/restore penalty, we compare the first 32 bytes
+ * so that we can catch the ~80% fail cases.
+ */
+
+ li r0,4
+ mtctr r0
+.Lsameoffset_prechk_32B_loop:
+ LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ addi r3,r3,8
+ addi r4,r4,8
+ bne cr0,.LcmpAB_lightweight
+ addi r5,r5,-8
+ bdnz .Lsameoffset_prechk_32B_loop
+
+ ENTER_VMX_OPS
+ beq cr1,.Llong_novmx_cmp
+
+3:
+ /* Need to check whether r4 has the same offset as r3
+ * relative to a 16-byte boundary.
+ */
+ xor r0,r3,r4
+ andi. r0,r0,0xf
+ bne .Ldiffoffset_vmx_cmp_start
+
+ /* len is at least 4KB. Align further to a 16-byte boundary.
+ */
+ andi. rA,r3,8
+ LD rA,0,r3
+ beq 4f
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ addi r3,r3,8
+ addi r4,r4,8
+ addi r5,r5,-8
+
+ beq cr0,4f
+ /* save and restore cr0 */
+ mfocrf r5,128
+ EXIT_VMX_OPS
+ mtocrf 128,r5
+ b .LcmpAB_lightweight
+
+4:
+ /* compare 32 bytes for each loop */
+ srdi r0,r5,5
+ mtctr r0
+ clrldi r5,r5,59
+ li off16,16
+
+.balign 16
+5:
+ lvx v0,0,r3
+ lvx v1,0,r4
+ VCMPEQUD_RC(v0,v0,v1)
+ bnl cr6,7f
+ lvx v0,off16,r3
+ lvx v1,off16,r4
+ VCMPEQUD_RC(v0,v0,v1)
+ bnl cr6,6f
+ addi r3,r3,32
+ addi r4,r4,32
+ bdnz 5b
+
+ EXIT_VMX_OPS
+ cmpdi r5,0
+ beq .Lzero
+ b .Lcmp_lt32bytes
+
+6:
+ addi r3,r3,16
+ addi r4,r4,16
+
+7:
+ /* diff the last 16 bytes */
+ EXIT_VMX_OPS
+ LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ li off8,8
+ bne cr0,.LcmpAB_lightweight
+
+ LD rA,off8,r3
+ LD rB,off8,r4
+ cmpld cr0,rA,rB
+ bne cr0,.LcmpAB_lightweight
+ b .Lzero
+#endif
+
+.Ldiffoffset_8bytes_make_align_start:
+ /* now try to align s1 with 8 bytes */
+ rlwinm r6,r3,3,26,28
+ beq .Ldiffoffset_align_s1_8bytes
+
+ clrrdi r3,r3,3
+ LD rA,0,r3
+ LD rB,0,r4 /* unaligned load */
+ sld rA,rA,r6
+ srd rA,rA,r6
+ srd rB,rB,r6
+ cmpld cr0,rA,rB
+ srwi r6,r6,3
+ bne cr0,.LcmpAB_lightweight
+
+ subfic r6,r6,8
+ subf. r5,r6,r5
+ addi r3,r3,8
+ add r4,r4,r6
+
+ beq .Lzero
+
+.Ldiffoffset_align_s1_8bytes:
+ /* now s1 is aligned with 8 bytes. */
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ /* only do vmx ops when the size is 4K bytes or greater */
+ cmpdi cr5,r5,VMX_THRESH
+ bge cr5,.Ldiffoffset_vmx_cmp
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
+.Ldiffoffset_novmx_cmp:
+#endif
+
+
+ cmpdi cr5,r5,31
+ ble cr5,.Lcmp_lt32bytes
+
+#ifdef CONFIG_ALTIVEC
+ b .Llong_novmx_cmp
+#else
+ b .Llong
+#endif
+
+#ifdef CONFIG_ALTIVEC
+.Ldiffoffset_vmx_cmp:
+ /* Perform a 32-byte pre-check before
+ * enabling VMX operations.
+ */
+ li r0,4
+ mtctr r0
+.Ldiffoffset_prechk_32B_loop:
+ LD rA,0,r3
+ LD rB,0,r4
+ cmpld cr0,rA,rB
+ addi r3,r3,8
+ addi r4,r4,8
+ bne cr0,.LcmpAB_lightweight
+ addi r5,r5,-8
+ bdnz .Ldiffoffset_prechk_32B_loop
+
+ ENTER_VMX_OPS
+ beq cr1,.Ldiffoffset_novmx_cmp
+
+.Ldiffoffset_vmx_cmp_start:
+ /* First, try to align r3 to 16 bytes */
+ andi. r6,r3,0xf
+ li off16,16
+ beq .Ldiffoffset_vmx_s1_16bytes_align
+
+ LVS v3,0,r3
+ LVS v4,0,r4
+
+ lvx v5,0,r3
+ lvx v6,0,r4
+ LD_VSR_CROSS16B(r3,v3,v5,v7,v9)
+ LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
+
+ VCMPEQUB_RC(v7,v9,v10)
+ bnl cr6,.Ldiffoffset_vmx_diff_found
+
+ subfic r6,r6,16
+ subf r5,r6,r5
+ add r3,r3,r6
+ add r4,r4,r6
+
+.Ldiffoffset_vmx_s1_16bytes_align:
+ /* now s1 is aligned with 16 bytes */
+ lvx v6,0,r4
+ LVS v4,0,r4
+ srdi r6,r5,5 /* loop for 32 bytes each */
+ clrldi r5,r5,59
+ mtctr r6
+
+.balign 16
+.Ldiffoffset_vmx_32bytesloop:
+ /* the first qw of r4 was saved in v6 */
+ lvx v9,0,r3
+ LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
+ VCMPEQUB_RC(v7,v9,v10)
+ vor v6,v8,v8
+ bnl cr6,.Ldiffoffset_vmx_diff_found
+
+ addi r3,r3,16
+ addi r4,r4,16
+
+ lvx v9,0,r3
+ LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
+ VCMPEQUB_RC(v7,v9,v10)
+ vor v6,v8,v8
+ bnl cr6,.Ldiffoffset_vmx_diff_found
+
+ addi r3,r3,16
+ addi r4,r4,16
+
+ bdnz .Ldiffoffset_vmx_32bytesloop
+
+ EXIT_VMX_OPS
+
+ cmpdi r5,0
+ beq .Lzero
+ b .Lcmp_lt32bytes
+
+.Ldiffoffset_vmx_diff_found:
+ EXIT_VMX_OPS
+ /* either way, the difference will appear within the next 16 bytes */
+ li r5,16
+ b .Lcmp_lt32bytes
+
+#endif
EXPORT_SYMBOL(memcmp)
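
For reference, the core idea of the rewritten fast path is an 8-bytes-at-a-time unsigned comparison of byte-reversed (on LE) double words, returning the sign on the first mismatch as .LcmpAB_lightweight does. A simplified C model that only covers the aligned, multiple-of-8 case; everything else here is handled by the assembly above:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Load 8 bytes as a big-endian value, mirroring the LD (ldbrx/ldx)
     * macro: unsigned comparison of these values orders the same way a
     * byte-by-byte memcmp would. */
    static uint64_t load_be64(const unsigned char *p)
    {
        uint64_t v;

        memcpy(&v, p, 8);
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        v = __builtin_bswap64(v);
    #endif
        return v;
    }

    /* Minimal sketch: both pointers assumed 8-byte comparable, len a
     * multiple of 8 (the kernel code handles all the other cases). */
    static int memcmp8(const void *s1, const void *s2, size_t len)
    {
        const unsigned char *a = s1, *b = s2;

        for (; len >= 8; len -= 8, a += 8, b += 8) {
            uint64_t x = load_be64(a), y = load_be64(b);

            if (x != y)
                return x > y ? 1 : -1; /* .LcmpAB_lightweight */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", memcmp8("abcdefgh", "abcdefgx", 8)); /* -1 */
        return 0;
    }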
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 8d8265be1a59..273ea67e60a1 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -9,6 +9,13 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+#ifndef SELFTEST_CASE
+/* For big-endian, 0 == most CPUs, 1 == POWER6, 2 == Cell */
+#define SELFTEST_CASE 0
+#endif
.align 7
_GLOBAL_TOC(memcpy)
@@ -20,10 +27,8 @@ BEGIN_FTR_SECTION
#endif
FTR_SECTION_ELSE
#ifdef CONFIG_PPC_BOOK3S_64
-#ifndef SELFTEST
b memcpy_power7
#endif
-#endif
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
#ifdef __LITTLE_ENDIAN__
/* dumb little-endian memcpy that will get replaced at runtime */
@@ -47,6 +52,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
cleared.
At the time of writing the only CPU that has this combination of bits
set is Power6. */
+test_feature = (SELFTEST_CASE == 1)
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
@@ -55,6 +61,7 @@ ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
addi r3,r3,-16
+test_feature = (SELFTEST_CASE == 0)
BEGIN_FTR_SECTION
andi. r0,r4,7
bne .Lsrc_unaligned
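
The test_feature assignments added above exist so the user-space selftests can rebuild this file with one side of each feature section pinned at assembly time. A rough cpp analogue of the idea (hypothetical; the kernel mechanism is an assembler symbol consumed by BEGIN_FTR_SECTION/FTR_SECTION_ELSE, not the preprocessor):

    #include <stdio.h>

    #ifndef SELFTEST_CASE
    #define SELFTEST_CASE 0 /* 0 == most CPUs, 1 == POWER6, 2 == Cell */
    #endif

    int main(void)
    {
    #if SELFTEST_CASE == 1
        puts("POWER6 path: unaligned ld/std used directly");
    #else
        puts("common path: check source alignment first");
    #endif
        return 0;
    }

Building with e.g. cc -DSELFTEST_CASE=1 pins the POWER6 alternative, the way the selftests rebuild the copy loops once per case.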
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index df7de9d3da08..89bfefcf7fcc 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -19,7 +19,10 @@
*/
#include <asm/ppc_asm.h>
-_GLOBAL(memcpy_power7)
+#ifndef SELFTEST_CASE
+/* 0 == don't use VMX, 1 == use VMX */
+#define SELFTEST_CASE 0
+#endif
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
@@ -29,20 +32,17 @@ _GLOBAL(memcpy_power7)
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
#endif
-#ifdef CONFIG_ALTIVEC
+_GLOBAL(memcpy_power7)
cmpldi r5,16
cmpldi cr1,r5,4096
-
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
-
blt .Lshort_copy
- bgt cr1,.Lvmx_copy
-#else
- cmpldi r5,16
-
- std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- blt .Lshort_copy
+#ifdef CONFIG_ALTIVEC
+test_feature = SELFTEST_CASE
+BEGIN_FTR_SECTION
+ bgt cr1, .Lvmx_copy
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
.Lnonvmx_copy:
@@ -223,14 +223,14 @@ _GLOBAL(memcpy_power7)
addi r1,r1,STACKFRAMESIZE
b .Lnonvmx_copy
-#ifdef CONFIG_ALTIVEC
.Lvmx_copy:
+#ifdef CONFIG_ALTIVEC
mflr r0
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_copy
+ bl enter_vmx_ops
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
@@ -445,7 +445,7 @@ _GLOBAL(memcpy_power7)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- b exit_vmx_copy /* tail call optimise */
+ b exit_vmx_ops /* tail call optimise */
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
@@ -649,5 +649,5 @@ _GLOBAL(memcpy_power7)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- b exit_vmx_copy /* tail call optimise */
+ b exit_vmx_ops /* tail call optimise */
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/strlen_32.S b/arch/powerpc/lib/strlen_32.S
new file mode 100644
index 000000000000..0a8d3f64d493
--- /dev/null
+++ b/arch/powerpc/lib/strlen_32.S
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * strlen() for PPC32
+ *
+ * Copyright (C) 2018 Christophe Leroy CS Systemes d'Information.
+ *
+ * Inspired by the glibc implementation
+ */
+#include <asm/ppc_asm.h>
+#include <asm/export.h>
+#include <asm/cache.h>
+
+ .text
+
+/*
+ * Algorithm:
+ *
+ * 1) Given a word 'x', we can test to see if it contains any 0 bytes
+ * by subtracting 0x01010101, and seeing if any of the high bits of each
+ * byte changed from 0 to 1. This works because the least significant
+ * 0 byte must have had no incoming carry (otherwise it's not the least
+ * significant), so it is 0x00 - 0x01 == 0xff. For all other
+ * byte values, either they have the high bit set initially, or when
+ * 1 is subtracted you get a value in the range 0x00-0x7f, none of which
+ * have their high bit set. The expression here is
+ * (x - 0x01010101) & ~x & 0x80808080), which gives 0x00000000 when
+ * there were no 0x00 bytes in the word. You get 0x80 in bytes that
+ * match, but possibly false 0x80 matches in the next more significant
+ * byte to a true match due to carries. For little-endian this is
+ * of no consequence since the least significant match is the one
+ * we're interested in, but big-endian needs method 2 to find which
+ * byte matches.
+ * 2) Given a word 'x', we can test to see _which_ byte was zero by
+ * calculating ~(((x & ~0x80808080) - 0x80808080 - 1) | x | ~0x80808080).
+ * This produces 0x80 in each byte that was zero, and 0x00 in all
+ * the other bytes. The '| ~0x80808080' clears the low 7 bits in each
+ * byte, and the '| x' part ensures that bytes with the high bit set
+ * produce 0x00. The addition will carry into the high bit of each byte
+ * iff that byte had one of its low 7 bits set. We can then just see
+ * which was the most significant bit set and divide by 8 to find how
+ * many to add to the index.
+ * This is from the book 'The PowerPC Compiler Writer's Guide',
+ * by Steve Hoxey, Faraydon Karim, Bill Hay and Hank Warren.
+ */
+
+_GLOBAL(strlen)
+ andi. r0, r3, 3
+ lis r7, 0x0101
+ addi r10, r3, -4
+ addic r7, r7, 0x0101 /* r7 = 0x01010101 (lomagic) & clear XER[CA] */
+ rotlwi r6, r7, 31 /* r6 = 0x80808080 (himagic) */
+ bne- 3f
+ .balign IFETCH_ALIGN_BYTES
+1: lwzu r9, 4(r10)
+2: subf r8, r7, r9
+ and. r8, r8, r6
+ beq+ 1b
+ andc. r8, r8, r9
+ beq+ 1b
+ andc r8, r9, r6
+ orc r9, r9, r6
+ subfe r8, r6, r8
+ nor r8, r8, r9
+ cntlzw r8, r8
+ subf r3, r3, r10
+ srwi r8, r8, 3
+ add r3, r3, r8
+ blr
+
+ /* Misaligned string: make sure the bytes before the string are not seen as 0 */
+3: xor r10, r10, r0
+ orc r8, r8, r8
+ lwzu r9, 4(r10)
+ slwi r0, r0, 3
+ srw r8, r8, r0
+ orc r9, r9, r8
+ b 2b
+EXPORT_SYMBOL(strlen)
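
The two bit tricks described in the header comment are easy to sanity-check in C with the same magic constants; a small demonstration (method 1 detects a zero byte, method 2 locates it):

    #include <stdint.h>
    #include <stdio.h>

    #define LOMAGIC 0x01010101u
    #define HIMAGIC 0x80808080u

    /* Method 1: nonzero iff some byte of x is 0x00 (may also set bits
     * more significant than the first real match, as the comment notes). */
    static uint32_t has_zero_byte(uint32_t x)
    {
        return (x - LOMAGIC) & ~x & HIMAGIC;
    }

    /* Method 2: exactly 0x80 in each byte position that was 0x00.
     * Note -HIMAGIC - 1 == ~HIMAGIC in two's complement. */
    static uint32_t which_zero_byte(uint32_t x)
    {
        return ~(((x & ~HIMAGIC) + ~HIMAGIC) | x | ~HIMAGIC);
    }

    int main(void)
    {
        printf("%x\n", has_zero_byte(0x61620063));     /* nonzero */
        printf("%x\n", has_zero_byte(0x61626364));     /* 0 */
        printf("%08x\n", which_zero_byte(0x61006300)); /* 00800080 */
        return 0;
    }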
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index bf925cdcaca9..9f340494a8ac 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -53,7 +53,7 @@ int exit_vmx_usercopy(void)
return 0;
}
-int enter_vmx_copy(void)
+int enter_vmx_ops(void)
{
if (in_interrupt())
return 0;
@@ -70,7 +70,7 @@ int enter_vmx_copy(void)
* passed a pointer to the destination which we return as required by a
* memcpy implementation.
*/
-void *exit_vmx_copy(void *dest)
+void *exit_vmx_ops(void *dest)
{
disable_kernel_altivec();
preempt_enable();
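
With the rename, enter_vmx_ops() remains the gate that refuses VMX in interrupt context by returning 0, which the assembly callers test (cmpwi cr1,r3,0) before falling back to the scalar loop. A toy model of that calling convention:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model: the caller asks whether vector ops may be used and must
     * provide a scalar fallback, exactly as the asm does after the
     * "bl enter_vmx_ops; cmpwi cr1,r3,0" sequence. */
    static bool may_use_vmx(bool in_interrupt)
    {
        return !in_interrupt; /* the kernel also enables AltiVec here */
    }

    static const char *do_op(bool in_interrupt)
    {
        return may_use_vmx(in_interrupt) ? "vmx loop" : "scalar loop";
    }

    int main(void)
    {
        printf("%s\n", do_op(false)); /* vmx loop */
        printf("%s\n", do_op(true));  /* scalar loop */
        return 0;
    }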
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 82b1ff759e26..12d92518e898 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -229,7 +229,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
}
#ifdef CONFIG_SMP
-void mmu_init_secondary(int cpu)
+void __init mmu_init_secondary(int cpu)
{
unsigned long addr;
unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index f06f3577d8d1..cdf6a9960046 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -19,7 +19,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb_low.o slb.o
obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(BITS).o
-ifeq ($(CONFIG_PPC_BOOK3S_64),y)
+ifdef CONFIG_PPC_BOOK3S_64
obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o
endif
@@ -31,7 +31,7 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-y += hugetlbpage.o
-ifeq ($(CONFIG_HUGETLB_PAGE),y)
+ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_BOOK3S_64) += hugetlbpage-hash64.o
obj-$(CONFIG_PPC_RADIX_MMU) += hugetlbpage-radix.o
obj-$(CONFIG_PPC_BOOK3E_MMU) += hugetlbpage-book3e.o
diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
index 14cfb11b09d0..869294695048 100644
--- a/arch/powerpc/mm/dump_hashpagetable.c
+++ b/arch/powerpc/mm/dump_hashpagetable.c
@@ -19,7 +19,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
@@ -260,7 +259,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *
/* to check in the secondary hash table, we invert the hash */
if (!primary)
hash = ~hash;
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* see if we can find an entry in the hpte with this hash */
for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index b1ca7a0974e3..7d262c6437c4 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -42,7 +42,6 @@
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index d573d7d07f25..6fa6765a10eb 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -80,7 +80,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
hash = hpt_hash(vpn, shift, ssize);
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -89,7 +89,7 @@ repeat:
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags,
HPTE_V_SECONDARY,
@@ -97,8 +97,8 @@ repeat:
MMU_PAGE_4K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should we try the group from which we removed?
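
The repeated change in these hunks drops a redundant mask: since HPTES_PER_GROUP is 8, (hash & htab_hash_mask) * HPTES_PER_GROUP is already a multiple of 8, so the old & ~0x7UL cleared bits that were never set. A quick check:

    #include <stdio.h>

    #define HPTES_PER_GROUP 8

    int main(void)
    {
        unsigned long hash, mask = 0xffff; /* htab_hash_mask stand-in */

        /* Multiplying by 8 left-shifts by 3, so the low 3 bits are
         * always clear and the old "& ~0x7UL" changed nothing. */
        for (hash = 0; hash < 100000; hash++) {
            unsigned long g = (hash & mask) * HPTES_PER_GROUP;

            if (g != (g & ~0x7UL))
                return 1;
        }
        printf("mask was redundant\n");
        return 0;
    }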
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index e601d95c3b20..3afa253d7f52 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -154,7 +154,7 @@ htab_insert_hpte:
}
hash = hpt_hash(vpn, shift, ssize);
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -165,7 +165,7 @@ repeat:
if (unlikely(slot == -1)) {
bool soft_invalid;
- hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags, HPTE_V_SECONDARY,
MMU_PAGE_4K, MMU_PAGE_4K,
@@ -193,8 +193,7 @@ repeat:
* that we do not get the same soft-invalid slot.
*/
if (soft_invalid || (mftb() & 0x1))
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
/*
@@ -288,7 +287,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
hash = hpt_hash(vpn, shift, ssize);
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -298,7 +297,7 @@ repeat:
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags,
HPTE_V_SECONDARY,
@@ -306,8 +305,8 @@ repeat:
MMU_PAGE_64K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should we retry the group from which we removed the entry?
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index ffbd7c0bda96..26acf6c8c20c 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -27,6 +27,7 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
+#include <asm/feature-fixups.h>
#ifdef CONFIG_SMP
.section .bss
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 1d049c78c82a..729f02df8290 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -23,13 +23,13 @@
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
+#include <asm/feature-fixups.h>
#include <misc/cxl-base.h>
@@ -423,9 +423,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
vpn, want_v & HPTE_V_AVPN, slot, newpp);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -439,9 +437,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
} else {
native_lock_hpte(hptep);
/* recheck with locks held */
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))) {
ret = -1;
@@ -481,11 +477,9 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
/* Bolted mappings are only ever in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
- hptep = htab_address + slot;
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hptep = htab_address + slot;
+ hpte_v = hpte_get_old_v(hptep);
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/* HPTE matches */
return slot;
@@ -574,11 +568,19 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
- native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ native_lock_hpte(hptep);
+ /* recheck with locks held */
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+ /* Invalidate the hpte. NOTE: this also unlocks it */
+ hptep->v = 0;
+ else
+ native_unlock_hpte(hptep);
+ }
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -586,13 +588,6 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
* (hpte_remove) because we assume the old translation is still
* technically "valid".
*/
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
- native_unlock_hpte(hptep);
- else
- /* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->v = 0;
-
- /* Invalidate the TLB */
tlbie(vpn, bpsize, apsize, ssize, local);
local_irq_restore(flags);
@@ -634,17 +629,23 @@ static void native_hugepage_invalidate(unsigned long vsid,
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
- native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ hpte_v = hpte_get_old_v(hptep);
/* Even if we miss, we need to invalidate the TLB */
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
- native_unlock_hpte(hptep);
- else
- /* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->v = 0;
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ /* recheck with locks held */
+ native_lock_hpte(hptep);
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ /*
+ * Invalidate the hpte. NOTE: this also unlocks it
+ */
+
+ hptep->v = 0;
+ } else
+ native_unlock_hpte(hptep);
+ }
/*
* We need to do tlb invalidate for all the address, tlbie
* instruction compares entry_VA in tlb with the VA specified
@@ -812,16 +813,19 @@ static void native_flush_hash_range(unsigned long number, int local)
slot += hidx & _PTEIDX_GROUP_IX;
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
+ continue;
+ /* lock and try again */
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- hpte_v = hpte_new_to_old_v(hpte_v,
- be64_to_cpu(hptep->r));
- if (!HPTE_V_COMPARE(hpte_v, want_v) ||
- !(hpte_v & HPTE_V_VALID))
+ hpte_v = hpte_get_old_v(hptep);
+
+ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
else
hptep->v = 0;
+
} pte_iterate_hashed_end();
}
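
The hash_native_64.c hunks above convert unconditional lock-then-check into a lockless read followed by a recheck under the lock, so a non-matching HPTE is skipped without ever touching the lock. A compilable sketch of that double-check pattern using C11 atomics; the lock bit and field layout here are hypothetical stand-ins for the kernel's HPTE bit lock and hpte_get_old_v():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define HPTE_V_VALID 0x1UL
#define HPTE_V_LOCK  0x2UL /* hypothetical lock bit */

static _Atomic unsigned long hpte_v;

static void lock_hpte(void)
{
    unsigned long old;
    do {
        old = atomic_load(&hpte_v) & ~HPTE_V_LOCK;
    } while (!atomic_compare_exchange_weak(&hpte_v, &old, old | HPTE_V_LOCK));
}

static void unlock_hpte(void)
{
    atomic_fetch_and(&hpte_v, ~HPTE_V_LOCK);
}

static bool matches(unsigned long v, unsigned long want)
{
    v &= ~HPTE_V_LOCK;
    return (v & HPTE_V_VALID) && (v == (want | HPTE_V_VALID));
}

/* Lockless pre-check, then recheck under the lock before invalidating;
 * a miss in the common case never takes the lock at all. */
static void invalidate(unsigned long want_v)
{
    unsigned long v = atomic_load(&hpte_v);

    if (!matches(v, want_v))
        return;

    lock_hpte();
    v = atomic_load(&hpte_v); /* recheck with the lock held */
    if (matches(v, want_v))
        atomic_store(&hpte_v, 0); /* clearing v also drops the lock */
    else
        unlock_hpte();
}

int main(void)
{
    atomic_store(&hpte_v, 0x40UL | HPTE_V_VALID);
    invalidate(0x40UL);
    printf("hpte_v after invalidate: %#lx\n", atomic_load(&hpte_v));
    return 0;
}
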
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8318716e5075..f23a89d8e4ce 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -48,7 +48,6 @@
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
-#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
@@ -808,31 +807,6 @@ int hash__remove_section_mapping(unsigned long start, unsigned long end)
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-static void update_hid_for_hash(void)
-{
- unsigned long hid0;
- unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
-
- asm volatile("ptesync": : :"memory");
- /* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(0), "i"(0), "i"(2), "r"(0) : "memory");
- asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
- trace_tlbie(0, 0, rb, 0, 2, 0, 0);
-
- /*
- * now switch the HID
- */
- hid0 = mfspr(SPRN_HID0);
- hid0 &= ~HID0_POWER9_RADIX;
- mtspr(SPRN_HID0, hid0);
- asm volatile("isync": : :"memory");
-
- /* Wait for it to happen */
- while ((mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
- cpu_relax();
-}
-
static void __init hash_init_partition_table(phys_addr_t hash_table,
unsigned long htab_size)
{
@@ -845,8 +819,6 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
htab_size = __ilog2(htab_size) - 18;
mmu_partition_table_set_entry(0, hash_table | htab_size, 0);
pr_info("Partition table %p\n", partition_tb);
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- update_hid_for_hash();
}
static void __init htab_initialize(void)
@@ -1077,9 +1049,6 @@ void hash__early_init_mmu_secondary(void)
/* Initialize hash table for that CPU */
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- update_hid_for_hash();
-
if (!cpu_has_feature(CPU_FTR_ARCH_300))
mtspr(SPRN_SDR1, _SDR1);
else
@@ -1783,8 +1752,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
long slot;
repeat:
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
@@ -1792,15 +1760,14 @@ repeat:
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
vflags | HPTE_V_SECONDARY,
psize, psize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP)&~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
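
hpte_insert_repeating() keeps the same shape after the cleanup: try the primary group, then the secondary, and when both are full evict from one chosen on a timebase bit and retry. A toy, self-contained model of that loop; the group bookkeeping and timebase are simplified stand-ins, not the real hash table:

#include <stdio.h>

#define HPTES_PER_GROUP 8
#define NGROUPS 16

static int used[NGROUPS];         /* entries used per group */
static unsigned long fake_tb = 1; /* stands in for mftb() */

static long try_insert(unsigned long grp)
{
    if (used[grp] == HPTES_PER_GROUP)
        return -1; /* group full */
    return grp * HPTES_PER_GROUP + used[grp]++;
}

/* Simplified model of the repeat loop: primary group, then secondary,
 * then evict from a pseudo-randomly chosen one and retry. */
static long insert_repeating(unsigned long hash, unsigned long mask)
{
    long slot;
repeat:
    slot = try_insert(hash & mask);
    if (slot != -1)
        return slot;
    slot = try_insert(~hash & mask);
    if (slot != -1)
        return slot;
    /* both full: pick a victim group on a timebase bit, make room */
    used[(fake_tb++ & 0x1) ? (hash & mask) : (~hash & mask)]--;
    goto repeat;
}

int main(void)
{
    int i;
    for (i = 0; i < 3 * HPTES_PER_GROUP; i++)
        printf("slot %ld\n", insert_repeating(5, NGROUPS - 1));
    return 0;
}
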
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 668e87d03f9e..82a0e37557a5 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(kmap_atomic_prot);
void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type;
+ int type __maybe_unused;
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index f20d16f849c5..01f213d2bcb9 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -128,7 +128,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
new_pmd |= H_PAGE_HASHPTE;
repeat:
- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
/* Insert into the hash table, primary slot */
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -137,16 +137,15 @@ repeat:
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags,
HPTE_V_SECONDARY,
psize, lpsize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
+ hpte_group = (hash & htab_hash_mask) *
+ HPTES_PER_GROUP;
mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8a9a49c13865..e87f9ef9115b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -118,15 +118,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
}
/*
- * These macros define how to determine which level of the page table holds
- * the hpdp.
- */
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
-#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
-#define HUGEPD_PUD_SHIFT PUD_SHIFT
-#endif
-
-/*
* At this point we do the placement change only for BOOK3S 64. This would
* possibly work on other subarchs.
*/
@@ -174,13 +165,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
}
}
#else
- if (pshift >= HUGEPD_PGD_SHIFT) {
+ if (pshift >= PGDIR_SHIFT) {
ptl = &mm->page_table_lock;
hpdp = (hugepd_t *)pg;
} else {
pdshift = PUD_SHIFT;
pu = pud_alloc(mm, pg, addr);
- if (pshift >= HUGEPD_PUD_SHIFT) {
+ if (pshift >= PUD_SHIFT) {
ptl = pud_lockptr(mm, pu);
hpdp = (hugepd_t *)pu;
} else {
@@ -621,15 +612,12 @@ static int __init add_huge_page_size(unsigned long long size)
* firmware we only add hugetlb support for page sizes that can be
* supported by linux page table layout.
* For now we have
- * Radix: 2M
+ * Radix: 2M and 1G
* Hash: 16M and 16G
*/
if (radix_enabled()) {
- if (mmu_psize != MMU_PAGE_2M) {
- if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
- (mmu_psize != MMU_PAGE_1G))
- return -EINVAL;
- }
+ if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
+ return -EINVAL;
} else {
if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
return -EINVAL;
@@ -695,9 +683,9 @@ static int __init hugetlbpage_init(void)
else
pdshift = PMD_SHIFT;
#else
- if (shift < HUGEPD_PUD_SHIFT)
+ if (shift < PUD_SHIFT)
pdshift = PMD_SHIFT;
- else if (shift < HUGEPD_PGD_SHIFT)
+ else if (shift < PGDIR_SHIFT)
pdshift = PUD_SHIFT;
else
pdshift = PGDIR_SHIFT;
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index f3d4b4a0e561..4a892d894a0f 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -200,9 +200,9 @@ static void pte_frag_destroy(void *pte_frag)
/* drop all the pending references */
count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
/* We allow PTE_FRAG_NR fragments from a PTE page */
- if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
+ if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
pgtable_page_dtor(page);
- free_unref_page(page);
+ __free_page(page);
}
}
@@ -215,13 +215,13 @@ static void pmd_frag_destroy(void *pmd_frag)
/* drop all the pending references */
count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
/* We allow PTE_FRAG_NR fragments from a PTE page */
- if (page_ref_sub_and_test(page, PMD_FRAG_NR - count)) {
+ if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
pgtable_pmd_page_dtor(page);
- free_unref_page(page);
+ __free_page(page);
}
}
-static void destroy_pagetable_page(struct mm_struct *mm)
+static void destroy_pagetable_cache(struct mm_struct *mm)
{
void *frag;
@@ -244,13 +244,14 @@ void destroy_context(struct mm_struct *mm)
WARN_ON(process_tb[mm->context.id].prtb0 != 0);
else
subpage_prot_free(mm);
- destroy_pagetable_page(mm);
destroy_contexts(&mm->context);
mm->context.id = MMU_NO_CONTEXT;
}
void arch_exit_mmap(struct mm_struct *mm)
{
+ destroy_pagetable_cache(mm);
+
if (radix_enabled()) {
/*
* Radix doesn't have a valid bit in the process table
@@ -273,15 +274,7 @@ void arch_exit_mmap(struct mm_struct *mm)
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
-
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- isync();
- mtspr(SPRN_PID, next->context.id);
- isync();
- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
- } else {
- mtspr(SPRN_PID, next->context.id);
- isync();
- }
+ mtspr(SPRN_PID, next->context.id);
+ isync();
}
#endif
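
The destroy_pagetable_cache() and pt_frag_refcount conversions above move page-table fragment lifetime off the generic page refcount onto a dedicated counter, so a stray speculative reference can no longer free a live page table. A toy model of the rules those hunks implement, assuming a fragment count of 4 and collapsing the allocator's two-step publish into one function:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAG_NR 4 /* fragments carved from one page, PMD_FRAG_NR-like */

/* Toy stand-in for struct page with a dedicated pt_frag_refcount. */
struct page {
    atomic_int pt_frag_refcount;
    char data[4096];
};

static struct page *alloc_for_cache(void)
{
    struct page *page = calloc(1, sizeof(*page));
    /* first fragment handed out: count starts at 1 ... */
    atomic_store(&page->pt_frag_refcount, 1);
    /* ... then is raised to FRAG_NR when the page is published as the
     * per-mm fragment cache, covering the remaining fragments */
    atomic_store(&page->pt_frag_refcount, FRAG_NR);
    return page;
}

static void fragment_free(struct page *page)
{
    assert(atomic_load(&page->pt_frag_refcount) > 0); /* BUG_ON analogue */
    if (atomic_fetch_sub(&page->pt_frag_refcount, 1) == 1)
        free(page); /* last fragment gone: release the page */
}

int main(void)
{
    struct page *page = alloc_for_cache();
    for (int i = 0; i < FRAG_NR; i++)
        fragment_free(page); /* freed on the last call */
    return 0;
}
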
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
index aa5a7fd89461..921c1e33e941 100644
--- a/arch/powerpc/mm/mmu_context_hash32.c
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -27,7 +27,6 @@
#include <linux/export.h>
#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
/*
* On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index c4c0a09a7775..e5d779eed181 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -19,7 +19,6 @@
*
*/
#include <linux/mm.h>
-#include <asm/tlbflush.h>
#include <asm/mmu.h>
#ifdef CONFIG_PPC_MMU_NOHASH
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 4afbfbb64bfd..01d7c0f7c4f0 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -270,6 +270,8 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
return NULL;
}
+ atomic_set(&page->pt_frag_refcount, 1);
+
ret = page_address(page);
/*
* if we support only one fragment just return the
@@ -285,7 +287,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
* count.
*/
if (likely(!mm->context.pmd_frag)) {
- set_page_count(page, PMD_FRAG_NR);
+ atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
}
spin_unlock(&mm->page_table_lock);
@@ -308,9 +310,10 @@ void pmd_fragment_free(unsigned long *pmd)
{
struct page *page = virt_to_page(pmd);
- if (put_page_testzero(page)) {
+ BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+ if (atomic_dec_and_test(&page->pt_frag_refcount)) {
pgtable_pmd_page_dtor(page);
- free_unref_page(page);
+ __free_page(page);
}
}
@@ -352,6 +355,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
return NULL;
}
+ atomic_set(&page->pt_frag_refcount, 1);
ret = page_address(page);
/*
@@ -367,7 +371,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
* count.
*/
if (likely(!mm->context.pte_frag)) {
- set_page_count(page, PTE_FRAG_NR);
+ atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
mm->context.pte_frag = ret + PTE_FRAG_SIZE;
}
spin_unlock(&mm->page_table_lock);
@@ -390,10 +394,11 @@ void pte_fragment_free(unsigned long *table, int kernel)
{
struct page *page = virt_to_page(table);
- if (put_page_testzero(page)) {
+ BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+ if (atomic_dec_and_test(&page->pt_frag_refcount)) {
if (!kernel)
pgtable_page_dtor(page);
- free_unref_page(page);
+ __free_page(page);
}
}
@@ -450,3 +455,25 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
return pgtable_free(table, index);
}
#endif
+
+#ifdef CONFIG_PROC_FS
+atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+
+void arch_report_meminfo(struct seq_file *m)
+{
+ /*
+ * Hash maps the memory with one size mmu_linear_psize.
+ * So don't bother to print these on hash
+ */
+ if (!radix_enabled())
+ return;
+ seq_printf(m, "DirectMap4k: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
+ seq_printf(m, "DirectMap64k: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
+ seq_printf(m, "DirectMap2M: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
+ seq_printf(m, "DirectMap1G: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
+}
+#endif /* CONFIG_PROC_FS */
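
The shifts in arch_report_meminfo() convert page counts to kB: shifting by (page_shift - 10), i.e. 2 for 4K, 6 for 64K, 11 for 2M and 20 for 1G. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
    /* page size in kB equals 1 << (page_shift - 10), which is the
     * shift applied to each counter in arch_report_meminfo() */
    struct { const char *name; int page_shift, report_shift; } m[] = {
        { "4k",  12,  2 },  /* 4 kB per page */
        { "64k", 16,  6 },  /* 64 kB per page */
        { "2M",  21, 11 },  /* 2048 kB per page */
        { "1G",  30, 20 },  /* 1048576 kB per page */
    };
    for (int i = 0; i < 4; i++)
        printf("DirectMap%s: count << %d (check: %lu kB/page)\n",
               m[i].name, m[i].report_shift,
               1UL << (m[i].page_shift - 10));
    return 0;
}
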
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 96f68c5aa1f5..7be99fd9af15 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -226,16 +226,6 @@ void radix__mark_rodata_ro(void)
{
unsigned long start, end;
- /*
- * mark_rodata_ro() will mark itself as !writable at some point.
- * Due to DD1 workaround in radix__pte_update(), we'll end up with
- * an invalid pte and the system will crash quite severly.
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
- return;
- }
-
start = (unsigned long)_stext;
end = (unsigned long)__init_begin;
@@ -277,6 +267,7 @@ static int __meminit create_physical_mapping(unsigned long start,
#else
int split_text_mapping = 0;
#endif
+ int psize;
start = _ALIGN_UP(start, PAGE_SIZE);
for (addr = start; addr < end; addr += mapping_size) {
@@ -290,13 +281,17 @@ static int __meminit create_physical_mapping(unsigned long start,
retry:
if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
mmu_psize_defs[MMU_PAGE_1G].shift &&
- PUD_SIZE <= max_mapping_size)
+ PUD_SIZE <= max_mapping_size) {
mapping_size = PUD_SIZE;
- else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
- mmu_psize_defs[MMU_PAGE_2M].shift)
+ psize = MMU_PAGE_1G;
+ } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
+ mmu_psize_defs[MMU_PAGE_2M].shift) {
mapping_size = PMD_SIZE;
- else
+ psize = MMU_PAGE_2M;
+ } else {
mapping_size = PAGE_SIZE;
+ psize = mmu_virtual_psize;
+ }
if (split_text_mapping && (mapping_size == PUD_SIZE) &&
(addr <= __pa_symbol(__init_begin)) &&
@@ -307,8 +302,10 @@ retry:
if (split_text_mapping && (mapping_size == PMD_SIZE) &&
(addr <= __pa_symbol(__init_begin)) &&
- (addr + mapping_size) >= __pa_symbol(_stext))
+ (addr + mapping_size) >= __pa_symbol(_stext)) {
mapping_size = PAGE_SIZE;
+ psize = mmu_virtual_psize;
+ }
if (mapping_size != previous_size) {
print_mapping(start, addr, previous_size);
@@ -326,6 +323,8 @@ retry:
rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
if (rc)
return rc;
+
+ update_page_count(psize, 1);
}
print_mapping(start, addr, mapping_size);
@@ -533,35 +532,6 @@ found:
return;
}
-static void update_hid_for_radix(void)
-{
- unsigned long hid0;
- unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
-
- asm volatile("ptesync": : :"memory");
- /* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
- /* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
- asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
- trace_tlbie(0, 0, rb, 0, 2, 0, 1);
- trace_tlbie(0, 0, rb, 0, 2, 1, 1);
-
- /*
- * now switch the HID
- */
- hid0 = mfspr(SPRN_HID0);
- hid0 |= HID0_POWER9_RADIX;
- mtspr(SPRN_HID0, hid0);
- asm volatile("isync": : :"memory");
-
- /* Wait for it to happen */
- while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
- cpu_relax();
-}
-
static void radix_init_amor(void)
{
/*
@@ -576,22 +546,12 @@ static void radix_init_amor(void)
static void radix_init_iamr(void)
{
- unsigned long iamr;
-
- /*
- * The IAMR should set to 0 on DD1.
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- iamr = 0;
- else
- iamr = (1ul << 62);
-
/*
* Radix always uses key0 of the IAMR to determine if an access is
* allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
* fetch.
*/
- mtspr(SPRN_IAMR, iamr);
+ mtspr(SPRN_IAMR, (1ul << 62));
}
void __init radix__early_init_mmu(void)
@@ -644,8 +604,6 @@ void __init radix__early_init_mmu(void)
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
radix_init_native();
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- update_hid_for_radix();
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
radix_init_partition_table();
@@ -671,10 +629,6 @@ void radix__early_init_mmu_secondary(void)
* update partition table control register and UPRT
*/
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- update_hid_for_radix();
-
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
@@ -1095,8 +1049,7 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
* To avoid NMMU hang while relaxing access, we need mark
* the pte invalid in between.
*/
- if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
- atomic_read(&mm->context.copros) > 0) {
+ if (atomic_read(&mm->context.copros) > 0) {
unsigned long old_pte, new_pte;
old_pte = __radix_pte_update(ptep, ~0, 0);
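
create_physical_mapping() now records which psize it picked so update_page_count() can feed the DirectMap counters added above. A simplified, runnable model of the size-selection loop; it leaves out the mmu_psize_defs[] capability checks, the max_mapping_size cap and the kernel-text split handling:

#include <stdio.h>

#define SZ_4K (1UL << 12)
#define SZ_2M (1UL << 21)
#define SZ_1G (1UL << 30)
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Pick the largest page that fits: 1G, then 2M, then base pages. */
static unsigned long pick_mapping_size(unsigned long addr, unsigned long gap)
{
    if (IS_ALIGNED(addr, SZ_1G) && gap >= SZ_1G)
        return SZ_1G;
    if (IS_ALIGNED(addr, SZ_2M) && gap >= SZ_2M)
        return SZ_2M;
    return SZ_4K;
}

int main(void)
{
    unsigned long addr = 0, end = SZ_1G + 3 * SZ_2M + 5 * SZ_4K;
    unsigned long counts[3] = { 0, 0, 0 }; /* 1G, 2M, 4K tallies */

    while (addr < end) {
        unsigned long sz = pick_mapping_size(addr, end - addr);
        counts[sz == SZ_1G ? 0 : sz == SZ_2M ? 1 : 2]++;
        addr += sz;
    }
    printf("1G:%lu 2M:%lu 4K:%lu\n", counts[0], counts[1], counts[2]);
    return 0;
}
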
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index e6f500fabf5e..333b1f80c435 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -14,9 +14,12 @@ DEFINE_STATIC_KEY_TRUE(pkey_disabled);
bool pkey_execute_disable_supported;
int pkeys_total; /* Total pkeys as per device tree */
bool pkeys_devtree_defined; /* pkey property exported by device tree */
-u32 initial_allocation_mask; /* Bits set for reserved keys */
-u64 pkey_amr_uamor_mask; /* Bits in AMR/UMOR not to be touched */
+u32 initial_allocation_mask; /* Bits set for the initially allocated keys */
+u32 reserved_allocation_mask; /* Bits set for reserved keys */
+u64 pkey_amr_mask; /* Bits in AMR not to be touched */
u64 pkey_iamr_mask; /* Bits in IAMR not to be touched */
+u64 pkey_uamor_mask; /* Bits in UAMOR not to be touched */
+int execute_only_key = 2;
#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
@@ -91,7 +94,7 @@ int pkey_initialize(void)
* arch-neutral code.
*/
pkeys_total = min_t(int, pkeys_total,
- (ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT));
+ ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)+1));
if (!pkey_mmu_enabled() || radix_enabled() || !pkeys_total)
static_branch_enable(&pkey_disabled);
@@ -119,20 +122,39 @@ int pkey_initialize(void)
#else
os_reserved = 0;
#endif
- initial_allocation_mask = ~0x0;
- pkey_amr_uamor_mask = ~0x0ul;
+ /* Bits are in LE format. */
+ reserved_allocation_mask = (0x1 << 1) | (0x1 << execute_only_key);
+
+ /* register mask is in BE format */
+ pkey_amr_mask = ~0x0ul;
+ pkey_amr_mask &= ~(0x3ul << pkeyshift(0));
+
pkey_iamr_mask = ~0x0ul;
- /*
- * key 0, 1 are reserved.
- * key 0 is the default key, which allows read/write/execute.
- * key 1 is recommended not to be used. PowerISA(3.0) page 1015,
- * programming note.
- */
- for (i = 2; i < (pkeys_total - os_reserved); i++) {
- initial_allocation_mask &= ~(0x1 << i);
- pkey_amr_uamor_mask &= ~(0x3ul << pkeyshift(i));
- pkey_iamr_mask &= ~(0x1ul << pkeyshift(i));
+ pkey_iamr_mask &= ~(0x3ul << pkeyshift(0));
+ pkey_iamr_mask &= ~(0x3ul << pkeyshift(execute_only_key));
+
+ pkey_uamor_mask = ~0x0ul;
+ pkey_uamor_mask &= ~(0x3ul << pkeyshift(0));
+ pkey_uamor_mask &= ~(0x3ul << pkeyshift(execute_only_key));
+
+ /* mark the rest of the keys as reserved and hence unavailable */
+ for (i = (pkeys_total - os_reserved); i < pkeys_total; i++) {
+ reserved_allocation_mask |= (0x1 << i);
+ pkey_uamor_mask &= ~(0x3ul << pkeyshift(i));
+ }
+ initial_allocation_mask = reserved_allocation_mask | (0x1 << 0);
+
+ if (unlikely((pkeys_total - os_reserved) <= execute_only_key)) {
+ /*
+ * Insufficient number of keys to support
+ * execute only key. Mark it unavailable.
+ * Any AMR, UAMOR, IAMR bit set for
+ * this key is irrelevant since this key
+ * can never be allocated.
+ */
+ execute_only_key = -1;
}
+
return 0;
}
@@ -143,8 +165,7 @@ void pkey_mm_init(struct mm_struct *mm)
if (static_branch_likely(&pkey_disabled))
return;
mm_pkey_allocation_map(mm) = initial_allocation_mask;
- /* -1 means unallocated or invalid */
- mm->context.execute_only_pkey = -1;
+ mm->context.execute_only_pkey = execute_only_key;
}
static inline u64 read_amr(void)
@@ -213,33 +234,6 @@ static inline void init_iamr(int pkey, u8 init_bits)
write_iamr(old_iamr | new_iamr_bits);
}
-static void pkey_status_change(int pkey, bool enable)
-{
- u64 old_uamor;
-
- /* Reset the AMR and IAMR bits for this key */
- init_amr(pkey, 0x0);
- init_iamr(pkey, 0x0);
-
- /* Enable/disable key */
- old_uamor = read_uamor();
- if (enable)
- old_uamor |= (0x3ul << pkeyshift(pkey));
- else
- old_uamor &= ~(0x3ul << pkeyshift(pkey));
- write_uamor(old_uamor);
-}
-
-void __arch_activate_pkey(int pkey)
-{
- pkey_status_change(pkey, true);
-}
-
-void __arch_deactivate_pkey(int pkey)
-{
- pkey_status_change(pkey, false);
-}
-
/*
* Set the access rights in AMR IAMR and UAMOR registers for @pkey to that
* specified in @init_val.
@@ -289,9 +283,6 @@ void thread_pkey_regs_restore(struct thread_struct *new_thread,
if (static_branch_likely(&pkey_disabled))
return;
- /*
- * TODO: Just set UAMOR to zero if @new_thread hasn't used any keys yet.
- */
if (old_thread->amr != new_thread->amr)
write_amr(new_thread->amr);
if (old_thread->iamr != new_thread->iamr)
@@ -305,9 +296,13 @@ void thread_pkey_regs_init(struct thread_struct *thread)
if (static_branch_likely(&pkey_disabled))
return;
- thread->amr = read_amr() & pkey_amr_uamor_mask;
- thread->iamr = read_iamr() & pkey_iamr_mask;
- thread->uamor = read_uamor() & pkey_amr_uamor_mask;
+ thread->amr = pkey_amr_mask;
+ thread->iamr = pkey_iamr_mask;
+ thread->uamor = pkey_uamor_mask;
+
+ write_uamor(pkey_uamor_mask);
+ write_amr(pkey_amr_mask);
+ write_iamr(pkey_iamr_mask);
}
static inline bool pkey_allows_readwrite(int pkey)
@@ -322,48 +317,7 @@ static inline bool pkey_allows_readwrite(int pkey)
int __execute_only_pkey(struct mm_struct *mm)
{
- bool need_to_set_mm_pkey = false;
- int execute_only_pkey = mm->context.execute_only_pkey;
- int ret;
-
- /* Do we need to assign a pkey for mm's execute-only maps? */
- if (execute_only_pkey == -1) {
- /* Go allocate one to use, which might fail */
- execute_only_pkey = mm_pkey_alloc(mm);
- if (execute_only_pkey < 0)
- return -1;
- need_to_set_mm_pkey = true;
- }
-
- /*
- * We do not want to go through the relatively costly dance to set AMR
- * if we do not need to. Check it first and assume that if the
- * execute-only pkey is readwrite-disabled than we do not have to set it
- * ourselves.
- */
- if (!need_to_set_mm_pkey && !pkey_allows_readwrite(execute_only_pkey))
- return execute_only_pkey;
-
- /*
- * Set up AMR so that it denies access for everything other than
- * execution.
- */
- ret = __arch_set_user_pkey_access(current, execute_only_pkey,
- PKEY_DISABLE_ACCESS |
- PKEY_DISABLE_WRITE);
- /*
- * If the AMR-set operation failed somehow, just return 0 and
- * effectively disable execute-only support.
- */
- if (ret) {
- mm_pkey_free(mm, execute_only_pkey);
- return -1;
- }
-
- /* We got one, store it and use it from here on out */
- if (need_to_set_mm_pkey)
- mm->context.execute_only_pkey = execute_only_pkey;
- return execute_only_pkey;
+ return mm->context.execute_only_pkey;
}
static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
@@ -407,9 +361,6 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
int pkey_shift;
u64 amr;
- if (!pkey)
- return true;
-
if (!is_pkey_enabled(pkey))
return true;
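
The reworked pkey_initialize() builds the AMR/IAMR/UAMOR masks by starting from all-ones and clearing the two bits belonging to each key the kernel manages itself. A standalone sketch of that mask construction; pkeyshift() here is a local reimplementation consistent with how the file uses it (key 0 in the top bit pair, two bits per key), not the in-tree macro itself:

#include <stdio.h>

#define NR_PKEYS          32
#define AMR_BITS_PER_PKEY 2

/* Hypothetical pkeyshift(): key 0 occupies the two most significant
 * bit positions, matching the IBM bit numbering the pkeys code uses. */
static int pkeyshift(int pkey)
{
    return (NR_PKEYS - pkey - 1) * AMR_BITS_PER_PKEY;
}

int main(void)
{
    int execute_only_key = 2;
    unsigned long long pkey_amr_mask, pkey_uamor_mask;

    /* Start from all-ones and punch out the kernel-managed keys. */
    pkey_amr_mask = ~0ULL;
    pkey_amr_mask &= ~(0x3ULL << pkeyshift(0));

    pkey_uamor_mask = ~0ULL;
    pkey_uamor_mask &= ~(0x3ULL << pkeyshift(0));
    pkey_uamor_mask &= ~(0x3ULL << pkeyshift(execute_only_key));

    printf("amr mask   %016llx\n", pkey_amr_mask);
    printf("uamor mask %016llx\n", pkey_uamor_mask);
    return 0;
}
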
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cb796724a6fc..0b095fa54049 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -90,6 +90,45 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
: "memory" );
}
+/*
+ * Insert bolted entries into SLB (which may not be empty, so don't clear
+ * slb_cache_ptr).
+ */
+void __slb_restore_bolted_realmode(void)
+{
+ struct slb_shadow *p = get_slb_shadow();
+ enum slb_index index;
+
+ /* No isync needed because realmode. */
+ for (index = 0; index < SLB_NUM_BOLTED; index++) {
+ asm volatile("slbmte %0,%1" :
+ : "r" (be64_to_cpu(p->save_area[index].vsid)),
+ "r" (be64_to_cpu(p->save_area[index].esid)));
+ }
+}
+
+/*
+ * Insert the bolted entries into an empty SLB.
+ * This is not the same as rebolt because the bolted segments are not
+ * changed, just loaded from the shadow area.
+ */
+void slb_restore_bolted_realmode(void)
+{
+ __slb_restore_bolted_realmode();
+ get_paca()->slb_cache_ptr = 0;
+}
+
+/*
+ * This flushes all SLB entries including 0, so it must be realmode.
+ */
+void slb_flush_all_realmode(void)
+{
+ /*
+ * This flushes all SLB entries including 0, so it must be realmode.
+ */
+ asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+}
+
static void __slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index a83fbd2a4a24..4ac5057ad439 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -22,6 +22,7 @@
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>
+#include <asm/feature-fixups.h>
/*
* This macro generates asm code to compute the VSID scramble
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 9d16ee251fc0..3327551c8b47 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -17,7 +17,6 @@
#include <asm/pgtable.h>
#include <linux/uaccess.h>
-#include <asm/tlbflush.h>
/*
* Free all pages allocated for subpage protection maps and pointers.
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 1135b43a597c..fef3e1eb3a19 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -1048,24 +1048,6 @@ void radix__flush_tlb_all(void)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
-void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
- unsigned long address)
-{
- /*
- * We track page size in pte only for DD1, So we can
- * call this only on DD1.
- */
- if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- VM_WARN_ON(1);
- return;
- }
-
- if (old_pte & R_PAGE_LARGE)
- radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
- else
- radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
-}
-
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index eb82d787d99a..7fd20c52a8ec 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -22,6 +22,7 @@
#include <asm/ppc-opcode.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
+#include <asm/feature-fixups.h>
#ifdef CONFIG_PPC_64K_PAGES
#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1)
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 048b8e9f4492..e066a658acac 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -34,6 +34,8 @@
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/bug.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
#if defined(CONFIG_40x)
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
index 809f019d3cba..c2dec3a68d4c 100644
--- a/arch/powerpc/net/Makefile
+++ b/arch/powerpc/net/Makefile
@@ -2,7 +2,7 @@
#
# Arch-specific network modules
#
-ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_PPC64
obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
else
obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
index a8cd7e289ecd..6f4daacad296 100644
--- a/arch/powerpc/net/bpf_jit32.h
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -13,6 +13,7 @@
#ifndef _BPF_JIT32_H
#define _BPF_JIT32_H
+#include <asm/asm-compat.h>
#include "bpf_jit.h"
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/net/bpf_jit_asm.S b/arch/powerpc/net/bpf_jit_asm.S
index 3dd9c43d40c9..c80280dc2e04 100644
--- a/arch/powerpc/net/bpf_jit_asm.S
+++ b/arch/powerpc/net/bpf_jit_asm.S
@@ -10,6 +10,7 @@
*/
#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
#include "bpf_jit32.h"
/*
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 5b061fc81df3..d5bfe24bb3b5 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
+#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index c0a9bcd28356..50b129785aee 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -13,6 +13,7 @@
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
+#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 19d8ab49d1bd..81f8a0c838ae 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -128,10 +128,6 @@ static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
-static bool use_ic(u64 event)
-{
- return false;
-}
#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
@@ -714,14 +710,6 @@ static void pmao_restore_workaround(bool ebb)
mtspr(SPRN_PMC6, pmcs[5]);
}
-static bool use_ic(u64 event)
-{
- if (cpu_has_feature(CPU_FTR_POWER9_DD1) &&
- (event == 0x200f2 || event == 0x300f2))
- return true;
-
- return false;
-}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
@@ -1046,7 +1034,6 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (event->hw.state & PERF_HES_STOPPED)
return;
@@ -1056,13 +1043,6 @@ static void power_pmu_read(struct perf_event *event)
if (is_ebb_event(event)) {
val = read_pmc(event->hw.idx);
- if (use_ic(event->attr.config)) {
- val = mfspr(SPRN_IC);
- if (val > cpuhw->ic_init)
- val = val - cpuhw->ic_init;
- else
- val = val + (0 - cpuhw->ic_init);
- }
local64_set(&event->hw.prev_count, val);
return;
}
@@ -1076,13 +1056,6 @@ static void power_pmu_read(struct perf_event *event)
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
- if (use_ic(event->attr.config)) {
- val = mfspr(SPRN_IC);
- if (val > cpuhw->ic_init)
- val = val - cpuhw->ic_init;
- else
- val = val + (0 - cpuhw->ic_init);
- }
delta = check_and_compute_delta(prev, val);
if (!delta)
return;
@@ -1535,13 +1508,6 @@ nocheck:
event->attr.branch_sample_type);
}
- /*
- * Workaround for POWER9 DD1 to use the Instruction Counter
- * register value for instruction counting
- */
- if (use_ic(event->attr.config))
- cpuhw->ic_init = mfspr(SPRN_IC);
-
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
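
The deleted use_ic() path computed the instruction-counter delta as val - ic_init with an explicit branch for the wrapped case; for unsigned 64-bit values both branches are the same modular subtraction. A two-line demonstration:

#include <stdio.h>

/* The removed code computed
 *   val > init ? val - init : val + (0 - init)
 * For unsigned 64-bit values both arms are subtraction mod 2^64. */
int main(void)
{
    unsigned long long init = 0xfffffffffffffff0ULL;
    unsigned long long val  = 0x10ULL;   /* counter wrapped past 0 */
    unsigned long long a = val + (0 - init);
    unsigned long long b = val - init;
    printf("%llu %llu\n", a, b);         /* both print 32 */
    return 0;
}
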
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index d1977b61f827..1fafc32b12a0 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -867,59 +867,6 @@ static int thread_imc_cpu_init(void)
ppc_thread_imc_cpu_offline);
}
-void thread_imc_pmu_sched_task(struct perf_event_context *ctx,
- bool sched_in)
-{
- int core_id;
- struct imc_pmu_ref *ref;
-
- if (!is_core_imc_mem_inited(smp_processor_id()))
- return;
-
- core_id = smp_processor_id() / threads_per_core;
- /*
- * imc pmus are enabled only when it is used.
- * See if this is triggered for the first time.
- * If yes, take the mutex lock and enable the counters.
- * If not, just increment the count in ref count struct.
- */
- ref = &core_imc_refc[core_id];
- if (!ref)
- return;
-
- if (sched_in) {
- mutex_lock(&ref->lock);
- if (ref->refc == 0) {
- if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
- get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
- pr_err("thread-imc: Unable to start the counter\
- for core %d\n", core_id);
- return;
- }
- }
- ++ref->refc;
- mutex_unlock(&ref->lock);
- } else {
- mutex_lock(&ref->lock);
- ref->refc--;
- if (ref->refc == 0) {
- if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
- get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
- pr_err("thread-imc: Unable to stop the counters\
- for core %d\n", core_id);
- return;
- }
- } else if (ref->refc < 0) {
- ref->refc = 0;
- }
- mutex_unlock(&ref->lock);
- }
-
- return;
-}
-
static int thread_imc_event_init(struct perf_event *event)
{
u32 config = event->attr.config;
@@ -1046,22 +993,70 @@ static int imc_event_add(struct perf_event *event, int flags)
static int thread_imc_event_add(struct perf_event *event, int flags)
{
+ int core_id;
+ struct imc_pmu_ref *ref;
+
if (flags & PERF_EF_START)
imc_event_start(event, flags);
- /* Enable the sched_task to start the engine */
- perf_sched_cb_inc(event->ctx->pmu);
+ if (!is_core_imc_mem_inited(smp_processor_id()))
+ return -EINVAL;
+
+ core_id = smp_processor_id() / threads_per_core;
+ /*
+ * imc pmus are enabled only when in use.
+ * See if this is triggered for the first time.
+ * If yes, take the mutex lock and enable the counters.
+ * If not, just increment the count in ref count struct.
+ */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return -EINVAL;
+
+ mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to start the counter\
+ for core %d\n", core_id);
+ return -EINVAL;
+ }
+ }
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
return 0;
}
static void thread_imc_event_del(struct perf_event *event, int flags)
{
+
+ int core_id;
+ struct imc_pmu_ref *ref;
+
/*
* Take a snapshot and calculate the delta and update
* the event counter values.
*/
imc_event_update(event);
- perf_sched_cb_dec(event->ctx->pmu);
+
+ core_id = smp_processor_id() / threads_per_core;
+ ref = &core_imc_refc[core_id];
+
+ mutex_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to stop the counters\
+ for core %d\n", core_id);
+ return;
+ }
+ } else if (ref->refc < 0) {
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
}
/* update_pmu_ops : Populate the appropriate operations for "pmu" */
@@ -1087,7 +1082,6 @@ static int update_pmu_ops(struct imc_pmu *pmu)
break;
case IMC_DOMAIN_THREAD:
pmu->pmu.event_init = thread_imc_event_init;
- pmu->pmu.sched_task = thread_imc_pmu_sched_task;
pmu->pmu.add = thread_imc_event_add;
pmu->pmu.del = thread_imc_event_del;
pmu->pmu.start_txn = thread_imc_pmu_start_txn;
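
thread_imc_event_add()/del() now do the refcounting that previously lived in the sched_task hook: the first event on a core starts the counters, the last one stops them, all under the per-core mutex. A compilable sketch of that shape, with pthread primitives and printf standing in for the kernel mutex and the OPAL start/stop calls (build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct imc_ref {
    pthread_mutex_t lock;
    int refc;
};

static struct imc_ref ref = { PTHREAD_MUTEX_INITIALIZER, 0 };

static int event_add(void)
{
    pthread_mutex_lock(&ref.lock);
    if (ref.refc == 0)
        printf("start core counters\n"); /* opal_imc_counters_start */
    ++ref.refc;
    pthread_mutex_unlock(&ref.lock);
    return 0;
}

static void event_del(void)
{
    pthread_mutex_lock(&ref.lock);
    ref.refc--;
    if (ref.refc == 0)
        printf("stop core counters\n");  /* opal_imc_counters_stop */
    else if (ref.refc < 0)
        ref.refc = 0;                    /* clamp, as the driver does */
    pthread_mutex_unlock(&ref.lock);
}

int main(void)
{
    event_add(); /* first user starts the counters */
    event_add(); /* second user only bumps the count */
    event_del();
    event_del(); /* last user stops the counters */
    return 0;
}
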
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 2efee3f196f5..177de814286f 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -59,7 +59,7 @@ static bool is_event_valid(u64 event)
{
u64 valid_mask = EVENT_VALID_MASK;
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
valid_mask = p9_EVENT_VALID_MASK;
return !(event & ~valid_mask);
@@ -86,8 +86,6 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
* In case of Power9:
* Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
* or if group already have any marked events.
- * Non-Marked events (for DD1):
- * MMCRA[SDAR_MODE] will be set to 0b01
* For rest
* MMCRA[SDAR_MODE] will be set from event code.
* If sdar_mode from event is zero, default to 0b01. Hardware
@@ -96,7 +94,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
- else if (!cpu_has_feature(CPU_FTR_POWER9_DD1) && p9_SDAR_MODE(event))
+ else if (p9_SDAR_MODE(event))
*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
else
*mmcra |= MMCRA_SDAR_MODE_DCACHE;
@@ -106,7 +104,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
static u64 thresh_cmp_val(u64 value)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return value << p9_MMCRA_THR_CMP_SHIFT;
return value << MMCRA_THR_CMP_SHIFT;
@@ -114,7 +112,7 @@ static u64 thresh_cmp_val(u64 value)
static unsigned long combine_from_event(u64 event)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return p9_EVENT_COMBINE(event);
return EVENT_COMBINE(event);
@@ -122,7 +120,7 @@ static unsigned long combine_from_event(u64 event)
static unsigned long combine_shift(unsigned long pmc)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return p9_MMCR1_COMBINE_SHIFT(pmc);
return MMCR1_COMBINE_SHIFT(pmc);
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 6a0b586c935a..0028f4b9490d 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -158,11 +158,6 @@
CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
-/*
- * Lets restrict use of PMC5 for instruction counting.
- */
-#define P9_DD1_TEST_ADDER (ISA207_TEST_ADDER | CNST_PMC_VAL(5))
-
/* Bits in MMCR1 for PowerISA v2.07 */
#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 2ca0b33b4efb..e012b1030a5b 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -219,12 +219,6 @@ static struct attribute_group power9_pmu_events_group = {
.attrs = power9_events_attr,
};
-static const struct attribute_group *power9_isa207_pmu_attr_groups[] = {
- &isa207_pmu_format_group,
- &power9_pmu_events_group,
- NULL,
-};
-
PMU_FORMAT_ATTR(event, "config:0-51");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
@@ -267,17 +261,6 @@ static const struct attribute_group *power9_pmu_attr_groups[] = {
NULL,
};
-static int power9_generic_events_dd1[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
- [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_DISP,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL_ALT,
- [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
- [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
- [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
-};
-
static int power9_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
@@ -439,25 +422,6 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
#undef C
-static struct power_pmu power9_isa207_pmu = {
- .name = "POWER9",
- .n_counter = MAX_PMU_COUNTERS,
- .add_fields = ISA207_ADD_FIELDS,
- .test_adder = P9_DD1_TEST_ADDER,
- .compute_mmcr = isa207_compute_mmcr,
- .config_bhrb = power9_config_bhrb,
- .bhrb_filter_map = power9_bhrb_filter_map,
- .get_constraint = isa207_get_constraint,
- .get_alternatives = power9_get_alternatives,
- .disable_pmc = isa207_disable_pmc,
- .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
- .n_generic = ARRAY_SIZE(power9_generic_events_dd1),
- .generic_events = power9_generic_events_dd1,
- .cache_events = &power9_cache_events,
- .attr_groups = power9_isa207_pmu_attr_groups,
- .bhrb_nr = 32,
-};
-
static struct power_pmu power9_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
@@ -500,23 +464,7 @@ static int __init init_power9_pmu(void)
}
}
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- /*
- * Since PM_INST_CMPL may not provide right counts in all
- * sampling scenarios in power9 DD1, instead use PM_INST_DISP.
- */
- EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP;
- /*
- * Power9 DD1 should use PM_BR_CMPL_ALT event code for
- * "branches" to provide correct counter value.
- */
- EVENT_VAR(PM_BR_CMPL, _g).id = PM_BR_CMPL_ALT;
- EVENT_VAR(PM_BR_CMPL, _c).id = PM_BR_CMPL_ALT;
- rc = register_power_pmu(&power9_isa207_pmu);
- } else {
- rc = register_power_pmu(&power9_pmu);
- }
-
+ rc = register_power_pmu(&power9_pmu);
if (rc)
return rc;
diff --git a/arch/powerpc/perf/req-gen/_begin.h b/arch/powerpc/perf/req-gen/_begin.h
index 549f8782c52d..a200b86eba3b 100644
--- a/arch/powerpc/perf/req-gen/_begin.h
+++ b/arch/powerpc/perf/req-gen/_begin.h
@@ -3,6 +3,8 @@
#ifndef POWERPC_PERF_REQ_GEN_H_
#define POWERPC_PERF_REQ_GEN_H_
+#include <linux/stringify.h>
+
#define CAT2_STR_(t, s) __stringify(t/s)
#define CAT2_STR(t, s) CAT2_STR_(t, s)
#define I(...) __VA_ARGS__
diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h
index 871a9a1766c2..fa9bc804e67a 100644
--- a/arch/powerpc/perf/req-gen/perf.h
+++ b/arch/powerpc/perf/req-gen/perf.h
@@ -3,6 +3,7 @@
#define LINUX_POWERPC_PERF_REQ_GEN_PERF_H_
#include <linux/perf_event.h>
+#include <linux/stringify.h>
#ifndef REQUEST_FILE
#error "REQUEST_FILE must be defined before including"
diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c
index 81b2cbce7df8..7c324eff2f22 100644
--- a/arch/powerpc/platforms/4xx/msi.c
+++ b/arch/powerpc/platforms/4xx/msi.c
@@ -146,13 +146,19 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
const u32 *sdr_addr;
dma_addr_t msi_phys;
void *msi_virt;
+ int err;
sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
if (!sdr_addr)
- return -1;
+ return -EINVAL;
- mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
- mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
+ msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
+ if (!msi_data)
+ return -EINVAL;
+
+ msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
+ if (!msi_mask)
+ return -EINVAL;
msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
if (!msi->msi_dev)
@@ -160,30 +166,30 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
msi->msi_regs = of_iomap(msi->msi_dev, 0);
if (!msi->msi_regs) {
- dev_err(&dev->dev, "of_iomap problem failed\n");
- return -ENOMEM;
+ dev_err(&dev->dev, "of_iomap failed\n");
+ err = -ENOMEM;
+ goto node_put;
}
dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
(u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
- if (!msi_virt)
- return -ENOMEM;
+ if (!msi_virt) {
+ err = -ENOMEM;
+ goto iounmap;
+ }
msi->msi_addr_hi = upper_32_bits(msi_phys);
msi->msi_addr_lo = lower_32_bits(msi_phys & 0xffffffff);
dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
msi->msi_addr_hi, msi->msi_addr_lo);
+ mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
+ mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
+
/* Program the Interrupt handler Termination addr registers */
out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
- msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
- if (!msi_data)
- return -1;
- msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
- if (!msi_mask)
- return -1;
/* Program MSI Expected data and Mask bits */
out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
@@ -191,6 +197,12 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);
return 0;
+
+iounmap:
+ iounmap(msi->msi_regs);
+node_put:
+ of_node_put(msi->msi_dev);
+ return err;
}
static int ppc4xx_of_msi_remove(struct platform_device *dev)
@@ -209,7 +221,6 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev)
msi_bitmap_free(&msi->bitmap);
iounmap(msi->msi_regs);
of_node_put(msi->msi_dev);
- kfree(msi);
return 0;
}
@@ -223,18 +234,16 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
- msi = kzalloc(sizeof(*msi), GFP_KERNEL);
- if (!msi) {
- dev_err(&dev->dev, "No memory for MSI structure\n");
+ msi = devm_kzalloc(&dev->dev, sizeof(*msi), GFP_KERNEL);
+ if (!msi)
return -ENOMEM;
- }
dev->dev.platform_data = msi;
/* Get MSI ranges */
err = of_address_to_resource(dev->dev.of_node, 0, &res);
if (err) {
dev_err(&dev->dev, "%pOF resource error!\n", dev->dev.of_node);
- goto error_out;
+ return err;
}
msi_irqs = of_irq_count(dev->dev.of_node);
@@ -243,7 +252,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
err = ppc4xx_setup_pcieh_hw(dev, res, msi);
if (err)
- goto error_out;
+ return err;
err = ppc4xx_msi_init_allocator(dev, msi);
if (err) {
@@ -256,7 +265,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
phb->controller_ops.setup_msi_irqs = ppc4xx_setup_msi_irqs;
phb->controller_ops.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
}
- return err;
+ return 0;
error_out:
ppc4xx_of_msi_remove(dev);
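
The ppc4xx MSI probe rework above replaces bare "return -1" exits with proper errno values and goto-based unwinding, so partially acquired resources are released in reverse order. A minimal sketch of the pattern, with malloc/free standing in for of_iomap/dma_alloc_coherent and their cleanup calls:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Acquire resources in order; on failure, jump to the label that
 * releases everything acquired so far, newest first. */
static int setup(void)
{
    void *regs, *buf;
    int err;

    regs = malloc(64);  /* stands in for of_iomap() */
    if (!regs)
        return -ENOMEM;

    buf = malloc(64);   /* stands in for dma_alloc_coherent() */
    if (!buf) {
        err = -ENOMEM;
        goto unmap;
    }

    /* ... program the hardware here ... */
    free(buf);
    return 0;

unmap:
    free(regs);         /* iounmap()/of_node_put() analogue */
    return err;
}

int main(void)
{
    printf("setup: %d\n", setup());
    return 0;
}
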
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index ff2f86fe5429..f40d48eab779 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_PPC_LITE5200) += lite5200.o
obj-$(CONFIG_PPC_MEDIA5200) += media5200.o
obj-$(CONFIG_PM) += mpc52xx_sleep.o mpc52xx_pm.o
-ifeq ($(CONFIG_PPC_LITE5200),y)
+ifdef CONFIG_PPC_LITE5200
obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o
endif
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index 31d3515672f3..b1d208ded981 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -117,7 +117,10 @@ int mpc52xx_pm_enter(suspend_state_t state)
u32 intr_main_mask;
void __iomem * irq_0x500 = (void __iomem *)CONFIG_KERNEL_START + 0x500;
unsigned long irq_0x500_stop = (unsigned long)irq_0x500 + mpc52xx_ds_cached_size;
- char saved_0x500[mpc52xx_ds_cached_size];
+ char saved_0x500[0x600-0x500];
+
+ if (WARN_ON(mpc52xx_ds_cached_size > sizeof(saved_0x500)))
+ return -ENOMEM;
/* disable all interrupts in PIC */
intr_main_mask = in_be32(&intr->main_mask);
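
The mpc52xx_pm_enter() hunk swaps a variable-length array for a fixed buffer plus a runtime bound check, making kernel stack usage known at compile time. A small sketch of the same transformation; names and sizes here are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>

static size_t cached_size = 0x80; /* discovered at runtime */

/* Fixed-size buffer with a bound check replaces "char saved[cached_size]". */
static int save_region(const char *src)
{
    char saved[0x600 - 0x500];    /* fixed upper bound, 256 bytes */

    if (cached_size > sizeof(saved))
        return -1;                /* WARN_ON + error in the kernel */

    memcpy(saved, src, cached_size);
    /* ... enter suspend, then copy saved[] back ... */
    return 0;
}

int main(void)
{
    char region[0x100] = { 0 };
    printf("save_region: %d\n", save_region(region));
    return 0;
}
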
diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
index 58fa3d319f1c..dac36ba82fea 100644
--- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c
+++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
@@ -9,8 +9,10 @@
* option) any later version.
*/
+#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void)
}
early_initcall(t1042rdb_diu_init);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index e6a1de521319..6c6a7c72cae4 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -74,7 +74,6 @@ config PPC_BOOK3S_64
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_NUMA_BALANCING
select IRQ_WORK
- select HAVE_KERNEL_XZ
config PPC_BOOK3E_64
bool "Embedded processors"
@@ -86,7 +85,6 @@ endchoice
choice
prompt "CPU selection"
- depends on PPC64
default GENERIC_CPU
help
This will create a kernel which is optimised for a particular CPU.
@@ -96,13 +94,17 @@ choice
config GENERIC_CPU
bool "Generic (POWER4 and above)"
- depends on !CPU_LITTLE_ENDIAN
+ depends on PPC64 && !CPU_LITTLE_ENDIAN
config GENERIC_CPU
bool "Generic (POWER8 and above)"
- depends on CPU_LITTLE_ENDIAN
+ depends on PPC64 && CPU_LITTLE_ENDIAN
select ARCH_HAS_FAST_MULTIPLIER
+config GENERIC_CPU
+ bool "Generic 32 bits powerpc"
+ depends on PPC32 && !PPC_8xx
+
config CELL_CPU
bool "Cell Broadband Engine"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
@@ -138,8 +140,37 @@ config E6500_CPU
bool "Freescale e6500"
depends on E500
+config 860_CPU
+ bool "8xx family"
+ depends on PPC_8xx
+
+config E300C2_CPU
+ bool "e300c2 (832x)"
+ depends on PPC_BOOK3S_32
+
+config E300C3_CPU
+ bool "e300c3 (831x)"
+ depends on PPC_BOOK3S_32
+
endchoice
+config TARGET_CPU_BOOL
+ bool
+ default !GENERIC_CPU
+
+config TARGET_CPU
+ string
+ depends on TARGET_CPU_BOOL
+ default "cell" if CELL_CPU
+ default "power5" if POWER5_CPU
+ default "power6" if POWER6_CPU
+ default "power7" if POWER7_CPU
+ default "power8" if POWER8_CPU
+ default "power9" if POWER9_CPU
+ default "860" if 860_CPU
+ default "e300c2" if E300C2_CPU
+ default "e300c3" if E300C3_CPU
+
config PPC_BOOK3S
def_bool y
depends on PPC_BOOK3S_32 || PPC_BOOK3S_64
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index d5f808e8a5f3..10064a33ca96 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o
obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
-ifeq ($(CONFIG_SMP),y)
+ifdef CONFIG_SMP
obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
endif
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 2c15ff094483..55aac74e1cb9 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -49,6 +49,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
+#include <linux/stringify.h>
#include <asm/spu.h>
#include <asm/io.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.h b/arch/powerpc/platforms/cell/spufs/sputrace.h
index d557e999b662..1def11e911ac 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.h
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.h
@@ -3,6 +3,7 @@
#define _TRACE_SPUFS_H
#include <linux/tracepoint.h>
+#include <linux/stringify.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM spufs
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index c3ede2c365c3..791b86398e1d 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -11,6 +11,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
@@ -89,3 +90,5 @@ void __init chrp_nvram_init(void)
return;
}
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 8bb46dcbebd8..403523c061ba 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -67,16 +67,6 @@ void __init wii_memory_fixups(void)
{
struct memblock_region *p = memblock.memory.regions;
- /*
- * This is part of a workaround to allow the use of two
- * discontinuous RAM ranges on the Wii, even if this is
- * currently unsupported on 32-bit PowerPC Linux.
- *
- * We coalesce the two memory ranges of the Wii into a
- * single range, then create a reservation for the "hole"
- * between both ranges.
- */
-
BUG_ON(memblock.memory.cnt != 2);
BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 2c72263ad6ab..c80f72c370ae 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -531,7 +531,7 @@ int pasemi_dma_init(void)
iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
if (!iob_pdev) {
BUG();
- printk(KERN_WARNING "Can't find I/O Bridge\n");
+ pr_warn("Can't find I/O Bridge\n");
err = -ENODEV;
goto out;
}
@@ -540,7 +540,7 @@ int pasemi_dma_init(void)
dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
if (!dma_pdev) {
BUG();
- printk(KERN_WARNING "Can't find DMA controller\n");
+ pr_warn("Can't find DMA controller\n");
err = -ENODEV;
goto out;
}
@@ -623,7 +623,7 @@ int pasemi_dma_init(void)
pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);
- printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
+ pr_info("PA Semi PWRficient DMA library initialized "
"(%d tx, %d rx channels)\n", num_txch, num_rxch);
out:
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index c23e60959aa8..6f35a2afe522 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -256,7 +256,7 @@ static int gpio_mdio_probe(struct platform_device *ofdev)
err = of_mdiobus_register(new_bus, np);
if (err != 0) {
- printk(KERN_ERR "%s: Cannot register as MDIO bus, err %d\n",
+ pr_err("%s: Cannot register as MDIO bus, err %d\n",
new_bus->name, err);
goto out_free_irq;
}
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 44e0d9226f0a..8bb4e8082441 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -78,13 +78,13 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
static int __init pasemi_idle_init(void)
{
#ifndef CONFIG_PPC_PASEMI_CPUFREQ
- printk(KERN_WARNING "No cpufreq driver, powersavings modes disabled\n");
+ pr_warn("No cpufreq driver, powersavings modes disabled\n");
current_mode = 0;
#endif
ppc_md.system_reset_exception = pasemi_system_reset_exception;
ppc_md.power_save = modes[current_mode].entry;
- printk(KERN_INFO "Using PA6T idle loop (%s)\n", modes[current_mode].name);
+ pr_info("Using PA6T idle loop (%s)\n", modes[current_mode].name);
return 0;
}
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 78b80cbd9768..f06c83f321e6 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -210,7 +210,7 @@ static int __init iob_init(struct device_node *dn)
/* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
- printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
+ pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
/* Allocate a spare page to map all invalid IOTLB pages. */
tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
diff --git a/arch/powerpc/platforms/pasemi/misc.c b/arch/powerpc/platforms/pasemi/misc.c
index 8571e7bf78b6..aa958a46957f 100644
--- a/arch/powerpc/platforms/pasemi/misc.c
+++ b/arch/powerpc/platforms/pasemi/misc.c
@@ -69,9 +69,7 @@ static int __init pasemi_register_i2c_devices(void)
addr = of_get_property(node, "reg", &len);
if (!addr || len < sizeof(int) ||
*addr > (1 << 10) - 1) {
- printk(KERN_WARNING
- "pasemi_register_i2c_devices: "
- "invalid i2c device entry\n");
+ pr_warn("pasemi_register_i2c_devices: invalid i2c device entry\n");
continue;
}
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index aea9ff2c8e6d..c3c64172482d 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -205,7 +205,7 @@ static int __init pas_add_bridge(struct device_node *dev)
setup_pa_pxp(hose);
- printk(KERN_INFO "Found PA-PXP PCI host bridge.\n");
+ pr_info("Found PA-PXP PCI host bridge.\n");
/* Interpret the "ranges" property */
pci_process_bridge_OF_ranges(hose, dev, 1);
@@ -216,21 +216,21 @@ static int __init pas_add_bridge(struct device_node *dev)
void __init pas_pci_init(void)
{
struct device_node *np, *root;
+ int res;
root = of_find_node_by_path("/");
if (!root) {
- printk(KERN_CRIT "pas_pci_init: can't find root "
- "of device tree\n");
+ pr_crit("pas_pci_init: can't find root of device tree\n");
return;
}
pci_set_flags(PCI_SCAN_ALL_PCIE_DEVS);
- for (np = NULL; (np = of_get_next_child(root, np)) != NULL;)
- if (np->name && !strcmp(np->name, "pxp") && !pas_add_bridge(np))
- of_node_get(np);
-
- of_node_put(root);
+ np = of_find_compatible_node(root, NULL, "pasemi,rootbus");
+ if (np) {
+ res = pas_add_bridge(np);
+ of_node_put(np);
+ }
}
void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index d0b8ae53660d..9a6eb04cca83 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -207,8 +207,7 @@ static __init void pas_init_IRQ(void)
break;
}
if (!mpic_node) {
- printk(KERN_ERR
- "Failed to locate the MPIC interrupt controller\n");
+ pr_err("Failed to locate the MPIC interrupt controller\n");
return;
}
@@ -217,12 +216,12 @@ static __init void pas_init_IRQ(void)
naddr = of_n_addr_cells(root);
opprop = of_get_property(root, "platform-open-pic", &opplen);
if (!opprop) {
- printk(KERN_ERR "No platform-open-pic property.\n");
+ pr_err("No platform-open-pic property.\n");
of_node_put(root);
return;
}
openpic_addr = of_read_number(opprop, naddr);
- printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
+ pr_debug("OpenPIC addr: %lx\n", openpic_addr);
mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS | MPIC_NO_RESET;
@@ -265,72 +264,72 @@ static int pas_machine_check_handler(struct pt_regs *regs)
srr1 = regs->msr;
if (nmi_virq && mpic_get_mcirq() == nmi_virq) {
- printk(KERN_ERR "NMI delivered\n");
+ pr_err("NMI delivered\n");
debugger(regs);
mpic_end_irq(irq_get_irq_data(nmi_virq));
goto out;
}
dsisr = mfspr(SPRN_DSISR);
- printk(KERN_ERR "Machine Check on CPU %d\n", cpu);
- printk(KERN_ERR "SRR0 0x%016lx SRR1 0x%016lx\n", srr0, srr1);
- printk(KERN_ERR "DSISR 0x%016lx DAR 0x%016lx\n", dsisr, regs->dar);
- printk(KERN_ERR "BER 0x%016lx MER 0x%016lx\n", mfspr(SPRN_PA6T_BER),
+ pr_err("Machine Check on CPU %d\n", cpu);
+ pr_err("SRR0 0x%016lx SRR1 0x%016lx\n", srr0, srr1);
+ pr_err("DSISR 0x%016lx DAR 0x%016lx\n", dsisr, regs->dar);
+ pr_err("BER 0x%016lx MER 0x%016lx\n", mfspr(SPRN_PA6T_BER),
mfspr(SPRN_PA6T_MER));
- printk(KERN_ERR "IER 0x%016lx DER 0x%016lx\n", mfspr(SPRN_PA6T_IER),
+ pr_err("IER 0x%016lx DER 0x%016lx\n", mfspr(SPRN_PA6T_IER),
mfspr(SPRN_PA6T_DER));
- printk(KERN_ERR "Cause:\n");
+ pr_err("Cause:\n");
if (srr1 & 0x200000)
- printk(KERN_ERR "Signalled by SDC\n");
+ pr_err("Signalled by SDC\n");
if (srr1 & 0x100000) {
- printk(KERN_ERR "Load/Store detected error:\n");
+ pr_err("Load/Store detected error:\n");
if (dsisr & 0x8000)
- printk(KERN_ERR "D-cache ECC double-bit error or bus error\n");
+ pr_err("D-cache ECC double-bit error or bus error\n");
if (dsisr & 0x4000)
- printk(KERN_ERR "LSU snoop response error\n");
+ pr_err("LSU snoop response error\n");
if (dsisr & 0x2000) {
- printk(KERN_ERR "MMU SLB multi-hit or invalid B field\n");
+ pr_err("MMU SLB multi-hit or invalid B field\n");
dump_slb = 1;
}
if (dsisr & 0x1000)
- printk(KERN_ERR "Recoverable Duptags\n");
+ pr_err("Recoverable Duptags\n");
if (dsisr & 0x800)
- printk(KERN_ERR "Recoverable D-cache parity error count overflow\n");
+ pr_err("Recoverable D-cache parity error count overflow\n");
if (dsisr & 0x400)
- printk(KERN_ERR "TLB parity error count overflow\n");
+ pr_err("TLB parity error count overflow\n");
}
if (srr1 & 0x80000)
- printk(KERN_ERR "Bus Error\n");
+ pr_err("Bus Error\n");
if (srr1 & 0x40000) {
- printk(KERN_ERR "I-side SLB multiple hit\n");
+ pr_err("I-side SLB multiple hit\n");
dump_slb = 1;
}
if (srr1 & 0x20000)
- printk(KERN_ERR "I-cache parity error hit\n");
+ pr_err("I-cache parity error hit\n");
if (num_mce_regs == 0)
- printk(KERN_ERR "No MCE registers mapped yet, can't dump\n");
+ pr_err("No MCE registers mapped yet, can't dump\n");
else
- printk(KERN_ERR "SoC debug registers:\n");
+ pr_err("SoC debug registers:\n");
for (i = 0; i < num_mce_regs; i++)
- printk(KERN_ERR "%s: 0x%08x\n", mce_regs[i].name,
+ pr_err("%s: 0x%08x\n", mce_regs[i].name,
in_le32(mce_regs[i].addr));
if (dump_slb) {
unsigned long e, v;
int i;
- printk(KERN_ERR "slb contents:\n");
+ pr_err("slb contents:\n");
for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
- printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
+ pr_err("%02d %016lx %016lx\n", i, e, v);
}
}
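
The machine-check handler above is essentially a syndrome decoder: it tests fixed bit positions in SRR1 and DSISR and prints a diagnostic for each set bit. Below is a minimal userspace sketch of that decoding, reusing the masks from the handler verbatim; the harness around them is invented and only a subset of the bits is shown.

#include <stdio.h>
#include <stdint.h>

/* Decode a PA6T machine-check syndrome, using the handler's masks. */
static void decode_mce(uint64_t srr1, uint64_t dsisr)
{
	if (srr1 & 0x200000)
		printf("Signalled by SDC\n");
	if (srr1 & 0x100000) {
		printf("Load/Store detected error:\n");
		if (dsisr & 0x8000)
			printf("  D-cache ECC double-bit error or bus error\n");
		if (dsisr & 0x2000)
			printf("  MMU SLB multi-hit or invalid B field\n");
	}
	if (srr1 & 0x80000)
		printf("Bus Error\n");
	if (srr1 & 0x40000)
		printf("I-side SLB multiple hit\n");
}

int main(void)
{
	/* Example syndrome: load/store error plus an SLB multi-hit. */
	decode_mce(0x100000, 0x2000);
	return 0;
}
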
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
index cc5347eb1662..27862feee4a5 100644
--- a/arch/powerpc/platforms/powermac/cache.S
+++ b/arch/powerpc/platforms/powermac/cache.S
@@ -17,6 +17,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
+#include <asm/feature-fixups.h>
/*
* Flush and disable all data caches (dL1, L2, L3). This is used
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 3f82cb24eb2b..4eb8cb38fc69 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2889,10 +2889,8 @@ set_initial_features(void)
/* On all machines, switch modem & serial ports off */
for_each_node_by_name(np, "ch-a")
initial_serial_shutdown(np);
- of_node_put(np);
for_each_node_by_name(np, "ch-b")
initial_serial_shutdown(np);
- of_node_put(np);
}
void __init
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index df762bb3c735..04527d13d5a4 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -781,12 +781,12 @@ static int __init pmac_add_bridge(struct device_node *dev)
struct resource rsrc;
char *disp_name;
const int *bus_range;
- int primary = 1, has_address = 0;
+ int primary = 1;
DBG("Adding PCI host bridge %pOF\n", dev);
/* Fetch host bridge registers address */
- has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
+ of_address_to_resource(dev, 0, &rsrc);
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
@@ -904,7 +904,7 @@ static int pmac_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
void __init pmac_pci_init(void)
{
struct device_node *np, *root;
- struct device_node *ht = NULL;
+ struct device_node *ht __maybe_unused = NULL;
pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);
@@ -1019,7 +1019,7 @@ static bool pmac_pci_enable_device_hook(struct pci_dev *dev)
return true;
}
-void pmac_pci_fixup_ohci(struct pci_dev *dev)
+static void pmac_pci_fixup_ohci(struct pci_dev *dev)
{
struct device_node *node = pci_device_to_OF_node(dev);
@@ -1054,7 +1054,7 @@ void __init pmac_pcibios_after_init(void)
}
}
-void pmac_pci_fixup_cardbus(struct pci_dev* dev)
+static void pmac_pci_fixup_cardbus(struct pci_dev *dev)
{
if (!machine_is(powermac))
return;
@@ -1091,7 +1091,7 @@ void pmac_pci_fixup_cardbus(struct pci_dev* dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
-void pmac_pci_fixup_pciata(struct pci_dev* dev)
+static void pmac_pci_fixup_pciata(struct pci_dev *dev)
{
u8 progif = 0;
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index 1c2802fabd57..f89808b9713d 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -18,6 +18,7 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
+#include <asm/feature-fixups.h>
#define MAGIC 0x4c617273 /* 'Lars' */
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 12e6e4d30602..f92c1918fb56 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -34,6 +34,8 @@
#include <asm/nvram.h>
#include <asm/smu.h>
+#include "pmac.h"
+
#undef DEBUG
#ifdef DEBUG
@@ -249,7 +251,7 @@ int pmac_set_rtc_time(struct rtc_time *tm)
* Calibrate the decrementer register using VIA timer 1.
* This is used both on powermacs and CHRP machines.
*/
-int __init via_calibrate_decr(void)
+static int __init via_calibrate_decr(void)
{
struct device_node *vias;
volatile unsigned char __iomem *via;
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index d83135a9830e..8901973ed683 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -73,7 +73,7 @@ void udbg_scc_init(int force_scc)
struct device_node *stdout = NULL, *escc = NULL, *macio = NULL;
struct device_node *ch, *ch_def = NULL, *ch_a = NULL;
const char *path;
- int i, x;
+ int i;
escc = of_find_node_by_name(NULL, "escc");
if (escc == NULL)
@@ -120,7 +120,7 @@ void udbg_scc_init(int force_scc)
mb();
for (i = 20000; i != 0; --i)
- x = in_8(sccc);
+ in_8(sccc);
out_8(sccc, 0x09); /* reset A or B side */
out_8(sccc, 0xc0);
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 703a350a7f4e..b540ce8eec55 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -6,7 +6,7 @@ obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o
obj-y += opal-kmsg.o opal-powercap.o opal-psr.o opal-sensor-groups.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
-obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o
+obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o
obj-$(CONFIG_CXL_BASE) += pci-cxl.o
obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index ddfc3544d285..3c1beae29f2d 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -223,6 +223,14 @@ int pnv_eeh_post_init(void)
eeh_probe_devices();
eeh_addr_cache_build();
+ if (eeh_has_flag(EEH_POSTPONED_PROBE)) {
+ eeh_clear_flag(EEH_POSTPONED_PROBE);
+ if (eeh_enabled())
+ pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
+ else
+ pr_info("EEH: No capable adapters found\n");
+ }
+
/* Register OPAL event notifier */
eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
if (eeh_event_irq < 0) {
@@ -384,8 +392,10 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
return NULL;
/* Skip if we haven't probed yet */
- if (phb->ioda.pe_rmap[config_addr] == IODA_INVALID_PE)
+ if (phb->ioda.pe_rmap[config_addr] == IODA_INVALID_PE) {
+ eeh_add_flag(EEH_POSTPONED_PROBE);
return NULL;
+ }
/* Initialize eeh device */
edev->class_code = pdn->class_code;
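
The two eeh-powernv.c hunks above cooperate through a flag: the per-device probe records that at least one PE was not yet initialised instead of failing outright, and pnv_eeh_post_init() later consumes the flag and reports the overall outcome exactly once. A toy model of that handshake, with an invented flag value and helpers:

#include <stdio.h>

#define POSTPONED 0x1	/* stand-in for EEH_POSTPONED_PROBE */

static unsigned int flags;

static void probe_one(int ready)
{
	if (!ready)
		flags |= POSTPONED;	/* remember, don't fail the probe */
}

static void post_init(int enabled)
{
	if (flags & POSTPONED) {
		flags &= ~POSTPONED;
		printf("%s\n", enabled ? "EEH enabled" : "no capable adapters");
	}
}

int main(void)
{
	probe_one(0);	/* a PE that hasn't been probed yet */
	post_init(1);
	return 0;
}
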
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 1c5d0675b43c..35f699ebb662 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -36,6 +36,8 @@
#define P9_STOP_SPR_PSSCR 855
static u32 supported_cpuidle_states;
+struct pnv_idle_states_t *pnv_idle_states;
+int nr_pnv_idle_states;
/*
* The default stop state that will be used by ppc_md.power_save
@@ -177,11 +179,6 @@ static void pnv_alloc_idle_core_states(void)
paca_ptrs[cpu]->core_idle_state_ptr = core_idle_state;
paca_ptrs[cpu]->thread_idle_state = PNV_THREAD_RUNNING;
paca_ptrs[cpu]->thread_mask = 1 << j;
- if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
- continue;
- paca_ptrs[cpu]->thread_sibling_pacas =
- kmalloc_node(paca_ptr_array_size,
- GFP_KERNEL, node);
}
}
@@ -622,48 +619,10 @@ int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
* @dt_idle_states: Number of idle state entries
* Returns 0 on success
*/
-static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
- int dt_idle_states)
+static int __init pnv_power9_idle_init(void)
{
- u64 *psscr_val = NULL;
- u64 *psscr_mask = NULL;
- u32 *residency_ns = NULL;
u64 max_residency_ns = 0;
- int rc = 0, i;
-
- psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val), GFP_KERNEL);
- psscr_mask = kcalloc(dt_idle_states, sizeof(*psscr_mask), GFP_KERNEL);
- residency_ns = kcalloc(dt_idle_states, sizeof(*residency_ns),
- GFP_KERNEL);
-
- if (!psscr_val || !psscr_mask || !residency_ns) {
- rc = -1;
- goto out;
- }
-
- if (of_property_read_u64_array(np,
- "ibm,cpu-idle-state-psscr",
- psscr_val, dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
- rc = -1;
- goto out;
- }
-
- if (of_property_read_u64_array(np,
- "ibm,cpu-idle-state-psscr-mask",
- psscr_mask, dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
- rc = -1;
- goto out;
- }
-
- if (of_property_read_u32_array(np,
- "ibm,cpu-idle-state-residency-ns",
- residency_ns, dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
- rc = -1;
- goto out;
- }
+ int i;
/*
* Set pnv_first_deep_stop_state, pnv_deepest_stop_psscr_{val,mask},
@@ -679,33 +638,37 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
* the shallowest (OPAL_PM_STOP_INST_FAST) loss-less stop state.
*/
pnv_first_deep_stop_state = MAX_STOP_STATE;
- for (i = 0; i < dt_idle_states; i++) {
+ for (i = 0; i < nr_pnv_idle_states; i++) {
int err;
- u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK;
+ struct pnv_idle_states_t *state = &pnv_idle_states[i];
+ u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;
- if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) &&
- (pnv_first_deep_stop_state > psscr_rl))
+ if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
+ pnv_first_deep_stop_state > psscr_rl)
pnv_first_deep_stop_state = psscr_rl;
- err = validate_psscr_val_mask(&psscr_val[i], &psscr_mask[i],
- flags[i]);
+ err = validate_psscr_val_mask(&state->psscr_val,
+ &state->psscr_mask,
+ state->flags);
if (err) {
- report_invalid_psscr_val(psscr_val[i], err);
+ report_invalid_psscr_val(state->psscr_val, err);
continue;
}
- if (max_residency_ns < residency_ns[i]) {
- max_residency_ns = residency_ns[i];
- pnv_deepest_stop_psscr_val = psscr_val[i];
- pnv_deepest_stop_psscr_mask = psscr_mask[i];
- pnv_deepest_stop_flag = flags[i];
+ state->valid = true;
+
+ if (max_residency_ns < state->residency_ns) {
+ max_residency_ns = state->residency_ns;
+ pnv_deepest_stop_psscr_val = state->psscr_val;
+ pnv_deepest_stop_psscr_mask = state->psscr_mask;
+ pnv_deepest_stop_flag = state->flags;
deepest_stop_found = true;
}
if (!default_stop_found &&
- (flags[i] & OPAL_PM_STOP_INST_FAST)) {
- pnv_default_stop_val = psscr_val[i];
- pnv_default_stop_mask = psscr_mask[i];
+ (state->flags & OPAL_PM_STOP_INST_FAST)) {
+ pnv_default_stop_val = state->psscr_val;
+ pnv_default_stop_mask = state->psscr_mask;
default_stop_found = true;
}
}
@@ -728,11 +691,8 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
pr_info("cpuidle-powernv: Requested Level (RL) value of first deep stop = 0x%llx\n",
pnv_first_deep_stop_state);
-out:
- kfree(psscr_val);
- kfree(psscr_mask);
- kfree(residency_ns);
- return rc;
+
+ return 0;
}
/*
@@ -740,50 +700,146 @@ out:
*/
static void __init pnv_probe_idle_states(void)
{
- struct device_node *np;
- int dt_idle_states;
- u32 *flags = NULL;
int i;
+ if (nr_pnv_idle_states < 0) {
+ pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ return;
+ }
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (pnv_power9_idle_init())
+ return;
+ }
+
+ for (i = 0; i < nr_pnv_idle_states; i++)
+ supported_cpuidle_states |= pnv_idle_states[i].flags;
+}
+
+/*
+ * This function parses the device tree and populates all the information
+ * into the pnv_idle_states structure. It also sets nr_pnv_idle_states,
+ * the number of cpuidle states discovered through the device tree.
+ */
+
+static int pnv_parse_cpuidle_dt(void)
+{
+ struct device_node *np;
+ int nr_idle_states, i;
+ int rc = 0;
+ u32 *temp_u32;
+ u64 *temp_u64;
+ const char **temp_string;
+
np = of_find_node_by_path("/ibm,opal/power-mgt");
if (!np) {
pr_warn("opal: PowerMgmt Node not found\n");
- goto out;
+ return -ENODEV;
}
- dt_idle_states = of_property_count_u32_elems(np,
- "ibm,cpu-idle-state-flags");
- if (dt_idle_states < 0) {
- pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ nr_idle_states = of_property_count_u32_elems(np,
+ "ibm,cpu-idle-state-flags");
+
+ pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
+ GFP_KERNEL);
+ temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
+ temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
+ temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);
+
+ if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
+ pr_err("Could not allocate memory for dt parsing\n");
+ rc = -ENOMEM;
goto out;
}
- flags = kcalloc(dt_idle_states, sizeof(*flags), GFP_KERNEL);
-
- if (of_property_read_u32_array(np,
- "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+ /* Read flags */
+ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
+ temp_u32, nr_idle_states)) {
pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].flags = temp_u32[i];
+
+ /* Read latencies */
+ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
+ temp_u32, nr_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].latency_ns = temp_u32[i];
+
+ /* Read residencies */
+ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
+ temp_u32, nr_idle_states)) {
+		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
+ rc = -EINVAL;
goto out;
}
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].residency_ns = temp_u32[i];
+ /* For power9 */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (pnv_power9_idle_init(np, flags, dt_idle_states))
+		/* Read pm_ctrl_val */
+ if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
+ temp_u64, nr_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].psscr_val = temp_u64[i];
+
+		/* Read pm_ctrl_mask */
+ if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
+ temp_u64, nr_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
+ rc = -EINVAL;
goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ pnv_idle_states[i].psscr_mask = temp_u64[i];
}
- for (i = 0; i < dt_idle_states; i++)
- supported_cpuidle_states |= flags[i];
+ /*
+	 * The power8 specific properties ibm,cpu-idle-state-pmicr-mask and
+	 * ibm,cpu-idle-state-pmicr-val were never used and there is no
+	 * plan to use them in the near future, so they are not parsed here.
+ */
+ if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
+ temp_string, nr_idle_states) < 0) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < nr_idle_states; i++)
+ strlcpy(pnv_idle_states[i].name, temp_string[i],
+ PNV_IDLE_NAME_LEN);
+ nr_pnv_idle_states = nr_idle_states;
+ rc = 0;
out:
- kfree(flags);
+ kfree(temp_u32);
+ kfree(temp_u64);
+ kfree(temp_string);
+ return rc;
}
+
static int __init pnv_init_idle_states(void)
{
-
+ int rc = 0;
supported_cpuidle_states = 0;
+ /* In case we error out nr_pnv_idle_states will be zero */
+ nr_pnv_idle_states = 0;
if (cpuidle_disable != IDLE_NO_OVERRIDE)
goto out;
-
+ rc = pnv_parse_cpuidle_dt();
+ if (rc)
+ return rc;
pnv_probe_idle_states();
if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -805,29 +861,6 @@ static int __init pnv_init_idle_states(void)
pnv_alloc_idle_core_states();
- /*
- * For each CPU, record its PACA address in each of it's
- * sibling thread's PACA at the slot corresponding to this
- * CPU's index in the core.
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- int cpu;
-
- pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
- for_each_present_cpu(cpu) {
- int base_cpu = cpu_first_thread_sibling(cpu);
- int idx = cpu_thread_in_core(cpu);
- int i;
-
- for (i = 0; i < threads_per_core; i++) {
- int j = base_cpu + i;
-
- paca_ptrs[j]->thread_sibling_pacas[idx] =
- paca_ptrs[cpu];
- }
- }
- }
-
if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
ppc_md.power_save = power7_idle;
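
pnv_parse_cpuidle_dt() above follows a parse-then-scatter pattern: each device-tree property is a flat array with one element per idle state, so it is read into a scratch array and then copied field-by-field into pnv_idle_states. A compressed userspace sketch of the same pattern; the struct and the sample data are illustrative stand-ins for the kernel definitions and the "ibm,cpu-idle-state-*" properties.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct idle_state {
	char name[16];
	uint32_t flags;
	uint32_t residency_ns;
};

int main(void)
{
	/* Pretend these arrays came from of_property_read_*_array(). */
	const char *names[] = { "nap", "sleep" };
	const uint32_t flags[] = { 0x1, 0x2 };
	const uint32_t residency[] = { 100, 30000 };
	const int nr = 2;
	struct idle_state *states;
	int i;

	states = calloc(nr, sizeof(*states));
	if (!states)
		return 1;

	/* Scatter each flat property into the per-state structure. */
	for (i = 0; i < nr; i++) {
		snprintf(states[i].name, sizeof(states[i].name), "%s", names[i]);
		states[i].flags = flags[i];
		states[i].residency_ns = residency[i];
	}

	for (i = 0; i < nr; i++)
		printf("%s: flags=0x%x residency=%u ns\n", states[i].name,
		       states[i].flags, states[i].residency_ns);
	free(states);
	return 0;
}
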
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index b99283df8584..51dc398ae3f7 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -47,38 +47,9 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}
-static bool valid_memtrace_range(struct memtrace_entry *dev,
- unsigned long start, unsigned long size)
-{
- if ((start >= dev->start) &&
- ((start + size) <= (dev->start + dev->size)))
- return true;
-
- return false;
-}
-
-static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- unsigned long size = vma->vm_end - vma->vm_start;
- struct memtrace_entry *dev = filp->private_data;
-
- if (!valid_memtrace_range(dev, vma->vm_pgoff << PAGE_SHIFT, size))
- return -EINVAL;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff + (dev->start >> PAGE_SHIFT),
- size, vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
static const struct file_operations memtrace_fops = {
.llseek = default_llseek,
.read = memtrace_read,
- .mmap = memtrace_mmap,
.open = simple_open,
};
@@ -206,8 +177,11 @@ static int memtrace_init_debugfs(void)
snprintf(ent->name, 16, "%08x", ent->nid);
dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
- if (!dir)
+ if (!dir) {
+ pr_err("Failed to create debugfs directory for node %d\n",
+ ent->nid);
return -1;
+ }
ent->dir = dir;
debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
@@ -218,18 +192,93 @@ static int memtrace_init_debugfs(void)
return ret;
}
+static int online_mem_block(struct memory_block *mem, void *arg)
+{
+ return device_online(&mem->dev);
+}
+
+/*
+ * Iterate through the chunks of memory we have removed from the kernel
+ * and attempt to add them back to the kernel.
+ */
+static int memtrace_online(void)
+{
+ int i, ret = 0;
+ struct memtrace_entry *ent;
+
+ for (i = memtrace_array_nr - 1; i >= 0; i--) {
+ ent = &memtrace_array[i];
+
+ /* We have onlined this chunk previously */
+ if (ent->nid == -1)
+ continue;
+
+ /* Remove from io mappings */
+ if (ent->mem) {
+ iounmap(ent->mem);
+ ent->mem = 0;
+ }
+
+ if (add_memory(ent->nid, ent->start, ent->size)) {
+ pr_err("Failed to add trace memory to node %d\n",
+ ent->nid);
+ ret += 1;
+ continue;
+ }
+
+ /*
+		 * If the kernel isn't compiled with the auto-online option,
+ * we need to online the memory ourselves.
+ */
+ if (!memhp_auto_online) {
+ walk_memory_range(PFN_DOWN(ent->start),
+ PFN_UP(ent->start + ent->size - 1),
+ NULL, online_mem_block);
+ }
+
+ /*
+ * Memory was added successfully so clean up references to it
+ * so on reentry we can tell that this chunk was added.
+ */
+ debugfs_remove_recursive(ent->dir);
+ pr_info("Added trace memory back to node %d\n", ent->nid);
+ ent->size = ent->start = ent->nid = -1;
+ }
+ if (ret)
+ return ret;
+
+ /* If all chunks of memory were added successfully, reset globals */
+ kfree(memtrace_array);
+ memtrace_array = NULL;
+ memtrace_size = 0;
+ memtrace_array_nr = 0;
+ return 0;
+}
+
static int memtrace_enable_set(void *data, u64 val)
{
- if (memtrace_size)
+ u64 bytes;
+
+ /*
+ * Don't attempt to do anything if size isn't aligned to a memory
+ * block or equal to zero.
+ */
+ bytes = memory_block_size_bytes();
+ if (val & (bytes - 1)) {
+ pr_err("Value must be aligned with 0x%llx\n", bytes);
return -EINVAL;
+ }
- if (!val)
- return -EINVAL;
+ /* Re-add/online previously removed/offlined memory */
+ if (memtrace_size) {
+ if (memtrace_online())
+ return -EAGAIN;
+ }
- /* Make sure size is aligned to a memory block */
- if (val & (memory_block_size_bytes() - 1))
- return -EINVAL;
+ if (!val)
+ return 0;
+ /* Offline and remove memory */
if (memtrace_init_regions_runtime(val))
return -EINVAL;
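
The new size check in memtrace_enable_set() relies on memory block sizes being powers of two: for such a size, val & (bytes - 1) is non-zero exactly when val is not a multiple of bytes, since bytes - 1 has all the low-order bits set. A quick standalone demonstration, with a made-up block size:

#include <stdio.h>
#include <stdint.h>

static int is_block_aligned(uint64_t val, uint64_t bytes)
{
	/* Valid only when bytes is a power of two. */
	return (val & (bytes - 1)) == 0;
}

int main(void)
{
	const uint64_t block = 1ULL << 24;	/* pretend 16 MiB blocks */

	printf("%d\n", is_block_aligned(3 * block, block));	/* 1 */
	printf("%d\n", is_block_aligned(block + 4096, block));	/* 0 */
	printf("%d\n", is_block_aligned(0, block));		/* 1: zero passes */
	return 0;
}

Note that zero passes the alignment test, which is what lets val == 0 act as the "disable" request later in the function.
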
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 8cdf91f5d3a4..8006c54a91e3 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -17,7 +17,9 @@
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
+#include <linux/debugfs.h>
+#include <asm/debugfs.h>
#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/reg.h>
@@ -44,7 +46,8 @@ static DEFINE_SPINLOCK(npu_context_lock);
* entire TLB on the GPU for the given PID rather than each specific address in
* the range.
*/
-#define ATSD_THRESHOLD (2*1024*1024)
+static uint64_t atsd_threshold = 2 * 1024 * 1024;
+static struct dentry *atsd_threshold_dentry;
/*
* Other types of TCE cache invalidation are not functional in the
@@ -437,8 +440,9 @@ static int get_mmio_atsd_reg(struct npu *npu)
int i;
for (i = 0; i < npu->mmio_atsd_count; i++) {
- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
- return i;
+ if (!test_bit(i, &npu->mmio_atsd_usage))
+ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
+ return i;
}
return -ENOSPC;
@@ -683,7 +687,7 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
struct npu_context *npu_context = mn_to_npu_context(mn);
unsigned long address;
- if (end - start > ATSD_THRESHOLD) {
+ if (end - start > atsd_threshold) {
/*
* Just invalidate the entire PID if the address range is too
* large.
@@ -958,6 +962,11 @@ int pnv_npu2_init(struct pnv_phb *phb)
static int npu_index;
uint64_t rc = 0;
+ if (!atsd_threshold_dentry) {
+ atsd_threshold_dentry = debugfs_create_x64("atsd_threshold",
+ 0600, powerpc_debugfs_root, &atsd_threshold);
+ }
+
phb->npu.nmmu_flush =
of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
for_each_child_of_node(phb->hose->dn, dn) {
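
The get_mmio_atsd_reg() change above puts a plain test_bit() in front of the atomic test_and_set_bit_lock(), so CPUs spinning on a busy register set mostly perform cheap shared reads instead of contending on the cache line with read-modify-write operations. A userspace sketch of that test-then-test-and-set pattern using C11 atomics; the register count is invented.

#include <stdio.h>
#include <stdatomic.h>

#define NR_REGS 8

static atomic_ulong usage;

static int get_reg(void)
{
	unsigned long old, bit;
	int i;

	for (i = 0; i < NR_REGS; i++) {
		bit = 1UL << i;
		/* Cheap shared read first; skip bits that look busy. */
		if (atomic_load_explicit(&usage, memory_order_relaxed) & bit)
			continue;
		/* Only then pay for the atomic read-modify-write. */
		old = atomic_fetch_or(&usage, bit);
		if (!(old & bit))
			return i;	/* the bit was ours to take */
	}
	return -1;
}

int main(void)
{
	printf("got reg %d\n", get_reg());
	printf("got reg %d\n", get_reg());
	return 0;
}
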
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 0dc8fa4e0af2..198143833f00 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -225,13 +225,16 @@ static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *
if (rc == OPAL_PARAMETER)
rc = opal_dump_info(&id, &size);
+ if (rc) {
+ pr_warn("%s: Failed to get dump info (%d)\n",
+ __func__, rc);
+ return rc;
+ }
+
*dump_id = be32_to_cpu(id);
*dump_size = be32_to_cpu(size);
*dump_type = be32_to_cpu(type);
- if (rc)
- pr_warn("%s: Failed to get dump info (%d)\n",
- __func__, rc);
return rc;
}
@@ -368,13 +371,12 @@ static irqreturn_t process_dump(int irq, void *data)
{
int rc;
uint32_t dump_id, dump_size, dump_type;
- struct dump_obj *dump;
char name[22];
struct kobject *kobj;
rc = dump_read_info(&dump_id, &dump_size, &dump_type);
if (rc != OPAL_SUCCESS)
- return rc;
+ return IRQ_HANDLED;
sprintf(name, "0x%x-0x%x", dump_type, dump_id);
@@ -386,12 +388,10 @@ static irqreturn_t process_dump(int irq, void *data)
if (kobj) {
/* Drop reference added by kset_find_obj() */
kobject_put(kobj);
- return 0;
+ return IRQ_HANDLED;
}
- dump = create_dump_obj(dump_id, dump_size, dump_type);
- if (!dump)
- return -1;
+ create_dump_obj(dump_id, dump_size, dump_type);
return IRQ_HANDLED;
}
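
Both opal-dump.c hunks are contract fixes: dump_read_info() must not decode its output parameters until the firmware call has succeeded, and process_dump(), being an interrupt handler, has to return an irqreturn_t value rather than an OPAL status or -1. A toy model of the handler constraint; the helper and its return convention are stand-ins.

#include <stdio.h>

enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

static int read_info(int *id)	/* stand-in for dump_read_info() */
{
	*id = 42;
	return 0;		/* 0 == success */
}

static enum irqreturn handler(void)
{
	int id;

	if (read_info(&id) != 0)
		return IRQ_HANDLED;	/* report handled, never an errno */

	printf("dump 0x%x queued\n", id);
	return IRQ_HANDLED;
}

int main(void)
{
	return handler() == IRQ_HANDLED ? 0 : 1;
}
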
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index 605c7e5d52c2..bc97770a67db 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -22,6 +22,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include <asm/machdep.h>
#include <asm/opal.h>
@@ -38,8 +39,8 @@ struct opal_event_irqchip {
};
static struct opal_event_irqchip opal_event_irqchip;
static u64 last_outstanding_events;
-static unsigned int opal_irq_count;
-static unsigned int *opal_irqs;
+static int opal_irq_count;
+static struct resource *opal_irqs;
void opal_handle_events(void)
{
@@ -165,24 +166,23 @@ void opal_event_shutdown(void)
/* First free interrupts, which will also mask them */
for (i = 0; i < opal_irq_count; i++) {
- if (!opal_irqs[i])
+ if (!opal_irqs || !opal_irqs[i].start)
continue;
if (in_interrupt() || irqs_disabled())
- disable_irq_nosync(opal_irqs[i]);
+ disable_irq_nosync(opal_irqs[i].start);
else
- free_irq(opal_irqs[i], NULL);
+ free_irq(opal_irqs[i].start, NULL);
- opal_irqs[i] = 0;
+ opal_irqs[i].start = 0;
}
}
int __init opal_event_init(void)
{
struct device_node *dn, *opal_node;
- const char **names;
- u32 *irqs;
- int i, rc;
+ bool old_style = false;
+ int i, rc = 0;
opal_node = of_find_node_by_path("/ibm,opal");
if (!opal_node) {
@@ -207,67 +207,91 @@ int __init opal_event_init(void)
goto out;
}
- /* Get opal-interrupts property and names if present */
- rc = of_property_count_u32_elems(opal_node, "opal-interrupts");
- if (rc < 0)
- goto out;
+ /* Look for new-style (standard) "interrupts" property */
+ opal_irq_count = of_irq_count(opal_node);
- opal_irq_count = rc;
- pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count);
+	/* Absent? Look for the old one */
+ if (opal_irq_count < 1) {
+ /* Get opal-interrupts property and names if present */
+ rc = of_property_count_u32_elems(opal_node, "opal-interrupts");
+ if (rc > 0)
+ opal_irq_count = rc;
+ old_style = true;
+ }
- irqs = kcalloc(opal_irq_count, sizeof(*irqs), GFP_KERNEL);
- names = kcalloc(opal_irq_count, sizeof(*names), GFP_KERNEL);
- opal_irqs = kcalloc(opal_irq_count, sizeof(*opal_irqs), GFP_KERNEL);
+	/* No interrupts? Bail out */
+ if (!opal_irq_count)
+ goto out;
- if (WARN_ON(!irqs || !names || !opal_irqs))
- goto out_free;
+ pr_debug("OPAL: Found %d interrupts reserved for OPAL using %s scheme\n",
+ opal_irq_count, old_style ? "old" : "new");
- rc = of_property_read_u32_array(opal_node, "opal-interrupts",
- irqs, opal_irq_count);
- if (rc < 0) {
- pr_err("Error %d reading opal-interrupts array\n", rc);
- goto out_free;
+ /* Allocate an IRQ resources array */
+ opal_irqs = kcalloc(opal_irq_count, sizeof(struct resource), GFP_KERNEL);
+ if (WARN_ON(!opal_irqs)) {
+ rc = -ENOMEM;
+ goto out;
}
- /* It's not an error for the names to be missing */
- of_property_read_string_array(opal_node, "opal-interrupts-names",
- names, opal_irq_count);
+ /* Build the resources array */
+ if (old_style) {
+ /* Old style "opal-interrupts" property */
+ for (i = 0; i < opal_irq_count; i++) {
+ struct resource *r = &opal_irqs[i];
+ const char *name = NULL;
+ u32 hw_irq;
+ int virq;
+
+ rc = of_property_read_u32_index(opal_node, "opal-interrupts",
+ i, &hw_irq);
+ if (WARN_ON(rc < 0)) {
+ opal_irq_count = i;
+ break;
+ }
+ of_property_read_string_index(opal_node, "opal-interrupts-names",
+ i, &name);
+ virq = irq_create_mapping(NULL, hw_irq);
+ if (!virq) {
+ pr_warn("Failed to map OPAL irq 0x%x\n", hw_irq);
+ continue;
+ }
+ r->start = r->end = virq;
+ r->flags = IORESOURCE_IRQ | IRQ_TYPE_LEVEL_LOW;
+ r->name = name;
+ }
+ } else {
+ /* new style standard "interrupts" property */
+ rc = of_irq_to_resource_table(opal_node, opal_irqs, opal_irq_count);
+ if (WARN_ON(rc < 0)) {
+ opal_irq_count = 0;
+ kfree(opal_irqs);
+ goto out;
+ }
+ if (WARN_ON(rc < opal_irq_count))
+ opal_irq_count = rc;
+ }
/* Install interrupt handlers */
for (i = 0; i < opal_irq_count; i++) {
- unsigned int virq;
- char *name;
-
- /* Get hardware and virtual IRQ */
- virq = irq_create_mapping(NULL, irqs[i]);
- if (!virq) {
- pr_warn("Failed to map irq 0x%x\n", irqs[i]);
- continue;
- }
+ struct resource *r = &opal_irqs[i];
+ const char *name;
- if (names[i] && strlen(names[i]))
- name = kasprintf(GFP_KERNEL, "opal-%s", names[i]);
+ /* Prefix name */
+ if (r->name && strlen(r->name))
+ name = kasprintf(GFP_KERNEL, "opal-%s", r->name);
else
name = kasprintf(GFP_KERNEL, "opal");
/* Install interrupt handler */
- rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW,
+ rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
name, NULL);
if (rc) {
- irq_dispose_mapping(virq);
- pr_warn("Error %d requesting irq %d (0x%x)\n",
- rc, virq, irqs[i]);
+ pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start);
continue;
}
-
- /* Cache IRQ */
- opal_irqs[i] = virq;
}
-
-out_free:
- kfree(irqs);
- kfree(names);
-out:
+ rc = 0;
+ out:
of_node_put(opal_node);
return rc;
}
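
The opal_event_init() rework boils down to a two-step lookup: prefer the standard "interrupts" property, fall back to the legacy "opal-interrupts" list only when the new one is absent, and remember which flavour matched so the rest of the function can build the IRQ resources accordingly. The control flow in miniature, with fake counting helpers:

#include <stdio.h>

static int count_new_style(void) { return 0; }	/* pretend: property absent */
static int count_old_style(void) { return 3; }

int main(void)
{
	int old_style = 0;
	int count = count_new_style();

	if (count < 1) {	/* absent? look for the old one */
		count = count_old_style();
		old_style = 1;
	}
	if (count < 1) {	/* no interrupts? bail out */
		fprintf(stderr, "no interrupts\n");
		return 0;
	}
	printf("found %d interrupts using %s scheme\n",
	       count, old_style ? "old" : "new");
	return 0;
}
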
diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
index 6f1214d4de92..55691950d981 100644
--- a/arch/powerpc/platforms/powernv/opal-kmsg.c
+++ b/arch/powerpc/platforms/powernv/opal-kmsg.c
@@ -23,12 +23,9 @@
* may not be completely printed. This function does not actually dump the
* message, it just ensures that OPAL completely flushes the console buffer.
*/
-static void force_opal_console_flush(struct kmsg_dumper *dumper,
+static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
- int i;
- int64_t ret;
-
/*
* Outside of a panic context the pollers will continue to run,
* so we don't need to do any special flushing.
@@ -36,32 +33,11 @@ static void force_opal_console_flush(struct kmsg_dumper *dumper,
if (reason != KMSG_DUMP_PANIC)
return;
- if (opal_check_token(OPAL_CONSOLE_FLUSH)) {
- ret = opal_console_flush(0);
-
- if (ret == OPAL_UNSUPPORTED || ret == OPAL_PARAMETER)
- return;
-
- /* Incrementally flush until there's nothing left */
- while (opal_console_flush(0) != OPAL_SUCCESS);
- } else {
- /*
- * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
- * the console can still be flushed by calling the polling
- * function enough times to flush the buffer. We don't know
- * how much output still needs to be flushed, but we can be
- * generous since the kernel is in panic and doesn't need
- * to do much else.
- */
- printk(KERN_NOTICE "opal: OPAL_CONSOLE_FLUSH missing.\n");
- for (i = 0; i < 1024; i++) {
- opal_poll_events(NULL);
- }
- }
+ opal_flush_console(0);
}
static struct kmsg_dumper opal_kmsg_dumper = {
- .dump = force_opal_console_flush
+ .dump = kmsg_dump_opal_console_flush
};
void __init opal_kmsg_init(void)
diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
index 541c9ea04a32..f7d04b6a2d7a 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
@@ -32,6 +32,34 @@ static struct sensor_group {
struct sg_attr *sgattrs;
} *sgs;
+int sensor_group_enable(u32 handle, bool enable)
+{
+ struct opal_msg msg;
+ int token, ret;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0)
+ return token;
+
+ ret = opal_sensor_group_enable(handle, token, enable);
+ if (ret == OPAL_ASYNC_COMPLETION) {
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_devel("Failed to wait for the async response\n");
+ ret = -EIO;
+ goto out;
+ }
+ ret = opal_error_code(opal_get_async_rc(msg));
+ } else {
+ ret = opal_error_code(ret);
+ }
+
+out:
+ opal_async_release_token(token);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sensor_group_enable);
+
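
sensor_group_enable() above has the canonical OPAL async-call shape: take a token, issue the request, wait for the asynchronous response only when firmware answers OPAL_ASYNC_COMPLETION, and release the token on every exit path. The same control flow modelled in plain C; every helper and constant below is a stand-in.

#include <stdio.h>

#define ASYNC_COMPLETION 1	/* stand-in for OPAL_ASYNC_COMPLETION */

static int get_token(void) { return 7; }
static void release_token(int t) { (void)t; }
static int fire_request(int t) { (void)t; return ASYNC_COMPLETION; }
static int wait_response(int t) { (void)t; return 0; }

static int do_enable(void)
{
	int token, ret;

	token = get_token();
	if (token < 0)
		return token;

	ret = fire_request(token);
	if (ret == ASYNC_COMPLETION)
		ret = wait_response(token);	/* completes later */
	/* else: ret already holds the synchronous result */

	release_token(token);	/* every path releases the token */
	return ret;
}

int main(void)
{
	printf("enable -> %d\n", do_enable());
	return 0;
}
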
static ssize_t sg_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index a8d9b4089c31..251528231a9e 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -14,6 +14,8 @@
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/opal.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
.section ".text"
@@ -327,3 +329,5 @@ OPAL_CALL(opal_npu_tl_set, OPAL_NPU_TL_SET);
OPAL_CALL(opal_pci_get_pbcq_tunnel_bar, OPAL_PCI_GET_PBCQ_TUNNEL_BAR);
OPAL_CALL(opal_pci_set_pbcq_tunnel_bar, OPAL_PCI_SET_PBCQ_TUNNEL_BAR);
OPAL_CALL(opal_sensor_read_u64, OPAL_SENSOR_READ_U64);
+OPAL_CALL(opal_sensor_group_enable, OPAL_SENSOR_GROUP_ENABLE);
+OPAL_CALL(opal_nx_coproc_init, OPAL_NX_COPROC_INIT);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 0d539c661748..404c379db168 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -344,70 +344,125 @@ int opal_get_chars(uint32_t vtermno, char *buf, int count)
return 0;
}
-int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
+static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, bool atomic)
{
- int written = 0;
+ unsigned long flags = 0 /* shut up gcc */;
+ int written;
__be64 olen;
- s64 len, rc;
- unsigned long flags;
- __be64 evt;
+ s64 rc;
if (!opal.entry)
return -ENODEV;
- /* We want put_chars to be atomic to avoid mangling of hvsi
- * packets. To do that, we first test for room and return
- * -EAGAIN if there isn't enough.
- *
- * Unfortunately, opal_console_write_buffer_space() doesn't
- * appear to work on opal v1, so we just assume there is
- * enough room and be done with it
- */
- spin_lock_irqsave(&opal_write_lock, flags);
+ if (atomic)
+ spin_lock_irqsave(&opal_write_lock, flags);
rc = opal_console_write_buffer_space(vtermno, &olen);
- len = be64_to_cpu(olen);
- if (rc || len < total_len) {
- spin_unlock_irqrestore(&opal_write_lock, flags);
+ if (rc || be64_to_cpu(olen) < total_len) {
/* Closed -> drop characters */
if (rc)
- return total_len;
- opal_poll_events(NULL);
- return -EAGAIN;
+ written = total_len;
+ else
+ written = -EAGAIN;
+ goto out;
}
- /* We still try to handle partial completions, though they
- * should no longer happen.
- */
- rc = OPAL_BUSY;
- while(total_len > 0 && (rc == OPAL_BUSY ||
- rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
- olen = cpu_to_be64(total_len);
- rc = opal_console_write(vtermno, &olen, data);
- len = be64_to_cpu(olen);
-
- /* Closed or other error drop */
- if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
- rc != OPAL_BUSY_EVENT) {
- written = total_len;
- break;
+ /* Should not get a partial write here because space is available. */
+ olen = cpu_to_be64(total_len);
+ rc = opal_console_write(vtermno, &olen, data);
+ if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ if (rc == OPAL_BUSY_EVENT) {
+ mdelay(OPAL_BUSY_DELAY_MS);
+ opal_poll_events(NULL);
+		} else if (rc == OPAL_BUSY) {
+ mdelay(OPAL_BUSY_DELAY_MS);
}
- if (rc == OPAL_SUCCESS) {
- total_len -= len;
- data += len;
- written += len;
+ written = -EAGAIN;
+ goto out;
+ }
+
+ /* Closed or other error drop */
+ if (rc != OPAL_SUCCESS) {
+ written = opal_error_code(rc);
+ goto out;
+ }
+
+ written = be64_to_cpu(olen);
+ if (written < total_len) {
+ if (atomic) {
+ /* Should not happen */
+ pr_warn("atomic console write returned partial "
+ "len=%d written=%d\n", total_len, written);
}
- /* This is a bit nasty but we need that for the console to
- * flush when there aren't any interrupts. We will clean
- * things a bit later to limit that to synchronous path
- * such as the kernel console and xmon/udbg
+ if (!written)
+ written = -EAGAIN;
+ }
+
+out:
+ if (atomic)
+ spin_unlock_irqrestore(&opal_write_lock, flags);
+
+ /* In the -EAGAIN case, callers loop, so we have to flush the console
+ * here in case they have interrupts off (and we don't want to wait
+ * for async flushing if we can make immediate progress here). If
+ * necessary the API could be made entirely non-flushing if the
+ * callers had a ->flush API to use.
+ */
+ if (written == -EAGAIN)
+ opal_flush_console(vtermno);
+
+ return written;
+}
+
+int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
+{
+ return __opal_put_chars(vtermno, data, total_len, false);
+}
+
+/*
+ * opal_put_chars_atomic will not perform partial-writes. Data will be
+ * atomically written to the terminal or not at all. This is not strictly
+ * true at the moment because console space can race with OPAL's console
+ * writes.
+ */
+int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len)
+{
+ return __opal_put_chars(vtermno, data, total_len, true);
+}
+
+int opal_flush_console(uint32_t vtermno)
+{
+ s64 rc;
+
+ if (!opal_check_token(OPAL_CONSOLE_FLUSH)) {
+ __be64 evt;
+
+ WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n");
+ /*
+ * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
+ * the console can still be flushed by calling the polling
+ * function while it has OPAL_EVENT_CONSOLE_OUTPUT events.
*/
- do
+ do {
opal_poll_events(&evt);
- while(rc == OPAL_SUCCESS &&
- (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
+ } while (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT);
+
+ return OPAL_SUCCESS;
}
- spin_unlock_irqrestore(&opal_write_lock, flags);
- return written;
+
+ do {
+ rc = OPAL_BUSY;
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_console_flush(vtermno);
+ if (rc == OPAL_BUSY_EVENT) {
+ mdelay(OPAL_BUSY_DELAY_MS);
+ opal_poll_events(NULL);
+ } else if (rc == OPAL_BUSY) {
+ mdelay(OPAL_BUSY_DELAY_MS);
+ }
+ }
+ } while (rc == OPAL_PARTIAL); /* More to flush */
+
+ return opal_error_code(rc);
}
static int opal_recover_mce(struct pt_regs *regs,
@@ -922,6 +977,7 @@ EXPORT_SYMBOL_GPL(opal_flash_read);
EXPORT_SYMBOL_GPL(opal_flash_write);
EXPORT_SYMBOL_GPL(opal_flash_erase);
EXPORT_SYMBOL_GPL(opal_prd_msg);
+EXPORT_SYMBOL_GPL(opal_check_token);
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -1034,3 +1090,5 @@ EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
EXPORT_SYMBOL_GPL(opal_int_eoi);
EXPORT_SYMBOL_GPL(opal_error_code);
+/* Export the below symbol for NX compression */
+EXPORT_SYMBOL(opal_nx_coproc_init);
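
The rewritten console path funnels every firmware stall through one idiom: OPAL_BUSY and OPAL_BUSY_EVENT both mean back off and retry, with BUSY_EVENT additionally requiring an event poll, while OPAL_PARTIAL from opal_console_flush() means some output went out and the loop must go around again. A compact simulation of that nested loop; the firmware call is faked and the constants merely mirror the ones used above.

#include <stdio.h>

enum { OPAL_SUCCESS = 0, OPAL_PARTIAL = -1, OPAL_BUSY = -2, OPAL_BUSY_EVENT = -3 };

static int calls;

static int fake_flush(void)	/* simulated opal_console_flush() */
{
	static const int script[] = { OPAL_BUSY, OPAL_PARTIAL, OPAL_SUCCESS };

	return script[calls++];
}

int main(void)
{
	int rc;

	do {
		rc = OPAL_BUSY;
		while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
			rc = fake_flush();
			if (rc == OPAL_BUSY_EVENT) {
				/* kernel: mdelay() then opal_poll_events() */
			} else if (rc == OPAL_BUSY) {
				/* kernel: mdelay() only */
			}
		}
	} while (rc == OPAL_PARTIAL);	/* more to flush */

	printf("flushed after %d calls, rc=%d\n", calls, rc);
	return 0;
}
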
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
index cee003de63af..1b18111453d7 100644
--- a/arch/powerpc/platforms/powernv/pci-cxl.c
+++ b/arch/powerpc/platforms/powernv/pci-cxl.c
@@ -8,11 +8,8 @@
*/
#include <linux/module.h>
-#include <linux/msi.h>
-#include <asm/pci-bridge.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
-#include <misc/cxl.h>
#include "pci.h"
@@ -179,199 +176,3 @@ static inline int get_cxl_module(void)
#else
static inline int get_cxl_module(void) { return 0; }
#endif
-
-/*
- * Sets flags and switches the controller ops to enable the cxl kernel api.
- * Originally the cxl kernel API operated on a virtual PHB, but certain cards
- * such as the Mellanox CX4 use a peer model instead and for these cards the
- * cxl kernel api will operate on the real PHB.
- */
-int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable)
-{
- struct pnv_phb *phb = hose->private_data;
- int rc;
-
- if (!enable) {
- /*
- * Once cxl mode is enabled on the PHB, there is currently no
- * known safe method to disable it again, and trying risks a
- * checkstop. If we can find a way to safely disable cxl mode
- * in the future we can revisit this, but for now the only sane
- * thing to do is to refuse to disable cxl mode:
- */
- return -EPERM;
- }
-
- /*
- * Hold a reference to the cxl module since several PHB operations now
- * depend on it, and it would be insane to allow it to be removed so
- * long as we are in this mode (and since we can't safely disable this
- * mode once enabled...).
- */
- rc = get_cxl_module();
- if (rc)
- return rc;
-
- phb->flags |= PNV_PHB_FLAG_CXL;
- hose->controller_ops = pnv_cxl_cx4_ioda_controller_ops;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pnv_cxl_enable_phb_kernel_api);
-
-bool pnv_pci_on_cxl_phb(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- return !!(phb->flags & PNV_PHB_FLAG_CXL);
-}
-EXPORT_SYMBOL_GPL(pnv_pci_on_cxl_phb);
-
-struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
-
- return (struct cxl_afu *)phb->cxl_afu;
-}
-EXPORT_SYMBOL_GPL(pnv_cxl_phb_to_afu);
-
-void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- phb->cxl_afu = afu;
-}
-EXPORT_SYMBOL_GPL(pnv_cxl_phb_set_peer_afu);
-
-/*
- * In the peer cxl model, the XSL/PSL is physical function 0, and will be used
- * by other functions on the device for memory access and interrupts. When the
- * other functions are enabled we explicitly take a reference on the cxl
- * function since they will use it, and allocate a default context associated
- * with that function just like the vPHB model of the cxl kernel API.
- */
-bool pnv_cxl_enable_device_hook(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct cxl_afu *afu = phb->cxl_afu;
-
- if (!pnv_pci_enable_device_hook(dev))
- return false;
-
-
- /* No special handling for the cxl function, which is always PF 0 */
- if (PCI_FUNC(dev->devfn) == 0)
- return true;
-
- if (!afu) {
- dev_WARN(&dev->dev, "Attempted to enable function > 0 on CXL PHB without a peer AFU\n");
- return false;
- }
-
- dev_info(&dev->dev, "Enabling function on CXL enabled PHB with peer AFU\n");
-
- /* Make sure the peer AFU can't go away while this device is active */
- cxl_afu_get(afu);
-
- return cxl_pci_associate_default_context(dev, afu);
-}
-
-void pnv_cxl_disable_device(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct cxl_afu *afu = phb->cxl_afu;
-
- /* No special handling for cxl function: */
- if (PCI_FUNC(dev->devfn) == 0)
- return;
-
- cxl_pci_disable_device(dev);
- cxl_afu_put(afu);
-}
-
-/*
- * This is a special version of pnv_setup_msi_irqs for cards in cxl mode. This
- * function handles setting up the IVTE entries for the XSL to use.
- *
- * We are currently not filling out the MSIX table, since the only currently
- * supported adapter (CX4) uses a custom MSIX table format in cxl mode and it
- * is up to their driver to fill that out. In the future we may fill out the
- * MSIX table (and change the IVTE entries to be an index to the MSIX table)
- * for adapters implementing the Full MSI-X mode described in the CAIA.
- */
-int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct msi_desc *entry;
- struct cxl_context *ctx = NULL;
- unsigned int virq;
- int hwirq;
- int afu_irq = 0;
- int rc;
-
- if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
- return -ENODEV;
-
- if (pdev->no_64bit_msi && !phb->msi32_support)
- return -ENODEV;
-
- rc = cxl_cx4_setup_msi_irqs(pdev, nvec, type);
- if (rc)
- return rc;
-
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
- pr_warn("%s: Supports only 64-bit MSIs\n",
- pci_name(pdev));
- return -ENXIO;
- }
-
- hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
- if (WARN_ON(hwirq <= 0))
- return (hwirq ? hwirq : -ENOMEM);
-
- virq = irq_create_mapping(NULL, hwirq);
- if (!virq) {
- pr_warn("%s: Failed to map cxl mode MSI to linux irq\n",
- pci_name(pdev));
- return -ENOMEM;
- }
-
- rc = pnv_cxl_ioda_msi_setup(pdev, hwirq, virq);
- if (rc) {
- pr_warn("%s: Failed to setup cxl mode MSI\n", pci_name(pdev));
- irq_dispose_mapping(virq);
- return rc;
- }
-
- irq_set_msi_desc(virq, entry);
- }
-
- return 0;
-}
-
-void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct msi_desc *entry;
- irq_hw_number_t hwirq;
-
- if (WARN_ON(!phb))
- return;
-
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
- hwirq = virq_to_hw(entry->irq);
- irq_set_msi_desc(entry->irq, NULL);
- irq_dispose_mapping(entry->irq);
- }
-
- cxl_cx4_teardown_msi_irqs(pdev);
-}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
new file mode 100644
index 000000000000..6c5db1acbe8d
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * TCE helpers for IODA PCI/PCIe on PowerNV platforms
+ *
+ * Copyright 2018 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+
+#include <asm/iommu.h>
+#include <asm/tce.h>
+#include "pci.h"
+
+void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
+ void *tce_mem, u64 tce_size,
+ u64 dma_offset, unsigned int page_shift)
+{
+ tbl->it_blocksize = 16;
+ tbl->it_base = (unsigned long)tce_mem;
+ tbl->it_page_shift = page_shift;
+ tbl->it_offset = dma_offset >> tbl->it_page_shift;
+ tbl->it_index = 0;
+ tbl->it_size = tce_size >> 3;
+ tbl->it_busno = 0;
+ tbl->it_type = TCE_PCI;
+}
+
+static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
+{
+ struct page *tce_mem = NULL;
+ __be64 *addr;
+
+ tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
+ if (!tce_mem) {
+		pr_err("Failed to allocate TCE memory, level shift=%d\n",
+ shift);
+ return NULL;
+ }
+ addr = page_address(tce_mem);
+ memset(addr, 0, 1UL << shift);
+
+ return addr;
+}
+
+static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
+{
+ __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
+ int level = tbl->it_indirect_levels;
+ const long shift = ilog2(tbl->it_level_size);
+ unsigned long mask = (tbl->it_level_size - 1) << (level * shift);
+
+ while (level) {
+ int n = (idx & mask) >> (level * shift);
+ unsigned long tce;
+
+ if (tmp[n] == 0) {
+ __be64 *tmp2;
+
+ if (!alloc)
+ return NULL;
+
+ tmp2 = pnv_alloc_tce_level(tbl->it_nid,
+ ilog2(tbl->it_level_size) + 3);
+ if (!tmp2)
+ return NULL;
+
+ tmp[n] = cpu_to_be64(__pa(tmp2) |
+ TCE_PCI_READ | TCE_PCI_WRITE);
+ }
+ tce = be64_to_cpu(tmp[n]);
+
+ tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
+ idx &= ~mask;
+ mask >>= shift;
+ --level;
+ }
+
+ return tmp + idx;
+}
+
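
pnv_tce() walks the multi-level TCE table by slicing the index into per-level fields: with it_level_size entries per level (shift bits each), the mask starts at (it_level_size - 1) << (level * shift), the current level's slot is the masked field shifted down, and after each descent the field is stripped and the mask slides one level lower. A self-contained model of that index arithmetic, with two tiny invented levels:

#include <stdio.h>
#include <stdint.h>

#define SHIFT 4				/* 16 entries per level */
#define LEVEL_SIZE (1UL << SHIFT)

static uint64_t leaf[LEVEL_SIZE];
static uint64_t *mid[LEVEL_SIZE];

static uint64_t *walk(uint64_t **table, unsigned long idx, int levels)
{
	unsigned long mask = (LEVEL_SIZE - 1) << (levels * SHIFT);
	void *tmp = table;

	while (levels) {
		unsigned long n = (idx & mask) >> (levels * SHIFT);

		tmp = ((uint64_t **)tmp)[n];	/* descend one level */
		idx &= ~mask;			/* strip the used field */
		mask >>= SHIFT;
		--levels;
	}
	return (uint64_t *)tmp + idx;		/* leaf entry */
}

int main(void)
{
	mid[2] = leaf;		/* top-level slot 2 points at our leaf */
	leaf[5] = 0xdeadbeef;

	/* idx = (2 << SHIFT) | 5 selects mid[2], then leaf[5]. */
	printf("0x%llx\n",
	       (unsigned long long)*walk(mid, (2 << SHIFT) | 5, 1));
	return 0;
}
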
+int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ u64 proto_tce = iommu_direction_to_tce_perm(direction);
+ u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
+ long i;
+
+ if (proto_tce & TCE_PCI_WRITE)
+ proto_tce |= TCE_PCI_READ;
+
+ for (i = 0; i < npages; i++) {
+ unsigned long newtce = proto_tce |
+ ((rpn + i) << tbl->it_page_shift);
+ unsigned long idx = index - tbl->it_offset + i;
+
+ *(pnv_tce(tbl, false, idx, true)) = cpu_to_be64(newtce);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+int pnv_tce_xchg(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction,
+ bool alloc)
+{
+ u64 proto_tce = iommu_direction_to_tce_perm(*direction);
+ unsigned long newtce = *hpa | proto_tce, oldtce;
+ unsigned long idx = index - tbl->it_offset;
+ __be64 *ptce = NULL;
+
+ BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
+
+ if (*direction == DMA_NONE) {
+ ptce = pnv_tce(tbl, false, idx, false);
+ if (!ptce) {
+ *hpa = 0;
+ return 0;
+ }
+ }
+
+ if (!ptce) {
+ ptce = pnv_tce(tbl, false, idx, alloc);
+ if (!ptce)
+ return alloc ? H_HARDWARE : H_TOO_HARD;
+ }
+
+ if (newtce & TCE_PCI_WRITE)
+ newtce |= TCE_PCI_READ;
+
+ oldtce = be64_to_cpu(xchg(ptce, cpu_to_be64(newtce)));
+ *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+ *direction = iommu_tce_direction(oldtce);
+
+ return 0;
+}
+
+__be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index, bool alloc)
+{
+ if (WARN_ON_ONCE(!tbl->it_userspace))
+ return NULL;
+
+ return pnv_tce(tbl, true, index - tbl->it_offset, alloc);
+}
+#endif
+
+void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
+{
+ long i;
+
+ for (i = 0; i < npages; i++) {
+ unsigned long idx = index - tbl->it_offset + i;
+ __be64 *ptce = pnv_tce(tbl, false, idx, false);
+
+ if (ptce)
+ *ptce = cpu_to_be64(0);
+ }
+}
+
+unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
+{
+ __be64 *ptce = pnv_tce(tbl, false, index - tbl->it_offset, false);
+
+ if (!ptce)
+ return 0;
+
+ return be64_to_cpu(*ptce);
+}
+
+static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
+ unsigned long size, unsigned int levels)
+{
+ const unsigned long addr_ul = (unsigned long) addr &
+ ~(TCE_PCI_READ | TCE_PCI_WRITE);
+
+ if (levels) {
+ long i;
+ u64 *tmp = (u64 *) addr_ul;
+
+ for (i = 0; i < size; ++i) {
+ unsigned long hpa = be64_to_cpu(tmp[i]);
+
+ if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
+ continue;
+
+ pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
+ levels - 1);
+ }
+ }
+
+ free_pages(addr_ul, get_order(size << 3));
+}
+
+void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
+{
+ const unsigned long size = tbl->it_indirect_levels ?
+ tbl->it_level_size : tbl->it_size;
+
+ if (!tbl->it_size)
+ return;
+
+ pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
+ tbl->it_indirect_levels);
+ if (tbl->it_userspace) {
+ pnv_pci_ioda2_table_do_free_pages(tbl->it_userspace, size,
+ tbl->it_indirect_levels);
+ }
+}
+
+static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned int shift,
+ unsigned int levels, unsigned long limit,
+ unsigned long *current_offset, unsigned long *total_allocated)
+{
+ __be64 *addr, *tmp;
+ unsigned long allocated = 1UL << shift;
+ unsigned int entries = 1UL << (shift - 3);
+ long i;
+
+ addr = pnv_alloc_tce_level(nid, shift);
+ *total_allocated += allocated;
+
+ --levels;
+ if (!levels) {
+ *current_offset += allocated;
+ return addr;
+ }
+
+ for (i = 0; i < entries; ++i) {
+ tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
+ levels, limit, current_offset, total_allocated);
+ if (!tmp)
+ break;
+
+ addr[i] = cpu_to_be64(__pa(tmp) |
+ TCE_PCI_READ | TCE_PCI_WRITE);
+
+ if (*current_offset >= limit)
+ break;
+ }
+
+ return addr;
+}
+
+long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ __u32 page_shift, __u64 window_size, __u32 levels,
+ bool alloc_userspace_copy, struct iommu_table *tbl)
+{
+ void *addr, *uas = NULL;
+ unsigned long offset = 0, level_shift, total_allocated = 0;
+ unsigned long total_allocated_uas = 0;
+ const unsigned int window_shift = ilog2(window_size);
+ unsigned int entries_shift = window_shift - page_shift;
+ unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
+ PAGE_SHIFT);
+ const unsigned long tce_table_size = 1UL << table_shift;
+ unsigned int tmplevels = levels;
+
+ if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
+ return -EINVAL;
+
+ if (!is_power_of_2(window_size))
+ return -EINVAL;
+
+ if (alloc_userspace_copy && (window_size > (1ULL << 32)))
+ tmplevels = 1;
+
+ /* Adjust direct table size from window_size and levels */
+ entries_shift = (entries_shift + levels - 1) / levels;
+ level_shift = entries_shift + 3;
+ level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
+
+ if ((level_shift - 3) * levels + page_shift >= 60)
+ return -EINVAL;
+
+ /* Allocate TCE table */
+ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+ tmplevels, tce_table_size, &offset, &total_allocated);
+
+ /* addr==NULL means that the first level allocation failed */
+ if (!addr)
+ return -ENOMEM;
+
+ /*
+ * The first level was allocated but some lower level failed, so
+ * we did not get as much memory as we asked for; release the
+ * partially allocated table.
+ */
+ if (tmplevels == levels && offset < tce_table_size)
+ goto free_tces_exit;
+
+ /* Allocate userspace view of the TCE table */
+ if (alloc_userspace_copy) {
+ offset = 0;
+ uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+ levels, tce_table_size, &offset,
+ &total_allocated_uas);
+ if (!uas)
+ goto free_tces_exit;
+ if (tmplevels == levels && (offset < tce_table_size ||
+ total_allocated_uas != total_allocated))
+ goto free_uas_exit;
+ }
+
+ /* Setup linux iommu table */
+ pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
+ page_shift);
+ tbl->it_level_size = 1ULL << (level_shift - 3);
+ tbl->it_indirect_levels = levels - 1;
+ tbl->it_allocated_size = total_allocated;
+ tbl->it_userspace = uas;
+ tbl->it_nid = nid;
+
+ pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
+ window_size, tce_table_size, bus_offset, tbl->it_base,
+ tbl->it_userspace, tmplevels, levels);
+
+ return 0;
+
+free_uas_exit:
+ pnv_pci_ioda2_table_do_free_pages(uas,
+ 1ULL << (level_shift - 3), levels - 1);
+free_tces_exit:
+ pnv_pci_ioda2_table_do_free_pages(addr,
+ 1ULL << (level_shift - 3), levels - 1);
+
+ return -ENOMEM;
+}
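
The sizing logic in pnv_pci_ioda2_table_alloc_pages() spreads the window's entry bits evenly across the levels and rounds each level up to at least one kernel page. A standalone sketch of that computation, under assumed inputs (4K IOMMU pages, a 4GB window, two levels, 64K kernel pages):

#include <stdio.h>

#define PAGE_SHIFT 16	/* assumed 64K kernel pages */

int main(void)
{
	const unsigned int page_shift = 12;		/* 4K IOMMU pages */
	const unsigned int window_shift = 32;		/* ilog2(4GB window) */
	const unsigned int levels = 2;
	unsigned int entries_shift = window_shift - page_shift;
	unsigned int table_shift = entries_shift + 3;	/* 8 bytes per TCE */
	unsigned long level_shift;

	if (table_shift < PAGE_SHIFT)
		table_shift = PAGE_SHIFT;

	/* Spread the entry bits across the levels, rounding up */
	entries_shift = (entries_shift + levels - 1) / levels;
	level_shift = entries_shift + 3;
	if (level_shift < PAGE_SHIFT)
		level_shift = PAGE_SHIFT;

	printf("total TCE table: %lu bytes\n", 1UL << table_shift);
	printf("per level:       %lu bytes, %lu entries\n",
	       1UL << level_shift, 1UL << (level_shift - 3));
	return 0;
}
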
+
+static void pnv_iommu_table_group_link_free(struct rcu_head *head)
+{
+ struct iommu_table_group_link *tgl = container_of(head,
+ struct iommu_table_group_link, rcu);
+
+ kfree(tgl);
+}
+
+void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
+ struct iommu_table_group *table_group)
+{
+ long i;
+ bool found;
+ struct iommu_table_group_link *tgl;
+
+ if (!tbl || !table_group)
+ return;
+
+ /* Remove link to a group from table's list of attached groups */
+ found = false;
+ list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+ if (tgl->table_group == table_group) {
+ list_del_rcu(&tgl->next);
+ call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
+ found = true;
+ break;
+ }
+ }
+ if (WARN_ON(!found))
+ return;
+
+ /* Clean a pointer to iommu_table in iommu_table_group::tables[] */
+ found = false;
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ if (table_group->tables[i] == tbl) {
+ table_group->tables[i] = NULL;
+ found = true;
+ break;
+ }
+ }
+ WARN_ON(!found);
+}
+
+long pnv_pci_link_table_and_group(int node, int num,
+ struct iommu_table *tbl,
+ struct iommu_table_group *table_group)
+{
+ struct iommu_table_group_link *tgl = NULL;
+
+ if (WARN_ON(!tbl || !table_group))
+ return -EINVAL;
+
+ tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
+ node);
+ if (!tgl)
+ return -ENOMEM;
+
+ tgl->table_group = table_group;
+ list_add_rcu(&tgl->next, &tbl->it_group_list);
+
+ table_group->tables[num] = tbl;
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 70b2e1e0f23c..4e6302bf4073 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -52,12 +52,8 @@
#define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */
#define PNV_IODA1_DMA32_SEGSIZE 0x10000000
-#define POWERNV_IOMMU_DEFAULT_LEVELS 1
-#define POWERNV_IOMMU_MAX_LEVELS 5
-
static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
"NPU_OCAPI" };
-static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...)
@@ -2008,7 +2004,7 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction)
{
- long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction, true);
if (!ret)
pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false);
@@ -2019,7 +2015,7 @@ static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction)
{
- long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction, false);
if (!ret)
pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true);
@@ -2041,6 +2037,7 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
#ifdef CONFIG_IOMMU_API
.exchange = pnv_ioda1_tce_xchg,
.exchange_rm = pnv_ioda1_tce_xchg_rm,
+ .useraddrptr = pnv_tce_useraddrptr,
#endif
.clear = pnv_ioda1_tce_free,
.get = pnv_tce_get,
@@ -2172,7 +2169,7 @@ static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction)
{
- long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction, true);
if (!ret)
pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);
@@ -2183,7 +2180,7 @@ static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction)
{
- long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction, false);
if (!ret)
pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true);
@@ -2200,20 +2197,16 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}
-static void pnv_ioda2_table_free(struct iommu_table *tbl)
-{
- pnv_pci_ioda2_table_free_pages(tbl);
-}
-
static struct iommu_table_ops pnv_ioda2_iommu_ops = {
.set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
.exchange = pnv_ioda2_tce_xchg,
.exchange_rm = pnv_ioda2_tce_xchg_rm,
+ .useraddrptr = pnv_tce_useraddrptr,
#endif
.clear = pnv_ioda2_tce_free,
.get = pnv_tce_get,
- .free = pnv_ioda2_table_free,
+ .free = pnv_pci_ioda2_table_free_pages,
};
static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
@@ -2463,13 +2456,9 @@ void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
pe->tce_bypass_enabled = enable;
}
-static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
- __u32 page_shift, __u64 window_size, __u32 levels,
- struct iommu_table *tbl);
-
static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
int num, __u32 page_shift, __u64 window_size, __u32 levels,
- struct iommu_table **ptbl)
+ bool alloc_userspace_copy, struct iommu_table **ptbl)
{
struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
table_group);
@@ -2486,7 +2475,7 @@ static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
ret = pnv_pci_ioda2_table_alloc_pages(nid,
bus_offset, page_shift, window_size,
- levels, tbl);
+ levels, alloc_userspace_copy, tbl);
if (ret) {
iommu_tce_table_put(tbl);
return ret;
@@ -2519,7 +2508,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
IOMMU_PAGE_SHIFT_4K,
window_size,
- POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
+ POWERNV_IOMMU_DEFAULT_LEVELS, false, &tbl);
if (rc) {
pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
rc);
@@ -2606,7 +2595,16 @@ static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
tce_table_size, direct_table_size);
}
- return bytes;
+ return bytes + bytes; /* one for HW table, one for userspace copy */
+}
+
+static long pnv_pci_ioda2_create_table_userspace(
+ struct iommu_table_group *table_group,
+ int num, __u32 page_shift, __u64 window_size, __u32 levels,
+ struct iommu_table **ptbl)
+{
+ return pnv_pci_ioda2_create_table(table_group,
+ num, page_shift, window_size, levels, true, ptbl);
}
static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
@@ -2635,7 +2633,7 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
.get_table_size = pnv_pci_ioda2_get_table_size,
- .create_table = pnv_pci_ioda2_create_table,
+ .create_table = pnv_pci_ioda2_create_table_userspace,
.set_window = pnv_pci_ioda2_set_window,
.unset_window = pnv_pci_ioda2_unset_window,
.take_ownership = pnv_ioda2_take_ownership,
@@ -2740,7 +2738,7 @@ static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group)
static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = {
.get_table_size = pnv_pci_ioda2_get_table_size,
- .create_table = pnv_pci_ioda2_create_table,
+ .create_table = pnv_pci_ioda2_create_table_userspace,
.set_window = pnv_pci_ioda2_npu_set_window,
.unset_window = pnv_pci_ioda2_npu_unset_window,
.take_ownership = pnv_ioda2_npu_take_ownership,
@@ -2774,144 +2772,6 @@ static void pnv_pci_ioda_setup_iommu_api(void)
static void pnv_pci_ioda_setup_iommu_api(void) { };
#endif
-static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
- unsigned levels, unsigned long limit,
- unsigned long *current_offset, unsigned long *total_allocated)
-{
- struct page *tce_mem = NULL;
- __be64 *addr, *tmp;
- unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT;
- unsigned long allocated = 1UL << (order + PAGE_SHIFT);
- unsigned entries = 1UL << (shift - 3);
- long i;
-
- tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
- if (!tce_mem) {
- pr_err("Failed to allocate a TCE memory, order=%d\n", order);
- return NULL;
- }
- addr = page_address(tce_mem);
- memset(addr, 0, allocated);
- *total_allocated += allocated;
-
- --levels;
- if (!levels) {
- *current_offset += allocated;
- return addr;
- }
-
- for (i = 0; i < entries; ++i) {
- tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
- levels, limit, current_offset, total_allocated);
- if (!tmp)
- break;
-
- addr[i] = cpu_to_be64(__pa(tmp) |
- TCE_PCI_READ | TCE_PCI_WRITE);
-
- if (*current_offset >= limit)
- break;
- }
-
- return addr;
-}
-
-static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
- unsigned long size, unsigned level);
-
-static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
- __u32 page_shift, __u64 window_size, __u32 levels,
- struct iommu_table *tbl)
-{
- void *addr;
- unsigned long offset = 0, level_shift, total_allocated = 0;
- const unsigned window_shift = ilog2(window_size);
- unsigned entries_shift = window_shift - page_shift;
- unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
- const unsigned long tce_table_size = 1UL << table_shift;
-
- if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
- return -EINVAL;
-
- if (!is_power_of_2(window_size))
- return -EINVAL;
-
- /* Adjust direct table size from window_size and levels */
- entries_shift = (entries_shift + levels - 1) / levels;
- level_shift = entries_shift + 3;
- level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
-
- if ((level_shift - 3) * levels + page_shift >= 60)
- return -EINVAL;
-
- /* Allocate TCE table */
- addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
- levels, tce_table_size, &offset, &total_allocated);
-
- /* addr==NULL means that the first level allocation failed */
- if (!addr)
- return -ENOMEM;
-
- /*
- * First level was allocated but some lower level failed as
- * we did not allocate as much as we wanted,
- * release partially allocated table.
- */
- if (offset < tce_table_size) {
- pnv_pci_ioda2_table_do_free_pages(addr,
- 1ULL << (level_shift - 3), levels - 1);
- return -ENOMEM;
- }
-
- /* Setup linux iommu table */
- pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
- page_shift);
- tbl->it_level_size = 1ULL << (level_shift - 3);
- tbl->it_indirect_levels = levels - 1;
- tbl->it_allocated_size = total_allocated;
-
- pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
- window_size, tce_table_size, bus_offset);
-
- return 0;
-}
-
-static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
- unsigned long size, unsigned level)
-{
- const unsigned long addr_ul = (unsigned long) addr &
- ~(TCE_PCI_READ | TCE_PCI_WRITE);
-
- if (level) {
- long i;
- u64 *tmp = (u64 *) addr_ul;
-
- for (i = 0; i < size; ++i) {
- unsigned long hpa = be64_to_cpu(tmp[i]);
-
- if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
- continue;
-
- pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
- level - 1);
- }
- }
-
- free_pages(addr_ul, get_order(size << 3));
-}
-
-static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
-{
- const unsigned long size = tbl->it_indirect_levels ?
- tbl->it_level_size : tbl->it_size;
-
- if (!tbl->it_size)
- return;
-
- pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
- tbl->it_indirect_levels);
-}
-
static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
{
struct pci_controller *hose = phb->hose;
@@ -2926,7 +2786,7 @@ static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
/* Add 16M for POWER8 by default */
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
!cpu_has_feature(CPU_FTR_ARCH_300))
- mask |= SZ_16M;
+ mask |= SZ_16M | SZ_256M;
return mask;
}
@@ -3576,7 +3436,7 @@ static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
/* Prevent enabling devices for which we couldn't properly
* assign a PE
*/
-bool pnv_pci_enable_device_hook(struct pci_dev *dev)
+static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -3844,26 +3704,6 @@ static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
.shutdown = pnv_pci_ioda_shutdown,
};
-#ifdef CONFIG_CXL_BASE
-const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops = {
- .dma_dev_setup = pnv_pci_dma_dev_setup,
- .dma_bus_setup = pnv_pci_dma_bus_setup,
-#ifdef CONFIG_PCI_MSI
- .setup_msi_irqs = pnv_cxl_cx4_setup_msi_irqs,
- .teardown_msi_irqs = pnv_cxl_cx4_teardown_msi_irqs,
-#endif
- .enable_device_hook = pnv_cxl_enable_device_hook,
- .disable_device = pnv_cxl_disable_device,
- .release_device = pnv_pci_release_device,
- .window_alignment = pnv_pci_window_alignment,
- .setup_bridge = pnv_pci_setup_bridge,
- .reset_secondary_bus = pnv_pci_reset_secondary_bus,
- .dma_set_mask = pnv_pci_ioda_dma_set_mask,
- .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
- .shutdown = pnv_pci_ioda_shutdown,
-};
-#endif
-
static void __init pnv_pci_init_ioda_phb(struct device_node *np,
u64 hub_id, int ioda_type)
{
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b265ecc0836a..13aef2323bbc 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -802,85 +802,6 @@ struct pci_ops pnv_pci_ops = {
.write = pnv_pci_write_config,
};
-static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
-{
- __be64 *tmp = ((__be64 *)tbl->it_base);
- int level = tbl->it_indirect_levels;
- const long shift = ilog2(tbl->it_level_size);
- unsigned long mask = (tbl->it_level_size - 1) << (level * shift);
-
- while (level) {
- int n = (idx & mask) >> (level * shift);
- unsigned long tce = be64_to_cpu(tmp[n]);
-
- tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
- idx &= ~mask;
- mask >>= shift;
- --level;
- }
-
- return tmp + idx;
-}
-
-int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
- unsigned long uaddr, enum dma_data_direction direction,
- unsigned long attrs)
-{
- u64 proto_tce = iommu_direction_to_tce_perm(direction);
- u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
- long i;
-
- if (proto_tce & TCE_PCI_WRITE)
- proto_tce |= TCE_PCI_READ;
-
- for (i = 0; i < npages; i++) {
- unsigned long newtce = proto_tce |
- ((rpn + i) << tbl->it_page_shift);
- unsigned long idx = index - tbl->it_offset + i;
-
- *(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IOMMU_API
-int pnv_tce_xchg(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction)
-{
- u64 proto_tce = iommu_direction_to_tce_perm(*direction);
- unsigned long newtce = *hpa | proto_tce, oldtce;
- unsigned long idx = index - tbl->it_offset;
-
- BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
-
- if (newtce & TCE_PCI_WRITE)
- newtce |= TCE_PCI_READ;
-
- oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
- *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
- *direction = iommu_tce_direction(oldtce);
-
- return 0;
-}
-#endif
-
-void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
-{
- long i;
-
- for (i = 0; i < npages; i++) {
- unsigned long idx = index - tbl->it_offset + i;
-
- *(pnv_tce(tbl, idx)) = cpu_to_be64(0);
- }
-}
-
-unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
-{
- return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
-}
-
struct iommu_table *pnv_pci_table_alloc(int nid)
{
struct iommu_table *tbl;
@@ -895,85 +816,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
return tbl;
}
-long pnv_pci_link_table_and_group(int node, int num,
- struct iommu_table *tbl,
- struct iommu_table_group *table_group)
-{
- struct iommu_table_group_link *tgl = NULL;
-
- if (WARN_ON(!tbl || !table_group))
- return -EINVAL;
-
- tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
- node);
- if (!tgl)
- return -ENOMEM;
-
- tgl->table_group = table_group;
- list_add_rcu(&tgl->next, &tbl->it_group_list);
-
- table_group->tables[num] = tbl;
-
- return 0;
-}
-
-static void pnv_iommu_table_group_link_free(struct rcu_head *head)
-{
- struct iommu_table_group_link *tgl = container_of(head,
- struct iommu_table_group_link, rcu);
-
- kfree(tgl);
-}
-
-void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
- struct iommu_table_group *table_group)
-{
- long i;
- bool found;
- struct iommu_table_group_link *tgl;
-
- if (!tbl || !table_group)
- return;
-
- /* Remove link to a group from table's list of attached groups */
- found = false;
- list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
- if (tgl->table_group == table_group) {
- list_del_rcu(&tgl->next);
- call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
- found = true;
- break;
- }
- }
- if (WARN_ON(!found))
- return;
-
- /* Clean a pointer to iommu_table in iommu_table_group::tables[] */
- found = false;
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
- if (table_group->tables[i] == tbl) {
- table_group->tables[i] = NULL;
- found = true;
- break;
- }
- }
- WARN_ON(!found);
-}
-
-void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
- void *tce_mem, u64 tce_size,
- u64 dma_offset, unsigned page_shift)
-{
- tbl->it_blocksize = 16;
- tbl->it_base = (unsigned long)tce_mem;
- tbl->it_page_shift = page_shift;
- tbl->it_offset = dma_offset >> tbl->it_page_shift;
- tbl->it_index = 0;
- tbl->it_size = tce_size >> 3;
- tbl->it_busno = 0;
- tbl->it_type = TCE_PCI;
-}
-
void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index eada4b6068cb..8b37b28e3831 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -88,7 +88,6 @@ struct pnv_ioda_pe {
};
#define PNV_PHB_FLAG_EEH (1 << 0)
-#define PNV_PHB_FLAG_CXL (1 << 1) /* Real PHB supporting the cxl kernel API */
struct pnv_phb {
struct pci_controller *hose;
@@ -194,20 +193,10 @@ struct pnv_phb {
bool nmmu_flush;
} npu;
-#ifdef CONFIG_CXL_BASE
- struct cxl_afu *cxl_afu;
-#endif
int p2p_target_count;
};
extern struct pci_ops pnv_pci_ops;
-extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
- unsigned long uaddr, enum dma_data_direction direction,
- unsigned long attrs);
-extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
-extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction);
-extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
unsigned char *log_buff);
@@ -217,14 +206,6 @@ int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val);
extern struct iommu_table *pnv_pci_table_alloc(int nid);
-extern long pnv_pci_link_table_and_group(int node, int num,
- struct iommu_table *tbl,
- struct iommu_table_group *table_group);
-extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
- struct iommu_table_group *table_group);
-extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
- void *tce_mem, u64 tce_size,
- u64 dma_offset, unsigned page_shift);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu_phb(struct device_node *np);
@@ -238,7 +219,6 @@ extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
-extern bool pnv_pci_enable_device_hook(struct pci_dev *dev);
extern void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
extern int pnv_eeh_post_init(void);
@@ -262,14 +242,33 @@ extern void pnv_npu_take_ownership(struct pnv_ioda_pe *npe);
extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
extern int pnv_npu2_init(struct pnv_phb *phb);
-/* cxl functions */
-extern bool pnv_cxl_enable_device_hook(struct pci_dev *dev);
-extern void pnv_cxl_disable_device(struct pci_dev *dev);
-extern int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
-extern void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);
+/* pci-ioda-tce.c */
+#define POWERNV_IOMMU_DEFAULT_LEVELS 1
+#define POWERNV_IOMMU_MAX_LEVELS 5
+extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction,
+ unsigned long attrs);
+extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
+extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction,
+ bool alloc);
+extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
+ bool alloc);
+extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
+
+extern long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ __u32 page_shift, __u64 window_size, __u32 levels,
+ bool alloc_userspace_copy, struct iommu_table *tbl);
+extern void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
-/* phb ops (cxl switches these when enabling the kernel api on the phb) */
-extern const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops;
+extern long pnv_pci_link_table_and_group(int node, int num,
+ struct iommu_table *tbl,
+ struct iommu_table_group *table_group);
+extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
+ struct iommu_table_group *table_group);
+extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
+ void *tce_mem, u64 tce_size,
+ u64 dma_offset, unsigned int page_shift);
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index f96df0a25d05..adddde023622 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -78,6 +78,12 @@ static void init_fw_feat_flags(struct device_node *np)
if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
+ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+ if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
+ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
/*
* The features below are enabled by default, so we instead look to see
* if firmware has *disabled* them, and clear them if so.
@@ -124,7 +130,7 @@ static void pnv_setup_rfi_flush(void)
security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
setup_rfi_flush(type, enable);
- setup_barrier_nospec();
+ setup_count_cache_flush();
}
static void __init pnv_setup_arch(void)
@@ -314,7 +320,7 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
u64 reinit_flags;
if (xive_enabled())
- xive_kexec_teardown_cpu(secondary);
+ xive_teardown_cpu();
else
xics_kexec_teardown_cpu(secondary);
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index b80909957792..0d354e19ef92 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -283,23 +283,6 @@ static void pnv_cause_ipi(int cpu)
ic_cause_ipi(cpu);
}
-static void pnv_p9_dd1_cause_ipi(int cpu)
-{
- int this_cpu = get_cpu();
-
- /*
- * POWER9 DD1 has a global addressed msgsnd, but for now we restrict
- * IPIs to same core, because it requires additional synchronization
- * for inter-core doorbells which we do not implement.
- */
- if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu)))
- doorbell_global_ipi(cpu);
- else
- ic_cause_ipi(cpu);
-
- put_cpu();
-}
-
static void __init pnv_smp_probe(void)
{
if (xive_enabled())
@@ -311,14 +294,10 @@ static void __init pnv_smp_probe(void)
ic_cause_ipi = smp_ops->cause_ipi;
WARN_ON(!ic_cause_ipi);
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- smp_ops->cause_ipi = pnv_p9_dd1_cause_ipi;
- else
- smp_ops->cause_ipi = doorbell_global_ipi;
- } else {
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ smp_ops->cause_ipi = doorbell_global_ipi;
+ else
smp_ops->cause_ipi = pnv_cause_ipi;
- }
}
}
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index ae0100fd35bb..f5493dbdd7ff 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/dcache.h>
#include <linux/mutex.h>
+#include <linux/stringify.h>
/*
* Overview of Virtual Accelerator Switchboard (VAS).
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 13eede60c24d..7e89d5c47068 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -25,6 +25,6 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
-ifeq ($(CONFIG_PPC_PSERIES),y)
+ifdef CONFIG_PPC_PSERIES
obj-$(CONFIG_SUSPEND) += suspend.o
endif
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index c511a1743a44..d91412c591ef 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -13,6 +13,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/feature-fixups.h>
.section ".text"
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 46fbaef69a59..23f54223ed56 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -58,7 +58,7 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
}
if (xive_enabled()) {
- xive_kexec_teardown_cpu(secondary);
+ xive_teardown_cpu();
if (!secondary)
xive_shutdown();
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5a392e40f3d2..d3992ced0782 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -21,6 +21,7 @@
/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG
+#define pr_fmt(fmt) "lpar: " fmt
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
@@ -36,7 +37,6 @@
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
-#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
@@ -165,8 +165,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
if (unlikely(lpar_rc == H_PTEG_FULL)) {
- if (!(vflags & HPTE_V_BOLTED))
- pr_devel(" full\n");
+ pr_devel("Hash table group is full\n");
return -1;
}
@@ -176,8 +175,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
* or we will loop forever, so return -2 in this case.
*/
if (unlikely(lpar_rc != H_SUCCESS)) {
- if (!(vflags & HPTE_V_BOLTED))
- pr_devel(" lpar err %ld\n", lpar_rc);
+ pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
return -2;
}
if (!(vflags & HPTE_V_BOLTED))
@@ -240,8 +238,11 @@ static void manual_hpte_clear_all(void)
*/
for (i = 0; i < hpte_count; i += 4) {
lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
- if (lpar_rc != H_SUCCESS)
+ if (lpar_rc != H_SUCCESS) {
+ pr_info("Failed to read hash page table at %ld err %ld\n",
+ i, lpar_rc);
continue;
+ }
for (j = 0; j < 4; j++){
if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
HPTE_V_VRMA_MASK)
@@ -340,8 +341,11 @@ static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_gr
for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
- if (lpar_rc != H_SUCCESS)
+ if (lpar_rc != H_SUCCESS) {
+ pr_info("Failed to read hash page table at %ld err %ld\n",
+ hpte_group, lpar_rc);
continue;
+ }
for (j = 0; j < 4; j++) {
if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
@@ -612,8 +616,8 @@ static int __init disable_bulk_remove(char *str)
{
if (strcmp(str, "off") == 0 &&
firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
- printk(KERN_INFO "Disabling BULK_REMOVE firmware feature");
- powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
+ pr_info("Disabling BULK_REMOVE firmware feature");
+ powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
}
return 1;
}
@@ -659,8 +663,7 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
return -ENODEV;
- printk(KERN_INFO "lpar: Attempting to resize HPT to shift %lu\n",
- shift);
+ pr_info("Attempting to resize HPT to shift %lu\n", shift);
t0 = ktime_get();
@@ -672,8 +675,7 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
/* prepare with shift==0 cancels an in-progress resize */
rc = plpar_resize_hpt_prepare(0, 0);
if (rc != H_SUCCESS)
- printk(KERN_WARNING
- "lpar: Unexpected error %d cancelling timed out HPT resize\n",
+ pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
rc);
return -ETIMEDOUT;
}
@@ -691,9 +693,7 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
case H_RESOURCE:
return -EPERM;
default:
- printk(KERN_WARNING
- "lpar: Unexpected error %d from H_RESIZE_HPT_PREPARE\n",
- rc);
+ pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
return -EIO;
}
@@ -706,22 +706,19 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
if (rc != 0) {
switch (state.commit_rc) {
case H_PTEG_FULL:
- printk(KERN_WARNING
- "lpar: Hash collision while resizing HPT\n");
+ pr_warn("Hash collision while resizing HPT\n");
return -ENOSPC;
default:
- printk(KERN_WARNING
- "lpar: Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
- state.commit_rc);
+ pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
+ state.commit_rc);
return -EIO;
};
}
- printk(KERN_INFO
- "lpar: HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
- shift, (long long) ktime_ms_delta(t1, t0),
- (long long) ktime_ms_delta(t2, t1));
+ pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
+ shift, (long long) ktime_ms_delta(t1, t0),
+ (long long) ktime_ms_delta(t2, t1));
return 0;
}
@@ -785,13 +782,13 @@ static int __init cmo_free_hint(char *str)
parm = strstrip(str);
if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
- printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
+ pr_info("%s: CMO free page hinting is not active.\n", __func__);
cmo_free_hint_flag = 0;
return 1;
}
cmo_free_hint_flag = 1;
- printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");
+ pr_info("%s: CMO free page hinting is active.\n", __func__);
if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
return 1;
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 8a8033a249c7..f0e30dc94988 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/stringify.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 5e1ef9150182..851ce326874a 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/fs.h>
#include <linux/reboot.h>
+#include <linux/irq_work.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
@@ -32,11 +33,13 @@
static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);
-static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];
-static DEFINE_PER_CPU(__u64, mce_data_buf);
-
static int ras_check_exception_token;
+static void mce_process_errlog_event(struct irq_work *work);
+static struct irq_work mce_errlog_process_work = {
+ .func = mce_process_errlog_event,
+};
+
#define EPOW_SENSOR_TOKEN 9
#define EPOW_SENSOR_INDEX 0
@@ -330,16 +333,20 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
(((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
+static inline struct rtas_error_log *fwnmi_get_errlog(void)
+{
+ return (struct rtas_error_log *)local_paca->mce_data_buf;
+}
+
/*
* Get the error information for errors coming through the
* FWNMI vectors. The pt_regs' r3 will be updated to reflect
* the actual r3 if possible, and a ptr to the error log entry
* will be returned if found.
*
- * If the RTAS error is not of the extended type, then we put it in a per
- * cpu 64bit buffer. If it is the extended type we use global_mce_data_buf.
+ * Use one mce_data_buf buffer per CPU to store the RTAS error.
*
- * The global_mce_data_buf does not have any locks or protection around it,
+ * The mce_data_buf does not have any locks or protection around it,
* if a second machine check comes in, or a system reset is done
* before we have logged the error, then we will get corruption in the
* error log. This is preferable over holding off on calling
@@ -349,7 +356,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
unsigned long *savep;
- struct rtas_error_log *h, *errhdr = NULL;
+ struct rtas_error_log *h;
/* Mask top two bits */
regs->gpr[3] &= ~(0x3UL << 62);
@@ -360,24 +367,22 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
}
savep = __va(regs->gpr[3]);
- regs->gpr[3] = savep[0]; /* restore original r3 */
+ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
- /* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];
+ /* Use the per-CPU buffer from the paca to store the RTAS error log */
+ memset(local_paca->mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
if (!rtas_error_extended(h)) {
- memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
- errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
+ memcpy(local_paca->mce_data_buf, h, sizeof(__u64));
} else {
int len, error_log_length;
error_log_length = 8 + rtas_error_extended_log_length(h);
- len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
- memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
- memcpy(global_mce_data_buf, h, len);
- errhdr = (struct rtas_error_log *)global_mce_data_buf;
+ len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
+ memcpy(local_paca->mce_data_buf, h, len);
}
- return errhdr;
+ return (struct rtas_error_log *)local_paca->mce_data_buf;
}
/* Call this when done with the data returned by FWNMI_get_errinfo.
@@ -423,6 +428,17 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
}
/*
+ * Process the MCE RTAS error log event.
+ */
+static void mce_process_errlog_event(struct irq_work *work)
+{
+ struct rtas_error_log *err;
+
+ err = fwnmi_get_errlog();
+ log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);
+}
+
+/*
* See if we can recover from a machine check exception.
* This is only called on power4 (or above) and only via
* the Firmware Non-Maskable Interrupts (fwnmi) handler
@@ -466,7 +482,8 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
recovered = 1;
}
- log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);
+ /* Queue irq work to log this rtas event later. */
+ irq_work_queue(&mce_errlog_process_work);
return recovered;
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8a4868a3964b..ba1791fd3234 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -41,6 +41,7 @@
#include <linux/root_dev.h>
#include <linux/of.h>
#include <linux/of_pci.h>
+#include <linux/memblock.h>
#include <asm/mmu.h>
#include <asm/processor.h>
@@ -69,6 +70,7 @@
#include <asm/kexec.h>
#include <asm/isa-bridge.h>
#include <asm/security_features.h>
+#include <asm/asm-const.h>
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"
@@ -102,6 +104,9 @@ static void pSeries_show_cpuinfo(struct seq_file *m)
static void __init fwnmi_init(void)
{
unsigned long system_reset_addr, machine_check_addr;
+ u8 *mce_data_buf;
+ unsigned int i;
+ int nr_cpus = num_possible_cpus();
int ibm_nmi_register = rtas_token("ibm,nmi-register");
if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
@@ -115,6 +120,18 @@ static void __init fwnmi_init(void)
if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
machine_check_addr))
fwnmi_active = 1;
+
+ /*
+ * Allocate a chunk of memory for the per-CPU buffers holding the
+ * RTAS error log. It will be used by the real mode MCE handler,
+ * so it needs to live below the RMA.
+ */
+ mce_data_buf = __va(memblock_alloc_base(RTAS_ERROR_LOG_MAX * nr_cpus,
+ RTAS_ERROR_LOG_MAX, ppc64_rma_size));
+ for_each_possible_cpu(i) {
+ paca_ptrs[i]->mce_data_buf = mce_data_buf +
+ (RTAS_ERROR_LOG_MAX * i);
+ }
}
static void pseries_8259_cascade(struct irq_desc *desc)
@@ -485,6 +502,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
+ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+ if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
+ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
/*
* The features below are enabled by default, so we instead look to see
* if firmware has *disabled* them, and clear them if so.
@@ -535,7 +558,7 @@ void pseries_setup_rfi_flush(void)
security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
setup_rfi_flush(types, enable);
- setup_barrier_nospec();
+ setup_count_cache_flush();
}
#ifdef CONFIG_PCI_IOV
@@ -647,6 +670,15 @@ void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
}
}
+static void pseries_disable_sriov_resources(struct pci_dev *pdev)
+{
+ int i;
+
+ pci_warn(pdev, "No hypervisor support for SR-IOV on this device, IOV BARs disabled.\n");
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
+ pdev->resource[i + PCI_IOV_RESOURCES].flags = 0;
+}
+
static void pseries_pci_fixup_resources(struct pci_dev *pdev)
{
const int *indexes;
@@ -654,10 +686,10 @@ static void pseries_pci_fixup_resources(struct pci_dev *pdev)
/*Firmware must support open sriov otherwise dont configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
- if (!indexes)
- return;
- /* Assign the addresses from device tree*/
- of_pci_set_vf_bar_size(pdev, indexes);
+ if (indexes)
+ of_pci_set_vf_bar_size(pdev, indexes);
+ else
+ pseries_disable_sriov_resources(pdev);
}
static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
@@ -669,10 +701,10 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
return;
/*Firmware must support open sriov otherwise dont configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
- if (!indexes)
- return;
- /* Assign the addresses from device tree*/
- of_pci_parse_iov_addrs(pdev, indexes);
+ if (indexes)
+ of_pci_parse_iov_addrs(pdev, indexes);
+ else
+ pseries_disable_sriov_resources(pdev);
}
static resource_size_t pseries_pci_iov_resource_alignment(struct pci_dev *pdev,
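
In the fwnmi_init() hunk earlier in this file, one memblock allocation below the RMA is carved into fixed RTAS_ERROR_LOG_MAX-sized slices, one per possible CPU, so the real-mode MCE handler never needs to allocate. A userspace model of that slicing, with malloc() standing in for memblock_alloc_base() and assumed sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LOG_MAX 2048	/* stands in for RTAS_ERROR_LOG_MAX */
#define NR_CPUS 4	/* stands in for num_possible_cpus() */

int main(void)
{
	unsigned char *base = malloc((size_t)LOG_MAX * NR_CPUS);
	unsigned char *slice[NR_CPUS];

	if (!base)
		return 1;
	memset(base, 0, (size_t)LOG_MAX * NR_CPUS);

	/* Each CPU gets its own LOG_MAX-byte window into the chunk */
	for (int i = 0; i < NR_CPUS; i++) {
		slice[i] = base + (size_t)LOG_MAX * i;
		printf("cpu %d buffer at offset %zu\n",
		       i, (size_t)(slice[i] - base));
	}

	free(base);
	return 0;
}
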
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S
index 4aad9dd10ace..1e1129553fd7 100644
--- a/arch/powerpc/purgatory/trampoline.S
+++ b/arch/powerpc/purgatory/trampoline.S
@@ -12,15 +12,7 @@
* Software Foundation (version 2 of the License).
*/
-#if defined(__LITTLE_ENDIAN__)
-#define STWX_BE stwbrx
-#define LWZX_BE lwbrx
-#elif defined(__BIG_ENDIAN__)
-#define STWX_BE stwx
-#define LWZX_BE lwzx
-#else
-#error no endianness defined!
-#endif
+#include <asm/asm-compat.h>
.machine ppc64
.balign 256
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index ea2f595b5133..f730539074c4 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -48,7 +48,7 @@ obj-$(CONFIG_UCODE_PATCH) += micropatch.o
obj-$(CONFIG_PPC_MPC512x) += mpc5xxx_clocks.o
obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o
-ifeq ($(CONFIG_SUSPEND),y)
+ifdef CONFIG_SUSPEND
obj-$(CONFIG_6xx) += 6xx-suspend.o
endif
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 5240d3a74a10..4f8dcf124828 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -38,7 +38,6 @@
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/io.h>
-#include <asm/tlbflush.h>
#include <asm/rheap.h>
#include <asm/prom.h>
#include <asm/cpm.h>
diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c
index 488ec453038a..2a98837dc6ba 100644
--- a/arch/powerpc/sysdev/fsl_mpic_err.c
+++ b/arch/powerpc/sysdev/fsl_mpic_err.c
@@ -76,7 +76,7 @@ int mpic_setup_error_int(struct mpic *mpic, int intvec)
mpic->flags |= MPIC_FSL_HAS_EIMR;
/* allocate interrupt vectors for error interrupts */
for (i = MPIC_MAX_ERR - 1; i >= 0; i--)
- mpic->err_int_vecs[i] = --intvec;
+ mpic->err_int_vecs[i] = intvec--;
return 0;
}
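
The fsl_mpic_err.c change is a pre- versus post-decrement fix: `--intvec` handed out vectors starting one below the value passed in, while `intvec--` starts at the value itself. A two-line demonstration of the difference:

#include <stdio.h>

int main(void)
{
	int a = 16, b = 16;

	/* Pre-decrement stores 15; post-decrement stores 16. The MPIC
	 * fix switches to the latter so the error vector range starts
	 * at intvec itself rather than one below it. */
	int pre = --a;
	int post = b--;

	printf("pre:  stored %d, left %d\n", pre, a);	/* 15, 15 */
	printf("post: stored %d, left %d\n", post, b);	/* 16, 15 */
	return 0;
}
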
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 353b43972bbf..934a77324f6b 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1380,12 +1380,12 @@ struct mpic * __init mpic_alloc(struct device_node *node,
* global vector number space, as in case of ipis
* and timer interrupts.
*
- * Available vector space = intvec_top - 12, where 12
+ * Available vector space = intvec_top - 13, where 13
* is the number of vectors which have been consumed by
- * ipis and timer interrupts.
+ * ipis, timer interrupts and the spurious vector.
*/
if (fsl_version >= 0x401) {
- ret = mpic_setup_error_int(mpic, intvec_top - 12);
+ ret = mpic_setup_error_int(mpic, intvec_top - 13);
if (ret)
return NULL;
}
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index eb69a5186243..280e964e1aa8 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
- msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
+ msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;
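
The mpic_msgr fix replaces an open-coded `end - start` with resource_size(); kernel resources have inclusive bounds, so the bare subtraction mapped one byte too few. A minimal model of the helper:

#include <stdio.h>

struct resource {
	unsigned long start, end;	/* inclusive bounds, as in the kernel */
};

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource rsrc = { .start = 0x1000, .end = 0x1fff }; /* 4K block */

	printf("end - start     = %lu (one byte short)\n",
	       rsrc.end - rsrc.start);
	printf("resource_size() = %lu\n", resource_size(&rsrc));
	return 0;
}
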
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 6243a7e537d0..e64a411d1a00 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -225,22 +225,23 @@ static void __init test_of_node(void)
struct device_node of_node;
struct property prop;
struct msi_bitmap bmp;
- int size = 256;
- DECLARE_BITMAP(expected, size);
+#define SIZE_EXPECTED 256
+ DECLARE_BITMAP(expected, SIZE_EXPECTED);
/* There should really be a struct device_node allocator */
memset(&of_node, 0, sizeof(of_node));
of_node_init(&of_node);
of_node.full_name = node_name;
- WARN_ON(msi_bitmap_alloc(&bmp, size, &of_node));
+ WARN_ON(msi_bitmap_alloc(&bmp, SIZE_EXPECTED, &of_node));
/* No msi-available-ranges, so expect > 0 */
WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);
/* Should all still be free */
- WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
- bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
+ WARN_ON(bitmap_find_free_region(bmp.bitmap, SIZE_EXPECTED,
+ get_count_order(SIZE_EXPECTED)));
+ bitmap_release_region(bmp.bitmap, 0, get_count_order(SIZE_EXPECTED));
/* Now create a fake msi-available-ranges property */
@@ -256,8 +257,8 @@ static void __init test_of_node(void)
WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp));
/* Check we got the expected result */
- WARN_ON(bitmap_parselist(expected_str, expected, size));
- WARN_ON(!bitmap_equal(expected, bmp.bitmap, size));
+ WARN_ON(bitmap_parselist(expected_str, expected, SIZE_EXPECTED));
+ WARN_ON(!bitmap_equal(expected, bmp.bitmap, SIZE_EXPECTED));
msi_bitmap_free(&bmp);
kfree(bmp.bitmap);
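
The msi_bitmap test change exists because DECLARE_BITMAP() sizes an on-stack array from its second argument, so the runtime variable `size` produced a variable-length array; switching to a macro makes the size a compile-time constant. A userspace model of the macros (BITS_TO_LONGS here is an assumption mirroring the kernel helper, not the kernel's exact definition):

#include <stdio.h>
#include <limits.h>

#define BITS_TO_LONGS(n) \
	(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define SIZE_EXPECTED 256

int main(void)
{
	DECLARE_BITMAP(expected, SIZE_EXPECTED);	/* fixed-size, no VLA */

	printf("bitmap uses %zu longs (%zu bytes)\n",
	       sizeof(expected) / sizeof(expected[0]), sizeof(expected));
	return 0;
}
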
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 3459015092fa..e8f5b0551095 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -319,7 +319,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
* The FW told us to call it. This happens for some
* interrupt sources that need additional HW whacking
* beyond the ESB manipulation. For example LPC interrupts
- * on P9 DD1.0 need a latch to be clared in the LPC bridge
+ * on P9 DD1.0 needed a latch to be cleared in the LPC bridge
* itself. The Firmware will take care of it.
*/
if (WARN_ON_ONCE(!xive_ops->eoi))
@@ -337,9 +337,9 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
* This allows us to then do a re-trigger if Q was set
* rather than synthesizing an interrupt in software
*
- * For LSIs, using the HW EOI cycle works around a problem
- * on P9 DD1 PHBs where the other ESB accesses don't work
- * properly.
+ * For LSIs the HW EOI cycle is used rather than PQ bits,
+ * as they are automatically re-triggered in HW when still
+ * pending.
*/
if (xd->flags & XIVE_IRQ_FLAG_LSI)
xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
@@ -1408,28 +1408,6 @@ void xive_teardown_cpu(void)
xive_cleanup_cpu_queues(cpu, xc);
}
-void xive_kexec_teardown_cpu(int secondary)
-{
- struct xive_cpu *xc = __this_cpu_read(xive_cpu);
- unsigned int cpu = smp_processor_id();
-
- /* Set CPPR to 0 to disable flow of interrupts */
- xc->cppr = 0;
- out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
-
- /* Backend cleanup if any */
- if (xive_ops->teardown_cpu)
- xive_ops->teardown_cpu(cpu, xc);
-
-#ifdef CONFIG_SMP
- /* Get rid of IPI */
- xive_cleanup_cpu_ipi(cpu, xc);
-#endif
-
- /* Disable and free the queues */
- xive_cleanup_cpu_queues(cpu, xc);
-}
-
void xive_shutdown(void)
{
xive_ops->shutdown();
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 311185b9960a..5b20a678d755 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -109,7 +109,7 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
return rc == 0 ? 0 : -ENXIO;
}
@@ -163,7 +163,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
pr_err("Error %lld setting queue for prio %d\n", rc, prio);
@@ -190,7 +190,7 @@ static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc)
pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
@@ -253,7 +253,7 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
for (;;) {
irq = opal_xive_allocate_irq(chip_id);
if (irq == OPAL_BUSY) {
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
continue;
}
if (irq < 0) {
@@ -275,7 +275,7 @@ u32 xive_native_alloc_irq(void)
rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc < 0)
return 0;
@@ -289,7 +289,7 @@ void xive_native_free_irq(u32 irq)
s64 rc = opal_xive_free_irq(irq);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);
@@ -305,7 +305,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
if (rc == OPAL_BUSY) {
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
continue;
}
xc->hw_ipi = 0;
@@ -395,12 +395,11 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
/* Enable the pool VP */
vp = xive_pool_vps + cpu;
- pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
for (;;) {
rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
pr_err("Failed to enable pool VP on CPU %d\n", cpu);
@@ -415,16 +414,9 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
}
vp_cam = be64_to_cpu(vp_cam_be);
- pr_debug("VP CAM = %llx\n", vp_cam);
-
/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
- pr_debug("(Old HW value: %08x)\n",
- in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
- out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2,
- TM_QW2W2_VP | vp_cam);
- pr_debug("(New HW value: %08x)\n",
- in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
+ out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}
static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
@@ -444,7 +436,7 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
rc = opal_xive_set_vp_info(vp, 0, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
}
@@ -645,7 +637,7 @@ u32 xive_native_alloc_vp_block(u32 max_vcpus)
rc = opal_xive_alloc_vp_block(order);
switch (rc) {
case OPAL_BUSY:
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
break;
case OPAL_XIVE_PROVISIONING:
if (!xive_native_provision_pages())
@@ -687,7 +679,7 @@ int xive_native_enable_vp(u32 vp_id, bool single_escalation)
rc = opal_xive_set_vp_info(vp_id, flags, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
return rc ? -EIO : 0;
}
@@ -701,7 +693,7 @@ int xive_native_disable_vp(u32 vp_id)
rc = opal_xive_set_vp_info(vp_id, 0, 0);
if (rc != OPAL_BUSY)
break;
- msleep(1);
+ msleep(OPAL_BUSY_DELAY_MS);
}
return rc ? -EIO : 0;
}
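
Every hunk in this file replaces the literal msleep(1) with the named OPAL_BUSY_DELAY_MS in the usual retry-until-not-busy loop around OPAL calls. A generic userspace model of the pattern; the call, return code and delay value are stand-ins, not the OPAL API:

#include <stdio.h>
#include <unistd.h>

#define OPAL_BUSY		(-1)	/* stand-in return code */
#define OPAL_BUSY_DELAY_MS	10	/* assumed value of the constant */

/* Fake firmware call: reports busy twice, then succeeds. */
static int fake_opal_call(void)
{
	static int busy_left = 2;

	return busy_left-- > 0 ? OPAL_BUSY : 0;
}

int main(void)
{
	int rc;

	for (;;) {
		rc = fake_opal_call();
		if (rc != OPAL_BUSY)
			break;
		/* Back off for a named, tunable delay instead of "1" */
		usleep(OPAL_BUSY_DELAY_MS * 1000);
	}
	printf("final rc = %d\n", rc);
	return rc ? 1 : 0;
}
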
diff --git a/arch/powerpc/tools/checkpatch.sh b/arch/powerpc/tools/checkpatch.sh
new file mode 100755
index 000000000000..1fad3fb90e7c
--- /dev/null
+++ b/arch/powerpc/tools/checkpatch.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2018, Michael Ellerman, IBM Corporation.
+#
+# Wrapper around checkpatch that uses our preferred settings
+
+script_base=$(realpath $(dirname $0))
+
+exec $script_base/../../../scripts/checkpatch.pl \
+ --subjective \
+ --no-summary \
+ --max-line-length=90 \
+ --show-types \
+ --ignore ARCH_INCLUDE_LINUX \
+ --ignore BIT_MACRO \
+ --ignore COMPARISON_TO_NULL \
+ --ignore EMAIL_SUBJECT \
+ --ignore FILE_PATH_CHANGES \
+ --ignore GLOBAL_INITIALISERS \
+ --ignore LINE_SPACING \
+ --ignore MULTIPLE_ASSIGNMENTS \
+ $@
diff --git a/arch/powerpc/xmon/spr_access.S b/arch/powerpc/xmon/spr_access.S
index 4099cbcddaaa..720a52afdd58 100644
--- a/arch/powerpc/xmon/spr_access.S
+++ b/arch/powerpc/xmon/spr_access.S
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
/* unsigned long xmon_mfspr(sprn, default_value) */
_GLOBAL(xmon_mfspr)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 196978733e64..4264aedc7775 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -56,6 +56,7 @@
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
+#include <asm/sections.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -244,6 +245,7 @@ Commands:\n\
f flush cache\n\
la lookup symbol+offset of specified address\n\
ls lookup address of specified symbol\n\
+ lp s [#] lookup address of percpu symbol s for current cpu, or cpu #\n\
m examine/change memory\n\
mm move a block of memory\n\
ms set a block of memory\n\
@@ -918,13 +920,13 @@ static void remove_cpu_bpts(void)
static void
show_uptime(void)
{
- struct timespec uptime;
+ struct timespec64 uptime;
if (setjmp(bus_error_jmp) == 0) {
catch_memory_errors = 1;
sync();
- get_monotonic_boottime(&uptime);
+ ktime_get_coarse_boottime_ts64(&uptime);
printf("Uptime: %lu.%.2lu seconds\n", (unsigned long)uptime.tv_sec,
((unsigned long)uptime.tv_nsec / (NSEC_PER_SEC/100)));
@@ -2429,7 +2431,6 @@ static void dump_one_paca(int cpu)
DUMP(p, thread_idle_state, "%#-*x");
DUMP(p, thread_mask, "%#-*x");
DUMP(p, subcore_sibling_mask, "%#-*x");
- DUMP(p, thread_sibling_pacas, "%-*px");
DUMP(p, requested_psscr, "%#-*llx");
DUMP(p, stop_sprs.pid, "%#-*llx");
DUMP(p, stop_sprs.ldbar, "%#-*llx");
@@ -3353,7 +3354,8 @@ static void
symbol_lookup(void)
{
int type = inchar();
- unsigned long addr;
+ unsigned long addr, cpu;
+ void __percpu *ptr = NULL;
static char tmp[64];
switch (type) {
@@ -3377,6 +3379,34 @@ symbol_lookup(void)
catch_memory_errors = 0;
termch = 0;
break;
+ case 'p':
+ getstring(tmp, 64);
+ if (setjmp(bus_error_jmp) == 0) {
+ catch_memory_errors = 1;
+ sync();
+ ptr = (void __percpu *)kallsyms_lookup_name(tmp);
+ sync();
+ }
+
+ if (ptr &&
+ ptr >= (void __percpu *)__per_cpu_start &&
+ ptr < (void __percpu *)__per_cpu_end)
+ {
+ if (scanhex(&cpu) && cpu < num_possible_cpus()) {
+ addr = (unsigned long)per_cpu_ptr(ptr, cpu);
+ } else {
+ cpu = raw_smp_processor_id();
+ addr = (unsigned long)this_cpu_ptr(ptr);
+ }
+
+ printf("%s for cpu 0x%lx: %lx\n", tmp, cpu, addr);
+ } else {
+ printf("Percpu symbol '%s' not found.\n", tmp);
+ }
+
+ catch_memory_errors = 0;
+ termch = 0;
+ break;
}
}
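
The new xmon `lp` command resolves a percpu symbol to a concrete address by adding a CPU's percpu base offset to the symbol's offset within the percpu image. A userspace model of that address computation, with assumed illustrative offsets rather than real kernel values:

#include <stdio.h>

#define NR_CPUS 4

/* Assumed per-CPU base offsets, modelling __per_cpu_offset[] */
static unsigned long per_cpu_offset_model[NR_CPUS] = {
	0x10000, 0x20000, 0x30000, 0x40000
};

static unsigned long per_cpu_ptr_model(unsigned long sym, int cpu)
{
	return sym + per_cpu_offset_model[cpu];
}

int main(void)
{
	unsigned long sym = 0x1234;	/* percpu symbol's image offset */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: %#lx\n", cpu, per_cpu_ptr_model(sym, cpu));
	return 0;
}
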