Diffstat (limited to 'arch/powerpc/include')
48 files changed, 672 insertions, 141 deletions
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index d8f9d2f18a23..6c0a955a1b06 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -3,4 +3,5 @@ generic-y += clkdev.h generic-y += rwsem.h generic-y += trace_clock.h generic-y += preempt.h -generic-y += vtime.h
\ No newline at end of file +generic-y += vtime.h +generic-y += hash.h diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ae782254e731..f89da808ce31 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -45,11 +45,15 @@ # define SMPWMB eieio #endif +#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") + #define smp_mb() mb() -#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define smp_rmb() __lwsync() #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") #define smp_read_barrier_depends() read_barrier_depends() #else +#define __lwsync() barrier() + #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() @@ -65,4 +69,19 @@ #define data_barrier(x) \ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + __lwsync(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + __lwsync(); \ + ___p1; \ +}) + #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 910194e9a1e2..a5e9a7d494d8 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -46,6 +46,11 @@ #include <asm/asm-compat.h> #include <asm/synch.h> +/* PPC bit number conversion */ +#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be)) +#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit)) +#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) + /* * clear_bit doesn't imply a memory barrier */ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 9e495c9a6a88..ed0afc1e44a4 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -41,8 +41,20 @@ struct ppc64_caches { extern struct ppc64_caches ppc64_caches; #endif /* __powerpc64__ && ! __ASSEMBLY__ */ -#if !defined(__ASSEMBLY__) +#if defined(__ASSEMBLY__) +/* + * For a snooping icache, we still need a dummy icbi to purge all the + * prefetched instructions from the ifetch buffers. We also need a sync + * before the icbi to order the actual stores to memory that might + * have modified instructions with the icbi.
+ */ +#define PURGE_PREFETCHED_INS \ + sync; \ + icbi 0,r3; \ + sync; \ + isync +#else #define __read_mostly __attribute__((__section__(".data..read_mostly"))) #ifdef CONFIG_6xx diff --git a/arch/powerpc/include/asm/clk_interface.h b/arch/powerpc/include/asm/clk_interface.h deleted file mode 100644 index ab1882c1e176..000000000000 --- a/arch/powerpc/include/asm/clk_interface.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __ASM_POWERPC_CLK_INTERFACE_H -#define __ASM_POWERPC_CLK_INTERFACE_H - -#include <linux/clk.h> - -struct clk_interface { - struct clk* (*clk_get) (struct device *dev, const char *id); - int (*clk_enable) (struct clk *clk); - void (*clk_disable) (struct clk *clk); - unsigned long (*clk_get_rate) (struct clk *clk); - void (*clk_put) (struct clk *clk); - long (*clk_round_rate) (struct clk *clk, unsigned long rate); - int (*clk_set_rate) (struct clk *clk, unsigned long rate); - int (*clk_set_parent) (struct clk *clk, struct clk *parent); - struct clk* (*clk_get_parent) (struct clk *clk); -}; - -extern struct clk_interface clk_functions; - -#endif /* __ASM_POWERPC_CLK_INTERFACE_H */ diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h index e245aab7f191..d463c68fe7f0 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -300,6 +300,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg_local((ptr), (o), (n)); \ }) +#define cmpxchg64_relaxed cmpxchg64_local #else #include <asm-generic/cmpxchg-local.h> #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index a6f8c7a5cbb7..97e02f985df8 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -34,6 +34,13 @@ int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr); unsigned long branch_target(const unsigned int *instr); unsigned int translate_branch(const unsigned int *dest, const unsigned int *src); +#ifdef CONFIG_PPC_BOOK3E_64 +void __patch_exception(int exc, unsigned long addr); +#define patch_exception(exc, name) do { \ + extern unsigned int name; \ + __patch_exception((exc), (unsigned long)&name); \ +} while (0) +#endif static inline unsigned long ppc_function_entry(void *func) { diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 0d4939ba48e7..617cc767c076 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -90,6 +90,18 @@ struct cpu_spec { * if the error is fatal, 1 if it was fully recovered and 0 to * pass up (not CPU originated) */ int (*machine_check)(struct pt_regs *regs); + + /* + * Processor specific early machine check handler which is + * called in real mode to handle SLB and TLB errors. + */ + long (*machine_check_early)(struct pt_regs *regs); + + /* + * Processor specific routine to flush tlbs. 
+ */ + void (*flush_tlb)(unsigned long inval_selector); + }; extern struct cpu_spec *cur_cpu_spec; diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index d3e5e9bc8f94..9e39ceb1d19f 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -90,7 +90,8 @@ struct eeh_pe { #define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */ #define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */ -#define EEH_DEV_SYSFS (1 << 8) /* Sysfs created */ +#define EEH_DEV_NO_HANDLER (1 << 8) /* No error handler */ +#define EEH_DEV_SYSFS (1 << 9) /* Sysfs created */ struct eeh_dev { int mode; /* EEH mode */ @@ -117,6 +118,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev) return edev ? edev->pdev : NULL; } +/* Return values from eeh_ops::next_error */ +enum { + EEH_NEXT_ERR_NONE = 0, + EEH_NEXT_ERR_INF, + EEH_NEXT_ERR_FROZEN_PE, + EEH_NEXT_ERR_FENCED_PHB, + EEH_NEXT_ERR_DEAD_PHB, + EEH_NEXT_ERR_DEAD_IOC +}; + /* * The struct is used to trace the registered EEH operation * callback functions. Actually, those operation callback @@ -157,6 +168,7 @@ struct eeh_ops { int (*read_config)(struct device_node *dn, int where, int size, u32 *val); int (*write_config)(struct device_node *dn, int where, int size, u32 val); int (*next_error)(struct eeh_pe **pe); + int (*restore_config)(struct device_node *dn); }; extern struct eeh_ops *eeh_ops; diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 894662a5d4d5..66830618cc19 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -284,7 +284,7 @@ do_kvm_##n: \ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ beq- 1f; \ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ -1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ +1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \ blt+ cr1,3f; /* abort if it is */ \ li r1,(n); /* will be reloaded later */ \ sth r1,PACA_TRAP_SAVE(r13); \ @@ -301,9 +301,12 @@ do_kvm_##n: \ beq 4f; /* if from kernel mode */ \ ACCOUNT_CPU_USER_ENTRY(r9, r10); \ SAVE_PPR(area, r9, r10); \ -4: std r2,GPR2(r1); /* save r2 in stackframe */ \ - SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ - SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ +4: EXCEPTION_PROLOG_COMMON_2(area) \ + EXCEPTION_PROLOG_COMMON_3(n) \ + ACCOUNT_STOLEN_TIME + +/* Save original regs values from save area to stack frame. 
*/ +#define EXCEPTION_PROLOG_COMMON_2(area) \ ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ ld r10,area+EX_R10(r13); \ std r9,GPR9(r1); \ @@ -318,11 +321,16 @@ do_kvm_##n: \ ld r10,area+EX_CFAR(r13); \ std r10,ORIG_GPR3(r1); \ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ + GET_CTR(r10, area); \ + std r10,_CTR(r1); + +#define EXCEPTION_PROLOG_COMMON_3(n) \ + std r2,GPR2(r1); /* save r2 in stackframe */ \ + SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ + SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ mflr r9; /* Get LR, later save to stack */ \ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ std r9,_LINK(r1); \ - GET_CTR(r10, area); \ - std r10,_CTR(r1); \ lbz r10,PACASOFTIRQEN(r13); \ mfspr r11,SPRN_XER; /* save XER in stackframe */ \ std r10,SOFTE(r1); \ @@ -332,8 +340,7 @@ do_kvm_##n: \ li r10,0; \ ld r11,exception_marker@toc(r2); \ std r10,RESULT(r1); /* clear regs->result */ \ - std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ - ACCOUNT_STOLEN_TIME + std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ /* * Exception vectors. diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 5c2c0233175e..90f604bbcd19 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -58,52 +58,12 @@ enum fixed_addresses { extern void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags); -#define set_fixmap(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL) -/* - * Some hardware wants to get fixmapped without caching. - */ -#define set_fixmap_nocache(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL_NCG) - -#define clear_fixmap(idx) \ - __set_fixmap(idx, 0, __pgprot(0)) - #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) -#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) - -extern void __this_fixmap_does_not_exist(void); - -/* - * 'index to address' translation. If anyone tries to use the idx - * directly without tranlation, we catch the bug with a NULL-deference - * kernel oops. Illegal ranges of incoming indices are caught too. - */ -static __always_inline unsigned long fix_to_virt(const unsigned int idx) -{ - /* - * this branch gets completely eliminated after inlining, - * except when someone tries to use fixaddr indices in an - * illegal way. (such as mixing up address types or using - * out-of-range indices). - * - * If it doesn't get removed, the linker will complain - * loudly with a reasonably clear error message.. 
- */ - if (idx >= __end_of_fixed_addresses) - __this_fixmap_does_not_exist(); - - return __fix_to_virt(idx); -} +#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG -static inline unsigned long virt_to_fix(const unsigned long vaddr) -{ - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); - return __virt_to_fix(vaddr); -} +#include <asm-generic/fixmap.h> #endif /* !__ASSEMBLY__ */ #endif diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h index 420b45368fcf..067fb0dca549 100644 --- a/arch/powerpc/include/asm/fsl_lbc.h +++ b/arch/powerpc/include/asm/fsl_lbc.h @@ -285,7 +285,7 @@ struct fsl_lbc_ctrl { /* device info */ struct device *dev; struct fsl_lbc_regs __iomem *regs; - int irq; + int irq[2]; wait_queue_head_t irq_wait; spinlock_t lock; void *nand; diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h index 3bdcfce2c42a..418fb654370d 100644 --- a/arch/powerpc/include/asm/hardirq.h +++ b/arch/powerpc/include/asm/hardirq.h @@ -6,7 +6,8 @@ typedef struct { unsigned int __softirq_pending; - unsigned int timer_irqs; + unsigned int timer_irqs_event; + unsigned int timer_irqs_others; unsigned int pmu_irqs; unsigned int mce_exceptions; unsigned int spurious_irqs; diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 575fbf81fad0..97d3869991ca 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -191,8 +191,24 @@ DEF_MMIO_OUT_D(out_le32, 32, stw); #endif /* __BIG_ENDIAN */ +/* + * Cache inhibited accessors for use in real mode; you don't want to use these + * unless you know what you're doing. + * + * NB. These use the cpu byte ordering. + */ +DEF_MMIO_OUT_X(out_rm8, 8, stbcix); +DEF_MMIO_OUT_X(out_rm16, 16, sthcix); +DEF_MMIO_OUT_X(out_rm32, 32, stwcix); +DEF_MMIO_IN_X(in_rm8, 8, lbzcix); +DEF_MMIO_IN_X(in_rm16, 16, lhzcix); +DEF_MMIO_IN_X(in_rm32, 32, lwzcix); + #ifdef __powerpc64__ +DEF_MMIO_OUT_X(out_rm64, 64, stdcix); +DEF_MMIO_IN_X(in_rm64, 64, ldcix); + #ifdef __BIG_ENDIAN__ DEF_MMIO_OUT_D(out_be64, 64, std); DEF_MMIO_IN_D(in_be64, 64, ld); diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index c34656a8925e..f7a8036579b5 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -30,22 +30,19 @@ #include <asm/machdep.h> #include <asm/types.h> -#define IOMMU_PAGE_SHIFT 12 -#define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT) -#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1)) -#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE) +#define IOMMU_PAGE_SHIFT_4K 12 +#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K) +#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1)) +#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K) + +#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift) +#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1)) +#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr)) /* Boot time flags */ extern int iommu_is_off; extern int iommu_force_on; -/* Pure 2^n version of get_order */ -static __inline__ __attribute_const__ int get_iommu_order(unsigned long size) -{ - return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1; -} - - /* * IOMAP_MAX_ORDER defines the largest contiguous block * of dma space we can get.
IOMAP_MAX_ORDER = 13 @@ -76,11 +73,20 @@ struct iommu_table { struct iommu_pool large_pool; struct iommu_pool pools[IOMMU_NR_POOLS]; unsigned long *it_map; /* A simple allocation bitmap for now */ + unsigned long it_page_shift;/* table iommu page size */ #ifdef CONFIG_IOMMU_API struct iommu_group *it_group; #endif }; +/* Pure 2^n version of get_order */ +static inline __attribute_const__ +int get_iommu_order(unsigned long size, struct iommu_table *tbl) +{ + return __ilog2((size - 1) >> tbl->it_page_shift) + 1; +} + + struct scatterlist; static inline void set_iommu_table_base(struct device *dev, void *base) @@ -101,8 +107,34 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name); */ extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, int nid); +#ifdef CONFIG_IOMMU_API extern void iommu_register_group(struct iommu_table *tbl, int pci_domain_number, unsigned long pe_num); +extern int iommu_add_device(struct device *dev); +extern void iommu_del_device(struct device *dev); +#else +static inline void iommu_register_group(struct iommu_table *tbl, + int pci_domain_number, + unsigned long pe_num) +{ +} + +static inline int iommu_add_device(struct device *dev) +{ + return 0; +} + +static inline void iommu_del_device(struct device *dev) +{ +} +#endif /* !CONFIG_IOMMU_API */ + +static inline void set_iommu_table_base_and_group(struct device *dev, + void *base) +{ + set_iommu_table_base(dev, base); + iommu_add_device(dev); +} extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl, struct scatterlist *sglist, int nelems, diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 1bd92fd43cfb..1503d8c7c41b 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -74,6 +74,7 @@ #define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39 #define BOOKE_INTERRUPT_HV_SYSCALL 40 #define BOOKE_INTERRUPT_HV_PRIV 41 +#define BOOKE_INTERRUPT_LRAT_ERROR 42 /* book3s */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4a594b76674d..bc23b1ba7980 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void); extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); +extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, + struct kvm_vcpu *vcpu); +extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, + struct kvmppc_book3s_shadow_vcpu *svcpu); static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) { diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0bd9348a4db9..192917d2239c 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -79,6 +79,7 @@ struct kvmppc_host_state { ulong vmhandler; ulong scratch0; ulong scratch1; + ulong scratch2; u8 in_guest; u8 restore_hid5; u8 napping; @@ -106,6 +107,7 @@ struct kvmppc_host_state { }; struct kvmppc_book3s_shadow_vcpu { + bool in_use; ulong gpr[14]; u32 cr; u32 xer; diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 844c28de7ec0..d0a2a2f99564 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -132,8 +132,6 @@ struct slb_shadow { } save_area[SLB_NUM_BOLTED]; 
} ____cacheline_aligned; -extern struct slb_shadow slb_shadow[]; - /* * Layout of entries in the hypervisor's dispatch trace log buffer. */ diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h new file mode 100644 index 000000000000..8e99edf6d966 --- /dev/null +++ b/arch/powerpc/include/asm/mce.h @@ -0,0 +1,197 @@ +/* + * Machine check exception header file. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright 2013 IBM Corporation + * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> + */ + +#ifndef __ASM_PPC64_MCE_H__ +#define __ASM_PPC64_MCE_H__ + +#include <linux/bitops.h> + +/* + * Machine Check bits on power7 and power8 + */ +#define P7_SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42)) /* P8 too */ + +/* SRR1 bits for machine check (On Power7 and Power8) */ +#define P7_SRR1_MC_IFETCH(srr1) ((srr1) & PPC_BITMASK(43, 45)) /* P8 too */ + +#define P7_SRR1_MC_IFETCH_UE (0x1 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_SLB_PARITY (0x2 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT (0x3 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_SLB_BOTH (0x4 << PPC_BITLSHIFT(45)) +#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT (0x5 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD (0x6 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL (0x7 << PPC_BITLSHIFT(45)) + +/* SRR1 bits for machine check (On Power8) */ +#define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT (0x4 << PPC_BITLSHIFT(45)) + +/* DSISR bits for machine check (On Power7 and Power8) */ +#define P7_DSISR_MC_UE (PPC_BIT(48)) /* P8 too */ +#define P7_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) /* P8 too */ +#define P7_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) /* P8 too */ +#define P7_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) /* P8 too */ +#define P7_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) /* P8 too */ +#define P7_DSISR_MC_SLB_MULTIHIT (PPC_BIT(56)) /* P8 too */ +#define P7_DSISR_MC_SLB_MULTIHIT_PARITY (PPC_BIT(57)) /* P8 too */ + +/* + * DSISR bits for machine check (Power8) in addition to above. 
+ * Secondary DERAT Multihit + */ +#define P8_DSISR_MC_ERAT_MULTIHIT_SEC (PPC_BIT(54)) + +/* SLB error bits */ +#define P7_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_ERAT_MULTIHIT | \ + P7_DSISR_MC_SLB_PARITY_MFSLB | \ + P7_DSISR_MC_SLB_MULTIHIT | \ + P7_DSISR_MC_SLB_MULTIHIT_PARITY) + +#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \ + P8_DSISR_MC_ERAT_MULTIHIT_SEC) +enum MCE_Version { + MCE_V1 = 1, +}; + +enum MCE_Severity { + MCE_SEV_NO_ERROR = 0, + MCE_SEV_WARNING = 1, + MCE_SEV_ERROR_SYNC = 2, + MCE_SEV_FATAL = 3, +}; + +enum MCE_Disposition { + MCE_DISPOSITION_RECOVERED = 0, + MCE_DISPOSITION_NOT_RECOVERED = 1, +}; + +enum MCE_Initiator { + MCE_INITIATOR_UNKNOWN = 0, + MCE_INITIATOR_CPU = 1, +}; + +enum MCE_ErrorType { + MCE_ERROR_TYPE_UNKNOWN = 0, + MCE_ERROR_TYPE_UE = 1, + MCE_ERROR_TYPE_SLB = 2, + MCE_ERROR_TYPE_ERAT = 3, + MCE_ERROR_TYPE_TLB = 4, +}; + +enum MCE_UeErrorType { + MCE_UE_ERROR_INDETERMINATE = 0, + MCE_UE_ERROR_IFETCH = 1, + MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2, + MCE_UE_ERROR_LOAD_STORE = 3, + MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4, +}; + +enum MCE_SlbErrorType { + MCE_SLB_ERROR_INDETERMINATE = 0, + MCE_SLB_ERROR_PARITY = 1, + MCE_SLB_ERROR_MULTIHIT = 2, +}; + +enum MCE_EratErrorType { + MCE_ERAT_ERROR_INDETERMINATE = 0, + MCE_ERAT_ERROR_PARITY = 1, + MCE_ERAT_ERROR_MULTIHIT = 2, +}; + +enum MCE_TlbErrorType { + MCE_TLB_ERROR_INDETERMINATE = 0, + MCE_TLB_ERROR_PARITY = 1, + MCE_TLB_ERROR_MULTIHIT = 2, +}; + +struct machine_check_event { + enum MCE_Version version:8; /* 0x00 */ + uint8_t in_use; /* 0x01 */ + enum MCE_Severity severity:8; /* 0x02 */ + enum MCE_Initiator initiator:8; /* 0x03 */ + enum MCE_ErrorType error_type:8; /* 0x04 */ + enum MCE_Disposition disposition:8; /* 0x05 */ + uint8_t reserved_1[2]; /* 0x06 */ + uint64_t gpr3; /* 0x08 */ + uint64_t srr0; /* 0x10 */ + uint64_t srr1; /* 0x18 */ + union { /* 0x20 */ + struct { + enum MCE_UeErrorType ue_error_type:8; + uint8_t effective_address_provided; + uint8_t physical_address_provided; + uint8_t reserved_1[5]; + uint64_t effective_address; + uint64_t physical_address; + uint8_t reserved_2[8]; + } ue_error; + + struct { + enum MCE_SlbErrorType slb_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } slb_error; + + struct { + enum MCE_EratErrorType erat_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } erat_error; + + struct { + enum MCE_TlbErrorType tlb_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } tlb_error; + } u; +}; + +struct mce_error_info { + enum MCE_ErrorType error_type:8; + union { + enum MCE_UeErrorType ue_error_type:8; + enum MCE_SlbErrorType slb_error_type:8; + enum MCE_EratErrorType erat_error_type:8; + enum MCE_TlbErrorType tlb_error_type:8; + } u; + uint8_t reserved[2]; +}; + +#define MAX_MC_EVT 100 + +/* Release flags for get_mce_event() */ +#define MCE_EVENT_RELEASE true +#define MCE_EVENT_DONTRELEASE false + +extern void save_mce_event(struct pt_regs *regs, long handled, + struct mce_error_info *mce_err, uint64_t addr); +extern int get_mce_event(struct machine_check_event *mce, bool release); +extern void release_mce_event(void); +extern void machine_check_queue_event(void); +extern void machine_check_print_event_info(struct machine_check_event *evt); +extern uint64_t get_mce_fault_addr(struct machine_check_event *evt); + 
+#endif /* __ASM_PPC64_MCE_H__ */ diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 936db360790a..89b785d16846 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -286,8 +286,21 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) extern int mmu_linear_psize; extern int mmu_vmemmap_psize; +struct tlb_core_data { + /* For software way selection, as on Freescale TLB1 */ + u8 esel_next, esel_max, esel_first; + + /* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */ + u8 lock; +}; + #ifdef CONFIG_PPC64 extern unsigned long linear_map_top; +extern int book3e_htw_mode; + +#define PPC_HTW_NONE 0 +#define PPC_HTW_IBM 1 +#define PPC_HTW_E6500 2 /* * 64-bit booke platforms don't load the tlb in the tlb miss handler code. diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 691fd8aca939..f8d1d6dcf7db 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -180,16 +180,17 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr) #define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */ #define MMU_PAGE_256K 4 #define MMU_PAGE_1M 5 -#define MMU_PAGE_4M 6 -#define MMU_PAGE_8M 7 -#define MMU_PAGE_16M 8 -#define MMU_PAGE_64M 9 -#define MMU_PAGE_256M 10 -#define MMU_PAGE_1G 11 -#define MMU_PAGE_16G 12 -#define MMU_PAGE_64G 13 - -#define MMU_PAGE_COUNT 14 +#define MMU_PAGE_2M 6 +#define MMU_PAGE_4M 7 +#define MMU_PAGE_8M 8 +#define MMU_PAGE_16M 9 +#define MMU_PAGE_64M 10 +#define MMU_PAGE_256M 11 +#define MMU_PAGE_1G 12 +#define MMU_PAGE_16G 13 +#define MMU_PAGE_64G 14 + +#define MMU_PAGE_COUNT 15 #if defined(CONFIG_PPC_STD_MMU_64) /* 64-bit classic hash table MMU */ diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h index 887d3d6133e3..4a69cd1d5041 100644 --- a/arch/powerpc/include/asm/mpc5121.h +++ b/arch/powerpc/include/asm/mpc5121.h @@ -37,7 +37,12 @@ struct mpc512x_ccm { u32 cccr; /* CFM Clock Control Register */ u32 dccr; /* DIU Clock Control Register */ u32 mscan_ccr[4]; /* MSCAN Clock Control Registers */ - u8 res[0x98]; /* Reserved */ + u32 out_ccr[4]; /* OUT CLK Configure Registers */ + u32 rsv0[2]; /* Reserved */ + u32 scfr3; /* System Clock Frequency Register 3 */ + u32 rsv1[3]; /* Reserved */ + u32 spll_lock_cnt; /* System PLL Lock Counter */ + u8 res[0x6c]; /* Reserved */ }; /* diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 033c06be1d84..40157e2ca691 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -33,6 +33,28 @@ struct opal_takeover_args { u64 rd_loc; /* r11 */ }; +/* + * SG entry + * + * WARNING: The current implementation requires each entry + * to represent a block that is 4k aligned *and* each block + * size except the last one in the list to be as well. 
+ */ +struct opal_sg_entry { + void *data; + long length; +}; + +/* sg list */ +struct opal_sg_list { + unsigned long num_entries; + struct opal_sg_list *next; + struct opal_sg_entry entry[]; +}; + +/* We calculate number of sg entries based on PAGE_SIZE */ +#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) + extern long opal_query_takeover(u64 *hal_size, u64 *hal_align); extern long opal_do_takeover(struct opal_takeover_args *args); @@ -132,6 +154,9 @@ extern int opal_enter_rtas(struct rtas_args *args, #define OPAL_FLASH_VALIDATE 76 #define OPAL_FLASH_MANAGE 77 #define OPAL_FLASH_UPDATE 78 +#define OPAL_GET_MSG 85 +#define OPAL_CHECK_ASYNC_COMPLETION 86 +#define OPAL_SYNC_HOST_REBOOT 87 #ifndef __ASSEMBLY__ @@ -211,7 +236,16 @@ enum OpalPendingState { OPAL_EVENT_ERROR_LOG = 0x40, OPAL_EVENT_EPOW = 0x80, OPAL_EVENT_LED_STATUS = 0x100, - OPAL_EVENT_PCI_ERROR = 0x200 + OPAL_EVENT_PCI_ERROR = 0x200, + OPAL_EVENT_MSG_PENDING = 0x800, +}; + +enum OpalMessageType { + OPAL_MSG_ASYNC_COMP = 0, + OPAL_MSG_MEM_ERR, + OPAL_MSG_EPOW, + OPAL_MSG_SHUTDOWN, + OPAL_MSG_TYPE_MAX, }; /* Machine check related definitions */ @@ -311,12 +345,16 @@ enum OpalMveEnableAction { OPAL_ENABLE_MVE = 1 }; -enum OpalPciResetAndReinitScope { +enum OpalPciResetScope { OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3, OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5, OPAL_PCI_IODA_TABLE_RESET = 6, }; +enum OpalPciReinitScope { + OPAL_REINIT_PCI_DEV = 1000 +}; + enum OpalPciResetState { OPAL_DEASSERT_RESET = 0, OPAL_ASSERT_RESET = 1 } @@ -356,6 +394,12 @@ enum OpalLPCAddressType { OPAL_LPC_FW = 2, }; +struct opal_msg { + uint32_t msg_type; + uint32_t reserved; + uint64_t params[8]; +}; + struct opal_machine_check_event { enum OpalMCE_Version version:8; /* 0x00 */ uint8_t in_use; /* 0x01 */ @@ -404,6 +448,58 @@ struct opal_machine_check_event { } u; }; +/* FSP memory error handling */ +enum OpalMemErr_Version { + OpalMemErr_V1 = 1, +}; + +enum OpalMemErrType { + OPAL_MEM_ERR_TYPE_RESILIENCE = 0, + OPAL_MEM_ERR_TYPE_DYN_DALLOC, + OPAL_MEM_ERR_TYPE_SCRUB, +}; + +/* Memory Resilience error type */ +enum OpalMemErr_ResilErrType { + OPAL_MEM_RESILIENCE_CE = 0, + OPAL_MEM_RESILIENCE_UE, + OPAL_MEM_RESILIENCE_UE_SCRUB, +}; + +/* Dynamic Memory Deallocation type */ +enum OpalMemErr_DynErrType { + OPAL_MEM_DYNAMIC_DEALLOC = 0, +}; + +/* OpalMemoryErrorData->flags */ +#define OPAL_MEM_CORRECTED_ERROR 0x0001 +#define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002 +#define OPAL_MEM_ACK_REQUIRED 0x8000 + +struct OpalMemoryErrorData { + enum OpalMemErr_Version version:8; /* 0x00 */ + enum OpalMemErrType type:8; /* 0x01 */ + uint16_t flags; /* 0x02 */ + uint8_t reserved_1[4]; /* 0x04 */ + + union { + /* Memory Resilience corrected/uncorrected error info */ + struct { + enum OpalMemErr_ResilErrType resil_err_type:8; + uint8_t reserved_1[7]; + uint64_t physical_address_start; + uint64_t physical_address_end; + } resilience; + /* Dynamic memory deallocation error info */ + struct { + enum OpalMemErr_DynErrType dyn_err_type:8; + uint8_t reserved_1[7]; + uint64_t physical_address_start; + uint64_t physical_address_end; + } dyn_dealloc; + } u; +}; + enum { OPAL_P7IOC_DIAG_TYPE_NONE = 0, OPAL_P7IOC_DIAG_TYPE_RGC = 1, @@ -710,7 +806,7 @@ int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer, int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer, uint64_t diag_buffer_len); int64_t opal_pci_fence_phb(uint64_t phb_id); -int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
+int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data); int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action); int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); int64_t opal_get_epow_status(__be64 *status); @@ -720,17 +816,21 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, int64_t opal_pci_poll(uint64_t phb_id); int64_t opal_return_cpu(void); -int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val); +int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, uint32_t addr, uint32_t data, uint32_t sz); int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, - uint32_t addr, uint32_t *data, uint32_t sz); + uint32_t addr, __be32 *data, uint32_t sz); int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); int64_t opal_manage_flash(uint8_t op); int64_t opal_update_flash(uint64_t blk_list); +int64_t opal_get_msg(uint64_t buffer, size_t size); +int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token); +int64_t opal_sync_host_reboot(void); + /* Internal functions */ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); @@ -744,6 +844,8 @@ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); extern int opal_notifier_register(struct notifier_block *nb); +extern int opal_message_notifier_register(enum OpalMessageType msg_type, + struct notifier_block *nb); extern void opal_notifier_enable(void); extern void opal_notifier_disable(void); extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index b6ea9e068c13..9c5dbc3833fb 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -16,7 +16,6 @@ #ifdef CONFIG_PPC64 -#include <linux/init.h> #include <asm/types.h> #include <asm/lppaca.h> #include <asm/mmu.h> @@ -113,6 +112,10 @@ struct paca_struct { /* Keep pgd in the same cacheline as the start of extlb */ pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */ pgd_t *kernel_pgd; /* Kernel PGD */ + + /* Shared by all threads of a core -- points to tcd of first thread */ + struct tlb_core_data *tcd_ptr; + /* We can have up to 3 levels of reentrancy in the TLB miss handler */ u64 extlb[3][EX_TLB_SIZE / sizeof(u64)]; u64 exmc[8]; /* used for machine checks */ @@ -123,6 +126,8 @@ struct paca_struct { void *mc_kstack; void *crit_kstack; void *dbg_kstack; + + struct tlb_core_data tcd; #endif /* CONFIG_PPC_BOOK3E */ mm_context_t context; @@ -152,6 +157,15 @@ struct paca_struct { */ struct opal_machine_check_event *opal_mc_evt; #endif +#ifdef CONFIG_PPC_BOOK3S_64 + /* Exclusive emergency stack pointer for machine check exception. */ + void *mc_emergency_sp; + /* + * Flag to check whether we are in machine check early handler + * and already using emergency stack. 
+ */ + u16 in_mce; +#endif /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 27b2386f738a..842846c1b711 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { - struct page *page = page_address(table); - tlb_flush_pgtable(tlb, address); - pgtable_page_dtor(page); - pgtable_free_tlb(tlb, page, 0); + pgtable_page_dtor(table); + pgtable_free_tlb(tlb, page_address(table), 0); } #endif /* _ASM_POWERPC_PGALLOC_32_H */ diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 694012877bf7..4b0be20fcbfd 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -148,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { - struct page *page = page_address(table); - tlb_flush_pgtable(tlb, address); - pgtable_page_dtor(page); - pgtable_free_tlb(tlb, page, 0); + pgtable_page_dtor(table); + pgtable_free_tlb(tlb, page_address(table), 0); } #else /* if CONFIG_PPC_64K_PAGES */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 4a191c472867..bc141c950b1e 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -558,5 +558,19 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #define __HAVE_ARCH_PMDP_INVALIDATE extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); + +#define pmd_move_must_withdraw pmd_move_must_withdraw +struct spinlock; +static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl) +{ + /* + * Archs like ppc64 use pgtable to store per pmd + * specific information. 
So when we switch the pmd, + * we should also withdraw and deposit the pgtable + */ + return true; +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 7d6eacf249cf..b999ca318985 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -3,6 +3,7 @@ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ +#include <linux/mmdebug.h> #include <asm/processor.h> /* For TASK_SIZE */ #include <asm/mmu.h> #include <asm/page.h> @@ -33,10 +34,73 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } -static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; } static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } +#ifdef CONFIG_NUMA_BALANCING + +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA); +} + +#define pte_numa pte_numa +static inline int pte_numa(pte_t pte) +{ + return (pte_val(pte) & + (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA; +} + +#define pte_mknonnuma pte_mknonnuma +static inline pte_t pte_mknonnuma(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_NUMA; + pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED; + return pte; +} + +#define pte_mknuma pte_mknuma +static inline pte_t pte_mknuma(pte_t pte) +{ + /* + * We should not set _PAGE_NUMA on non-present ptes. Also clear the + * present bit so that hash_page will return 1 and we collect this + * as a numa fault. + */ + if (pte_present(pte)) { + pte_val(pte) |= _PAGE_NUMA; + pte_val(pte) &= ~_PAGE_PRESENT; + } else + VM_BUG_ON(1); + return pte; +} + +#define pmd_numa pmd_numa +static inline int pmd_numa(pmd_t pmd) +{ + return pte_numa(pmd_pte(pmd)); +} + +#define pmd_mknonnuma pmd_mknonnuma +static inline pmd_t pmd_mknonnuma(pmd_t pmd) +{ + return pte_pmd(pte_mknonnuma(pmd_pte(pmd))); +} + +#define pmd_mknuma pmd_mknuma +static inline pmd_t pmd_mknuma(pmd_t pmd) +{ + return pte_pmd(pte_mknuma(pmd_pte(pmd))); +} + +# else + +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} +#endif /* CONFIG_NUMA_BALANCING */ + /* Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. * diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index f595b98079ee..6586a40a46ce 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -4,7 +4,6 @@ #ifndef _ASM_POWERPC_PPC_ASM_H #define _ASM_POWERPC_PPC_ASM_H -#include <linux/init.h> #include <linux/stringify.h> #include <asm/asm-compat.h> #include <asm/processor.h> @@ -295,6 +294,11 @@ n: * you want to access various offsets within it). On ppc32 this is * identical to LOAD_REG_IMMEDIATE. * + * LOAD_REG_ADDR_PIC(rn, name) + * Loads the address of label 'name' into register 'rn'. Use this when + * the kernel doesn't run at the linked or relocated address. Please + * note that this macro will clobber the lr register.
+ * * LOAD_REG_ADDRBASE(rn, name) * ADDROFF(name) * LOAD_REG_ADDRBASE loads part of the address of label 'name' into @@ -305,6 +309,14 @@ n: * LOAD_REG_ADDRBASE(rX, name) * ld rY,ADDROFF(name)(rX) */ + +/* Be careful, this will clobber the lr register. */ +#define LOAD_REG_ADDR_PIC(reg, name) \ + bl 0f; \ +0: mflr reg; \ + addis reg,reg,(name - 0b)@ha; \ + addi reg,reg,(name - 0b)@l; + #ifdef __powerpc64__ #define LOAD_REG_IMMEDIATE(reg,expr) \ lis reg,(expr)@highest; \ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index fc14a38c7ccf..b62de43ae5f3 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -256,6 +256,8 @@ struct thread_struct { unsigned long evr[32]; /* upper 32-bits of SPE regs */ u64 acc; /* Accumulator */ unsigned long spefscr; /* SPE & eFP status */ + unsigned long spefscr_last; /* SPEFSCR value on last prctl + call or trap return */ int used_spe; /* set if process has used spe */ #endif /* CONFIG_SPE */ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -317,7 +319,9 @@ struct thread_struct { (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack) #ifdef CONFIG_SPE -#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, +#define SPEFSCR_INIT \ + .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \ + .spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, #else #define SPEFSCR_INIT #endif @@ -373,6 +377,8 @@ extern int set_endian(struct task_struct *tsk, unsigned int val); extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); +extern void fp_enable(void); +extern void vec_enable(void); extern void load_fp_state(struct thread_fp_state *fp); extern void store_fp_state(struct thread_fp_state *fp); extern void load_vr_state(struct thread_vr_state *vr); @@ -444,13 +450,6 @@ enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; extern int powersave_nap; /* set if nap mode can be used in idle loop */ extern void power7_nap(void); - -#ifdef CONFIG_PSERIES_IDLE -extern void update_smt_snooze_delay(int cpu, int residency); -#else -static inline void update_smt_snooze_delay(int cpu, int residency) {} -#endif - extern void flush_instruction_cache(void); extern void hard_reset_now(void); extern void poweroff_now(void); diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index 678a7c1d9cb8..a1bc7e758422 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -21,7 +21,6 @@ #if !defined(_ASM_POWERPC_PS3_H) #define _ASM_POWERPC_PS3_H -#include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <asm/cell-pmu.h> diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h index 0419eeb53274..2505d8eab15c 100644 --- a/arch/powerpc/include/asm/pte-hash64.h +++ b/arch/powerpc/include/asm/pte-hash64.h @@ -19,7 +19,7 @@ #define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ #define _PAGE_GUARDED 0x0008 -#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ +/* We can derive Memory coherence from _PAGE_NO_CACHE */ #define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ #define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ #define _PAGE_DIRTY 0x0080 /* C: page changed */ @@ -27,6 +27,12 @@ 
#define _PAGE_RW 0x0200 /* software: user write access allowed */ #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ +/* + * Used for tracking numa faults + */ +#define _PAGE_NUMA 0x00000010 /* Gather numa placement stats */ + + /* No separate kernel read-only */ #define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ #define _PAGE_KERNEL_RO _PAGE_KERNEL_RW diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index fa8388ed94c5..62b114e079cf 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1075,6 +1075,8 @@ #define PVR_8560 0x80200000 #define PVR_VER_E500V1 0x8020 #define PVR_VER_E500V2 0x8021 +#define PVR_VER_E6500 0x8040 + /* * For the 8xx processors, all of them report the same PVR family for * the PowerPC core. The various versions of these processors must be diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 2e31aacd8acc..163c3b05a76e 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -101,6 +101,7 @@ #define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */ #define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */ #define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */ +#define SPRN_IVOR42 0x1B4 /* Interrupt Vector Offset Register 42 */ #define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */ #define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */ #define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */ @@ -170,6 +171,7 @@ #define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */ #define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */ #define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */ +#define SPRN_PWRMGTCR0 0x3FB /* Power management control register 0 */ #define SPRN_SVR 0x3FF /* System Version Register */ /* @@ -216,6 +218,14 @@ #define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */ #define CCR1_TCS 0x00000080 /* Timer Clock Select */ +/* Bit definitions for PWRMGTCR0. */ +#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */ +#define PWRMGTCR0_PW20_ENT_SHIFT 8 +#define PWRMGTCR0_PW20_ENT 0x3F00 +#define PWRMGTCR0_AV_IDLE_PD_EN (1 << 22) /* Altivec idle enable */ +#define PWRMGTCR0_AV_IDLE_CNT_SHIFT 16 +#define PWRMGTCR0_AV_IDLE_CNT 0x3F0000 + /* Bit definitions for the MCSR. */ #define MCSR_MCS 0x80000000 /* Machine Check Summary */ #define MCSR_IB 0x40000000 /* Instruction PLB Error */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 703a8412dac2..11ba86e17631 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -26,6 +26,7 @@ extern void reloc_got2(unsigned long); void check_for_initrd(void); void do_init_bootmem(void); void setup_panic(void); +#define ARCH_PANIC_TIMEOUT 180 #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 5f54a744dcc5..35aa339410bd 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,7 +28,7 @@ #include <asm/synch.h> #include <asm/ppc-opcode.h> -#define arch_spin_is_locked(x) ((x)->slock != 0) +#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. 
*/ #ifdef CONFIG_PPC64 /* use 0x800000yy when locked, where yy == CPU number */ @@ -54,6 +54,16 @@ #define SYNC_IO #endif +static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + return lock.slock == 0; +} + +static inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ + return !arch_spin_value_unlocked(*lock); +} + /* * This returns the old value in the lock, so we succeeded * in getting the lock if the return value is 0. diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 9ee12610af02..aace90547614 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *); extern void enable_kernel_spe(void); extern void giveup_spe(struct task_struct *); extern void load_up_spe(struct task_struct *); -extern void switch_booke_debug_regs(struct thread_struct *new_thread); +extern void switch_booke_debug_regs(struct debug_reg *new_debug); #ifndef CONFIG_SMP extern void discard_lazy_cpu_state(void); diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 43523fe0d8b4..3ddf70276706 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -359,3 +359,5 @@ COMPAT_SYS(process_vm_readv) COMPAT_SYS(process_vm_writev) SYSCALL(finit_module) SYSCALL(ni_syscall) /* sys_kcmp */ +SYSCALL_SPU(sched_setattr) +SYSCALL_SPU(sched_getattr) diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 9854c564ac52..b034ecdb7c74 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -91,8 +91,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_32BIT 4 /* 32 bit binary */ -#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */ -#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ +#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ #define TIF_NOHZ 9 /* in adaptive nohz mode */ @@ -115,8 +114,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_32BIT (1<<TIF_32BIT) -#define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK) -#define _TIF_PERFMON_CTXSW (1<<TIF_PERFMON_CTXSW) +#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) #define _TIF_SECCOMP (1<<TIF_SECCOMP) @@ -132,7 +130,8 @@ static inline struct thread_info *current_thread_info(void) _TIF_NOHZ) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE) + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_RESTORE_TM) #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) /* Bits in local_flags */ diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h index 9dfbc34bdbf5..0c9f8b74dd97 100644 --- a/arch/powerpc/include/asm/tm.h +++ b/arch/powerpc/include/asm/tm.h @@ -15,6 +15,7 @@ extern void do_load_up_transact_altivec(struct thread_struct *thread); extern void tm_enable(void); extern void tm_reclaim(struct thread_struct *thread, unsigned long orig_msr, uint8_t cause); +extern void tm_reclaim_current(uint8_t cause); extern void tm_recheckpoint(struct thread_struct 
*thread, unsigned long orig_msr); extern void tm_abort(uint8_t cause); diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 89e3ef2496ac..d0b5fca6b077 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -22,7 +22,15 @@ struct device_node; static inline int cpu_to_node(int cpu) { - return numa_cpu_lookup_table[cpu]; + int nid; + + nid = numa_cpu_lookup_table[cpu]; + + /* + * During early boot, the numa-cpu lookup table might not have been + * setup for all CPUs yet. In such cases, default to node 0. + */ + return (nid < 0) ? 0 : nid; } #define parent_node(node) (node) diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h index 5f1b1e3c2137..8296381ae432 100644 --- a/arch/powerpc/include/asm/unaligned.h +++ b/arch/powerpc/include/asm/unaligned.h @@ -4,13 +4,18 @@ #ifdef __KERNEL__ /* - * The PowerPC can do unaligned accesses itself in big endian mode. + * The PowerPC can do unaligned accesses itself based on its endian mode. */ #include <linux/unaligned/access_ok.h> #include <linux/unaligned/generic.h> +#ifdef __LITTLE_ENDIAN__ +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le +#else #define get_unaligned __get_unaligned_be #define put_unaligned __put_unaligned_be +#endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UNALIGNED_H */ diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 3ca819f541bf..4494f029b632 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 355 +#define __NR_syscalls 357 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 75c6ecdb8f37..7422a999a39a 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t; struct arch_uprobe { union { - u8 insn[MAX_UINSN_BYTES]; - u8 ixol[MAX_UINSN_BYTES]; - u32 ainsn; + u32 insn; + u32 ixol; }; }; diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h index 68d0cc998b1b..4f9b7ca0710f 100644 --- a/arch/powerpc/include/asm/vio.h +++ b/arch/powerpc/include/asm/vio.h @@ -15,7 +15,6 @@ #define _ASM_POWERPC_VIO_H #ifdef __KERNEL__ -#include <linux/init.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/dma-mapping.h> diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index fa698324a1fd..a9c3e2e18c05 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -85,4 +85,6 @@ #define SO_MAX_PACING_RATE 47 +#define SO_BPF_EXTENSIONS 48 + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 74cb4d72d673..881bf2e2560d 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -377,6 +377,7 @@ #define __NR_process_vm_writev 352 #define __NR_finit_module 353 #define __NR_kcmp 354 - +#define __NR_sched_setattr 355 +#define __NR_sched_getattr 356 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
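
The smp_store_release()/smp_load_acquire() pair added to barrier.h implements acquire/release ordering with a single lwsync on each side. A minimal sketch of the message-passing pattern they are designed for (illustrative names, not code from this patch):

#include <asm/barrier.h>

static int payload;
static int flag;

static void producer(void)
{
	payload = 42;			/* A: publish the data */
	smp_store_release(&flag, 1);	/* lwsync orders A before the flag store */
}

static int consumer(void)
{
	if (smp_load_acquire(&flag))	/* lwsync orders the flag load before B */
		return payload;		/* B: guaranteed to observe 42 */
	return -1;
}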
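The PPC_BIT*() macros added to bitops.h convert IBM bit numbering (bit 0 is the most significant bit) into ordinary mask arithmetic; on a 64-bit build PPC_BIT(42) is 1UL << (63 - 42). A worked decode of the SRR1 machine-check fields defined in the new mce.h, assuming those macros (sketch only, helper names are hypothetical):

#include <asm/bitops.h>

static inline int srr1_mc_is_load_store(unsigned long srr1)
{
	/* the same test P7_SRR1_MC_LOADSTORE() performs */
	return (srr1 & PPC_BIT(42)) != 0;
}

static inline unsigned long srr1_mc_ifetch_field(unsigned long srr1)
{
	/* PPC_BITMASK(43, 45) builds a mask over IBM bits 43..45 inclusive */
	return srr1 & PPC_BITMASK(43, 45);
}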
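The new mce.h also declares a small produce/consume API around struct machine_check_event. A hypothetical consumer draining queued events in virtual mode; the real producer and consumer live in the accompanying arch/powerpc/kernel changes, so treat this as a sketch of the intended usage only:

#include <linux/kernel.h>
#include <asm/mce.h>

static void example_drain_mce_queue(void)
{
	struct machine_check_event evt;

	/* MCE_EVENT_RELEASE frees each per-CPU slot as the event is consumed */
	while (get_mce_event(&evt, MCE_EVENT_RELEASE)) {
		if (evt.version != MCE_V1)
			continue;	/* unknown layout, skip it */
		machine_check_print_event_info(&evt);
		if (evt.disposition == MCE_DISPOSITION_NOT_RECOVERED)
			panic("unrecoverable machine check");
	}
}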
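Because iommu.h now carries the page size per table (it_page_shift), size calculations must go through the table pointer rather than the old global IOMMU_PAGE_* constants, as the reworked IOMMU_PAGE_SIZE(tblptr) and get_iommu_order() show. A hypothetical helper illustrating the converted idiom:

#include <asm/iommu.h>

static unsigned long iommu_bytes_to_pages(struct iommu_table *tbl,
					  unsigned long size)
{
	/* round up to the table's IOMMU page size, then count pages */
	return (size + IOMMU_PAGE_SIZE(tbl) - 1) >> tbl->it_page_shift;
}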
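The CONFIG_NUMA_BALANCING helpers in the pgtable.h hunk encode a NUMA-hinting fault by trading _PAGE_PRESENT for _PAGE_NUMA, so hash_page() refuses the access and the resulting fault is accounted as a NUMA hint rather than a real miss. A sketch of the round trip under that config (illustration only; real callers update ptes through set_pte_at() and friends):

#include <asm/pgtable.h>

static pte_t example_numa_round_trip(pte_t pte)
{
	if (pte_present(pte) && !pte_numa(pte))
		pte = pte_mknuma(pte);		/* present -> hinting: sets _PAGE_NUMA, clears _PAGE_PRESENT */

	if (pte_numa(pte))
		pte = pte_mknonnuma(pte);	/* hinting -> present again, marked accessed */

	return pte;
}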
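Finally, the unaligned.h change makes get_unaligned()/put_unaligned() follow the kernel's byte order instead of being hard-wired big-endian, which little-endian ppc64 builds need. A hypothetical demonstration of the resulting behaviour:

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 read_native_u32(const u8 *p)
{
	/* BE kernel: big-endian load; LE kernel: little-endian load */
	return get_unaligned((const u32 *)p);
}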