Diffstat (limited to 'arch/powerpc/include/asm/book3s/64')
 -rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h          |  8
 -rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-64k.h         |  3
 -rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h             |  4
 -rw-r--r--  arch/powerpc/include/asm/book3s/64/hugetlb.h (renamed from arch/powerpc/include/asm/book3s/64/hugetlb-radix.h) | 28
 -rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h         | 47
 -rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h          | 18
 -rw-r--r--  arch/powerpc/include/asm/book3s/64/radix.h            | 28
 -rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-radix.h   |  2
 8 files changed, 100 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 1af837c561ba..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -16,9 +16,6 @@
#define H_PUD_TABLE_SIZE (sizeof(pud_t) << H_PUD_INDEX_SIZE)
#define H_PGD_TABLE_SIZE (sizeof(pgd_t) << H_PGD_INDEX_SIZE)
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT PMD_SHIFT
-
/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | \
H_PAGE_F_SECOND | H_PAGE_F_GIX)
@@ -39,12 +36,13 @@
#ifdef CONFIG_HUGETLB_PAGE
static inline int hash__hugepd_ok(hugepd_t hpd)
{
+ unsigned long hpdval = hpd_val(hpd);
/*
* if it is not a pte and have hugepd shift mask
* set, then it is a hugepd directory pointer
*/
- if (!(hpd.pd & _PAGE_PTE) &&
- ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+ if (!(hpdval & _PAGE_PTE) &&
+ ((hpdval & HUGEPD_SHIFT_MASK) != 0))
return true;
return false;
}
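
The hash-4k.h hunk above switches hash__hugepd_ok() to the hpd_val() accessor instead of reaching into hpd.pd directly. As a rough sketch of the accessor pattern being relied on (the definitions below are illustrative assumptions, not part of this diff):

/* Assumed shape of the hugepd accessors, analogous to pte_val()/__pte(). */
typedef struct { unsigned long pd; } hugepd_t;

static inline unsigned long hpd_val(hugepd_t x)
{
	return x.pd;
}

static inline hugepd_t __hugepd(unsigned long x)
{
	return (hugepd_t){ .pd = x };
}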
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 5aae4f530c21..f3dd21efa2ea 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -6,9 +6,6 @@
#define H_PUD_INDEX_SIZE 5
#define H_PGD_INDEX_SIZE 12
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT PAGE_SHIFT
-
#define H_PAGE_COMBO 0x00001000 /* this is a combo 4k page */
#define H_PAGE_4K_PFN 0x00002000 /* PFN is for a single 4k page */
/*
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f61cad3de4e6..4c935f7504f7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
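
The hash.h hunk above exports hash__create_section_mapping() and hash__remove_section_mapping() for memory hotplug. A hedged sketch of a possible generic caller, assuming the radix_enabled() dispatch used elsewhere in these headers (the radix branch below is a placeholder, not taken from this diff):

int create_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return -ENODEV;	/* assumed: the radix path is out of scope here */

	return hash__create_section_mapping(start, end);
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return -ENODEV;	/* assumed */

	return hash__remove_section_mapping(start, end);
}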
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index c45189aa7476..c62f14d0bec1 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
-#define _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
+#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
/*
* For radix we want generic code to handle hugetlb. But then if we want
* both hash and radix to be enabled together we need to workaround the
@@ -21,9 +21,33 @@ static inline int hstate_get_psize(struct hstate *hstate)
return MMU_PAGE_2M;
else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
return MMU_PAGE_1G;
+ else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
+ return MMU_PAGE_16M;
+ else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
+ return MMU_PAGE_16G;
else {
WARN(1, "Wrong huge page shift\n");
return mmu_virtual_psize;
}
}
+
+#define arch_make_huge_pte arch_make_huge_pte
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable)
+{
+ unsigned long page_shift;
+
+ if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+ return entry;
+
+ page_shift = huge_page_shift(hstate_vma(vma));
+ /*
+ * We don't support 1G hugetlb pages yet.
+ */
+ VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
+ if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+ return __pte(pte_val(entry) | _PAGE_LARGE);
+ else
+ return entry;
+}
#endif
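
hstate_get_psize() above now also recognizes 16M and 16G hugepages, and the new arch_make_huge_pte() marks 2M hugetlb PTEs with _PAGE_LARGE on POWER9 DD1. A sketch of where the hook sits, loosely paraphrasing the generic hugetlb fault path (the helper names below are assumptions):

static pte_t make_huge_pte_sketch(struct vm_area_struct *vma,
				  struct page *page, int writable)
{
	/* build the huge PTE from the page and the VMA protections */
	pte_t entry = mk_huge_pte(page, vma->vm_page_prot);

	if (writable)
		entry = huge_pte_mkwrite(huge_pte_mkdirty(entry));

	/* arch hook added by this patch; a no-op except on POWER9 DD1 */
	return arch_make_huge_pte(entry, vma, page, writable);
}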
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index e407af2b7333..2e6a823fa502 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -70,7 +70,9 @@
#define HPTE_V_SSIZE_SHIFT 62
#define HPTE_V_AVPN_SHIFT 7
+#define HPTE_V_COMMON_BITS ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
+#define HPTE_V_AVPN_3_0 ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
@@ -80,14 +82,16 @@
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
/*
- * ISA 3.0 have a different HPTE format.
+ * ISA 3.0 has a different HPTE format.
*/
#define HPTE_R_3_0_SSIZE_SHIFT 58
+#define HPTE_R_3_0_SSIZE_MASK (3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
+#define HPTE_R_RPN_3_0 ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP ASM_CONST(0x8000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
@@ -316,12 +320,43 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
*/
v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
v <<= HPTE_V_AVPN_SHIFT;
- if (!cpu_has_feature(CPU_FTR_ARCH_300))
- v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+ v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
return v;
}
/*
+ * ISA v3.0 defines a new HPTE format, which differs from the old
+ * format in having smaller AVPN and ARPN fields, and the B field
+ * in the second dword instead of the first.
+ */
+static inline unsigned long hpte_old_to_new_v(unsigned long v)
+{
+ /* trim AVPN, drop B */
+ return v & HPTE_V_COMMON_BITS;
+}
+
+static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
+{
+ /* move B field from 1st to 2nd dword, trim ARPN */
+ return (r & ~HPTE_R_3_0_SSIZE_MASK) |
+ (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
+}
+
+static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
+{
+ /* insert B field */
+ return (v & HPTE_V_COMMON_BITS) |
+ ((r & HPTE_R_3_0_SSIZE_MASK) <<
+ (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
+}
+
+static inline unsigned long hpte_new_to_old_r(unsigned long r)
+{
+ /* clear out B field */
+ return r & ~HPTE_R_3_0_SSIZE_MASK;
+}
+
+/*
* This function sets the AVPN and L fields of the HPTE appropriately
* using the base page size and actual page size.
*/
@@ -341,12 +376,8 @@ static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
* aligned for the requested page size
*/
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
- int actual_psize, int ssize)
+ int actual_psize)
{
-
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;
-
/* A 4K page needs no special encoding */
if (actual_psize == MMU_PAGE_4K)
return pa & HPTE_R_RPN;
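
The mmu-hash.h changes above add hpte_old_to_new_*() / hpte_new_to_old_*() so that HPTEs are always composed in the pre-ISA-3.0 layout and converted only where they meet the hardware. A hedged sketch of how a hash backend might apply them when installing an entry (barriers, endian conversion and locking are omitted; this is not the patch's code):

static void write_hpte_sketch(unsigned long *hptep, unsigned long v,
			      unsigned long r)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* ISA 3.0: move B into the second dword, trim AVPN/ARPN */
		r = hpte_old_to_new_r(v, r);
		v = hpte_old_to_new_v(v);
	}

	hptep[1] = r;
	hptep[0] = v | HPTE_V_VALID;
}

Reads go the other way: hpte_new_to_old_v()/hpte_new_to_old_r() reconstruct the old-format dwords before the common compare/decode logic runs.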
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 0ebfbc8f0449..5905f0ff57d1 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -26,6 +26,11 @@
#define _RPAGE_SW1 0x00800
#define _RPAGE_SW2 0x00400
#define _RPAGE_SW3 0x00200
+#define _RPAGE_RSV1 0x1000000000000000UL
+#define _RPAGE_RSV2 0x0800000000000000UL
+#define _RPAGE_RSV3 0x0400000000000000UL
+#define _RPAGE_RSV4 0x0200000000000000UL
+
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
#else
@@ -33,6 +38,11 @@
#endif
#define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
+/*
+ * For P9 DD1 only, we need to track whether the pte's huge.
+ */
+#define _PAGE_LARGE _RPAGE_RSV1
+
#define _PAGE_PTE (1ul << 62) /* distinguishes PTEs from pointers */
#define _PAGE_PRESENT (1ul << 63) /* pte contains a translation */
@@ -568,10 +578,11 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
*/
static inline void __ptep_set_access_flags(struct mm_struct *mm,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry,
+ unsigned long address)
{
if (radix_enabled())
- return radix__ptep_set_access_flags(mm, ptep, entry);
+ return radix__ptep_set_access_flags(mm, ptep, entry, address);
return hash__ptep_set_access_flags(ptep, entry);
}
@@ -789,9 +800,6 @@ extern struct page *pgd_page(pgd_t pgd);
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
-
static inline int map_kernel_page(unsigned long ea, unsigned long pa,
unsigned long flags)
{
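
The pgtable.h hunk above threads the faulting address into __ptep_set_access_flags() so the radix path can target its flush. A simplified sketch of the caller shape this implies (assertions and flush policy are omitted and not taken from this diff):

int ptep_set_access_flags_sketch(struct vm_area_struct *vma, unsigned long addr,
				 pte_t *ptep, pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed)
		/* forward the address so radix on P9 DD1 can flush precisely */
		__ptep_set_access_flags(vma->vm_mm, ptep, entry, addr);

	return changed;
}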
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 2a46dea8e1b1..b4d1302387a3 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -140,19 +140,20 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
unsigned long new_pte;
old_pte = __radix_pte_update(ptep, ~0, 0);
- asm volatile("ptesync" : : : "memory");
/*
* new value of pte
*/
new_pte = (old_pte | set) & ~clr;
-
/*
- * For now let's do heavy pid flush
- * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
+ * If we are trying to clear the pte, we can skip
+ * the below sequence and batch the tlb flush. The
+ * tlb flush batching is done by mmu gather code
*/
- radix__flush_tlb_mm(mm);
-
- __radix_pte_update(ptep, 0, new_pte);
+ if (new_pte) {
+ asm volatile("ptesync" : : : "memory");
+ radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
+ __radix_pte_update(ptep, 0, new_pte);
+ }
} else
old_pte = __radix_pte_update(ptep, clr, set);
asm volatile("ptesync" : : : "memory");
@@ -167,7 +168,8 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
* function doesn't need to invalidate tlb.
*/
static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry,
+ unsigned long address)
{
unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
@@ -183,13 +185,7 @@ static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
* new value of pte
*/
new_pte = old_pte | set;
-
- /*
- * For now let's do heavy pid flush
- * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
- */
- radix__flush_tlb_mm(mm);
-
+ radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
__radix_pte_update(ptep, 0, new_pte);
} else
__radix_pte_update(ptep, 0, set);
@@ -243,6 +239,8 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ return __pmd(pmd_val(pmd) | _PAGE_PTE | _PAGE_LARGE);
return __pmd(pmd_val(pmd) | _PAGE_PTE);
}
static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index a9e19cb2f7c5..cc7fbde4f53c 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -42,4 +42,6 @@ extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
unsigned long page_size);
extern void radix__flush_tlb_lpid(unsigned long lpid);
extern void radix__flush_tlb_all(void);
+extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
+ unsigned long address);
#endif