From 765129594494994e3de91182857bc2586dbe21c9 Mon Sep 17 00:00:00 2001
From: Becky Bruce
Date: Mon, 10 Oct 2011 10:50:36 +0000
Subject: powerpc: Only define HAVE_ARCH_HUGETLB_UNMAPPED_AREA if PPC_MM_SLICES

If we don't have slices, we should be able to use the generic
hugetlb_get_unmapped_area() code.

Signed-off-by: Becky Bruce
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/page_64.h | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index fb40ede6bc0..fed85e6290e 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -130,7 +130,9 @@ do {						\
 
 #ifdef CONFIG_HUGETLB_PAGE
 
+#ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 
--
cgit v1.2.3

From 97632e6fbea5b996669ffee21d869ed09848e1ec Mon Sep 17 00:00:00 2001
From: Becky Bruce
Date: Mon, 10 Oct 2011 10:50:37 +0000
Subject: powerpc: hugetlb: fix huge_ptep_set_access_flags return value

There was an unconditional return of "1" in the original code from
David Gibson, and I dropped it because it wasn't needed for FSL BOOKE
32-bit. However, not all systems (including 64-bit FSL BOOKE) load the
hpte from the fault handler asm; those systems depend on this function
returning 1, which causes a call to update_mmu_cache() that writes an
entry into the TLB.

Signed-off-by: Becky Bruce
Signed-off-by: David Gibson
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/hugetlb.h | 11 +++++++++++
 1 file changed, 11 insertions(+)
(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 86004930a78..70f9885f5c0 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -124,7 +124,18 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
+#if defined(CONFIG_PPC_MMU_NOHASH) && \
+	!(defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC32))
+	/*
+	 * The "return 1" forces a call of update_mmu_cache, which will write a
+	 * TLB entry.  Without this, platforms that don't do a write of the TLB
+	 * entry in the TLB miss handler asm will fault ad infinitum.
+	 */
+	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	return 1;
+#else
 	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+#endif
 }
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
--
cgit v1.2.3
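For context, the return value matters because the generic hugetlb fault path
only calls the update_mmu_cache() hook when the access-flags update reports a
change. A simplified sketch of that caller (roughly what hugetlb_fault() in
mm/hugetlb.c did in this era; not powerpc code, and the exact flag handling is
omitted):

	/* mm/hugetlb.c, hugetlb_fault(), simplified sketch */
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
				       flags & FAULT_FLAG_WRITE))
		/* arch hook that may preload the TLB with the new entry */
		update_mmu_cache(vma, address, ptep);

On hash-MMU powerpc the entry is (re)written from the miss handler, so the
plain ptep_set_access_flags() result is sufficient; on the nohash platforms
singled out above, forcing a return of 1 is what guarantees the preload
happens.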
From 881fde1db591628db0494e77cd9002b0ba8b04b7 Mon Sep 17 00:00:00 2001
From: Becky Bruce
Date: Mon, 10 Oct 2011 10:50:40 +0000
Subject: powerpc: hugetlb: modify include usage for FSL BookE code

The original 32-bit hugetlb implementation used PPC64 vs PPC32 to
determine which code path to take. However, the final hugetlb
implementation for 64-bit FSL ended up shared with the FSL 32-bit code
so the actual check needs to be FSL_BOOK3E vs everything else. This
patch changes the include protections to reflect this. There are also
a couple of related comment fixes.

Signed-off-by: Becky Bruce
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/hugetlb.h |  6 ++---
 arch/powerpc/mm/hugetlbpage.c      | 54 ++++++++++++++++++--------------------
 arch/powerpc/mm/tlb_nohash.c       |  2 +-
 3 files changed, 29 insertions(+), 33 deletions(-)
(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 70f9885f5c0..273acfad65a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -22,14 +22,14 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
 				    unsigned pdshift)
 {
 	/*
-	 * On 32-bit, we have multiple higher-level table entries that point to
-	 * the same hugepte.  Just use the first one since they're all
+	 * On FSL BookE, we have multiple higher-level table entries that
+	 * point to the same hugepte.  Just use the first one since they're all
 	 * identical.  So for that case, idx=0.
 	 */
 	unsigned long idx = 0;
 
 	pte_t *dir = hugepd_page(*hpdp);
-#ifdef CONFIG_PPC64
+#ifndef CONFIG_PPC_FSL_BOOK3E
 	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
 #endif
 
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 96178e8fb04..7c7cb979727 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -33,17 +33,17 @@ unsigned int HPAGE_SHIFT;
  * implementations may have more than one gpage size due to limitations
  * of the memory allocators, so we need multiple arrays
  */
-#ifdef CONFIG_PPC64
-#define MAX_NUMBER_GPAGES	1024
-static u64 gpage_freearray[MAX_NUMBER_GPAGES];
-static unsigned nr_gpages;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define MAX_NUMBER_GPAGES	128
 struct psize_gpages {
 	u64 gpage_list[MAX_NUMBER_GPAGES];
 	unsigned int nr_gpages;
 };
 static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
+#else
+#define MAX_NUMBER_GPAGES	1024
+static u64 gpage_freearray[MAX_NUMBER_GPAGES];
+static unsigned nr_gpages;
 #endif
 
 static inline int shift_to_mmu_psize(unsigned int shift)
@@ -114,12 +114,12 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	struct kmem_cache *cachep;
 	pte_t *new;
 
-#ifdef CONFIG_PPC64
-	cachep = PGT_CACHE(pdshift - pshift);
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	int i;
 	int num_hugepd = 1 << (pshift - pdshift);
 	cachep = hugepte_cache;
+#else
+	cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
 	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
@@ -131,12 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		return -ENOMEM;
 
 	spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_PPC64
-	if (!hugepd_none(*hpdp))
-		kmem_cache_free(cachep, new);
-	else
-		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	/*
 	 * We have multiple higher-level entries that point to the same
 	 * actual pte location.  Fill in each as we go and backtrack on error.
@@ -215,7 +210,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 	return hugepte_offset(hpdp, addr, pdshift);
 }
 
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
  */
@@ -335,7 +330,7 @@ void __init reserve_hugetlb_gpages(void)
 	}
 }
-#else /* PPC64 */
+#else /* !PPC_FSL_BOOK3E */
 
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
 */
@@ -373,7 +368,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	return 0;
 }
 
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define HUGEPD_FREELIST_SIZE \
 	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
 
@@ -433,11 +428,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	unsigned long pdmask = ~((1UL << pdshift) - 1);
 	unsigned int num_hugepd = 1;
 
-#ifdef CONFIG_PPC64
-	unsigned int shift = hugepd_shift(*hpdp);
-#else
-	/* Note: On 32-bit the hpdp may be the first of several */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	/* Note: On fsl the hpdp may be the first of several */
 	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
+#else
+	unsigned int shift = hugepd_shift(*hpdp);
 #endif
 
 	start &= pdmask;
@@ -455,10 +450,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	hpdp->pd = 0;
 
 	tlb->need_flush = 1;
-#ifdef CONFIG_PPC64
-	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
-#else
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	hugepd_free(tlb, hugepte);
+#else
+	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
 #endif
 }
 
@@ -590,12 +586,12 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 				continue;
 			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 		} else {
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 			/*
 			 * Increment next by the size of the huge mapping since
-			 * on 32-bit there may be more than one entry at the pgd
-			 * level for a single hugepage, but all of them point to
-			 * the same kmem cache that holds the hugepte.
+			 * there may be more than one entry at the pgd level
+			 * for a single hugepage, but all of them point to the
+			 * same kmem cache that holds the hugepte.
 			 */
 			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
 #endif
@@ -817,7 +813,7 @@ static int __init hugepage_setup_sz(char *str)
 }
 __setup("hugepagesz=", hugepage_setup_sz);
 
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 struct kmem_cache *hugepte_cache;
 static int __init hugetlbpage_init(void)
 {
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 4e13d6f9023..b2c65c66085 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -52,7 +52,7 @@
  * indirect page table entries.
  */
 #ifdef CONFIG_PPC_BOOK3E_MMU
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	[MMU_PAGE_4K] = {
 		.shift	= 12,
--
cgit v1.2.3
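The FSL BookE case that these #ifdef changes carve out is the one where a
single huge page spans several page-table entries at the same level. As a
purely illustrative worked example (the shift values here are hypothetical and
depend on the configured page sizes): with a directory entry covering 4 MB
(pdshift = 22) and a 16 MB huge page (pshift = 24), the __hugepte_alloc() code
above computes

	num_hugepd = 1 << (pshift - pdshift) = 1 << (24 - 22) = 4;

so four consecutive directory entries are filled with pointers to the same
hugepte, which is also why free_hugepd_range() and hugetlb_free_pgd_range()
step over all of them at once.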
From a6146888be0aa80ea41c99178d7d2e08efc776b5 Mon Sep 17 00:00:00 2001
From: Becky Bruce
Date: Mon, 10 Oct 2011 10:50:43 +0000
Subject: powerpc: Add gpages reservation code for 64-bit FSL BOOKE

For 64-bit FSL_BOOKE implementations, gigantic pages need to be
reserved at boot time by the memblock code based on the command line.
This adds the call that handles the reservation, and fixes some code
comments.

It also removes the previous pr_err when reserve_hugetlb_gpages is
called on a system without hugetlb enabled - the way the code is
structured, the call is unconditional and the resulting error message
is spurious and confusing.

Signed-off-by: Becky Bruce
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/hugetlb.h | 19 ++++++++++++++-----
 arch/powerpc/kernel/setup_64.c     | 10 ++++++++++
 arch/powerpc/mm/hugetlbpage.c      |  8 ++++----
 3 files changed, 28 insertions(+), 9 deletions(-)
(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 273acfad65a..555044c310b 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -5,7 +5,6 @@
 #include
 
 extern struct kmem_cache *hugepte_cache;
-extern void __init reserve_hugetlb_gpages(void);
 
 static inline pte_t *hugepd_page(hugepd_t hpd)
 {
@@ -153,14 +152,24 @@ static inline void arch_release_hugepage(struct page *page)
 }
 
 #else /* ! CONFIG_HUGETLB_PAGE */
-static inline void reserve_hugetlb_gpages(void)
-{
-	pr_err("Cannot reserve gpages without hugetlb enabled\n");
-}
 static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 				      unsigned long vmaddr)
 {
 }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
+/*
+ * FSL Book3E platforms require special gpage handling - the gpages
+ * are reserved early in the boot process by memblock instead of via
+ * the .dts as on IBM platforms.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+extern void __init reserve_hugetlb_gpages(void);
+#else
+static inline void reserve_hugetlb_gpages(void)
+{
+}
 #endif
 
 #endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index fb9bb46e7e8..4cb8f1e9d04 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -35,6 +35,8 @@
 #include
 #include
 #include
+#include
+
 #include
 #include
 #include
@@ -64,6 +66,7 @@
 #include
 #include
 #include
+#include
 #include "setup.h"
 
@@ -217,6 +220,13 @@ void __init early_setup(unsigned long dt_ptr)
 	/* Initialize the hash table or TLB handling */
 	early_init_mmu();
 
+	/*
+	 * Reserve any gigantic pages requested on the command line.
+	 * memblock needs to have been initialized by the time this is
+	 * called since this will reserve memory.
+	 */
+	reserve_hugetlb_gpages();
+
 	DBG(" <- early_setup()\n");
 }
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7c7cb979727..79c575d3dd6 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -28,10 +28,10 @@ unsigned int HPAGE_SHIFT;
 
 /*
  * Tracks gpages after the device tree is scanned and before the
- * huge_boot_pages list is ready.  On 64-bit implementations, this is
- * just used to track 16G pages and so is a single array.  32-bit
- * implementations may have more than one gpage size due to limitations
- * of the memory allocators, so we need multiple arrays
+ * huge_boot_pages list is ready.  On non-Freescale implementations, this is
+ * just used to track 16G pages and so is a single array.  FSL-based
+ * implementations may have more than one gpage size, so we need multiple
+ * arrays
  */
 #ifdef CONFIG_PPC_FSL_BOOK3E
 #define MAX_NUMBER_GPAGES	128
--
cgit v1.2.3
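With this change, gigantic pages on 64-bit FSL BookE are requested via the
kernel command line and carved out of memblock during early_setup(), rather
than being described in the device tree as on IBM platforms. As a purely
illustrative example (the size and count shown are hypothetical and depend on
the page sizes the MMU configuration supports), a boot line along the lines of

	hugepagesz=1g hugepages=2

would ask reserve_hugetlb_gpages() to set aside two 1 GB gigantic pages before
the page allocator is up.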
From d93e4d7d72037d8c9405e8d404ecb2ee162adc25 Mon Sep 17 00:00:00 2001
From: Becky Bruce
Date: Mon, 28 Nov 2011 14:43:33 +0000
Subject: powerpc/book3e: Change hugetlb preload to take vma argument

This avoids an extra find_vma() and is less error-prone.

Signed-off-by: Becky Bruce
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/hugetlb.h   | 3 ++-
 arch/powerpc/mm/hugetlbpage-book3e.c | 8 ++++++--
 arch/powerpc/mm/mem.c                | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)
(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 555044c310b..863f49d5ea3 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -52,7 +52,8 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 }
 #endif
 
-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte);
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+			    pte_t pte);
 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 4d6d849a3e5..3bc700655fc 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -37,12 +37,14 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
 	return found;
 }
 
-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte)
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+			    pte_t pte)
 {
 	unsigned long mas1, mas2;
 	u64 mas7_3;
 	unsigned long psize, tsize, shift;
 	unsigned long flags;
+	struct mm_struct *mm;
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
 	int index, ncams;
@@ -51,12 +53,14 @@ void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte)
 	if (unlikely(is_kernel_addr(ea)))
 		return;
 
+	mm = vma->vm_mm;
+
 #ifdef CONFIG_PPC_MM_SLICES
 	psize = get_slice_psize(mm, ea);
 	tsize = mmu_get_tsize(psize);
 	shift = mmu_psize_defs[psize].shift;
 #else
-	psize = vma_mmu_pagesize(find_vma(mm, ea));
+	psize = vma_mmu_pagesize(vma);
 	shift = __ilog2(psize);
 	tsize = shift - 10;
 #endif
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 22563b9664c..83d819f3086 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -553,7 +553,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
 	&& defined(CONFIG_HUGETLB_PAGE)
 	if (is_vm_hugetlb_page(vma))
-		book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
+		book3e_hugetlb_preload(vma, address, *ptep);
 #endif
 }
--
cgit v1.2.3
From 1f6820b4c1d09b017625ce32ba23fa39ebfdb27a Mon Sep 17 00:00:00 2001
From: Becky Bruce
Date: Tue, 29 Nov 2011 15:10:39 +0000
Subject: powerpc: Define/use HUGETLB_NEED_PRELOAD instead of complicated #if

Define HUGETLB_NEED_PRELOAD in mmu-book3e.h for CONFIG_PPC64 instead
of having a much more complicated #if block. This is easier to read
and maintain.

Signed-off-by: Becky Bruce
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/hugetlb.h    | 3 +--
 arch/powerpc/include/asm/mmu-book3e.h | 7 +++++++
 2 files changed, 8 insertions(+), 2 deletions(-)
(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 863f49d5ea3..dfdb95bc59a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -124,8 +124,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
-#if defined(CONFIG_PPC_MMU_NOHASH) && \
-	!(defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC32))
+#ifdef HUGETLB_NEED_PRELOAD
 	/*
 	 * The "return 1" forces a call of update_mmu_cache, which will write a
 	 * TLB entry.  Without this, platforms that don't do a write of the TLB
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 50210b9b014..f5f89cafebd 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -258,6 +258,13 @@ extern int mmu_vmemmap_psize;
 
 #ifdef CONFIG_PPC64
 extern unsigned long linear_map_top;
+
+/*
+ * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+ * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to
+ * return 1, indicating that the tlb requires preloading.
+ */
+#define HUGETLB_NEED_PRELOAD
 #endif
 
 #endif /* !__ASSEMBLY__ */
--
cgit v1.2.3
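Taken together with the earlier return-value fix, the net effect on a 64-bit
Book3E kernel (where mmu-book3e.h now defines HUGETLB_NEED_PRELOAD) is that
the inline resolves to roughly the following. This is an illustrative
expansion for the preprocessed case, not code that appears verbatim in the
tree:

	static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
						     unsigned long addr, pte_t *ptep,
						     pte_t pte, int dirty)
	{
		/* Update the PTE's access/dirty bits as usual... */
		ptep_set_access_flags(vma, addr, ptep, pte, dirty);
		/* ...then force update_mmu_cache() -> book3e_hugetlb_preload(). */
		return 1;
	}

Platforms whose TLB miss handler writes the entry itself keep the plain
"return ptep_set_access_flags(...)" path and skip the extra preload.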