Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/alignment.c       | 20
-rw-r--r--  arch/arm/mm/cache-l2x0.c      | 46
-rw-r--r--  arch/arm/mm/context.c         | 14
-rw-r--r--  arch/arm/mm/copypage-v4mc.c   |  6
-rw-r--r--  arch/arm/mm/copypage-v6.c     | 10
-rw-r--r--  arch/arm/mm/copypage-xscale.c |  6
-rw-r--r--  arch/arm/mm/dma-mapping.c     |  5
-rw-r--r--  arch/arm/mm/ioremap.c         | 21
-rw-r--r--  arch/arm/mm/mm.h              |  4
-rw-r--r--  arch/arm/mm/mmu.c             | 18
-rw-r--r--  arch/arm/mm/proc-v7.S         |  2
11 files changed, 87 insertions, 65 deletions
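Most of the churn below converts ARM mm locks from DEFINE_SPINLOCK/spin_lock_* to DEFINE_RAW_SPINLOCK/raw_spin_lock_*, so these short cache, TLB and ASID critical sections keep spinning with IRQs disabled even on kernels where ordinary spinlocks may sleep (e.g. PREEMPT_RT). A minimal sketch of the pattern the conversions follow — the lock and function names here are illustrative, not identifiers from the files in this diff:

#include <linux/spinlock.h>

/* Illustrative lock, not one of the locks converted below. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
        unsigned long flags;

        /* A raw spinlock busy-waits and disables local IRQs here,
         * which is what short hardware-maintenance sections need. */
        raw_spin_lock_irqsave(&example_lock, flags);
        /* ... short, bounded maintenance work ... */
        raw_spin_unlock_irqrestore(&example_lock, flags);
}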
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index cfbcf8b95599..c335c76e0d88 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -86,16 +86,6 @@ core_param(alignment, ai_usermode, int, 0600);
 #define UM_FIXUP (1 << 1)
 #define UM_SIGNAL (1 << 2)
 
-#ifdef CONFIG_PROC_FS
-static const char *usermode_action[] = {
-        "ignored",
-        "warn",
-        "fixup",
-        "fixup+warn",
-        "signal",
-        "signal+warn"
-};
-
 /* Return true if and only if the ARMv6 unaligned access model is in use. */
 static bool cpu_is_v6_unaligned(void)
 {
@@ -123,6 +113,16 @@ static int safe_usermode(int new_usermode, bool warn)
         return new_usermode;
 }
 
+#ifdef CONFIG_PROC_FS
+static const char *usermode_action[] = {
+        "ignored",
+        "warn",
+        "fixup",
+        "fixup+warn",
+        "signal",
+        "signal+warn"
+};
+
 static int alignment_proc_show(struct seq_file *m, void *v)
 {
         seq_printf(m, "User:\t\t%lu\n", ai_user);
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 3f9b9980478e..8ac9e9f84790 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -29,7 +29,7 @@
 #define CACHE_LINE_SIZE 32
 
 static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask; /* Bitmask of active ways */
 static uint32_t l2x0_size;
 
@@ -126,9 +126,9 @@ static void l2x0_cache_sync(void)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __l2x0_flush_all(void)
@@ -145,9 +145,9 @@ static void l2x0_flush_all(void)
         unsigned long flags;
 
         /* clean all ways */
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         __l2x0_flush_all();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_all(void)
@@ -155,11 +155,11 @@ static void l2x0_clean_all(void)
         unsigned long flags;
 
         /* clean all ways */
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
         cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_all(void)
@@ -167,13 +167,13 @@ static void l2x0_inv_all(void)
         unsigned long flags;
 
         /* invalidate all ways */
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         /* Invalidating when L2 is enabled is a nono */
         BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
         writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
         cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -181,7 +181,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
         void __iomem *base = l2x0_base;
         unsigned long flags;
 
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         if (start & (CACHE_LINE_SIZE - 1)) {
                 start &= ~(CACHE_LINE_SIZE - 1);
                 debug_writel(0x03);
@@ -206,13 +206,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
                 }
 
                 if (blk_end < end) {
-                        spin_unlock_irqrestore(&l2x0_lock, flags);
-                        spin_lock_irqsave(&l2x0_lock, flags);
+                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                 }
         }
         cache_wait(base + L2X0_INV_LINE_PA, 1);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -225,7 +225,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
                 return;
         }
 
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         start &= ~(CACHE_LINE_SIZE - 1);
         while (start < end) {
                 unsigned long blk_end = start + min(end - start, 4096UL);
@@ -236,13 +236,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
                 }
 
                 if (blk_end < end) {
-                        spin_unlock_irqrestore(&l2x0_lock, flags);
-                        spin_lock_irqsave(&l2x0_lock, flags);
+                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                 }
         }
         cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -255,7 +255,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
                 return;
         }
 
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         start &= ~(CACHE_LINE_SIZE - 1);
         while (start < end) {
                 unsigned long blk_end = start + min(end - start, 4096UL);
@@ -268,24 +268,24 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
                 debug_writel(0x00);
 
                 if (blk_end < end) {
-                        spin_unlock_irqrestore(&l2x0_lock, flags);
-                        spin_lock_irqsave(&l2x0_lock, flags);
+                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                 }
         }
         cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
         cache_sync();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_disable(void)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&l2x0_lock, flags);
+        raw_spin_lock_irqsave(&l2x0_lock, flags);
         __l2x0_flush_all();
         writel_relaxed(0, l2x0_base + L2X0_CTRL);
         dsb();
-        spin_unlock_irqrestore(&l2x0_lock, flags);
+        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_unlock(__u32 cache_id)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfab..93aac068da94 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -16,7 +16,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-static DEFINE_SPINLOCK(cpu_asid_lock);
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 #ifdef CONFIG_SMP
 DEFINE_PER_CPU(struct mm_struct *, current_mm);
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
         mm->context.id = 0;
-        spin_lock_init(&mm->context.id_lock);
+        raw_spin_lock_init(&mm->context.id_lock);
 }
 
 static void flush_context(void)
@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
          * the broadcast. This function is also called via IPI so the
          * mm->context.id_lock has to be IRQ-safe.
          */
-        spin_lock_irqsave(&mm->context.id_lock, flags);
+        raw_spin_lock_irqsave(&mm->context.id_lock, flags);
         if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
                 /*
                  * Old version of ASID found. Set the new one and
@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
                 mm->context.id = asid;
                 cpumask_clear(mm_cpumask(mm));
         }
-        spin_unlock_irqrestore(&mm->context.id_lock, flags);
+        raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
 
         /*
          * Set the mm_cpumask(mm) bit for the current CPU.
@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
 {
         unsigned int asid;
 
-        spin_lock(&cpu_asid_lock);
+        raw_spin_lock(&cpu_asid_lock);
 #ifdef CONFIG_SMP
         /*
          * Check the ASID again, in case the change was broadcast from
@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
          */
         if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
                 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-                spin_unlock(&cpu_asid_lock);
+                raw_spin_unlock(&cpu_asid_lock);
                 return;
         }
 #endif
@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
         }
 
         set_mm_context(mm, asid);
-        spin_unlock(&cpu_asid_lock);
+        raw_spin_unlock(&cpu_asid_lock);
 }
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index b8061519ce77..7d0a8c230342 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -30,7 +30,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
                                   L_PTE_MT_MINICACHE)
 
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
 
 /*
  * ARMv4 mini-dcache optimised copy_user_highpage
@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
         if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                 __flush_dcache_page(page_mapping(from), from);
 
-        spin_lock(&minicache_lock);
+        raw_spin_lock(&minicache_lock);
 
         set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
         flush_tlb_kernel_page(0xffff8000);
 
         mc_copy_user_page((void *)0xffff8000, kto);
 
-        spin_unlock(&minicache_lock);
+        raw_spin_unlock(&minicache_lock);
 
         kunmap_atomic(kto, KM_USER1);
 }
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 63cca0097130..3d9a1552cef6 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -27,7 +27,7 @@
 #define from_address (0xffff8000)
 #define to_address (0xffffc000)
 
-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
  * Copy the user page.  No aliasing to deal with so we can just
@@ -88,7 +88,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
          * Now copy the page using the same cache colour as the
          * pages ultimate destination.
          */
-        spin_lock(&v6_lock);
+        raw_spin_lock(&v6_lock);
 
         set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
         set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
@@ -101,7 +101,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 
         copy_page((void *)kto, (void *)kfrom);
 
-        spin_unlock(&v6_lock);
+        raw_spin_unlock(&v6_lock);
 }
 
 /*
@@ -121,13 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
          * Now clear the page using the same cache colour as
          * the pages ultimate destination.
          */
-        spin_lock(&v6_lock);
+        raw_spin_lock(&v6_lock);
 
         set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
         flush_tlb_kernel_page(to);
         clear_page((void *)to);
 
-        spin_unlock(&v6_lock);
+        raw_spin_unlock(&v6_lock);
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 649bbcd325bf..610c24ced310 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -32,7 +32,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
                                   L_PTE_MT_MINICACHE)
 
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
 
 /*
  * XScale mini-dcache optimised copy_user_highpage
@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
         if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                 __flush_dcache_page(page_mapping(from), from);
 
-        spin_lock(&minicache_lock);
+        raw_spin_lock(&minicache_lock);
 
         set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
         flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
         mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
-        spin_unlock(&minicache_lock);
+        raw_spin_unlock(&minicache_lock);
 
         kunmap_atomic(kto, KM_USER1);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 01f5987eb1ad..e4e7f6cba1ab 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -120,9 +120,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 #ifdef CONFIG_MMU
 
-
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PGDIR_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
 
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
@@ -206,7 +205,7 @@ static int __init consistent_init(void)
                 }
 
                 consistent_pte[i++] = pte;
-                base += (1 << PGDIR_SHIFT);
+                base += PMD_SIZE;
         } while (base < CONSISTENT_END);
 
         return ret;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab506272b2d3..bdb248c4f55c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -289,6 +289,27 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space as memory. Needed when the kernel wants to execute
+ * code in external memory. This is needed for reprogramming source
+ * clocks that would affect normal memory for example. Please see
+ * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
+ */
+void __iomem *
+__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+{
+        unsigned int mtype;
+
+        if (cached)
+                mtype = MT_MEMORY;
+        else
+                mtype = MT_MEMORY_NONCACHED;
+
+        return __arm_ioremap_caller(phys_addr, size, mtype,
+                        __builtin_return_address(0));
+}
+
 void __iounmap(volatile void __iomem *io_addr)
 {
         void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 010566799c80..ad7cce3bc431 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -12,8 +12,8 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 
 struct mem_type {
         pteval_t prot_pte;
-        unsigned int prot_l1;
-        unsigned int prot_sect;
+        pmdval_t prot_l1;
+        pmdval_t prot_sect;
         unsigned int domain;
 };
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ea9c9f3e48bf..dc8c550e6cbd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(pgprot_kernel);
 struct cachepolicy {
         const char policy[16];
         unsigned int cr_mask;
-        unsigned int pmd;
+        pmdval_t pmd;
         pteval_t pte;
 };
 
@@ -296,7 +296,7 @@ static void __init build_mem_type_table(void)
 {
         struct cachepolicy *cp;
         unsigned int cr = get_cr();
-        unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
+        pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
         int cpu_arch = cpu_architecture();
         int i;
 
@@ -871,14 +871,14 @@ static inline void prepare_page_table(void)
         /*
          * Clear out all the mappings below the kernel image.
          */
-        for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
+        for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
                 pmd_clear(pmd_off_k(addr));
 
 #ifdef CONFIG_XIP_KERNEL
         /* The XIP kernel is mapped in the module area -- skip over it */
-        addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+        addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
 #endif
-        for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+        for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
                 pmd_clear(pmd_off_k(addr));
 
         /*
@@ -893,10 +893,12 @@ static inline void prepare_page_table(void)
          * memory bank, up to the end of the vmalloc region.
          */
         for (addr = __phys_to_virt(end);
-             addr < VMALLOC_END; addr += PGDIR_SIZE)
+             addr < VMALLOC_END; addr += PMD_SIZE)
                 pmd_clear(pmd_off_k(addr));
 }
 
+#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+
 /*
  * Reserve the special regions of memory
  */
@@ -906,7 +908,7 @@ void __init arm_mm_memblock_reserve(void)
          * Reserve the page tables.  These are already in use,
          * and can only be in node 0.
          */
-        memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
+        memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
 
 #ifdef CONFIG_SA1111
         /*
@@ -934,7 +936,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
          */
         vectors_page = early_alloc(PAGE_SIZE);
 
-        for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+        for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
                 pmd_clear(pmd_off_k(addr));
 
         /*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6af366ce0165..2c559ac38142 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -218,7 +218,7 @@ ENDPROC(cpu_v7_set_pte_ext)
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl cpu_v7_suspend_size
 .equ cpu_v7_suspend_size, 4 * 7
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
         stmfd sp!, {r4 - r10, lr}
         mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
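The ioremap.c hunk above adds __arm_ioremap_exec(), which maps a physical region with MT_MEMORY (cached) or MT_MEMORY_NONCACHED attributes rather than device attributes, so the kernel can place code there and execute it — for example a small routine that reprograms the clocks feeding normal memory. A usage sketch under stated assumptions: the SRAM address, size, and function names below are hypothetical placeholders, not taken from this diff.

#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical platform SRAM window; a real platform would get these
 * values from its own headers. */
#define EXAMPLE_SRAM_PHYS 0x40200000UL
#define EXAMPLE_SRAM_SIZE 0x1000

static void __iomem *example_sram_base;

static int example_sram_map(void)
{
        /* cached = true selects MT_MEMORY; false would select
         * MT_MEMORY_NONCACHED. */
        example_sram_base = __arm_ioremap_exec(EXAMPLE_SRAM_PHYS,
                                               EXAMPLE_SRAM_SIZE, true);
        if (!example_sram_base)
                return -ENOMEM;

        /* Code can now be copied into this mapping and executed there,
         * because it is mapped as memory rather than as a device. */
        return 0;
}

As the comment added in the hunk notes, CONFIG_GENERIC_ALLOCATOR can be used to manage allocations within such an external memory region.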