| author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-27 11:09:59 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-27 11:09:59 -0800 |
| commit | 5d8686276a5acc0a3d8055028a6e9d990c9c4fbd (patch) | |
| tree | 633652441a0e598950137357c35f9dcf6bfb6b00 /arch | |
| parent | 5a44ed0d30dd0681df44eb085c31b371b7fe84fe (diff) | |
| parent | 66362c9afc1d4a806280b9e3646d2d0a26cf83e9 (diff) | |
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:
- Build fix when !CONFIG_UID16 (the patch touches generic files but only
  affects arm64 builds; submitted by Arnd Bergmann)
- EFI fixes to deal with early_memremap() returning NULL and correctly
mapping run-time regions
- Fix CPUID register extraction of unsigned fields (not to be
  sign-extended; see the sketch after this list)
- ASID allocator fix to deal with long-running tasks over multiple
generation roll-overs
- Revert support for marking page ranges as contiguous PTEs (it leads
to TLB conflicts and requires additional non-trivial kernel changes)
- Proper early_alloc() failure check
- Disable KASan for 48-bit VA and 16KB page configuration (the pgd is
larger than the KASan shadow memory)
- Update the fault_info table (the original descriptions were based on an
  early engineering spec)
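
To make the sign-extension issue concrete, here is a minimal, self-contained userspace sketch of the two extraction styles, modelled on the cpuid_feature_extract_field_width()/cpuid_feature_extract_unsigned_field_width() pair in the diff below. The register value is invented for illustration, not taken from real hardware.

```c
#include <stdint.h>
#include <stdio.h>

/* Signed extraction: shift the field's top bit up to bit 63, then
 * arithmetic-shift right, which sign-extends the field. */
static int64_t extract_signed(uint64_t features, int field, int width)
{
	return (int64_t)(features << (64 - width - field)) >> (64 - width);
}

/* Unsigned extraction: same shifts on an unsigned value, so the right
 * shift is logical and the field is zero-extended instead. */
static uint64_t extract_unsigned(uint64_t features, int field, int width)
{
	return (features << (64 - width - field)) >> (64 - width);
}

int main(void)
{
	/* Hypothetical ID register holding 0xf in a 4-bit field at bit 12,
	 * in the style of a "number of breakpoints minus one" field. */
	uint64_t reg = 0xfULL << 12;

	printf("signed:   %lld\n", (long long)extract_signed(reg, 12, 4));
	printf("unsigned: %llu\n", (unsigned long long)extract_unsigned(reg, 12, 4));
	return 0;
}
```

With a 4-bit field holding 0xf, the signed helper returns -1 where the unsigned one returns 15, which is exactly why a "1 + field" computation such as get_num_brps()/get_num_wrps() would report 0 breakpoints instead of 16 when the field was treated as signed.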
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: efi: fix initcall return values
arm64: efi: deal with NULL return value of early_memremap()
arm64: debug: Treat the BRPs/WRPs as unsigned
arm64: cpufeature: Track unsigned fields
arm64: cpufeature: Add helpers for extracting unsigned values
Revert "arm64: Mark kernel page ranges contiguous"
arm64: mm: keep reserved ASIDs in sync with mm after multiple rollovers
arm64: KASAN depends on !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
arm64: efi: correctly map runtime regions
arm64: mm: fix fault_info table xFSC decoding
arm64: fix building without CONFIG_UID16
arm64: early_alloc: Fix check for allocation failure
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/Kconfig                     |  2
-rw-r--r--	arch/arm64/include/asm/cpufeature.h    | 22
-rw-r--r--	arch/arm64/include/asm/hw_breakpoint.h |  6
-rw-r--r--	arch/arm64/kernel/cpufeature.c         | 37
-rw-r--r--	arch/arm64/kernel/efi.c                | 33
-rw-r--r--	arch/arm64/mm/context.c                | 38
-rw-r--r--	arch/arm64/mm/fault.c                  | 28
-rw-r--r--	arch/arm64/mm/mmu.c                    | 77
8 files changed, 122 insertions(+), 121 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e55848c1edf4..871f21783866 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -49,7 +49,7 @@ config ARM64
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
 	select HAVE_ARCH_JUMP_LABEL
-	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP
+	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 52722ee73dba..8f271b83f910 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -47,8 +47,12 @@ enum ftr_type {
 #define FTR_STRICT	true	/* SANITY check strict matching required */
 #define FTR_NONSTRICT	false	/* SANITY check ignored */
 
+#define FTR_SIGNED	true	/* Value should be treated as signed */
+#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */
+
 struct arm64_ftr_bits {
-	bool		strict;	/* CPU Sanity check: strict matching required ? */
+	bool		sign;	/* Value is signed ? */
+	bool		strict;	/* CPU Sanity check: strict matching required ? */
 	enum ftr_type	type;
 	u8		shift;
 	u8		width;
@@ -124,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field)
 	return cpuid_feature_extract_field_width(features, field, 4);
 }
 
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
+{
+	return (u64)(features << (64 - width - field)) >> (64 - width);
+}
+
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field(u64 features, int field)
+{
+	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
+}
+
 static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
 {
 	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -131,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
 
 static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val)
 {
-	return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width);
+	return ftrp->sign ?
+		cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) :
+		cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width);
 }
 
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index e54415ec6935..9732908bfc8a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp;
 /* Determine number of BRP registers available. */
 static inline int get_num_brps(void)
 {
+	u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
-		cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+		cpuid_feature_extract_unsigned_field(dfr0,
 						ID_AA64DFR0_BRPS_SHIFT);
 }
 
 /* Determine number of WRP registers available. */
 static inline int get_num_wrps(void)
 {
+	u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
-		cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+		cpuid_feature_extract_unsigned_field(dfr0,
 						ID_AA64DFR0_WRPS_SHIFT);
 }
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c8cf89223b5a..0669c63281ea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 
-#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 	{						\
+		.sign = SIGNED,				\
 		.strict = STRICT,			\
 		.type = TYPE,				\
 		.shift = SHIFT,				\
@@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 		.safe_val = SAFE_VAL,			\
 	}
 
+/* Define a feature with signed values */
+#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+	__ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
+/* Define a feature with unsigned value */
+#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+	__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
 #define ARM64_FTR_END					\
 	{						\
 		.width = 0,				\
@@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 	 * Differing PARange is fine as long as all peripherals and memory are mapped
 	 * within the minimum PARange of all CPUs
 	 */
-	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
@@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static struct arm64_ftr_bits ftr_ctr[] = {
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
 	 * make use of *minLine
 	 */
-	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
+	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
 	ARM64_FTR_END,
 };
 
@@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = {
 
 static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
 	ARM64_FTR_END,
 };
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index fc5508e0df57..4eeb17198cfa 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -127,7 +127,11 @@ static int __init uefi_init(void)
 	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
 	config_tables = early_memremap(efi_to_phys(efi.systab->tables),
 				       table_size);
-
+	if (config_tables == NULL) {
+		pr_warn("Unable to map EFI config table array.\n");
+		retval = -ENOMEM;
+		goto out;
+	}
 	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
 					 sizeof(efi_config_table_64_t),
 					 NULL);
@@ -209,6 +213,14 @@ void __init efi_init(void)
 			PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
 	memmap.phys_map = params.mmap;
 	memmap.map = early_memremap(params.mmap, params.mmap_size);
+	if (memmap.map == NULL) {
+		/*
+		 * If we are booting via UEFI, the UEFI memory map is the only
+		 * description of memory we have, so there is little point in
+		 * proceeding if we cannot access it.
+		 */
+		panic("Unable to map EFI memory map.\n");
+	}
 	memmap.map_end = memmap.map + params.mmap_size;
 	memmap.desc_size = params.desc_size;
 	memmap.desc_version = params.desc_ver;
@@ -227,7 +239,6 @@ static bool __init efi_virtmap_init(void)
 	init_new_context(NULL, &efi_mm);
 
 	for_each_efi_memory_desc(&memmap, md) {
-		u64 paddr, npages, size;
 		pgprot_t prot;
 
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
@@ -235,11 +246,6 @@ static bool __init efi_virtmap_init(void)
 		if (md->virt_addr == 0)
 			return false;
 
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-
 		pr_info("  EFI remap 0x%016llx => %p\n",
 			md->phys_addr, (void *)md->virt_addr);
 
@@ -256,7 +262,8 @@ static bool __init efi_virtmap_init(void)
 		else
 			prot = PAGE_KERNEL;
 
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
+		create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
+				   md->num_pages << EFI_PAGE_SHIFT,
 				   __pgprot(pgprot_val(prot) | PTE_NG));
 	}
 	return true;
@@ -273,12 +280,12 @@ static int __init arm64_enable_runtime_services(void)
 
 	if (!efi_enabled(EFI_BOOT)) {
 		pr_info("EFI services will not be available.\n");
-		return -1;
+		return 0;
 	}
 
 	if (efi_runtime_disabled()) {
 		pr_info("EFI runtime services will be disabled.\n");
-		return -1;
+		return 0;
 	}
 
 	pr_info("Remapping and enabling EFI services.\n");
@@ -288,7 +295,7 @@ static int __init arm64_enable_runtime_services(void)
 			   mapsize);
 	if (!memmap.map) {
 		pr_err("Failed to remap EFI memory map\n");
-		return -1;
+		return -ENOMEM;
 	}
 	memmap.map_end = memmap.map + mapsize;
 	efi.memmap = &memmap;
@@ -297,13 +304,13 @@ static int __init arm64_enable_runtime_services(void)
 				   sizeof(efi_system_table_t));
 	if (!efi.systab) {
 		pr_err("Failed to remap EFI System Table\n");
-		return -1;
+		return -ENOMEM;
 	}
 	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
 	if (!efi_virtmap_init()) {
 		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	/* Set up runtime services function pointers */
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index f636a2639f03..e87f53ff5f58 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu)
 	__flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
 	int cpu;
-	for_each_possible_cpu(cpu)
-		if (per_cpu(reserved_asids, cpu) == asid)
-			return 1;
-	return 0;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_asids, cpu) == asid) {
+			hit = true;
+			per_cpu(reserved_asids, cpu) = newasid;
+		}
+	}
+
+	return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK);
+
 		/*
 		 * If our current ASID was active during a rollover, we
 		 * can continue to use it and this was just a false alarm.
 		 */
-		if (is_reserved_asid(asid))
-			return generation | (asid & ~ASID_MASK);
+		if (check_update_reserved_asid(asid, newasid))
+			return newasid;
 
 		/*
 		 * We had a valid ASID in a previous life, so try to re-use
@@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 */
 		asid &= ~ASID_MASK;
 		if (!__test_and_set_bit(asid, asid_map))
-			goto bump_gen;
+			return newasid;
 	}
 
 	/*
@@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-
-bump_gen:
-	asid |= generation;
-	return asid;
+	return asid | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 19211c4a8911..92ddac1e8ca2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -393,16 +393,16 @@ static struct fault_info {
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
 	{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
-	{ do_bad, SIGBUS, 0, "reserved access flag fault" },
+	{ do_bad, SIGBUS, 0, "unknown 8" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
-	{ do_bad, SIGBUS, 0, "reserved permission fault" },
+	{ do_bad, SIGBUS, 0, "unknown 12" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
 	{ do_bad, SIGBUS, 0, "synchronous external abort" },
-	{ do_bad, SIGBUS, 0, "asynchronous external abort" },
+	{ do_bad, SIGBUS, 0, "unknown 17" },
 	{ do_bad, SIGBUS, 0, "unknown 18" },
 	{ do_bad, SIGBUS, 0, "unknown 19" },
 	{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
@@ -410,16 +410,16 @@ static struct fault_info {
 	{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
 	{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
 	{ do_bad, SIGBUS, 0, "synchronous parity error" },
-	{ do_bad, SIGBUS, 0, "asynchronous parity error" },
+	{ do_bad, SIGBUS, 0, "unknown 25" },
 	{ do_bad, SIGBUS, 0, "unknown 26" },
 	{ do_bad, SIGBUS, 0, "unknown 27" },
-	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
-	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
-	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
-	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
+	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
 	{ do_bad, SIGBUS, 0, "unknown 32" },
 	{ do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
-	{ do_bad, SIGBUS, 0, "debug event" },
+	{ do_bad, SIGBUS, 0, "unknown 34" },
 	{ do_bad, SIGBUS, 0, "unknown 35" },
 	{ do_bad, SIGBUS, 0, "unknown 36" },
 	{ do_bad, SIGBUS, 0, "unknown 37" },
@@ -433,21 +433,21 @@ static struct fault_info {
 	{ do_bad, SIGBUS, 0, "unknown 45" },
 	{ do_bad, SIGBUS, 0, "unknown 46" },
 	{ do_bad, SIGBUS, 0, "unknown 47" },
-	{ do_bad, SIGBUS, 0, "unknown 48" },
+	{ do_bad, SIGBUS, 0, "TLB conflict abort" },
 	{ do_bad, SIGBUS, 0, "unknown 49" },
 	{ do_bad, SIGBUS, 0, "unknown 50" },
 	{ do_bad, SIGBUS, 0, "unknown 51" },
 	{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
-	{ do_bad, SIGBUS, 0, "unknown 53" },
+	{ do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" },
 	{ do_bad, SIGBUS, 0, "unknown 54" },
 	{ do_bad, SIGBUS, 0, "unknown 55" },
 	{ do_bad, SIGBUS, 0, "unknown 56" },
 	{ do_bad, SIGBUS, 0, "unknown 57" },
-	{ do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" },
+	{ do_bad, SIGBUS, 0, "unknown 58" },
 	{ do_bad, SIGBUS, 0, "unknown 59" },
 	{ do_bad, SIGBUS, 0, "unknown 60" },
-	{ do_bad, SIGBUS, 0, "unknown 61" },
-	{ do_bad, SIGBUS, 0, "unknown 62" },
+	{ do_bad, SIGBUS, 0, "section domain fault" },
+	{ do_bad, SIGBUS, 0, "page domain fault" },
 	{ do_bad, SIGBUS, 0, "unknown 63" },
 };
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index abb66f84d4ac..873e363048c6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 static void __init *early_alloc(unsigned long sz)
 {
-	void *ptr = __va(memblock_alloc(sz, sz));
-	BUG_ON(!ptr);
+	phys_addr_t phys;
+	void *ptr;
+
+	phys = memblock_alloc(sz, sz);
+	BUG_ON(!phys);
+	ptr = __va(phys);
 	memset(ptr, 0, sz);
 	return ptr;
 }
@@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
 	do {
 		/*
 		 * Need to have the least restrictive permissions available
-		 * permissions will be fixed up later. Default the new page
-		 * range as contiguous ptes.
+		 * permissions will be fixed up later
 		 */
-		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
+		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
 		pfn++;
 	} while (pte++, i++, i < PTRS_PER_PTE);
 }
 
-/*
- * Given a PTE with the CONT bit set, determine where the CONT range
- * starts, and clear the entire range of PTE CONT bits.
- */
-static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
-{
-	int i;
-
-	pte -= CONT_RANGE_OFFSET(addr);
-	for (i = 0; i < CONT_PTES; i++) {
-		set_pte(pte, pte_mknoncont(*pte));
-		pte++;
-	}
-	flush_tlb_all();
-}
-
-/*
- * Given a range of PTEs set the pfn and provided page protection flags
- */
-static void __populate_init_pte(pte_t *pte, unsigned long addr,
-				unsigned long end, phys_addr_t phys,
-				pgprot_t prot)
-{
-	unsigned long pfn = __phys_to_pfn(phys);
-
-	do {
-		/* clear all the bits except the pfn, then apply the prot */
-		set_pte(pte, pfn_pte(pfn, prot));
-		pte++;
-		pfn++;
-		addr += PAGE_SIZE;
-	} while (addr != end);
-}
-
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
-			   unsigned long end, phys_addr_t phys,
+			   unsigned long end, unsigned long pfn,
 			   pgprot_t prot,
 			   void *(*alloc)(unsigned long size))
 {
 	pte_t *pte;
-	unsigned long next;
 
 	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
 		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		next = min(end, (addr + CONT_SIZE) & CONT_MASK);
-		if (((addr | next | phys) & ~CONT_MASK) == 0) {
-			/* a block of CONT_PTES */
-			__populate_init_pte(pte, addr, next, phys,
-					    __pgprot(pgprot_val(prot) | PTE_CONT));
-		} else {
-			/*
-			 * If the range being split is already inside of a
-			 * contiguous range but this PTE isn't going to be
-			 * contiguous, then we want to unmark the adjacent
-			 * ranges, then update the portion of the range we
-			 * are interrested in.
-			 */
-			clear_cont_pte_range(pte, addr);
-			__populate_init_pte(pte, addr, next, phys, prot);
-		}
-
-		pte += (next - addr) >> PAGE_SHIFT;
-		phys += next - addr;
-		addr = next;
-	} while (addr != end);
+		set_pte(pte, pfn_pte(pfn, prot));
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
 static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 			}
 		}
 	} else {
-		alloc_init_pte(pmd, addr, next, phys, prot, alloc);
+		alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+			       prot, alloc);
 	}
 	phys += next - addr;
 } while (pmd++, addr = next, addr != end);
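
A closing note on the context.c change above: check_update_reserved_asid() keeps scanning after the first match because several CPUs may hold reserved copies of the same old ASID. The following is a hedged, self-contained userspace model of that behaviour; NCPUS, the generation encoding, and ASID 42 are invented for illustration and are not the kernel's actual values.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS     4
#define ASID_BITS 16
#define ASID_MASK (~((1ULL << ASID_BITS) - 1))

/* One reserved (generation | asid) value per CPU, standing in for the
 * kernel's per-CPU reserved_asids. */
static uint64_t reserved_asids[NCPUS];

/* Mirror of check_update_reserved_asid(): the loop never exits early,
 * so every stale copy of the old ASID is moved to the new generation;
 * otherwise a later rollover could miss one of them. */
static bool check_update_reserved_asid(uint64_t asid, uint64_t newasid)
{
	bool hit = false;

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (reserved_asids[cpu] == asid) {
			hit = true;
			reserved_asids[cpu] = newasid;
		}
	}
	return hit;
}

int main(void)
{
	/* The task's ASID 42 was active on CPUs 1 and 3 when generation 1
	 * rolled over, so both slots hold (generation 1 | 42). */
	uint64_t gen1 = 1ULL << ASID_BITS, gen2 = 2ULL << ASID_BITS;
	reserved_asids[1] = gen1 | 42;
	reserved_asids[3] = gen1 | 42;

	/* The task runs again in generation 2: it keeps ASID 42, and both
	 * reserved copies are rewritten, not just the first one found. */
	uint64_t newasid = gen2 | 42;
	if (check_update_reserved_asid(gen1 | 42, newasid))
		printf("kept ASID %llu in the new generation\n",
		       (unsigned long long)(newasid & ~ASID_MASK));
	printf("cpu1=%#llx cpu3=%#llx\n",
	       (unsigned long long)reserved_asids[1],
	       (unsigned long long)reserved_asids[3]);
	return 0;
}
```

If the loop returned on the first hit, the copy on cpu3 would keep its stale generation, and a later rollover could hand the same ASID to a different mm; this is the long-running-task bug the fix addresses.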