From 5a79859ae0f35d25c67a03e82bf0c80592f16a39 Mon Sep 17 00:00:00 2001 From: Heiko Carstens <heiko.carstens@de.ibm.com> Date: Thu, 12 Feb 2015 13:08:27 +0100 Subject: s390: remove 31 bit support Remove the 31 bit support in order to reduce maintenance cost and effectively remove dead code. For several years now there has been no distribution left that ships a 31 bit kernel. The 31 bit kernel had also been broken for more than a year before anybody noticed. In addition, commit a960062e5826 ("s390: add 31 bit warning message") added a removal warning, shown at IPL for five minutes, which let everybody know about the plan to remove the 31 bit code. We didn't get any response. Given that the last 31 bit only machine was introduced in 1999, let's remove the code. Anybody with 31 bit user space code can still use compat mode. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com> --- arch/s390/mm/dump_pagetables.c | 24 ++---------------------- arch/s390/mm/extmem.c | 14 -------------- arch/s390/mm/fault.c | 36 ------------------------------------ arch/s390/mm/gup.c | 4 ---- arch/s390/mm/init.c | 5 ----- arch/s390/mm/mem_detect.c | 4 ---- arch/s390/mm/mmap.c | 25 ------------------------- arch/s390/mm/pageattr.c | 2 +- arch/s390/mm/pgtable.c | 8 -------- arch/s390/mm/vmem.c | 10 ++-------- 10 files changed, 5 insertions(+), 127 deletions(-) (limited to 'arch/s390/mm') diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index d46cadeda204..8556d6be9b54 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -18,9 +18,7 @@ enum address_markers_idx { KERNEL_END_NR, VMEMMAP_NR, VMALLOC_NR, -#ifdef CONFIG_64BIT MODULES_NR, -#endif }; static struct addr_marker address_markers[] = { @@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = { [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"}, [VMEMMAP_NR] = {0, "vmemmap Area"}, [VMALLOC_NR] = {0, "vmalloc Area"}, -#ifdef CONFIG_64BIT [MODULES_NR] = {0, "Modules Area"}, -#endif { -1, NULL } }; @@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, } } -#ifdef CONFIG_64BIT -#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT -#else -#define _PMD_PROT_MASK 0 -#endif - static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t *pud, unsigned long addr) { @@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pmd = pmd_offset(pud, addr); if (!pmd_none(*pmd)) { if (pmd_large(*pmd)) { - prot = pmd_val(*pmd) & _PMD_PROT_MASK; + prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT; note_page(m, st, prot, 3); } else walk_pte_level(m, st, pmd, addr); @@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, } } -#ifdef CONFIG_64BIT -#define _PUD_PROT_MASK _REGION3_ENTRY_RO -#else -#define _PUD_PROT_MASK 0 -#endif - static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t *pgd, unsigned long addr) { @@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pud = pud_offset(pgd, addr); if (!pud_none(*pud)) if (pud_large(*pud)) { - prot = pud_val(*pud) & _PUD_PROT_MASK; + prot = pud_val(*pud) & _REGION3_ENTRY_RO; note_page(m, st, prot, 2); } else walk_pmd_level(m, st, pud, addr); @@ -230,13 +214,9 @@ static int pt_dump_init(void) * kernel ASCE. We need this to keep the page table walker functions * from accessing non-existent entries. 
*/ -#ifdef CONFIG_32BIT - max_addr = 1UL << 31; -#else max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; max_addr = 1UL << (max_addr * 11 + 31); address_markers[MODULES_NR].start_address = MODULES_VADDR; -#endif address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; address_markers[VMALLOC_NR].start_address = VMALLOC_START; debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 519bba716cc3..23c496957c22 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -51,7 +51,6 @@ struct qout64 { struct qrange range[6]; }; -#ifdef CONFIG_64BIT struct qrange_old { unsigned int start; /* last byte type */ unsigned int end; /* last byte reserved */ @@ -65,7 +64,6 @@ struct qout64_old { int segrcnt; struct qrange_old range[6]; }; -#endif struct qin64 { char qopcode; @@ -103,7 +101,6 @@ static int scode_set; static int dcss_set_subcodes(void) { -#ifdef CONFIG_64BIT char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA); unsigned long rx, ry; int rc; @@ -135,7 +132,6 @@ dcss_set_subcodes(void) segext_scode = DCSS_SEGEXTX; return 0; } -#endif /* Diag x'64' new subcodes are not supported, set to old subcodes */ loadshr_scode = DCSS_LOADNOLY; loadnsr_scode = DCSS_LOADNSR; @@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter, rx = (unsigned long) parameter; ry = (unsigned long) *func; -#ifdef CONFIG_64BIT /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ if (*func > DCSS_SEGEXT) asm volatile( @@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter, " ipm %2\n" " srl %2,28\n" : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); -#else - asm volatile( - " diag %0,%1,0x64\n" - " ipm %2\n" - " srl %2,28\n" - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); -#endif *ret1 = rx; *ret2 = ry; return rc; @@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg) goto out_free; } -#ifdef CONFIG_64BIT /* Only old format of output area of Diagnose x'64' is supported, copy data for the new format. 
*/ if (segext_scode == DCSS_SEGEXT) { @@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg) } kfree(qout_old); } -#endif if (qout->segcnt > 6) { rc = -EOPNOTSUPP; goto out_free; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 3ff86533f7db..76515bcea2f1 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -36,15 +36,9 @@ #include #include "../kernel/entry.h" -#ifndef CONFIG_64BIT -#define __FAIL_ADDR_MASK 0x7ffff000 -#define __SUBCODE_MASK 0x0200 -#define __PF_RES_FIELD 0ULL -#else /* CONFIG_64BIT */ #define __FAIL_ADDR_MASK -4096L #define __SUBCODE_MASK 0x0600 #define __PF_RES_FIELD 0x8000000000000000ULL -#endif /* CONFIG_64BIT */ #define VM_FAULT_BADCONTEXT 0x010000 #define VM_FAULT_BADMAP 0x020000 @@ -54,7 +48,6 @@ static unsigned long store_indication __read_mostly; -#ifdef CONFIG_64BIT static int __init fault_init(void) { if (test_facility(75)) @@ -62,7 +55,6 @@ static int __init fault_init(void) return 0; } early_initcall(fault_init); -#endif static inline int notify_page_fault(struct pt_regs *regs) { @@ -133,7 +125,6 @@ static int bad_address(void *p) return probe_kernel_address((unsigned long *)p, dummy); } -#ifdef CONFIG_64BIT static void dump_pagetable(unsigned long asce, unsigned long address) { unsigned long *table = __va(asce & PAGE_MASK); @@ -187,33 +178,6 @@ bad: pr_cont("BAD\n"); } -#else /* CONFIG_64BIT */ - -static void dump_pagetable(unsigned long asce, unsigned long address) -{ - unsigned long *table = __va(asce & PAGE_MASK); - - pr_alert("AS:%08lx ", asce); - table = table + ((address >> 20) & 0x7ff); - if (bad_address(table)) - goto bad; - pr_cont("S:%08lx ", *table); - if (*table & _SEGMENT_ENTRY_INVALID) - goto out; - table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); - table = table + ((address >> 12) & 0xff); - if (bad_address(table)) - goto bad; - pr_cont("P:%08lx ", *table); -out: - pr_cont("\n"); - return; -bad: - pr_cont("BAD\n"); -} - -#endif /* CONFIG_64BIT */ - static void dump_fault_info(struct pt_regs *regs) { unsigned long asce; diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 5c586c78ca8d..1eb41bb3010c 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, pmd_t *pmdp, pmd; pmdp = (pmd_t *) pudp; -#ifdef CONFIG_64BIT if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) pmdp = (pmd_t *) pud_deref(pud); pmdp += pmd_index(addr); -#endif do { pmd = *pmdp; barrier(); @@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, pud_t *pudp, pud; pudp = (pud_t *) pgdp; -#ifdef CONFIG_64BIT if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) pudp = (pud_t *) pgd_deref(pgd); pudp += pud_index(addr); -#endif do { pud = *pudp; barrier(); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index d35b15113b17..80875c43a4a4 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -105,7 +105,6 @@ void __init paging_init(void) unsigned long pgd_type, asce_bits; init_mm.pgd = swapper_pg_dir; -#ifdef CONFIG_64BIT if (VMALLOC_END > (1UL << 42)) { asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; pgd_type = _REGION2_ENTRY_EMPTY; @@ -113,10 +112,6 @@ void __init paging_init(void) asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; pgd_type = _REGION3_ENTRY_EMPTY; } -#else - asce_bits = _ASCE_TABLE_LENGTH; - pgd_type = _SEGMENT_ENTRY_EMPTY; -#endif S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; 
clear_table((unsigned long *) init_mm.pgd, pgd_type, sizeof(unsigned long)*2048); diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c index 5535cfe0ee11..0f3604395805 100644 --- a/arch/s390/mm/mem_detect.c +++ b/arch/s390/mm/mem_detect.c @@ -36,10 +36,6 @@ void __init detect_memory_memblock(void) memsize = rzm * rnmax; if (!rzm) rzm = 1ULL << 17; - if (IS_ENABLED(CONFIG_32BIT)) { - rzm = min(ADDR2G, rzm); - memsize = min(ADDR2G, memsize); - } max_physmem_end = memsize; addr = 0; /* keep memblock lists close to the kernel */ diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 179a2c20b01f..2e8378796e87 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -190,29 +190,6 @@ unsigned long randomize_et_dyn(void) return base + mmap_rnd(); } -#ifndef CONFIG_64BIT - -/* - * This function, called very early during the creation of a new - * process VM image, sets up which VM layout function to use: - */ -void arch_pick_mmap_layout(struct mm_struct *mm) -{ - /* - * Fall back to the standard layout if the personality - * bit is set, or if the expected stack growth is unlimited: - */ - if (mmap_is_legacy()) { - mm->mmap_base = mmap_base_legacy(); - mm->get_unmapped_area = arch_get_unmapped_area; - } else { - mm->mmap_base = mmap_base(); - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - } -} - -#else - int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) { if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) @@ -317,5 +294,3 @@ static int __init setup_mmap_rnd(void) return 0; } early_initcall(setup_mmap_rnd); - -#endif diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 426c9d462d1c..749c98407b41 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr) { int i; - if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) { + if (test_facility(13)) { __ptep_ipte_range(address, nr - 1, pte); return; } diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index b2c1542f2ba2..33f589459113 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -27,14 +27,8 @@ #include #include -#ifndef CONFIG_64BIT -#define ALLOC_ORDER 1 -#define FRAG_MASK 0x0f -#else #define ALLOC_ORDER 2 #define FRAG_MASK 0x03 -#endif - unsigned long *crst_table_alloc(struct mm_struct *mm) { @@ -50,7 +44,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) free_pages((unsigned long) table, ALLOC_ORDER); } -#ifdef CONFIG_64BIT static void __crst_table_upgrade(void *arg) { struct mm_struct *mm = arg; @@ -140,7 +133,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) if (current->active_mm == mm) set_user_asce(mm); } -#endif #ifdef CONFIG_PGSTE diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index b1593c2f751a..ef7d6c8fea66 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void) { pud_t *pud = NULL; -#ifdef CONFIG_64BIT pud = vmem_alloc_pages(2); if (!pud) return NULL; clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4); -#endif return pud; } @@ -51,12 +49,10 @@ static inline pmd_t *vmem_pmd_alloc(void) { pmd_t *pmd = NULL; -#ifdef CONFIG_64BIT pmd = vmem_alloc_pages(2); if (!pmd) return NULL; clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); -#endif return pmd; } @@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) pgd_populate(&init_mm, pg_dir, pu_dir); } pu_dir 
= pud_offset(pg_dir, address); -#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) +#ifndef CONFIG_DEBUG_PAGEALLOC if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { pud_val(*pu_dir) = __pa(address) | @@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) pud_populate(&init_mm, pu_dir, pm_dir); } pm_dir = pmd_offset(pu_dir, address); -#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) +#ifndef CONFIG_DEBUG_PAGEALLOC if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { pmd_val(*pm_dir) = __pa(address) | @@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) pm_dir = pmd_offset(pu_dir, address); if (pmd_none(*pm_dir)) { -#ifdef CONFIG_64BIT /* Use 1MB frames for vmemmap if available. We always * use large frames even if they are only partially * used. */ @@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) address = (address + PMD_SIZE) & PMD_MASK; continue; } -#endif pt_dir = vmem_pte_alloc(address); if (!pt_dir) goto out; -- cgit v1.2.3 From 8a5d8473dd7e2b0bc2864e34bd6836b520589fa1 Mon Sep 17 00:00:00 2001 From: Heiko Carstens <heiko.carstens@de.ibm.com> Date: Fri, 13 Mar 2015 12:55:56 +0100 Subject: s390/maccess: remove potentially broken probe_kernel_write() Remove the s390 architecture implementation of probe_kernel_write() and instead use a new function s390_kernel_write() to modify kernel text and data everywhere. The s390 implementation of probe_kernel_write() was potentially broken since it modified memory in a read-modify-write fashion: it read four bytes, modified the requested bytes within those four bytes, and wrote the result back. If two CPUs modified the same four byte area at different locations within that area, this could lead to corruption. Right now the only places that call probe_kernel_write() run within stop_machine(), so this scenario can't happen; however, that might change at any time. To fix this, rename probe_kernel_write() to s390_kernel_write(), which can have special semantics, such as being callable only while running within stop_machine(). Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com> --- arch/s390/include/asm/uaccess.h | 1 + arch/s390/kernel/ftrace.c | 12 ++++++------ arch/s390/kernel/jump_label.c | 2 +- arch/s390/kernel/kprobes.c | 2 +- arch/s390/mm/maccess.c | 29 +++++++++++++++++++---------- 5 files changed, 28 insertions(+), 18 deletions(-) (limited to 'arch/s390/mm')
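The read-modify-write hazard described above is easy to see in a minimal user-space sketch of the same access pattern (the name write_odd and the buffer setup are illustrative only, not the kernel's API; the kernel performs the final store with a special instruction to bypass write protection, which plain C cannot express):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Write at most one aligned 8-byte word per call: read the whole word,
     * patch the requested bytes, then store the whole word back. */
    static size_t write_odd(char *dst, const char *src, size_t size)
    {
            uintptr_t addr = (uintptr_t) dst;
            char *aligned = (char *)(addr & ~7UL);
            size_t offset = addr & 7UL;
            size_t count = size < 8 - offset ? size : 8 - offset;
            uint64_t word;

            memcpy(&word, aligned, 8);                   /* read whole word */
            memcpy((char *) &word + offset, src, count); /* modify some bytes */
            memcpy(aligned, &word, 8);                   /* write whole word back */
            return count;                                /* bytes actually copied */
    }

    int main(void)
    {
            _Alignas(8) char buf[17] = "xxxxxxxxxxxxxxxx";
            const char *src = "hello, world";
            char *dst = buf + 3;    /* deliberately unaligned destination */
            size_t size = strlen(src);

            while (size) {          /* the loop from s390_kernel_write() */
                    size_t copied = write_odd(dst, src, size);
                    dst += copied;
                    src += copied;
                    size -= copied;
            }
            puts(buf);              /* prints: xxxhello, worldx */
            return 0;
    }

If two threads ran write_odd() concurrently on different bytes of the same word, each could store back a stale copy of the other's bytes, which is exactly the corruption the commit message describes and the reason the existing callers were only safe under stop_machine().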
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index cd4c68e0398d..d64a7a62164f 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -372,5 +372,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo } int copy_to_user_real(void __user *dest, void *src, unsigned long count); +void s390_kernel_write(void *dst, const void *src, size_t size); #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 6c79f1b44fe7..e0eaf11134b4 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -130,8 +130,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, /* Verify that the to be replaced code matches what we expect. */ if (memcmp(&orig, &old, sizeof(old))) return -EINVAL; - if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) - return -EPERM; + s390_kernel_write((void *) rec->ip, &new, sizeof(new)); return 0; } @@ -159,8 +158,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) /* Verify that the to be replaced code matches what we expect. */ if (memcmp(&orig, &old, sizeof(old))) return -EINVAL; - if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) - return -EPERM; + s390_kernel_write((void *) rec->ip, &new, sizeof(new)); return 0; } @@ -231,14 +229,16 @@ int ftrace_enable_ftrace_graph_caller(void) { u8 op = 0x04; /* set mask field to zero */ - return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); + s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); + return 0; } int ftrace_disable_ftrace_graph_caller(void) { u8 op = 0xf4; /* set mask field to all ones */ - return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); + s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); + return 0; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 830066f936c8..a90299600483 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -78,7 +78,7 @@ static void __jump_label_transform(struct jump_entry *entry, if (memcmp((void *)entry->code, &old, sizeof(old))) jump_label_bug(entry, &old, &new); } - probe_kernel_write((void *)entry->code, &new, sizeof(new)); + s390_kernel_write((void *)entry->code, &new, sizeof(new)); } static int __sm_arch_jump_label_transform(void *data) diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index f516edc1fbe3..389db56a2208 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -178,7 +178,7 @@ static int swap_instruction(void *data) } skip_ftrace: kcb->kprobe_status = KPROBE_SWAP_INST; - probe_kernel_write(p->addr, &new_insn, len); + s390_kernel_write(p->addr, &new_insn, len); kcb->kprobe_status = status; return 0; } diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 2eb34bdfc613..fb737e9e0683 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -16,13 +16,7 @@ #include #include -/* - * This function writes to kernel memory bypassing DAT and possible - * write protection. It copies one to four bytes from src to dst - * using the stura instruction. - * Returns the number of bytes copied or -EFAULT. - */ -static long probe_kernel_write_odd(void *dst, const void *src, size_t size) +static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size) { unsigned long count, aligned; int offset, mask; @@ -48,19 +42,34 @@ static long probe_kernel_write_odd(void *dst, const void *src, size_t size) return rc ? rc : count; } -long probe_kernel_write(void *dst, const void *src, size_t size) +/* + * s390_kernel_write - write to kernel memory bypassing DAT + * @dst: destination address + * @src: source address + * @size: number of bytes to copy + * + * This function writes to kernel memory bypassing DAT and possible page table + * write protection. It writes to the destination using the stura instruction. + * Therefore we have a read-modify-write sequence: the function reads four + * bytes from destination at a four byte boundary, modifies the bytes + * requested and writes the result back in a loop. 
+ * + * Note: this means that this function may not be called concurrently on + * several cpus with overlapping words, since this may potentially + * cause data corruption. + */ +void notrace s390_kernel_write(void *dst, const void *src, size_t size) { long copied = 0; while (size) { - copied = probe_kernel_write_odd(dst, src, size); + copied = s390_kernel_write_odd(dst, src, size); if (copied < 0) break; dst += copied; src += copied; size -= copied; } - return copied < 0 ? -EFAULT : 0; } static int __memcpy_real(void *dest, void *src, size_t count) -- cgit v1.2.3 From 3c1a3bcea945f9d59ab1fe3d319c67c0ff56100f Mon Sep 17 00:00:00 2001 From: Heiko Carstens <heiko.carstens@de.ibm.com> Date: Fri, 13 Mar 2015 13:13:36 +0100 Subject: s390/maccess: improve s390_kernel_write() Use the sturg instruction instead of the stura instruction. This allows modifying up to eight bytes at a time instead of only four. For function tracer enabling and disabling, this reduces the time needed to modify the text sections by 50%, since six bytes need to be changed for each mcount call site. Also remove the EXTABLE entries, since calls to this function are not supposed to fail. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com> --- arch/s390/mm/maccess.c | 45 ++++++++++++++++++++------------------------- 1 file changed, 20 insertions(+), 25 deletions(-) (limited to 'arch/s390/mm') diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index fb737e9e0683..8a993a53fcd6 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -1,7 +1,7 @@ /* * Access kernel memory without faulting -- s390 specific implementation. * - * Copyright IBM Corp. 2009 + * Copyright IBM Corp. 2009, 2015 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * @@ -18,28 +18,25 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size) { - unsigned long count, aligned; - int offset, mask; - int rc = -EFAULT; + unsigned long aligned, offset, count; + char tmp[8]; - aligned = (unsigned long) dst & ~3UL; - offset = (unsigned long) dst & 3; - count = min_t(unsigned long, 4 - offset, size); - mask = (0xf << (4 - count)) & 0xf; - mask >>= offset; + aligned = (unsigned long) dst & ~7UL; + offset = (unsigned long) dst & 7UL; + size = min(8UL - offset, size); + count = size - 1; asm volatile( " bras 1,0f\n" - " icm 0,0,0(%3)\n" - "0: l 0,0(%1)\n" - " lra %1,0(%1)\n" - "1: ex %2,0(1)\n" - "2: stura 0,%1\n" - " la %0,0\n" - "3:\n" - EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b) - : "+d" (rc), "+a" (aligned) - : "a" (mask), "a" (src) : "cc", "memory", "0", "1"); - return rc ? rc : count; + " mvc 0(1,%4),0(%5)\n" + "0: mvc 0(8,%3),0(%0)\n" + " ex %1,0(1)\n" + " lg %1,0(%3)\n" + " lra %0,0(%0)\n" + " sturg %1,%0\n" + : "+&a" (aligned), "+&a" (count), "=m" (tmp) + : "a" (&tmp), "a" (&tmp[offset]), "a" (src) + : "cc", "memory", "1"); + return size; } /* @@ -50,8 +47,8 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz * * This function writes to kernel memory bypassing DAT and possible page table - * write protection. It writes to the destination using the stura instruction. - * Therefore we have a read-modify-write sequence: the function reads four - * bytes from destination at a four byte boundary, modifies the bytes + * write protection. It writes to the destination using the sturg instruction. + * Therefore we have a read-modify-write sequence: the function reads eight + * bytes from destination at an eight byte boundary, modifies the bytes * requested and writes the result back in a loop. * * Note: this means that this function may not be called concurrently on @@ -60,12 +57,10 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz */ void notrace s390_kernel_write(void *dst, const void *src, size_t size) { - long copied = 0; + long copied; while (size) { copied = s390_kernel_write_odd(dst, src, size); - if (copied < 0) - break; dst += copied; src += copied; size -= copied; -- cgit v1.2.3 From 3ddb1b7578040ef114747e30f277cfea6286c5da Mon Sep 17 00:00:00 2001 From: Heiko Carstens <heiko.carstens@de.ibm.com> Date: Mon, 16 Mar 2015 12:44:10 +0100 Subject: s390: make a couple of functions and variables static As reported by sparse these can and should be static. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com> --- arch/s390/kernel/topology.c | 2 +- arch/s390/mm/mmap.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/s390/mm') diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 14da43b801d9..5728c5bd44a8 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -421,7 +421,7 @@ int topology_cpu_init(struct cpu *cpu) return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); } -const struct cpumask *cpu_thread_mask(int cpu) +static const struct cpumask *cpu_thread_mask(int cpu) { return &per_cpu(cpu_topology, cpu).thread_mask; } diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 2e8378796e87..b68af0564a42 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -32,7 +32,7 @@ #include unsigned long mmap_rnd_mask; -unsigned long mmap_align_mask; +static unsigned long mmap_align_mask; static unsigned long stack_maxrandom_size(void) { -- cgit v1.2.3