From d0679c730395d0bde9a46939e7ba255b4ba7dd7c Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Tue, 2 Feb 2010 14:40:02 -0800
Subject: kbuild: move -fno-dwarf2-cfi-asm to powerpc only

Better dwarf2 unwind information is a good thing: it allows better
debugging with kgdb and crash, and helps systemtap.

Commit 003086497f07f7f1e67c0c295e261740f822b377 ("Build with
-fno-dwarf2-cfi-asm") disabled some CFI information globally to work
around a module loader bug on powerpc.  But this disables the better
unwind tables for all architectures, not just powerpc.

Move the workaround to powerpc and also add a suitable comment that
it's really a workaround.

This improves dwarf2 unwind tables on x86 at least.

Signed-off-by: Andi Kleen
Cc: Kyle McMartin
Signed-off-by: Andrew Morton
Acked-by: Benjamin Herrenschmidt
Signed-off-by: Michal Marek
---
 arch/powerpc/Makefile | 5 +++++
 1 file changed, 5 insertions(+)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 1a54a3b3a3f..42dcd3f4ad7 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -112,6 +112,11 @@ KBUILD_CFLAGS += $(call cc-option,-mspe=no)
 # kernel considerably.
 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
 
+# FIXME: the module load should be taught about the additional relocs
+# generated by this.
+# revert to pre-gcc-4.4 behaviour of .eh_frame
+KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+
 # Never use string load/store instructions as they are
 # often slow when they are implemented at all
 KBUILD_CFLAGS += -mno-string
--
cgit v1.2.3
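Background on the flag, for readers who have not met it: with
-fdwarf2-cfi-asm (the gcc 4.4+ default that this option turns off), gcc
describes the frame-unwind state through assembler .cfi_* directives and
lets the assembler build the .eh_frame section, which carries the
additional relocations the powerpc module loader has not been taught
about; with -fno-dwarf2-cfi-asm, gcc writes .eh_frame itself, as
pre-4.4 compilers did.  A minimal stand-alone C sketch (not from the
kernel tree; the function is arbitrary) to observe the difference:

/* Stand-alone sketch; any function with a frame will do. */
int demo_add(int a, int b)
{
        return a + b;
}

Compiling this with "gcc -S demo.c" shows .cfi_startproc/.cfi_endproc
directives in the generated assembly; with
"gcc -S -fno-dwarf2-cfi-asm demo.c" the directives disappear and the
compiler emits the unwind table directly.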
From 4af57b787b4be09419a2bb48aa705fa87ef41cca Mon Sep 17 00:00:00 2001
From: Tim Abbott
Date: Sat, 20 Feb 2010 01:03:34 +0100
Subject: Rename .data.cacheline_aligned to .data..cacheline_aligned.

Signed-off-by: Tim Abbott
Cc: Sam Ravnborg
Signed-off-by: Denys Vlasenko
Signed-off-by: Michal Marek
---
 arch/powerpc/kernel/vmlinux.lds.S | 2 +-
 arch/x86/kernel/init_task.c       | 2 +-
 include/asm-generic/vmlinux.lds.h | 2 +-
 include/linux/cache.h             | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index dcd01c82e70..3229c062216 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -231,7 +231,7 @@ SECTIONS
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}
 
-	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}
 
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb9cd0..43e9ccf4494 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0..78450aaab9e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -189,7 +189,7 @@
 #define CACHELINE_ALIGNED_DATA(align)				\
 	. = ALIGN(align);					\
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align)					\
 	. = ALIGN(align);					\
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 97e24881c4c..4c570653ab8 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned					\
   __attribute__((__aligned__(SMP_CACHE_BYTES),			\
-		 __section__(".data.cacheline_aligned")))
+		 __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
 
 #ifndef __cacheline_aligned_in_smp
--
cgit v1.2.3

From 2af7687f1ad2c4571b9835f9bb2e3db9a738d258 Mon Sep 17 00:00:00 2001
From: Tim Abbott
Date: Sat, 20 Feb 2010 01:03:35 +0100
Subject: Rename .data.init_task to .data..init_task.

Signed-off-by: Tim Abbott
Cc: Sam Ravnborg
Signed-off-by: Denys Vlasenko
Signed-off-by: Michal Marek
---
 arch/ia64/kernel/init_task.c      | 2 +-
 arch/powerpc/kernel/vmlinux.lds.S | 4 +---
 include/asm-generic/vmlinux.lds.h | 4 ++--
 include/linux/init_task.h         | 2 +-
 4 files changed, 5 insertions(+), 7 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index e253ab8fcbc..f9efe9739d3 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -23,7 +23,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * Initial task structure.
  *
  * We need to make sure that this is properly aligned due to the way process stacks are
- * handled. This is done by having a special ".data.init_task" section...
+ * handled. This is done by having a special ".data..init_task" section...
  */
 #define init_thread_info	init_task_mem.s.thread_info
 
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 3229c062216..136dcf3ce7b 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -223,9 +223,7 @@ SECTIONS
 #endif
 
 	/* The initial task and kernel stack */
-	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
-		INIT_TASK_DATA(THREAD_SIZE)
-	}
+	INIT_TASK_DATA_SECTION(THREAD_SIZE)
 
 	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 78450aaab9e..9cb9a9021e6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -193,7 +193,7 @@
 #define INIT_TASK_DATA(align)					\
 	. = ALIGN(align);					\
-	*(.data.init_task)
+	*(.data..init_task)
 
 /*
  * Read only Data
@@ -435,7 +435,7 @@
  */
 #define INIT_TASK_DATA_SECTION(align)				\
 	. = ALIGN(align);					\
-	.data.init_task : {					\
+	.data..init_task : {					\
 		INIT_TASK_DATA(align)				\
 	}
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index abec69b63d7..f00253b3fc4 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -191,7 +191,7 @@ extern struct cred init_cred;
 }
 
 /* Attach to the init_task data structure for proper alignment */
-#define __init_task_data __attribute__((__section__(".data.init_task")))
+#define __init_task_data __attribute__((__section__(".data..init_task")))
 
 #endif
--
cgit v1.2.3
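To see the mechanism __init_task_data relies on in isolation, here is a
minimal user-space sketch; the demo_* names and the 8192-byte size are
hypothetical stand-ins (in the kernel the object is the init task's
stack, and the linker script places the section via the
INIT_TASK_DATA_SECTION macro shown in the hunks above):

#include <stdio.h>

/* Same expansion as the kernel's __init_task_data after this patch. */
#define demo_init_task_data __attribute__((__section__(".data..init_task")))

/* Hypothetical stand-in for the initial task's kernel stack. */
static unsigned char demo_init_stack[8192] demo_init_task_data;

int main(void)
{
        /* "objdump -t a.out | grep init_task" shows the symbol placed
         * in the .data..init_task section. */
        printf("demo_init_stack at %p\n", (void *)demo_init_stack);
        return 0;
}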
From 5f547f51a2205d18a507b88756e6988639db5f25 Mon Sep 17 00:00:00 2001
From: Tim Abbott
Date: Sat, 20 Feb 2010 01:03:36 +0100
Subject: powerpc: remove unused __page_aligned definition.

There is already an architecture-independent __page_aligned_data macro
for this purpose, so removing the powerpc-specific macro should be
harmless.

Signed-off-by: Tim Abbott
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Sam Ravnborg
Signed-off-by: Denys Vlasenko
Signed-off-by: Michal Marek
---
 arch/powerpc/include/asm/page_64.h | 8 --------
 1 file changed, 8 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index bfc4e027e2a..358ff14ea25 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -162,14 +162,6 @@ do {						\
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 
-#ifdef MODULE
-#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
-#else
-#define __page_aligned					\
-	__attribute__((__aligned__(PAGE_SIZE),		\
-		__section__(".data.page_aligned")))
-#endif
-
 #define VM_DATA_DEFAULT_FLAGS \
 	(test_thread_flag(TIF_32BIT) ? \
 	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
--
cgit v1.2.3

From 75b134837263eb919d91678f7fcf3d54cd088c8d Mon Sep 17 00:00:00 2001
From: Tim Abbott
Date: Sat, 20 Feb 2010 01:03:37 +0100
Subject: Rename .data.page_aligned to .data..page_aligned.

Signed-off-by: Tim Abbott
Cc: Sam Ravnborg
Signed-off-by: Denys Vlasenko
Signed-off-by: Michal Marek
---
 arch/ia64/kernel/vmlinux.lds.S    | 2 +-
 arch/powerpc/kernel/vmlinux.lds.S | 2 +-
 include/asm-generic/vmlinux.lds.h | 2 +-
 include/linux/linkage.h           | 4 ++--
 4 files changed, 5 insertions(+), 5 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 1295ba327f6..7fb1198611f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -175,7 +175,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 
-	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
+	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET)
 	{
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 		. = ALIGN(PAGE_SIZE);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 136dcf3ce7b..951e6c5b2c8 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -225,7 +225,7 @@ SECTIONS
 	/* The initial task and kernel stack */
 	INIT_TASK_DATA_SECTION(THREAD_SIZE)
 
-	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}
 
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9cb9a9021e6..569c25a8555 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -181,7 +181,7 @@
 #define PAGE_ALIGNED_DATA(page_align)				\
 	. = ALIGN(page_align);					\
-	*(.data.page_aligned)
+	*(.data..page_aligned)
 
 #define READ_MOSTLY_DATA(align)					\
 	. = ALIGN(align);					\
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 5126cceb6ae..05f4406d599 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -18,7 +18,7 @@
 # define asmregparm
 #endif
 
-#define __page_aligned_data	__section(.data.page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_data	__section(.data..page_aligned) __aligned(PAGE_SIZE)
 #define __page_aligned_bss	__section(.bss.page_aligned) __aligned(PAGE_SIZE)
 
 /*
@@ -27,7 +27,7 @@
  * Note when using these that you must specify the appropriate
  * alignment directives yourself
  */
-#define __PAGE_ALIGNED_DATA	.section ".data.page_aligned", "aw"
+#define __PAGE_ALIGNED_DATA	.section ".data..page_aligned", "aw"
 #define __PAGE_ALIGNED_BSS	.section ".bss.page_aligned", "aw"
 
 /*
--
cgit v1.2.3
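The generic macro that replaces the removed powerpc __page_aligned, and
that the rename above retargets, combines a named section with page
alignment.  A user-space sketch of the same idea, assuming 4096-byte
pages (the demo_* names are hypothetical; the kernel spells this
__page_aligned_data in include/linux/linkage.h):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096

/* Mirrors __page_aligned_data: a named section plus page alignment. */
#define demo_page_aligned_data					\
	__attribute__((__section__(".data..page_aligned"),	\
		       __aligned__(DEMO_PAGE_SIZE)))

static unsigned char demo_page[DEMO_PAGE_SIZE] demo_page_aligned_data;

int main(void)
{
        /* The object starts on a page boundary. */
        printf("page aligned: %s\n",
               ((uintptr_t)demo_page % DEMO_PAGE_SIZE) == 0 ? "yes" : "no");
        return 0;
}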
From 54cb27a71f51d304342c79e62fd7667f2171062b Mon Sep 17 00:00:00 2001
From: Denys Vlasenko
Date: Sat, 20 Feb 2010 01:03:44 +0100
Subject: Rename .data.read_mostly to .data..read_mostly.

Signed-off-by: Denys Vlasenko
Signed-off-by: Michal Marek
---
 arch/ia64/include/asm/cache.h     | 2 +-
 arch/ia64/kernel/paravirtentry.S  | 2 +-
 arch/ia64/xen/xensetup.S          | 2 +-
 arch/parisc/include/asm/cache.h   | 2 +-
 arch/parisc/kernel/head.S         | 2 +-
 arch/powerpc/include/asm/cache.h  | 2 +-
 arch/powerpc/kernel/vmlinux.lds.S | 2 +-
 arch/s390/include/asm/cache.h     | 2 +-
 arch/sh/include/asm/cache.h       | 2 +-
 arch/sparc/include/asm/cache.h    | 2 +-
 arch/x86/include/asm/cache.h      | 2 +-
 include/asm-generic/vmlinux.lds.h | 2 +-
 12 files changed, 12 insertions(+), 12 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
index e7482bd628f..988254a7d34 100644
--- a/arch/ia64/include/asm/cache.h
+++ b/arch/ia64/include/asm/cache.h
@@ -24,6 +24,6 @@
 # define SMP_CACHE_BYTES (1 << 3)
 #endif
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #endif /* _ASM_IA64_CACHE_H */
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
index 6158560d7f1..92d880c4d3d 100644
--- a/arch/ia64/kernel/paravirtentry.S
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -28,7 +28,7 @@
 #include "entry.h"
 
 #define DATA8(sym, init_value)			\
-	.pushsection .data.read_mostly ;	\
+	.pushsection .data..read_mostly ;	\
 	.align 8 ;				\
 	.global sym ;				\
 	sym: ;					\
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
index aff8346ea19..b820ed02ab9 100644
--- a/arch/ia64/xen/xensetup.S
+++ b/arch/ia64/xen/xensetup.S
@@ -14,7 +14,7 @@
 #include
 #include
 
-	.section .data.read_mostly
+	.section .data..read_mostly
 	.align 8
 	.global xen_domain_type
 xen_domain_type:
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 32c2cca7434..45effe6978f 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -28,7 +28,7 @@
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 void parisc_cache_init(void);	/* initializes cache-flushing */
 void disable_sr_hashing_asm(int); /* low level support for above */
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 0e3d9f9b9e3..4dbdf0ed6fa 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -345,7 +345,7 @@ smp_slave_stext:
 ENDPROC(stext)
 
 #ifndef CONFIG_64BIT
-	.section .data.read_mostly
+	.section .data..read_mostly
 
 	.align	4
 	.export	$global$,data
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 81de6eb3455..3f41ab9c1e4 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -38,7 +38,7 @@ extern struct ppc64_caches ppc64_caches;
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
 #if !defined(__ASSEMBLY__)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 951e6c5b2c8..8a0deefac08 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -233,7 +233,7 @@ SECTIONS
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}
 
-	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)
 	}
 
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 9b866816863..24aafa68b64 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -14,6 +14,6 @@
 #define L1_CACHE_BYTES     256
 #define L1_CACHE_SHIFT     8
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #endif
diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
index 02df18ea960..455a9a9bd7d 100644
--- a/arch/sh/include/asm/cache.h
+++ b/arch/sh/include/asm/cache.h
@@ -14,7 +14,7 @@
 
 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #ifndef __ASSEMBLY__
 struct cache_info {
diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
index 41f85ae4bd4..2909f0ab6f9 100644
--- a/arch/sparc/include/asm/cache.h
+++ b/arch/sparc/include/asm/cache.h
@@ -19,7 +19,7 @@
 
 #define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #ifdef CONFIG_SPARC32
 #include
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 2f9047cfaac..48f99f15452 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,7 +7,7 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
 #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e304fcd10bc..6f6da4f080f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -185,7 +185,7 @@
 #define READ_MOSTLY_DATA(align)					\
 	. = ALIGN(align);					\
-	*(.data.read_mostly)
+	*(.data..read_mostly)
 
 #define CACHELINE_ALIGNED_DATA(align)				\
 	. = ALIGN(align);					\
--
cgit v1.2.3
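Taken together, the renamed sections are reached from C through
attribute macros like the ones in the hunks above.  A closing
user-space sketch showing __read_mostly-style and
__cacheline_aligned-style placement side by side; the demo_* names are
hypothetical, and 64 merely stands in for SMP_CACHE_BYTES /
L1_CACHE_BYTES:

#include <stdio.h>

/* Same mechanism as the kernel's __read_mostly after this patch. */
#define demo_read_mostly __attribute__((__section__(".data..read_mostly")))

/* And as __cacheline_aligned, with 64 standing in for SMP_CACHE_BYTES. */
#define demo_cacheline_aligned					\
	__attribute__((__aligned__(64),				\
		       __section__(".data..cacheline_aligned")))

static int demo_config demo_read_mostly = 42;	/* written rarely, read often */
static int demo_counter demo_cacheline_aligned;	/* written constantly */

int main(void)
{
        /* Grouping rarely-written data keeps it off the cache lines that
         * bounce between CPUs; "objdump -t" shows both named sections. */
        demo_counter += demo_config;
        printf("%d\n", demo_counter);
        return 0;
}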