Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/bitops.h                  |  3
-rw-r--r--  include/asm-generic/bitops/ext2-atomic.h      |  4
-rw-r--r--  include/asm-generic/bitops/ext2-non-atomic.h  | 20
-rw-r--r--  include/asm-generic/bitops/le.h               | 89
-rw-r--r--  include/asm-generic/bitops/minix-le.h         | 17
-rw-r--r--  include/asm-generic/bitops/minix.h            | 15
-rw-r--r--  include/asm-generic/bug.h                     | 35
-rw-r--r--  include/asm-generic/cputime.h                 |  3
-rw-r--r--  include/asm-generic/errno.h                   |  2
-rw-r--r--  include/asm-generic/fcntl.h                   |  4
-rw-r--r--  include/asm-generic/ftrace.h                  | 16
-rw-r--r--  include/asm-generic/futex.h                   |  7
-rw-r--r--  include/asm-generic/io.h                      | 33
-rw-r--r--  include/asm-generic/ioctls.h                  |  1
-rw-r--r--  include/asm-generic/pgtable.h                 |  2
-rw-r--r--  include/asm-generic/sections.h                |  1
-rw-r--r--  include/asm-generic/siginfo.h                 |  2
-rw-r--r--  include/asm-generic/sizes.h                   | 47
-rw-r--r--  include/asm-generic/types.h                   | 27
-rw-r--r--  include/asm-generic/uaccess.h                 |  8
-rw-r--r--  include/asm-generic/unistd.h                  | 10
-rw-r--r--  include/asm-generic/user.h                    |  4
-rw-r--r--  include/asm-generic/vmlinux.lds.h             | 67
23 files changed, 250 insertions(+), 167 deletions(-)
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index a54f4421a24..280ca7a96f7 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -38,8 +38,7 @@
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
-#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
-#include <asm-generic/bitops/minix.h>
#endif /* __ASM_GENERIC_BITOPS_H */
diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h
index ab1c875efb7..ecf1c9d8a7c 100644
--- a/include/asm-generic/bitops/ext2-atomic.h
+++ b/include/asm-generic/bitops/ext2-atomic.h
@@ -5,7 +5,7 @@
({ \
int ret; \
spin_lock(lock); \
- ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
+ ret = __test_and_set_bit_le(nr, addr); \
spin_unlock(lock); \
ret; \
})
@@ -14,7 +14,7 @@
({ \
int ret; \
spin_lock(lock); \
- ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
+ ret = __test_and_clear_bit_le(nr, addr); \
spin_unlock(lock); \
ret; \
})
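The ext2 atomic helpers now route through the generic *_bit_le() operations instead of the removed ext2_* wrappers. A minimal usage sketch follows, assuming the enclosing macro keeps its usual ext2_set_bit_atomic(lock, nr, addr) form (the #define line itself is outside this hunk); "bg_lock", "block_bitmap" and "mark_block_used" are hypothetical names:

static DEFINE_SPINLOCK(bg_lock);
static unsigned long block_bitmap[BITS_TO_LONGS(8192)];

static int mark_block_used(int nr)
{
	/* Returns the old bit value; the spin_lock inside the macro makes
	 * the read-modify-write atomic against other users of bg_lock. */
	return ext2_set_bit_atomic(&bg_lock, nr, block_bitmap);
}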
diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h
deleted file mode 100644
index 63cf822431a..00000000000
--- a/include/asm-generic/bitops/ext2-non-atomic.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
-#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
-
-#include <asm-generic/bitops/le.h>
-
-#define ext2_set_bit(nr,addr) \
- generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
-#define ext2_clear_bit(nr,addr) \
- generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
-
-#define ext2_test_bit(nr,addr) \
- generic_test_le_bit((nr),(unsigned long *)(addr))
-#define ext2_find_first_zero_bit(addr, size) \
- generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
-#define ext2_find_next_zero_bit(addr, size, off) \
- generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
-#define ext2_find_next_bit(addr, size, off) \
- generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
-
-#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 80e3bf13b2b..946a21b1b5d 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -4,54 +4,77 @@
#include <asm/types.h>
#include <asm/byteorder.h>
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-
#if defined(__LITTLE_ENDIAN)
-#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
-#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
-#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
+#define BITOP_LE_SWIZZLE 0
-#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
-#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
-#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
-#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
+static inline unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
-#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
-#define generic_find_next_le_bit(addr, size, offset) \
- find_next_bit(addr, size, offset)
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+ unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
#elif defined(__BIG_ENDIAN)
-#define generic_test_le_bit(nr, addr) \
- test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic___set_le_bit(nr, addr) \
- __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic___clear_le_bit(nr, addr) \
- __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-#define generic_test_and_set_le_bit(nr, addr) \
- test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic_test_and_clear_le_bit(nr, addr) \
- test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-#define generic___test_and_set_le_bit(nr, addr) \
- __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define generic___test_and_clear_le_bit(nr, addr) \
- __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
+extern unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset);
-extern unsigned long generic_find_next_le_bit(const unsigned long *addr,
+extern unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset);
+#define find_first_zero_bit_le(addr, size) \
+ find_next_zero_bit_le((addr), (size), 0)
+
#else
#error "Please fix <asm/byteorder.h>"
#endif
-#define generic_find_first_zero_le_bit(addr, size) \
- generic_find_next_zero_le_bit((addr), (size), 0)
+static inline int test_bit_le(int nr, const void *addr)
+{
+ return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void __set_bit_le(int nr, void *addr)
+{
+ __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void __clear_bit_le(int nr, void *addr)
+{
+ __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int test_and_set_bit_le(int nr, void *addr)
+{
+ return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int test_and_clear_bit_le(int nr, void *addr)
+{
+ return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int __test_and_set_bit_le(int nr, void *addr)
+{
+ return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int __test_and_clear_bit_le(int nr, void *addr)
+{
+ return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
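The point of BITOP_LE_SWIZZLE is that bit numbers are interpreted against a little-endian byte layout on either host endianness: it is 0 on little-endian (a plain pass-through), and (BITS_PER_LONG-1) & ~0x7 on big-endian, so XOR-ing the bit number remaps it into the byte a little-endian CPU would touch. A minimal sketch of the invariant, using the new names ("le_bitmap_demo" is made up):

static void le_bitmap_demo(void)
{
	unsigned long map[1] = { 0 };

	__set_bit_le(0, map);
	/* On both endiannesses bit 0 of the lowest-addressed byte is now
	 * set, which is the layout on-disk little-endian bitmaps (ext2,
	 * minix, ...) expect. */
	BUG_ON(!test_bit_le(0, map));
	BUG_ON(((unsigned char *)map)[0] != 0x01);
}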
diff --git a/include/asm-generic/bitops/minix-le.h b/include/asm-generic/bitops/minix-le.h
deleted file mode 100644
index 4a981c1bb1a..00000000000
--- a/include/asm-generic/bitops/minix-le.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_
-#define _ASM_GENERIC_BITOPS_MINIX_LE_H_
-
-#include <asm-generic/bitops/le.h>
-
-#define minix_test_and_set_bit(nr,addr) \
- generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
-#define minix_set_bit(nr,addr) \
- generic___set_le_bit((nr),(unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr,addr) \
- generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
-#define minix_test_bit(nr,addr) \
- generic_test_le_bit((nr),(unsigned long *)(addr))
-#define minix_find_first_zero_bit(addr,size) \
- generic_find_first_zero_le_bit((unsigned long *)(addr),(size))
-
-#endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */
diff --git a/include/asm-generic/bitops/minix.h b/include/asm-generic/bitops/minix.h
deleted file mode 100644
index 91f42e87aa5..00000000000
--- a/include/asm-generic/bitops/minix.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_GENERIC_BITOPS_MINIX_H_
-#define _ASM_GENERIC_BITOPS_MINIX_H_
-
-#define minix_test_and_set_bit(nr,addr) \
- __test_and_set_bit((nr),(unsigned long *)(addr))
-#define minix_set_bit(nr,addr) \
- __set_bit((nr),(unsigned long *)(addr))
-#define minix_test_and_clear_bit(nr,addr) \
- __test_and_clear_bit((nr),(unsigned long *)(addr))
-#define minix_test_bit(nr,addr) \
- test_bit((nr),(unsigned long *)(addr))
-#define minix_find_first_zero_bit(addr,size) \
- find_first_zero_bit((unsigned long *)(addr),(size))
-
-#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index c2c9ba032d4..e5a3f588000 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -165,10 +165,43 @@ extern void warn_slowpath_null(const char *file, const int line);
#define WARN_ON_RATELIMIT(condition, state) \
WARN_ON((condition) && __ratelimit(state))
+/*
+ * WARN_ON_SMP() is for cases where the warning is either
+ * meaningless on !SMP or may even cause failures.
+ * It is usually used where we have
+ * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked()
+ * returns 0 on uniprocessor builds.
+ * It can also be used with values that are only defined
+ * on SMP:
+ *
+ * struct foo {
+ * [...]
+ * #ifdef CONFIG_SMP
+ * int bar;
+ * #endif
+ * };
+ *
+ * void func(struct foo *zoot)
+ * {
+ * WARN_ON_SMP(!zoot->bar);
+ *
+ * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(),
+ * and should be a nop and return false for uniprocessor.
+ *
+ * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set
+ * and x is true.
+ */
#ifdef CONFIG_SMP
# define WARN_ON_SMP(x) WARN_ON(x)
#else
-# define WARN_ON_SMP(x) do { } while (0)
+/*
+ * Use of ({0;}) because WARN_ON_SMP(x) may be used either as
+ * a stand alone line statement or as a condition in an if ()
+ * statement.
+ * A simple "0" would cause gcc to give a "statement has no effect"
+ * warning.
+ */
+# define WARN_ON_SMP(x) ({0;})
#endif
#endif
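Because the !SMP stub now expands to ({0;}) instead of an empty do/while, WARN_ON_SMP() can sit directly inside a conditional. A hedged sketch; "entry_lock", struct entry and its "node" member are made up for illustration:

static DEFINE_SPINLOCK(entry_lock);

struct entry {
	struct list_head node;
};

static void remove_entry(struct entry *e)
{
	/* On SMP this is a real WARN_ON(); on UP it compiles to if (0),
	 * which is exactly why the stub must be an expression rather
	 * than do { } while (0). */
	if (WARN_ON_SMP(!spin_is_locked(&entry_lock)))
		return;
	list_del(&e->node);
}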
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 2bcc5c7c22a..61e03dd7939 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -30,6 +30,9 @@ typedef u64 cputime64_t;
#define cputime64_to_jiffies64(__ct) (__ct)
#define jiffies64_to_cputime64(__jif) (__jif)
#define cputime_to_cputime64(__ct) ((u64) __ct)
+#define cputime64_gt(__a, __b) ((__a) > (__b))
+
+#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct)
/*
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
index 28cc03bf19e..a1331ce5044 100644
--- a/include/asm-generic/errno.h
+++ b/include/asm-generic/errno.h
@@ -108,4 +108,6 @@
#define ERFKILL 132 /* Operation not possible due to RF-kill */
+#define EHWPOISON 133 /* Memory page has hardware error */
+
#endif
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index 0fc16e3f0bf..84793c7025e 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -80,6 +80,10 @@
#define O_SYNC (__O_SYNC|O_DSYNC)
#endif
+#ifndef O_PATH
+#define O_PATH 010000000
+#endif
+
#ifndef O_NDELAY
#define O_NDELAY O_NONBLOCK
#endif
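O_PATH opens a file purely as a location reference: the descriptor cannot be read or written, but it can anchor the *at() system calls. A minimal userspace sketch (the /etc/hostname path is just an example, and O_PATH may need _GNU_SOURCE in the C library headers):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int dfd = open("/etc", O_PATH | O_DIRECTORY);	/* location only, no read access */

	if (dfd < 0)
		return 1;
	/* An O_PATH descriptor is still usable as the dirfd of *at() calls. */
	if (fstatat(dfd, "hostname", &st, 0) == 0)
		printf("size: %lld\n", (long long)st.st_size);
	close(dfd);
	return 0;
}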
diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h
new file mode 100644
index 00000000000..51abba9ea7a
--- /dev/null
+++ b/include/asm-generic/ftrace.h
@@ -0,0 +1,16 @@
+/*
+ * linux/include/asm-generic/ftrace.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_GENERIC_FTRACE_H__
+#define __ASM_GENERIC_FTRACE_H__
+
+/*
+ * Not all architectures need their own ftrace.h, the most
+ * common definitions are already in linux/ftrace.h.
+ */
+
+#endif /* __ASM_GENERIC_FTRACE_H__ */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 3c2344f4813..01f227e1425 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,7 +6,7 @@
#include <asm/errno.h>
static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
@@ -48,7 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
{
return -ENOSYS;
}
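The new signature separates "fault while accessing uaddr" (the return value) from "the value that was actually there" (written through the new uval pointer), which the old int-returning form conflated. A hedged sketch of how a caller consumes it; the function name is illustrative, not taken from this patch:

static int try_update_futex(u32 __user *uaddr, u32 oldval, u32 newval)
{
	u32 curval;
	int err;

	err = futex_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval);
	if (err)
		return err;	/* -EFAULT on a bad user pointer, or -ENOSYS here */
	if (curval != oldval)
		return -EAGAIN;	/* raced with another updater; caller can retry */
	return 0;		/* the word at uaddr went from oldval to newval */
}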
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 4644c9a7f72..e0ffa3ddb02 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -94,6 +94,10 @@ static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
+#ifndef PCI_IOBASE
+#define PCI_IOBASE ((void __iomem *) 0)
+#endif
+
/*****************************************************************************/
/*
* traditional input/output functions
@@ -101,32 +105,32 @@ static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
static inline u8 inb(unsigned long addr)
{
- return readb((volatile void __iomem *) addr);
+ return readb(addr + PCI_IOBASE);
}
static inline u16 inw(unsigned long addr)
{
- return readw((volatile void __iomem *) addr);
+ return readw(addr + PCI_IOBASE);
}
static inline u32 inl(unsigned long addr)
{
- return readl((volatile void __iomem *) addr);
+ return readl(addr + PCI_IOBASE);
}
static inline void outb(u8 b, unsigned long addr)
{
- writeb(b, (volatile void __iomem *) addr);
+ writeb(b, addr + PCI_IOBASE);
}
static inline void outw(u16 b, unsigned long addr)
{
- writew(b, (volatile void __iomem *) addr);
+ writew(b, addr + PCI_IOBASE);
}
static inline void outl(u32 b, unsigned long addr)
{
- writel(b, (volatile void __iomem *) addr);
+ writel(b, addr + PCI_IOBASE);
}
#define inb_p(addr) inb(addr)
@@ -213,32 +217,32 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
static inline void readsl(const void __iomem *addr, void *buf, int len)
{
- insl((unsigned long)addr, buf, len);
+ insl(addr - PCI_IOBASE, buf, len);
}
static inline void readsw(const void __iomem *addr, void *buf, int len)
{
- insw((unsigned long)addr, buf, len);
+ insw(addr - PCI_IOBASE, buf, len);
}
static inline void readsb(const void __iomem *addr, void *buf, int len)
{
- insb((unsigned long)addr, buf, len);
+ insb(addr - PCI_IOBASE, buf, len);
}
static inline void writesl(const void __iomem *addr, const void *buf, int len)
{
- outsl((unsigned long)addr, buf, len);
+ outsl(addr - PCI_IOBASE, buf, len);
}
static inline void writesw(const void __iomem *addr, const void *buf, int len)
{
- outsw((unsigned long)addr, buf, len);
+ outsw(addr - PCI_IOBASE, buf, len);
}
static inline void writesb(const void __iomem *addr, const void *buf, int len)
{
- outsb((unsigned long)addr, buf, len);
+ outsb(addr - PCI_IOBASE, buf, len);
}
#ifndef CONFIG_GENERIC_IOMAP
@@ -269,8 +273,9 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
outsl((unsigned long) (p), (src), (count))
#endif /* CONFIG_GENERIC_IOMAP */
-
-#define IO_SPACE_LIMIT 0xffffffff
+#ifndef IO_SPACE_LIMIT
+#define IO_SPACE_LIMIT 0xffff
+#endif
#ifdef __KERNEL__
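With PCI_IOBASE overridable, an architecture that maps its PCI I/O space into MMIO gets working inb()/outb() from this header simply by defining the base before including it. A sketch of what such an arch header might look like; the address and limit are made-up examples, not from this patch:

/* arch/<arch>/include/asm/io.h -- illustrative only */
#ifndef __ASM_ARCH_IO_H
#define __ASM_ARCH_IO_H

/* Hypothetical fixed virtual mapping of the PCI I/O window. */
#define PCI_IOBASE	((void __iomem *)0xfee00000UL)
#define IO_SPACE_LIMIT	0xffff

#include <asm-generic/io.h>

/* Now inb(0x3f8) expands to readb(0x3f8 + PCI_IOBASE), i.e. an MMIO load
 * from the mapped I/O window, and outb() mirrors it with writeb(). */

#endif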
diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h
index 3f3f2d189fb..199975fac39 100644
--- a/include/asm-generic/ioctls.h
+++ b/include/asm-generic/ioctls.h
@@ -73,6 +73,7 @@
#define TCSETXF 0x5434
#define TCSETXW 0x5435
#define TIOCSIG _IOW('T', 0x36, int) /* pty: generate signal */
+#define TIOCVHANGUP 0x5437
#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 31b6188df22..b4bfe338ea0 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -4,6 +4,8 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
+#include <linux/mm_types.h>
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index b3bfabc258f..c1a1216e29c 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -11,6 +11,7 @@ extern char _sinittext[], _einittext[];
extern char _end[];
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
+extern char __entry_text_start[], __entry_text_end[];
extern char __initdata_begin[], __initdata_end[];
extern char __start_rodata[], __end_rodata[];
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 942d30b5aab..0dd4e87f6fb 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -192,7 +192,7 @@ typedef struct siginfo {
* SIGBUS si_codes
*/
#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
-#define BUS_ADRERR (__SI_FAULT|2) /* non-existant physical address */
+#define BUS_ADRERR (__SI_FAULT|2) /* non-existent physical address */
#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
/* hardware memory error consumed on a machine check: action required */
#define BUS_MCEERR_AR (__SI_FAULT|4)
diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h
new file mode 100644
index 00000000000..ea5d4ef8106
--- /dev/null
+++ b/include/asm-generic/sizes.h
@@ -0,0 +1,47 @@
+/*
+ * linux/include/asm-generic/sizes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_GENERIC_SIZES_H__
+#define __ASM_GENERIC_SIZES_H__
+
+#define SZ_1 0x00000001
+#define SZ_2 0x00000002
+#define SZ_4 0x00000004
+#define SZ_8 0x00000008
+#define SZ_16 0x00000010
+#define SZ_32 0x00000020
+#define SZ_64 0x00000040
+#define SZ_128 0x00000080
+#define SZ_256 0x00000100
+#define SZ_512 0x00000200
+
+#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
+#define SZ_4K 0x00001000
+#define SZ_8K 0x00002000
+#define SZ_16K 0x00004000
+#define SZ_32K 0x00008000
+#define SZ_64K 0x00010000
+#define SZ_128K 0x00020000
+#define SZ_256K 0x00040000
+#define SZ_512K 0x00080000
+
+#define SZ_1M 0x00100000
+#define SZ_2M 0x00200000
+#define SZ_4M 0x00400000
+#define SZ_8M 0x00800000
+#define SZ_16M 0x01000000
+#define SZ_32M 0x02000000
+#define SZ_64M 0x04000000
+#define SZ_128M 0x08000000
+#define SZ_256M 0x10000000
+#define SZ_512M 0x20000000
+
+#define SZ_1G 0x40000000
+#define SZ_2G 0x80000000
+
+#endif /* __ASM_GENERIC_SIZES_H__ */
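Promoting the SZ_* constants to asm-generic lets any platform code spell memory-region sizes symbolically. A small, hedged example with made-up physical addresses and resource names:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <asm-generic/sizes.h>

/* Hypothetical platform: a 16 MiB SRAM bank and a 4 KiB register block. */
static struct resource sram_res = {
	.start	= 0x40000000,
	.end	= 0x40000000 + SZ_16M - 1,
	.flags	= IORESOURCE_MEM,
};

static void __iomem *regs;

static int map_regs(void)
{
	regs = ioremap(0x50000000, SZ_4K);
	return regs ? 0 : -ENOMEM;
}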
diff --git a/include/asm-generic/types.h b/include/asm-generic/types.h
index fba7d33ca3f..7a0f69e6c61 100644
--- a/include/asm-generic/types.h
+++ b/include/asm-generic/types.h
@@ -12,31 +12,4 @@ typedef unsigned short umode_t;
#endif /* __ASSEMBLY__ */
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-/*
- * DMA addresses may be very different from physical addresses
- * and pointers. i386 and powerpc may have 64 bit DMA on 32 bit
- * systems, while sparc64 uses 32 bit DMA addresses for 64 bit
- * physical addresses.
- * This default defines dma_addr_t to have the same size as
- * phys_addr_t, which is the most common way.
- * Do not define the dma64_addr_t type, which never really
- * worked.
- */
-#ifndef dma_addr_t
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-typedef u64 dma_addr_t;
-#else
-typedef u32 dma_addr_t;
-#endif /* CONFIG_PHYS_ADDR_T_64BIT */
-#endif /* dma_addr_t */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
#endif /* _ASM_GENERIC_TYPES_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index b218b8513d0..ac68c999b6c 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -288,14 +288,16 @@ strncpy_from_user(char *dst, const char __user *src, long count)
*
* Return 0 on exception, a value greater than N if too long
*/
-#ifndef strnlen_user
+#ifndef __strnlen_user
+#define __strnlen_user strnlen
+#endif
+
static inline long strnlen_user(const char __user *src, long n)
{
if (!access_ok(VERIFY_READ, src, 1))
return 0;
- return strlen((void * __force)src) + 1;
+ return __strnlen_user(src, n);
}
-#endif
static inline long strlen_user(const char __user *src)
{
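strnlen_user() now bounds its scan at n through a __strnlen_user() hook (defaulting to strnlen()) instead of running plain strlen() over the user pointer. An architecture with a fault-aware walker can override the hook before including the generic header. Hedged sketch; the helper name and its body are invented, and the length convention mirrors the strnlen() fallback:

/* arch/<arch>/include/asm/uaccess.h -- illustrative only */
#define __strnlen_user(s, n)	my_arch_strnlen_user((s), (n))

static inline long my_arch_strnlen_user(const char __user *s, long n)
{
	/* Scan at most n bytes of user memory: return the string length
	 * (not counting the NUL, capped at n) like strnlen(), and return
	 * 0 if a fault occurs. */
	long len = 0;
	char c;

	while (len < n) {
		if (__get_user(c, s + len))
			return 0;
		if (!c)
			return len;
		len++;
	}
	return n;
}

#include <asm-generic/uaccess.h>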
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index b969770196c..07c40d5149d 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -646,9 +646,17 @@ __SYSCALL(__NR_prlimit64, sys_prlimit64)
__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
#define __NR_fanotify_mark 263
__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
+#define __NR_name_to_handle_at 264
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at 265
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#define __NR_clock_adjtime 266
+__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
+#define __NR_syncfs 267
+__SYSCALL(__NR_syncfs, sys_syncfs)
#undef __NR_syscalls
-#define __NR_syscalls 264
+#define __NR_syscalls 268
/*
* All syscalls below here should go away really,
diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h
index 8b9c3c960ae..35638c34700 100644
--- a/include/asm-generic/user.h
+++ b/include/asm-generic/user.h
@@ -1,8 +1,8 @@
#ifndef __ASM_GENERIC_USER_H
#define __ASM_GENERIC_USER_H
/*
- * This file may define a 'struct user' structure. However, it it only
- * used for a.out file, which are not supported on new architectures.
+ * This file may define a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on new architectures.
*/
#endif /* __ASM_GENERIC_USER_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 6ebb81030d2..bd297a20ab9 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
* HEAD_TEXT_SECTION
* INIT_TEXT_SECTION(PAGE_SIZE)
* INIT_DATA_SECTION(...)
- * PERCPU(PAGE_SIZE)
+ * PERCPU(CACHELINE_SIZE, PAGE_SIZE)
* __init_end = .;
*
* _stext = .;
@@ -124,7 +124,8 @@
#endif
#ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+#define FTRACE_EVENTS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
@@ -140,7 +141,8 @@
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+#define TRACE_SYSCALLS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
*(__syscalls_metadata) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
@@ -165,10 +167,8 @@
CPU_KEEP(exit.data) \
MEM_KEEP(init.data) \
MEM_KEEP(exit.data) \
- . = ALIGN(32); \
- VMLINUX_SYMBOL(__start___tracepoints) = .; \
+ STRUCT_ALIGN(); \
*(__tracepoints) \
- VMLINUX_SYMBOL(__stop___tracepoints) = .; \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
@@ -176,13 +176,7 @@
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
- TRACE_PRINTKS() \
- \
- STRUCT_ALIGN(); \
- FTRACE_EVENTS() \
- \
- STRUCT_ALIGN(); \
- TRACE_SYSCALLS()
+ TRACE_PRINTKS()
/*
* Data section helpers
@@ -220,6 +214,10 @@
VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
*(__vermagic) /* Kernel version magic */ \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
+ *(__tracepoints_ptrs) /* Tracepoints: pointer array */\
+ VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
*(__markers_strings) /* Markers: strings */ \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
@@ -426,6 +424,12 @@
*(.kprobes.text) \
VMLINUX_SYMBOL(__kprobes_text_end) = .;
+#define ENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__entry_text_start) = .; \
+ *(.entry.text) \
+ VMLINUX_SYMBOL(__entry_text_end) = .;
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
ALIGN_FUNCTION(); \
@@ -482,6 +486,8 @@
KERNEL_CTORS() \
*(.init.rodata) \
MCOUNT_REC() \
+ FTRACE_EVENTS() \
+ TRACE_SYSCALLS() \
DEV_DISCARD(init.rodata) \
CPU_DISCARD(init.rodata) \
MEM_DISCARD(init.rodata) \
@@ -683,13 +689,18 @@
/**
* PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
* @vaddr: explicit base address (optional)
* @phdr: destination PHDR (optional)
*
- * Macro which expands to output section for percpu area. If @vaddr
- * is not blank, it specifies explicit base address and all percpu
- * symbols will be offset from the given address. If blank, @vaddr
- * always equals @laddr + LOAD_OFFSET.
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies explicit base address and all
+ * percpu symbols will be offset from the given address. If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
*
* @phdr defines the output PHDR to use if not blank. Be warned that
* output PHDR is sticky. If @phdr is specified, the next output
@@ -700,7 +711,7 @@
* If there is no need to put the percpu section at a predetermined
* address, use PERCPU().
*/
-#define PERCPU_VADDR(vaddr, phdr) \
+#define PERCPU_VADDR(cacheline, vaddr, phdr) \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- LOAD_OFFSET) { \
@@ -708,7 +719,9 @@
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
+ . = ALIGN(cacheline); \
*(.data..percpu..readmostly) \
+ . = ALIGN(cacheline); \
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
@@ -717,18 +730,18 @@
/**
* PERCPU - define output section for percpu area, simple version
+ * @cacheline: cacheline size
* @align: required alignment
*
- * Align to @align and outputs output section for percpu area. This
- * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+ * Align to @align and outputs output section for percpu area. This macro
+ * doesn't manipulate @vaddr or @phdr and __per_cpu_load and
* __per_cpu_start will be identical.
*
- * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
- * that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32
- * configuration.
+ * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu which is required for relocatable x86_32 configuration.
*/
-#define PERCPU(align) \
+#define PERCPU(cacheline, align) \
. = ALIGN(align); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
@@ -736,7 +749,9 @@
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
+ . = ALIGN(cacheline); \
*(.data..percpu..readmostly) \
+ . = ALIGN(cacheline); \
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
@@ -758,7 +773,7 @@
* the sections that has this restriction (or similar)
* is located before the ones requiring PAGE_SIZE alignment.
* NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
- * matches the requirment of PAGE_ALIGNED_DATA.
+ * matches the requirement of PAGE_ALIGNED_DATA.
*
* use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
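Both PERCPU() and PERCPU_VADDR() now take the cacheline size as their first argument so that .data..percpu..readmostly and the plain percpu data never share a cache line, and every architecture linker script has to pass it explicitly. A hedged sketch of the call site in an arch vmlinux.lds.S, using the common L1_CACHE_BYTES symbol as the likely argument:

/* arch/<arch>/kernel/vmlinux.lds.S -- illustrative fragment */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>

SECTIONS
{
	/* ... other sections ... */

	/* Previously PERCPU(PAGE_SIZE); the extra first argument keeps
	 * .data..percpu..readmostly on its own cache line. */
	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
}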