Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/vmlinux.lds.h  34
-rw-r--r--  include/drm/drm_fb_helper.h  2
-rw-r--r--  include/drm/drm_mm.h  2
-rw-r--r--  include/drm/drm_pciids.h  5
-rw-r--r--  include/drm/radeon_drm.h  1
-rw-r--r--  include/linux/bootmem.h  2
-rw-r--r--  include/linux/bsearch.h  9
-rw-r--r--  include/linux/capability.h  13
-rw-r--r--  include/linux/clockchips.h  56
-rw-r--r--  include/linux/clocksource.h  34
-rw-r--r--  include/linux/cpufreq.h  52
-rw-r--r--  include/linux/cred.h  10
-rw-r--r--  include/linux/device.h  8
-rw-r--r--  include/linux/dynamic_debug.h  2
-rw-r--r--  include/linux/fb.h  1
-rw-r--r--  include/linux/flex_array.h  2
-rw-r--r--  include/linux/fs.h  1
-rw-r--r--  include/linux/ftrace.h  35
-rw-r--r--  include/linux/ftrace_event.h  1
-rw-r--r--  include/linux/gfp.h  2
-rw-r--r--  include/linux/init.h  14
-rw-r--r--  include/linux/init_task.h  1
-rw-r--r--  include/linux/irq.h  179
-rw-r--r--  include/linux/irqdesc.h  11
-rw-r--r--  include/linux/jump_label.h  89
-rw-r--r--  include/linux/jump_label_ref.h  44
-rw-r--r--  include/linux/kernel.h  1
-rw-r--r--  include/linux/kmod.h  1
-rw-r--r--  include/linux/list.h  35
-rw-r--r--  include/linux/mfd/wm831x/pdata.h  2
-rw-r--r--  include/linux/mm.h  24
-rw-r--r--  include/linux/module.h  37
-rw-r--r--  include/linux/moduleparam.h  7
-rw-r--r--  include/linux/mutex.h  2
-rw-r--r--  include/linux/nfs_xdr.h  1
-rw-r--r--  include/linux/of_device.h  8
-rw-r--r--  include/linux/pci-ats.h  52
-rw-r--r--  include/linux/percpu.h  2
-rw-r--r--  include/linux/perf_event.h  121
-rw-r--r--  include/linux/platform_device.h  63
-rw-r--r--  include/linux/pm.h  39
-rw-r--r--  include/linux/pm_runtime.h  42
-rw-r--r--  include/linux/proc_fs.h  2
-rw-r--r--  include/linux/ptrace.h  13
-rw-r--r--  include/linux/rculist.h  16
-rw-r--r--  include/linux/sched.h  64
-rw-r--r--  include/linux/seqlock.h  4
-rw-r--r--  include/linux/ssb/ssb.h  4
-rw-r--r--  include/linux/string.h  1
-rw-r--r--  include/linux/sysdev.h  11
-rw-r--r--  include/linux/tracepoint.h  22
-rw-r--r--  include/linux/usb/usbnet.h  1
-rw-r--r--  include/net/inet_ecn.h  16
-rw-r--r--  include/net/ip_vs.h  17
-rw-r--r--  include/net/llc_pdu.h  8
-rw-r--r--  include/net/xfrm.h  3
-rw-r--r--  include/rdma/iw_cm.h  11
-rw-r--r--  include/rdma/rdma_cm.h  10
-rw-r--r--  include/rdma/rdma_user_cm.h  5
-rw-r--r--  include/scsi/scsi_device.h  1
-rw-r--r--  include/trace/events/gfpflags.h  6
-rw-r--r--  include/xen/events.h  9
62 files changed, 811 insertions, 460 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bd297a20ab98..077c00d94f6e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -170,6 +170,10 @@
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___jump_table) = .; \
+ *(__jump_table) \
+ VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
*(__verbose) \
@@ -228,8 +232,6 @@
\
BUG_TABLE \
\
- JUMP_TABLE \
- \
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -274,70 +276,70 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(__ksymtab) \
+ *(SORT(___ksymtab+*)) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(__ksymtab_gpl) \
+ *(SORT(___ksymtab_gpl+*)) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(__ksymtab_unused) \
+ *(SORT(___ksymtab_unused+*)) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(__ksymtab_unused_gpl) \
+ *(SORT(___ksymtab_unused_gpl+*)) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(__ksymtab_gpl_future) \
+ *(SORT(___ksymtab_gpl_future+*)) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(__kcrctab) \
+ *(SORT(___kcrctab+*)) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(__kcrctab_gpl) \
+ *(SORT(___kcrctab_gpl+*)) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
- *(__kcrctab_unused) \
+ *(SORT(___kcrctab_unused+*)) \
VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(__kcrctab_unused_gpl) \
+ *(SORT(___kcrctab_unused_gpl+*)) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(__kcrctab_gpl_future) \
+ *(SORT(___kcrctab_gpl_future+*)) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
@@ -589,14 +591,6 @@
#define BUG_TABLE
#endif
-#define JUMP_TABLE \
- . = ALIGN(8); \
- __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
- VMLINUX_SYMBOL(__stop___jump_table) = .; \
- }
-
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index ade09d7b4271..c99c3d3e7811 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -127,7 +127,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
-bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index c2f93a8ae2e1..564b14aa7e16 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -86,7 +86,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
}
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
&(mm)->head_node.node_list, \
- node_list);
+ node_list)
#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
for (entry = (mm)->prev_scanned_node, \
next = entry ? list_entry(entry->node_list.next, \
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 816e30cbd968..f04b2a3b0f49 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -155,6 +155,7 @@
{0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
@@ -167,6 +168,7 @@
{0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -199,6 +201,7 @@
{0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
{0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
{0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -209,7 +212,9 @@
{0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 7aa5dddb2098..787f7b6fd622 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -910,6 +910,7 @@ struct drm_radeon_cs {
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
#define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
+#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
struct drm_radeon_info {
uint32_t request;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index b8613e806aa9..01eca1794e14 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -111,6 +111,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_node_nopanic(pgdat, x) \
+ __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
new file mode 100644
index 000000000000..90b1aa867224
--- /dev/null
+++ b/include/linux/bsearch.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_BSEARCH_H
+#define _LINUX_BSEARCH_H
+
+#include <linux/types.h>
+
+void *bsearch(const void *key, const void *base, size_t num, size_t size,
+ int (*cmp)(const void *key, const void *elt));
+
+#endif /* _LINUX_BSEARCH_H */
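[Editor's note: a minimal usage sketch of the bsearch() helper declared in the new header above; the array, key type, and comparison function are illustrative, not part of the patch.]

	#include <linux/bsearch.h>

	static int cmp_u32(const void *key, const void *elt)
	{
		u32 k = *(const u32 *)key;
		u32 e = *(const u32 *)elt;

		if (k < e)
			return -1;
		if (k > e)
			return 1;
		return 0;
	}

	/* the table must already be sorted in ascending order */
	static const u32 table[] = { 3, 7, 19, 42, 101 };

	static bool my_table_contains(u32 val)
	{
		return bsearch(&val, table, ARRAY_SIZE(table),
			       sizeof(table[0]), cmp_u32) != NULL;
	}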
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 7cb23eae693d..4554db0cde86 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -551,18 +551,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool task_ns_capable(struct task_struct *t, int cap);
-
-/**
- * nsown_capable - Check superior capability to one's own user_ns
- * @cap: The capability in question
- *
- * Return true if the current task has the given superior capability
- * targeted at its own user namespace.
- */
-static inline bool nsown_capable(int cap)
-{
- return ns_capable(current_user_ns(), cap);
-}
+extern bool nsown_capable(int cap);
/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index fc53492b6ad7..d6733e27af34 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -56,46 +56,52 @@ enum clock_event_nofitiers {
/**
* struct clock_event_device - clock event device descriptor
- * @name: ptr to clock event name
- * @features: features
+ * @event_handler: Assigned by the framework to be called by the low
+ * level handler of the event source
+ * @set_next_event: set next event function
+ * @next_event: local storage for the next event in oneshot mode
* @max_delta_ns: maximum delta value in ns
* @min_delta_ns: minimum delta value in ns
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
+ * @mode: operating mode assigned by the management code
+ * @features: features
+ * @retries: number of forced programming retries
+ * @set_mode: set mode function
+ * @broadcast: function to broadcast events
+ * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
+ * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
+ * @name: ptr to clock event name
* @rating: variable to rate clock event devices
* @irq: IRQ number (only for non CPU local devices)
* @cpumask: cpumask to indicate for which CPUs this device works
- * @set_next_event: set next event function
- * @set_mode: set mode function
- * @event_handler: Assigned by the framework to be called by the low
- * level handler of the event source
- * @broadcast: function to broadcast events
* @list: list head for the management code
- * @mode: operating mode assigned by the management code
- * @next_event: local storage for the next event in oneshot mode
- * @retries: number of forced programming retries
*/
struct clock_event_device {
- const char *name;
- unsigned int features;
+ void (*event_handler)(struct clock_event_device *);
+ int (*set_next_event)(unsigned long evt,
+ struct clock_event_device *);
+ ktime_t next_event;
u64 max_delta_ns;
u64 min_delta_ns;
u32 mult;
u32 shift;
+ enum clock_event_mode mode;
+ unsigned int features;
+ unsigned long retries;
+
+ void (*broadcast)(const struct cpumask *mask);
+ void (*set_mode)(enum clock_event_mode mode,
+ struct clock_event_device *);
+ unsigned long min_delta_ticks;
+ unsigned long max_delta_ticks;
+
+ const char *name;
int rating;
int irq;
const struct cpumask *cpumask;
- int (*set_next_event)(unsigned long evt,
- struct clock_event_device *);
- void (*set_mode)(enum clock_event_mode mode,
- struct clock_event_device *);
- void (*event_handler)(struct clock_event_device *);
- void (*broadcast)(const struct cpumask *mask);
struct list_head list;
- enum clock_event_mode mode;
- ktime_t next_event;
- unsigned long retries;
-};
+} ____cacheline_aligned;
/*
* Calculate a multiplication factor for scaled math, which is used to convert
@@ -122,6 +128,12 @@ extern u64 clockevent_delta2ns(unsigned long latch,
struct clock_event_device *evt);
extern void clockevents_register_device(struct clock_event_device *dev);
+extern void clockevents_config_and_register(struct clock_event_device *dev,
+ u32 freq, unsigned long min_delta,
+ unsigned long max_delta);
+
+extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
+
extern void clockevents_exchange_device(struct clock_event_device *old,
struct clock_event_device *new);
extern void clockevents_set_mode(struct clock_event_device *dev,
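[Editor's note: a sketch of how a timer driver might use the clockevents_config_and_register() helper declared above, letting the core derive mult/shift and the ns deltas from the clock frequency. The device name, callbacks, clock rate, and counter limits are assumptions.]

	static int my_set_next_event(unsigned long evt, struct clock_event_device *cd);
	static void my_set_mode(enum clock_event_mode mode, struct clock_event_device *cd);

	static struct clock_event_device my_clkevt = {
		.name		= "my-timer",
		.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
		.rating		= 300,
		.set_next_event	= my_set_next_event,	/* hypothetical callbacks */
		.set_mode	= my_set_mode,
	};

	static void __init my_timer_init(void)
	{
		my_clkevt.cpumask = cpumask_of(0);
		/* 24 MHz input clock, counter programmable from 2 to 0xffffffff ticks */
		clockevents_config_and_register(&my_clkevt, 24000000, 2, 0xffffffff);
	}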
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index c37b21ad5a3b..c918fbd33ee5 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -159,42 +159,38 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
*/
struct clocksource {
/*
- * First part of structure is read mostly
+ * Hotpath data, fits in a single cache line when the
+ * clocksource itself is cacheline aligned.
*/
- char *name;
- struct list_head list;
- int rating;
cycle_t (*read)(struct clocksource *cs);
- int (*enable)(struct clocksource *cs);
- void (*disable)(struct clocksource *cs);
+ cycle_t cycle_last;
cycle_t mask;
u32 mult;
u32 shift;
u64 max_idle_ns;
- unsigned long flags;
- cycle_t (*vread)(void);
- void (*suspend)(struct clocksource *cs);
- void (*resume)(struct clocksource *cs);
+
#ifdef CONFIG_IA64
void *fsys_mmio; /* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0)
#endif
-
- /*
- * Second part is written at each timer interrupt
- * Keep it in a different cache line to dirty no
- * more than one cache line.
- */
- cycle_t cycle_last ____cacheline_aligned_in_smp;
+ const char *name;
+ struct list_head list;
+ int rating;
+ cycle_t (*vread)(void);
+ int (*enable)(struct clocksource *cs);
+ void (*disable)(struct clocksource *cs);
+ unsigned long flags;
+ void (*suspend)(struct clocksource *cs);
+ void (*resume)(struct clocksource *cs);
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
struct list_head wd_list;
cycle_t wd_last;
#endif
-};
+} ____cacheline_aligned;
/*
* Clock source flags bits::
@@ -341,4 +337,6 @@ static inline void update_vsyscall_tz(void)
extern void timekeeping_notify(struct clocksource *clock);
+extern int clocksource_i8253_init(void);
+
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 9343dd3de858..11be48e0d168 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001 Russell King
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -56,9 +56,9 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
-/* Frequency values here are CPU kHz so that hardware which doesn't run
- * with some frequencies can complain without having to guess what per
- * cent / per mille means.
+/* Frequency values here are CPU kHz so that hardware which doesn't run
+ * with some frequencies can complain without having to guess what per
+ * cent / per mille means.
* Maximum transition latency is in nanoseconds - if it's unknown,
* CPUFREQ_ETERNAL shall be used.
*/
@@ -72,13 +72,15 @@ extern struct kobject *cpufreq_global_kobject;
struct cpufreq_cpuinfo {
unsigned int max_freq;
unsigned int min_freq;
- unsigned int transition_latency; /* in 10^(-9) s = nanoseconds */
+
+ /* in 10^(-9) s = nanoseconds */
+ unsigned int transition_latency;
};
struct cpufreq_real_policy {
unsigned int min; /* in kHz */
unsigned int max; /* in kHz */
- unsigned int policy; /* see above */
+ unsigned int policy; /* see above */
struct cpufreq_governor *governor; /* see below */
};
@@ -94,7 +96,7 @@ struct cpufreq_policy {
unsigned int max; /* in kHz */
unsigned int cur; /* in kHz, only needed if cpufreq
* governors are used */
- unsigned int policy; /* see above */
+ unsigned int policy; /* see above */
struct cpufreq_governor *governor; /* see below */
struct work_struct update; /* if update_policy() needs to be
@@ -167,11 +169,11 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
- int (*governor) (struct cpufreq_policy *policy,
+ int (*governor) (struct cpufreq_policy *policy,
unsigned int event);
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
char *buf);
- int (*store_setspeed) (struct cpufreq_policy *policy,
+ int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
unsigned int max_transition_latency; /* HW must be able to switch to
next freq faster than this value in nano secs or we
@@ -180,7 +182,8 @@ struct cpufreq_governor {
struct module *owner;
};
-/* pass a target to the cpufreq driver
+/*
+ * Pass a target to the cpufreq driver.
*/
extern int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -237,9 +240,9 @@ struct cpufreq_driver {
/* flags */
-#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
+#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
* all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
+#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
* "constants" aren't affected by
* frequency transitions */
#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
@@ -252,7 +255,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state);
-static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max)
+static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max)
{
if (policy->min < min)
policy->min = min;
@@ -386,34 +389,15 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
/* the following 3 funtions are for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
-void cpufreq_cpu_put (struct cpufreq_policy *data);
+void cpufreq_cpu_put(struct cpufreq_policy *data);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
-void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
+void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu);
void cpufreq_frequency_table_put_attr(unsigned int cpu);
-/*********************************************************************
- * UNIFIED DEBUG HELPERS *
- *********************************************************************/
-
-#define CPUFREQ_DEBUG_CORE 1
-#define CPUFREQ_DEBUG_DRIVER 2
-#define CPUFREQ_DEBUG_GOVERNOR 4
-
-#ifdef CONFIG_CPU_FREQ_DEBUG
-
-extern void cpufreq_debug_printk(unsigned int type, const char *prefix,
- const char *fmt, ...);
-
-#else
-
-#define cpufreq_debug_printk(msg...) do { } while(0)
-
-#endif /* CONFIG_CPU_FREQ_DEBUG */
-
#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 9aeeb0ba2003..be16b61283cc 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -146,6 +146,7 @@ struct cred {
void *security; /* subjective LSM security */
#endif
struct user_struct *user; /* real user ID subscription */
+ struct user_namespace *user_ns; /* cached user->user_ns */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
struct rcu_head rcu; /* RCU deletion hook */
};
@@ -354,10 +355,15 @@ static inline void put_cred(const struct cred *_cred)
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
-#define _current_user_ns() (current_cred_xxx(user)->user_ns)
#define current_security() (current_cred_xxx(security))
-extern struct user_namespace *current_user_ns(void);
+#ifdef CONFIG_USER_NS
+#define current_user_ns() (current_cred_xxx(user_ns))
+#else
+extern struct user_namespace init_user_ns;
+#define current_user_ns() (&init_user_ns)
+#endif
+
#define current_uid_gid(_uid, _gid) \
do { \
diff --git a/include/linux/device.h b/include/linux/device.h
index ab8dfc095709..0d7535000821 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -442,7 +442,6 @@ struct device {
struct dev_archdata archdata;
struct device_node *of_node; /* associated device tree node */
- const struct of_device_id *of_match; /* matching of_device_id from driver */
dev_t devt; /* dev_t, creates the sysfs "dev" */
@@ -633,13 +632,6 @@ static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
/* drivers/base/power/shutdown.c */
extern void device_shutdown(void);
-#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
-/* drivers/base/sys.c */
-extern void sysdev_shutdown(void);
-#else
-static inline void sysdev_shutdown(void) { }
-#endif
-
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 0c9653f11c18..e747ecd48e1c 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,8 +1,6 @@
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
-#include <linux/jump_label.h>
-
/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
* bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
* use independent hash functions, to reduce the chance of false positives.
diff --git a/include/linux/fb.h b/include/linux/fb.h
index df728c1c29ed..6a8274877171 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -832,6 +832,7 @@ struct fb_tile_ops {
#define FBINFO_CAN_FORCE_OUTPUT 0x200000
struct fb_info {
+ atomic_t count;
int node;
int flags;
struct mutex lock; /* Lock for open/release/ioctl funcs */
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index 70e4efabe0fb..ebeb2f3ad068 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -61,7 +61,7 @@ struct flex_array {
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags);
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
- unsigned int end, gfp_t flags);
+ unsigned int nr_elements, gfp_t flags);
void flex_array_free(struct flex_array *fa);
void flex_array_free_parts(struct flex_array *fa);
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
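[Editor's note: the renamed third argument signals a semantic change -- flex_array_prealloc() now takes a count of elements rather than an end index. A minimal sketch under that assumption; the element type and sizes are illustrative.]

	static struct flex_array *my_setup(void)
	{
		struct flex_array *fa;

		fa = flex_array_alloc(sizeof(struct my_record), 128, GFP_KERNEL);
		if (!fa)
			return NULL;

		/* reserve parts for elements [16, 16 + 8): the third argument
		 * is a number of elements, not an end index */
		if (flex_array_prealloc(fa, 16, 8, GFP_KERNEL)) {
			flex_array_free(fa);
			return NULL;
		}
		return fa;
	}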
diff --git a/include/linux/fs.h b/include/linux/fs.h
index dbd860af0804..cdf9495df204 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -358,7 +358,6 @@ struct inodes_stat_t {
#define FS_EXTENT_FL 0x00080000 /* Extents */
#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */
#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
-#define FS_COW_FL 0x02000000 /* Cow file */
#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index ca29e03c1fac..9d88e1cb5dbb 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -29,9 +29,22 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+struct ftrace_hash;
+
+enum {
+ FTRACE_OPS_FL_ENABLED = 1 << 0,
+ FTRACE_OPS_FL_GLOBAL = 1 << 1,
+ FTRACE_OPS_FL_DYNAMIC = 1 << 2,
+};
+
struct ftrace_ops {
- ftrace_func_t func;
- struct ftrace_ops *next;
+ ftrace_func_t func;
+ struct ftrace_ops *next;
+ unsigned long flags;
+#ifdef CONFIG_DYNAMIC_FTRACE
+ struct ftrace_hash *notrace_hash;
+ struct ftrace_hash *filter_hash;
+#endif
};
extern int function_trace_stop;
@@ -146,14 +159,13 @@ extern void unregister_ftrace_function_probe_all(char *glob);
extern int ftrace_text_reserved(void *start, void *end);
enum {
- FTRACE_FL_FREE = (1 << 0),
- FTRACE_FL_FAILED = (1 << 1),
- FTRACE_FL_FILTER = (1 << 2),
- FTRACE_FL_ENABLED = (1 << 3),
- FTRACE_FL_NOTRACE = (1 << 4),
- FTRACE_FL_CONVERTED = (1 << 5),
+ FTRACE_FL_ENABLED = (1 << 30),
+ FTRACE_FL_FREE = (1 << 31),
};
+#define FTRACE_FL_MASK (0x3UL << 30)
+#define FTRACE_REF_MAX ((1 << 30) - 1)
+
struct dyn_ftrace {
union {
unsigned long ip; /* address of mcount call-site */
@@ -167,7 +179,12 @@ struct dyn_ftrace {
};
int ftrace_force_update(void);
-void ftrace_set_filter(unsigned char *buf, int len, int reset);
+void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
+void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
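[Editor's note: a sketch of the per-ops filtering interface declared above -- each ftrace_ops now carries its own filter/notrace hashes, and ftrace_set_filter() takes the ops as its first argument. The callback and filter pattern are illustrative.]

	static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
	{
		/* runs for every traced function matching this ops' filter hash */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_trace_func,
	};

	static int __init my_tracer_init(void)
	{
		static unsigned char pattern[] = "vfs_*";

		ftrace_set_filter(&my_ops, pattern, sizeof(pattern) - 1, 1);
		return register_ftrace_function(&my_ops);
	}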
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 22b32af1b5ec..b5a550a39a70 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -37,6 +37,7 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
+ int padding;
};
#define FTRACE_MAX_EVENT \
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bfb8f934521e..56d8fc87fbbc 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -353,6 +353,8 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
+/* This is different from alloc_pages_exact_node !!! */
+void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
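[Editor's note: as the comment above warns, alloc_pages_exact_nid() is a node-aware variant of alloc_pages_exact() -- it returns a kernel virtual address for an exact byte size -- and is not interchangeable with alloc_pages_exact_node(). A minimal sketch; the node id and size are assumptions.]

	static void *my_node0_buffer(void)
	{
		/* 16 KiB physically contiguous buffer allocated on node 0;
		 * release it later with free_pages_exact(buf, 16 * 1024) */
		return alloc_pages_exact_nid(0, 16 * 1024, GFP_KERNEL);
	}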
diff --git a/include/linux/init.h b/include/linux/init.h
index 577671c55153..9146f39cdddf 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -79,29 +79,29 @@
#define __exitused __used
#endif
-#define __exit __section(.exit.text) __exitused __cold
+#define __exit __section(.exit.text) __exitused __cold notrace
/* Used for HOTPLUG */
-#define __devinit __section(.devinit.text) __cold
+#define __devinit __section(.devinit.text) __cold notrace
#define __devinitdata __section(.devinit.data)
#define __devinitconst __section(.devinit.rodata)
-#define __devexit __section(.devexit.text) __exitused __cold
+#define __devexit __section(.devexit.text) __exitused __cold notrace
#define __devexitdata __section(.devexit.data)
#define __devexitconst __section(.devexit.rodata)
/* Used for HOTPLUG_CPU */
-#define __cpuinit __section(.cpuinit.text) __cold
+#define __cpuinit __section(.cpuinit.text) __cold notrace
#define __cpuinitdata __section(.cpuinit.data)
#define __cpuinitconst __section(.cpuinit.rodata)
-#define __cpuexit __section(.cpuexit.text) __exitused __cold
+#define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
#define __cpuexitdata __section(.cpuexit.data)
#define __cpuexitconst __section(.cpuexit.rodata)
/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(.meminit.text) __cold
+#define __meminit __section(.meminit.text) __cold notrace
#define __meminitdata __section(.meminit.data)
#define __meminitconst __section(.meminit.rodata)
-#define __memexit __section(.memexit.text) __exitused __cold
+#define __memexit __section(.memexit.text) __exitused __cold notrace
#define __memexitdata __section(.memexit.data)
#define __memexitconst __section(.memexit.rodata)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index caa151fbebb7..689496bb6654 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -134,7 +134,6 @@ extern struct cred init_cred;
.stack = &init_thread_info, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
- .lock_depth = -1, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
.normal_prio = MAX_PRIO-20, \
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 09a308072f56..8b4538446636 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -53,12 +53,13 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
* Bits which can be modified via irq_set/clear/modify_status_flags()
* IRQ_LEVEL - Interrupt is level type. Will be also
* updated in the code when the above trigger
- * bits are modified via set_irq_type()
+ * bits are modified via irq_set_irq_type()
* IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect
* it from affinity setting
* IRQ_NOPROBE - Interrupt cannot be probed by autoprobing
* IRQ_NOREQUEST - Interrupt cannot be requested via
* request_irq()
+ * IRQ_NOTHREAD - Interrupt cannot be threaded
* IRQ_NOAUTOEN - Interrupt is not automatically enabled in
* request/setup_irq()
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
@@ -85,6 +86,7 @@ enum {
IRQ_NO_BALANCING = (1 << 13),
IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
+ IRQ_NOTHREAD = (1 << 16),
};
#define IRQF_MODIFY_MASK \
@@ -261,23 +263,6 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
* struct irq_chip - hardware interrupt chip descriptor
*
* @name: name for /proc/interrupts
- * @startup: deprecated, replaced by irq_startup
- * @shutdown: deprecated, replaced by irq_shutdown
- * @enable: deprecated, replaced by irq_enable
- * @disable: deprecated, replaced by irq_disable
- * @ack: deprecated, replaced by irq_ack
- * @mask: deprecated, replaced by irq_mask
- * @mask_ack: deprecated, replaced by irq_mask_ack
- * @unmask: deprecated, replaced by irq_unmask
- * @eoi: deprecated, replaced by irq_eoi
- * @end: deprecated, will go away with __do_IRQ()
- * @set_affinity: deprecated, replaced by irq_set_affinity
- * @retrigger: deprecated, replaced by irq_retrigger
- * @set_type: deprecated, replaced by irq_set_type
- * @set_wake: deprecated, replaced by irq_wake
- * @bus_lock: deprecated, replaced by irq_bus_lock
- * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock
- *
* @irq_startup: start up the interrupt (defaults to ->enable if NULL)
* @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
* @irq_enable: enable the interrupt (defaults to chip->unmask if NULL)
@@ -295,6 +280,9 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
* @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
* @irq_cpu_online: configure an interrupt source for a secondary CPU
* @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
+ * @irq_suspend: function called from core code on suspend once per chip
+ * @irq_resume: function called from core code on resume once per chip
+ * @irq_pm_shutdown: function called from core code on shutdown once per chip
* @irq_print_chip: optional to print special chip info in show_interrupts
* @flags: chip specific flags
*
@@ -324,6 +312,10 @@ struct irq_chip {
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
+ void (*irq_suspend)(struct irq_data *data);
+ void (*irq_resume)(struct irq_data *data);
+ void (*irq_pm_shutdown)(struct irq_data *data);
+
void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
unsigned long flags;
@@ -439,7 +431,7 @@ irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
/*
* Set a highlevel chained flow handler for a given IRQ.
* (a chained handler is automatically enabled and set to
- * IRQ_NOREQUEST and IRQ_NOPROBE)
+ * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
*/
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
@@ -469,6 +461,16 @@ static inline void irq_set_probe(unsigned int irq)
irq_modify_status(irq, IRQ_NOPROBE, 0);
}
+static inline void irq_set_nothread(unsigned int irq)
+{
+ irq_modify_status(irq, 0, IRQ_NOTHREAD);
+}
+
+static inline void irq_set_thread(unsigned int irq)
+{
+ irq_modify_status(irq, IRQ_NOTHREAD, 0);
+}
+
static inline void irq_set_nested_thread(unsigned int irq, bool nest)
{
if (nest)
@@ -573,6 +575,145 @@ static inline int irq_reserve_irq(unsigned int irq)
return irq_reserve_irqs(irq, 1);
}
+#ifndef irq_reg_writel
+# define irq_reg_writel(val, addr) writel(val, addr)
+#endif
+#ifndef irq_reg_readl
+# define irq_reg_readl(addr) readl(addr)
+#endif
+
+/**
+ * struct irq_chip_regs - register offsets for struct irq_gci
+ * @enable: Enable register offset to reg_base
+ * @disable: Disable register offset to reg_base
+ * @mask: Mask register offset to reg_base
+ * @ack: Ack register offset to reg_base
+ * @eoi: Eoi register offset to reg_base
+ * @type: Type configuration register offset to reg_base
+ * @polarity: Polarity configuration register offset to reg_base
+ */
+struct irq_chip_regs {
+ unsigned long enable;
+ unsigned long disable;
+ unsigned long mask;
+ unsigned long ack;
+ unsigned long eoi;
+ unsigned long type;
+ unsigned long polarity;
+};
+
+/**
+ * struct irq_chip_type - Generic interrupt chip instance for a flow type
+ * @chip: The real interrupt chip which provides the callbacks
+ * @regs: Register offsets for this chip
+ * @handler: Flow handler associated with this chip
+ * @type: Chip can handle these flow types
+ *
+ * A irq_generic_chip can have several instances of irq_chip_type when
+ * it requires different functions and register offsets for different
+ * flow types.
+ */
+struct irq_chip_type {
+ struct irq_chip chip;
+ struct irq_chip_regs regs;
+ irq_flow_handler_t handler;
+ u32 type;
+};
+
+/**
+ * struct irq_chip_generic - Generic irq chip data structure
+ * @lock: Lock to protect register and cache data access
+ * @reg_base: Register base address (virtual)
+ * @irq_base: Interrupt base nr for this chip
+ * @irq_cnt: Number of interrupts handled by this chip
+ * @mask_cache: Cached mask register
+ * @type_cache: Cached type register
+ * @polarity_cache: Cached polarity register
+ * @wake_enabled: Interrupt can wakeup from suspend
+ * @wake_active: Interrupt is marked as an wakeup from suspend source
+ * @num_ct: Number of available irq_chip_type instances (usually 1)
+ * @private: Private data for non generic chip callbacks
+ * @list: List head for keeping track of instances
+ * @chip_types: Array of interrupt irq_chip_types
+ *
+ * Note, that irq_chip_generic can have multiple irq_chip_type
+ * implementations which can be associated to a particular irq line of
+ * an irq_chip_generic instance. That allows to share and protect
+ * state in an irq_chip_generic instance when we need to implement
+ * different flow mechanisms (level/edge) for it.
+ */
+struct irq_chip_generic {
+ raw_spinlock_t lock;
+ void __iomem *reg_base;
+ unsigned int irq_base;
+ unsigned int irq_cnt;
+ u32 mask_cache;
+ u32 type_cache;
+ u32 polarity_cache;
+ u32 wake_enabled;
+ u32 wake_active;
+ unsigned int num_ct;
+ void *private;
+ struct list_head list;
+ struct irq_chip_type chip_types[0];
+};
+
+/**
+ * enum irq_gc_flags - Initialization flags for generic irq chips
+ * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg
+ * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for
+ * irq chips which need to call irq_set_wake() on
+ * the parent irq. Usually GPIO implementations
+ */
+enum irq_gc_flags {
+ IRQ_GC_INIT_MASK_CACHE = 1 << 0,
+ IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
+};
+
+/* Generic chip callback functions */
+void irq_gc_noop(struct irq_data *d);
+void irq_gc_mask_disable_reg(struct irq_data *d);
+void irq_gc_mask_set_bit(struct irq_data *d);
+void irq_gc_mask_clr_bit(struct irq_data *d);
+void irq_gc_unmask_enable_reg(struct irq_data *d);
+void irq_gc_ack(struct irq_data *d);
+void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_eoi(struct irq_data *d);
+int irq_gc_set_wake(struct irq_data *d, unsigned int on);
+
+/* Setup functions for irq_chip_generic */
+struct irq_chip_generic *
+irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
+ void __iomem *reg_base, irq_flow_handler_t handler);
+void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ enum irq_gc_flags flags, unsigned int clr,
+ unsigned int set);
+int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
+void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ unsigned int clr, unsigned int set);
+
+static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
+{
+ return container_of(d->chip, struct irq_chip_type, chip);
+}
+
+#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
+
+#ifdef CONFIG_SMP
+static inline void irq_gc_lock(struct irq_chip_generic *gc)
+{
+ raw_spin_lock(&gc->lock);
+}
+
+static inline void irq_gc_unlock(struct irq_chip_generic *gc)
+{
+ raw_spin_unlock(&gc->lock);
+}
+#else
+static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
+static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
+#endif
+
#endif /* CONFIG_GENERIC_HARDIRQS */
#endif /* !CONFIG_S390 */
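[Editor's note: the hunk above introduces the generic irq chip infrastructure. A minimal sketch of how a driver for a simple memory-mapped controller might wire it up; the controller name, register offsets, and interrupt count are assumptions.]

	static void __init my_intc_init(void __iomem *base, unsigned int irq_base)
	{
		struct irq_chip_generic *gc;
		struct irq_chip_type *ct;

		gc = irq_alloc_generic_chip("MYINTC", 1, irq_base, base,
					    handle_level_irq);
		ct = gc->chip_types;

		ct->chip.irq_mask	= irq_gc_mask_set_bit;
		ct->chip.irq_unmask	= irq_gc_mask_clr_bit;
		ct->chip.irq_ack	= irq_gc_ack;
		ct->regs.mask		= 0x04;	/* assumed register offsets */
		ct->regs.ack		= 0x08;

		/* 32 interrupts, prime the mask cache from hardware, and clear
		 * IRQ_NOREQUEST so drivers can request these irqs */
		irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
				       IRQ_NOREQUEST, 0);
	}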
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index a082905b5ebe..2d921b35212c 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -16,16 +16,18 @@ struct timer_rand_state;
* @irq_data: per irq and chip data passed down to chip functions
* @timer_rand_state: pointer to timer rand state struct
* @kstat_irqs: irq stats per cpu
- * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
+ * @handle_irq: highlevel irq-events handler
+ * @preflow_handler: handler called before the flow handler (currently used by sparc)
* @action: the irq action chain
* @status: status information
* @core_internal_state__do_not_mess_with_it: core internal status information
* @depth: disable-depth, for nested irq_disable() calls
- * @wake_depth: enable depth, for multiple set_irq_wake() callers
+ * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
+ * @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
* @threads_oneshot: bitfield to handle shared oneshot threads
@@ -109,10 +111,7 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de
desc->handle_irq(irq, desc);
}
-static inline void generic_handle_irq(unsigned int irq)
-{
- generic_handle_irq_desc(irq, irq_to_desc(irq));
-}
+int generic_handle_irq(unsigned int irq);
/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 7880f18e4b86..83e745f3ead7 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -1,20 +1,43 @@
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H
+#include <linux/types.h>
+#include <linux/compiler.h>
+
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+
+struct jump_label_key {
+ atomic_t enabled;
+ struct jump_entry *entries;
+#ifdef CONFIG_MODULES
+ struct jump_label_mod *next;
+#endif
+};
+
# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
#endif
enum jump_label_type {
+ JUMP_LABEL_DISABLE = 0,
JUMP_LABEL_ENABLE,
- JUMP_LABEL_DISABLE
};
struct module;
#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_MODULES
+#define JUMP_LABEL_INIT {{ 0 }, NULL, NULL}
+#else
+#define JUMP_LABEL_INIT {{ 0 }, NULL}
+#endif
+
+static __always_inline bool static_branch(struct jump_label_key *key)
+{
+ return arch_static_branch(key);
+}
+
extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
@@ -23,37 +46,37 @@ extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type);
extern void arch_jump_label_text_poke_early(jump_label_t addr);
-extern void jump_label_update(unsigned long key, enum jump_label_type type);
-extern void jump_label_apply_nops(struct module *mod);
extern int jump_label_text_reserved(void *start, void *end);
+extern void jump_label_inc(struct jump_label_key *key);
+extern void jump_label_dec(struct jump_label_key *key);
+extern bool jump_label_enabled(struct jump_label_key *key);
+extern void jump_label_apply_nops(struct module *mod);
-#define jump_label_enable(key) \
- jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
+#else
-#define jump_label_disable(key) \
- jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
+#include <asm/atomic.h>
-#else
+#define JUMP_LABEL_INIT {ATOMIC_INIT(0)}
-#define JUMP_LABEL(key, label) \
-do { \
- if (unlikely(*key)) \
- goto label; \
-} while (0)
+struct jump_label_key {
+ atomic_t enabled;
+};
-#define jump_label_enable(cond_var) \
-do { \
- *(cond_var) = 1; \
-} while (0)
+static __always_inline bool static_branch(struct jump_label_key *key)
+{
+ if (unlikely(atomic_read(&key->enabled)))
+ return true;
+ return false;
+}
-#define jump_label_disable(cond_var) \
-do { \
- *(cond_var) = 0; \
-} while (0)
+static inline void jump_label_inc(struct jump_label_key *key)
+{
+ atomic_inc(&key->enabled);
+}
-static inline int jump_label_apply_nops(struct module *mod)
+static inline void jump_label_dec(struct jump_label_key *key)
{
- return 0;
+ atomic_dec(&key->enabled);
}
static inline int jump_label_text_reserved(void *start, void *end)
@@ -64,16 +87,16 @@ static inline int jump_label_text_reserved(void *start, void *end)
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
-#endif
+static inline bool jump_label_enabled(struct jump_label_key *key)
+{
+ return !!atomic_read(&key->enabled);
+}
-#define COND_STMT(key, stmt) \
-do { \
- __label__ jl_enabled; \
- JUMP_LABEL(key, jl_enabled); \
- if (0) { \
-jl_enabled: \
- stmt; \
- } \
-} while (0)
+static inline int jump_label_apply_nops(struct module *mod)
+{
+ return 0;
+}
+
+#endif
#endif
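[Editor's note: a sketch of the reworked jump label interface above -- a jump_label_key is tested with static_branch() on the hot path and flipped with jump_label_inc()/jump_label_dec() from control code. The key and the traced helper are illustrative.]

	static struct jump_label_key my_trace_key;

	static void my_do_tracing(void)
	{
		/* hypothetical slow path, normally patched out */
	}

	static inline void my_fast_path(void)
	{
		if (static_branch(&my_trace_key))
			my_do_tracing();
	}

	static void my_tracing_ctl(bool enable)
	{
		if (enable)
			jump_label_inc(&my_trace_key);	/* patch the branch in */
		else
			jump_label_dec(&my_trace_key);	/* patch it back out */
	}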
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h
deleted file mode 100644
index e5d012ad92c6..000000000000
--- a/include/linux/jump_label_ref.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _LINUX_JUMP_LABEL_REF_H
-#define _LINUX_JUMP_LABEL_REF_H
-
-#include <linux/jump_label.h>
-#include <asm/atomic.h>
-
-#ifdef HAVE_JUMP_LABEL
-
-static inline void jump_label_inc(atomic_t *key)
-{
- if (atomic_add_return(1, key) == 1)
- jump_label_enable(key);
-}
-
-static inline void jump_label_dec(atomic_t *key)
-{
- if (atomic_dec_and_test(key))
- jump_label_disable(key);
-}
-
-#else /* !HAVE_JUMP_LABEL */
-
-static inline void jump_label_inc(atomic_t *key)
-{
- atomic_inc(key);
-}
-
-static inline void jump_label_dec(atomic_t *key)
-{
- atomic_dec(key);
-}
-
-#undef JUMP_LABEL
-#define JUMP_LABEL(key, label) \
-do { \
- if (unlikely(__builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(key), atomic_t *), \
- atomic_read((atomic_t *)(key)), *(key)))) \
- goto label; \
-} while (0)
-
-#endif /* HAVE_JUMP_LABEL */
-
-#endif /* _LINUX_JUMP_LABEL_REF_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 00cec4dc0ae2..f37ba716ef8b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -283,6 +283,7 @@ extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern int core_kernel_text(unsigned long addr);
+extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 6efd7a78de6a..310231823852 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -113,5 +113,6 @@ extern void usermodehelper_init(void);
extern int usermodehelper_disable(void);
extern void usermodehelper_enable(void);
+extern bool usermodehelper_is_disabled(void);
#endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/list.h b/include/linux/list.h
index 3a54266a1e85..cc6d2aa6b415 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/poison.h>
-#include <linux/prefetch.h>
+#include <linux/const.h>
/*
* Simple doubly linked list implementation.
@@ -367,18 +367,15 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
- for (pos = (head)->next; prefetch(pos->next), pos != (head); \
- pos = pos->next)
+ for (pos = (head)->next; pos != (head); pos = pos->next)
/**
* __list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
- * This variant differs from list_for_each() in that it's the
- * simplest possible list iteration code, no prefetching is done.
- * Use this for code that knows the list to be very short (empty
- * or 1 entry) most of the time.
+ * This variant doesn't differ from list_for_each() any more.
+ * We don't do prefetching in either case.
*/
#define __list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
@@ -389,8 +386,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
- pos = pos->prev)
+ for (pos = (head)->prev; pos != (head); pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
@@ -410,7 +406,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
- prefetch(pos->prev), pos != (head); \
+ pos != (head); \
pos = n, n = pos->prev)
/**
@@ -421,7 +417,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
+ &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
@@ -432,7 +428,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
+ &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
@@ -457,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
+ &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
@@ -471,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
+ &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
@@ -483,7 +479,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate over list of given type, continuing from current position.
*/
#define list_for_each_entry_from(pos, head, member) \
- for (; prefetch(pos->member.next), &pos->member != (head); \
+ for (; &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
@@ -664,8 +660,7 @@ static inline void hlist_move_list(struct hlist_head *old,
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
- for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
- pos = pos->next)
+ for (pos = (head)->first; pos ; pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
@@ -680,7 +675,7 @@ static inline void hlist_move_list(struct hlist_head *old,
*/
#define hlist_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
- pos && ({ prefetch(pos->next); 1;}) && \
+ pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
@@ -692,7 +687,7 @@ static inline void hlist_move_list(struct hlist_head *old,
*/
#define hlist_for_each_entry_continue(tpos, pos, member) \
for (pos = (pos)->next; \
- pos && ({ prefetch(pos->next); 1;}) && \
+ pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
@@ -703,7 +698,7 @@ static inline void hlist_move_list(struct hlist_head *old,
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_from(tpos, pos, member) \
- for (; pos && ({ prefetch(pos->next); 1;}) && \
+ for (; pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index afe4db49402d..632d1567a1b6 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -81,7 +81,9 @@ struct wm831x_touch_pdata {
int rpu; /** Pen down sensitivity resistor divider */
int pressure; /** Report pressure (boolean) */
unsigned int data_irq; /** Touch data ready IRQ */
+ int data_irqf; /** IRQ flags for data ready IRQ */
unsigned int pd_irq; /** Touch pendown detect IRQ */
+ int pd_irqf; /** IRQ flags for pen down IRQ */
};
enum wm831x_watchdog_action {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2348db26bc3d..6507dde38b16 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1011,11 +1011,33 @@ int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSDOWN) &&
+ (vma->vm_start == addr) &&
+ !vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSUP) &&
+ (vma->vm_end == addr) &&
+ !vma_growsup(vma->vm_next, addr);
+}
+
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len);
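[Editor's note: a sketch of how a fault path might combine the two guard-page helpers added above to decide whether a faulting address lands on a stack guard page; the wrapper name is illustrative.]

	static int my_is_stack_guard_page(struct vm_area_struct *vma,
					  unsigned long address)
	{
		address &= PAGE_MASK;
		return stack_guard_page_start(vma, address) ||
		       stack_guard_page_end(vma, address + PAGE_SIZE);
	}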
diff --git a/include/linux/module.h b/include/linux/module.h
index 5de42043dff0..d9ca2d5dc6d0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -64,6 +64,9 @@ struct module_version_attribute {
const char *version;
} __attribute__ ((__aligned__(sizeof(void *))));
+extern ssize_t __modver_version_show(struct module_attribute *,
+ struct module *, char *);
+
struct module_kobject
{
struct kobject kobj;
@@ -172,12 +175,7 @@ extern struct module __this_module;
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
#else
#define MODULE_VERSION(_version) \
- extern ssize_t __modver_version_show(struct module_attribute *, \
- struct module *, char *); \
- static struct module_version_attribute __modver_version_attr \
- __used \
- __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \
- = { \
+ static struct module_version_attribute ___modver_attr = { \
.mattr = { \
.attr = { \
.name = "version", \
@@ -187,7 +185,10 @@ extern struct module __this_module;
}, \
.module_name = KBUILD_MODNAME, \
.version = _version, \
- }
+ }; \
+ static const struct module_version_attribute \
+ __used __attribute__ ((__section__ ("__modver"))) \
+ * __moduleparam_const __modver_attr = &___modver_attr
#endif
/* Optional firmware file (or files) needed by the module
@@ -223,7 +224,7 @@ struct module_use {
extern void *__crc_##sym __attribute__((weak)); \
static const unsigned long __kcrctab_##sym \
__used \
- __attribute__((section("__kcrctab" sec), unused)) \
+ __attribute__((section("___kcrctab" sec "+" #sym), unused)) \
= (unsigned long) &__crc_##sym;
#else
#define __CRC_SYMBOL(sym, sec)
@@ -238,7 +239,7 @@ struct module_use {
= MODULE_SYMBOL_PREFIX #sym; \
static const struct kernel_symbol __ksymtab_##sym \
__used \
- __attribute__((section("__ksymtab" sec), unused)) \
+ __attribute__((section("___ksymtab" sec "+" #sym), unused)) \
= { (unsigned long)&sym, __kstrtab_##sym }
#define EXPORT_SYMBOL(sym) \
@@ -367,34 +368,35 @@ struct module
struct module_notes_attrs *notes_attrs;
#endif
+ /* The command line arguments (may be mangled). People like
+ keeping pointers to this stuff */
+ char *args;
+
#ifdef CONFIG_SMP
/* Per-cpu data. */
void __percpu *percpu;
unsigned int percpu_size;
#endif
- /* The command line arguments (may be mangled). People like
- keeping pointers to this stuff */
- char *args;
#ifdef CONFIG_TRACEPOINTS
- struct tracepoint * const *tracepoints_ptrs;
unsigned int num_tracepoints;
+ struct tracepoint * const *tracepoints_ptrs;
#endif
#ifdef HAVE_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
#endif
#ifdef CONFIG_TRACING
- const char **trace_bprintk_fmt_start;
unsigned int num_trace_bprintk_fmt;
+ const char **trace_bprintk_fmt_start;
#endif
#ifdef CONFIG_EVENT_TRACING
struct ftrace_event_call **trace_events;
unsigned int num_trace_events;
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
- unsigned long *ftrace_callsites;
unsigned int num_ftrace_callsites;
+ unsigned long *ftrace_callsites;
#endif
#ifdef CONFIG_MODULE_UNLOAD
@@ -475,8 +477,9 @@ const struct kernel_symbol *find_symbol(const char *name,
bool warn);
/* Walk the exported symbol table */
-bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
- unsigned int symnum, void *data), void *data);
+bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+ struct module *owner,
+ void *data), void *data);
/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
symnum out of range. */
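
Note: the MODULE_VERSION() rework stores only a pointer in the sorted "__modver" section and shares a single __modver_version_show() implementation, but the macro keeps its old call form, so existing modules need no change. A minimal usage sketch (the version string is arbitrary):

#include <linux/module.h>

MODULE_LICENSE("GPL");
/* Exposed as /sys/module/<name>/version; the attribute plumbing now lives
 * behind the shared __modver section instead of an open-coded struct. */
MODULE_VERSION("1.2.3");
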
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 07b41951e3fa..ddaae98c53f9 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -67,9 +67,9 @@ struct kparam_string {
struct kparam_array
{
unsigned int max;
+ unsigned int elemsize;
unsigned int *num;
const struct kernel_param_ops *ops;
- unsigned int elemsize;
void *elem;
};
@@ -371,8 +371,9 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
*/
#define module_param_array_named(name, array, type, nump, perm) \
static const struct kparam_array __param_arr_##name \
- = { ARRAY_SIZE(array), nump, &param_ops_##type, \
- sizeof(array[0]), array }; \
+ = { .max = ARRAY_SIZE(array), .num = nump, \
+ .ops = &param_ops_##type, \
+ .elemsize = sizeof(array[0]), .elem = array }; \
__module_param_call(MODULE_PARAM_PREFIX, name, \
&param_array_ops, \
.arr = &__param_arr_##name, \
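
Note: the kparam_array field reorder is paired with the switch to designated initializers in module_param_array_named(), so field order no longer matters to the macro. A hedged usage sketch (the parameter and array names are made up):

#include <linux/moduleparam.h>

static int ring_size[4] = { 64, 64, 64, 64 };
static unsigned int num_rings;

/* "rings=128,128" on the module command line fills ring_size[] and num_rings */
module_param_array_named(rings, ring_size, int, &num_rings, 0444);
MODULE_PARM_DESC(rings, "per-queue ring sizes");
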
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 94b48bd40dd7..c75471db576e 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -51,7 +51,7 @@ struct mutex {
spinlock_t wait_lock;
struct list_head wait_list;
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
- struct thread_info *owner;
+ struct task_struct *owner;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
const char *name;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 890dce242639..7e371f7df9c4 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -233,6 +233,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
struct pnfs_layout_segment **lsegpp;
+ gfp_t gfp_flags;
};
struct nfs4_getdeviceinfo_args {
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8bfe6c1d4365..ae5638480ef2 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -21,8 +21,7 @@ extern void of_device_make_bus_id(struct device *dev);
static inline int of_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
- dev->of_match = of_match_device(drv->of_match_table, dev);
- return dev->of_match != NULL;
+ return of_match_device(drv->of_match_table, dev) != NULL;
}
extern struct platform_device *of_dev_get(struct platform_device *dev);
@@ -58,6 +57,11 @@ static inline int of_device_uevent(struct device *dev,
static inline void of_device_node_put(struct device *dev) { }
+static inline const struct of_device_id *of_match_device(
+ const struct of_device_id *matches, const struct device *dev)
+{
+ return NULL;
+}
#endif /* CONFIG_OF_DEVICE */
#endif /* _LINUX_OF_DEVICE_H */
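
Note: since of_driver_match_device() no longer caches the match in dev->of_match, drivers that need the matched entry are expected to call of_match_device() themselves; the new !CONFIG_OF_DEVICE stub keeps that pattern compiling everywhere. A hedged probe-time sketch (driver, table and compatible string are made up):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo" },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_device(foo_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;	/* also taken when CONFIG_OF_DEVICE=n */

	/* match->data, if set in the table, selects per-compatible behaviour */
	return 0;
}
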
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
new file mode 100644
index 000000000000..655824fa4c76
--- /dev/null
+++ b/include/linux/pci-ats.h
@@ -0,0 +1,52 @@
+#ifndef LINUX_PCI_ATS_H
+#define LINUX_PCI_ATS_H
+
+/* Address Translation Service */
+struct pci_ats {
+ int pos; /* capability position */
+ int stu; /* Smallest Translation Unit */
+ int qdep; /* Invalidate Queue Depth */
+ int ref_cnt; /* Physical Function reference count */
+ unsigned int is_enabled:1; /* Enable bit is set */
+};
+
+#ifdef CONFIG_PCI_IOV
+
+extern int pci_enable_ats(struct pci_dev *dev, int ps);
+extern void pci_disable_ats(struct pci_dev *dev);
+extern int pci_ats_queue_depth(struct pci_dev *dev);
+/**
+ * pci_ats_enabled - query the ATS status
+ * @dev: the PCI device
+ *
+ * Returns 1 if ATS capability is enabled, or 0 if not.
+ */
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return dev->ats && dev->ats->is_enabled;
+}
+
+#else /* CONFIG_PCI_IOV */
+
+static inline int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ return -ENODEV;
+}
+
+static inline void pci_disable_ats(struct pci_dev *dev)
+{
+}
+
+static inline int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+
+#endif /* LINUX_PCI_ATS_H */
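
Note: the new header keeps the ATS bookkeeping next to its typical user, an IOMMU driver that enables ATS on a device and then sizes its invalidation queue from the device's queue depth. A hedged sketch (the attach helper is hypothetical and error handling is compressed):

#include <linux/pci.h>
#include <linux/pci-ats.h>

static int foo_iommu_enable_ats(struct pci_dev *pdev)
{
	int ret, qdep;

	/* second argument: smallest translation unit, as a page shift */
	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;			/* -ENODEV without CONFIG_PCI_IOV */

	qdep = pci_ats_queue_depth(pdev);	/* invalidate queue depth */
	if (qdep < 0) {
		pci_disable_ats(pdev);
		return qdep;
	}

	return pci_ats_enabled(pdev) ? 0 : -EIO;
}
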
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3a5c4449fd36..8b97308e65df 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -948,7 +948,7 @@ do { \
irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+ __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ee9f1e782800..3412684ce5d5 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -2,8 +2,8 @@
* Performance events:
*
* Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
*
* Data type definitions, declarations, prototypes.
*
@@ -52,6 +52,8 @@ enum perf_hw_id {
PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
PERF_COUNT_HW_BRANCH_MISSES = 5,
PERF_COUNT_HW_BUS_CYCLES = 6,
+ PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
+ PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
PERF_COUNT_HW_MAX, /* non-ABI */
};
@@ -468,9 +470,9 @@ enum perf_callchain_context {
PERF_CONTEXT_MAX = (__u64)-4095,
};
-#define PERF_FLAG_FD_NO_GROUP (1U << 0)
-#define PERF_FLAG_FD_OUTPUT (1U << 1)
-#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
+#define PERF_FLAG_FD_NO_GROUP (1U << 0)
+#define PERF_FLAG_FD_OUTPUT (1U << 1)
+#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
#ifdef __KERNEL__
/*
@@ -484,9 +486,9 @@ enum perf_callchain_context {
#endif
struct perf_guest_info_callbacks {
- int (*is_in_guest) (void);
- int (*is_user_mode) (void);
- unsigned long (*get_guest_ip) (void);
+ int (*is_in_guest)(void);
+ int (*is_user_mode)(void);
+ unsigned long (*get_guest_ip)(void);
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -505,7 +507,7 @@ struct perf_guest_info_callbacks {
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
-#include <linux/jump_label_ref.h>
+#include <linux/jump_label.h>
#include <asm/atomic.h>
#include <asm/local.h>
@@ -652,19 +654,19 @@ struct pmu {
* Start the transaction, after this ->add() doesn't need to
* do schedulability tests.
*/
- void (*start_txn) (struct pmu *pmu); /* optional */
+ void (*start_txn) (struct pmu *pmu); /* optional */
/*
* If ->start_txn() disabled the ->add() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*/
- int (*commit_txn) (struct pmu *pmu); /* optional */
+ int (*commit_txn) (struct pmu *pmu); /* optional */
/*
* Will cancel the transaction, assumes ->del() is called
* for each successful ->add() during the transaction.
*/
- void (*cancel_txn) (struct pmu *pmu); /* optional */
+ void (*cancel_txn) (struct pmu *pmu); /* optional */
};
/**
@@ -712,15 +714,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
struct pt_regs *regs);
enum perf_group_flag {
- PERF_GROUP_SOFTWARE = 0x1,
+ PERF_GROUP_SOFTWARE = 0x1,
};
-#define SWEVENT_HLIST_BITS 8
-#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
+#define SWEVENT_HLIST_BITS 8
+#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
struct swevent_hlist {
- struct hlist_head heads[SWEVENT_HLIST_SIZE];
- struct rcu_head rcu_head;
+ struct hlist_head heads[SWEVENT_HLIST_SIZE];
+ struct rcu_head rcu_head;
};
#define PERF_ATTACH_CONTEXT 0x01
@@ -733,13 +735,13 @@ struct swevent_hlist {
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
- u64 time;
- u64 timestamp;
+ u64 time;
+ u64 timestamp;
};
struct perf_cgroup {
- struct cgroup_subsys_state css;
- struct perf_cgroup_info *info; /* timing info, one per cpu */
+ struct cgroup_subsys_state css;
+ struct perf_cgroup_info *info; /* timing info, one per cpu */
};
#endif
@@ -923,7 +925,7 @@ struct perf_event_context {
/*
* Number of contexts where an event can trigger:
- * task, softirq, hardirq, nmi.
+ * task, softirq, hardirq, nmi.
*/
#define PERF_NR_CONTEXTS 4
@@ -1001,8 +1003,7 @@ struct perf_sample_data {
struct perf_raw_record *raw;
};
-static inline
-void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
data->addr = addr;
data->raw = NULL;
@@ -1034,13 +1035,12 @@ static inline int is_software_event(struct perf_event *event)
return event->pmu->task_ctx_nr == perf_sw_context;
}
-extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs
-static inline void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif
/*
@@ -1063,26 +1063,24 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
struct pt_regs hot_regs;
- JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
- return;
-
-have_event:
- if (!regs) {
- perf_fetch_caller_regs(&hot_regs);
- regs = &hot_regs;
+ if (static_branch(&perf_swevent_enabled[event_id])) {
+ if (!regs) {
+ perf_fetch_caller_regs(&hot_regs);
+ regs = &hot_regs;
+ }
+ __perf_sw_event(event_id, nr, nmi, regs, addr);
}
- __perf_sw_event(event_id, nr, nmi, regs, addr);
}
-extern atomic_t perf_sched_events;
+extern struct jump_label_key perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *task)
{
- COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
+ if (static_branch(&perf_sched_events))
+ __perf_event_task_sched_in(task);
}
-static inline
-void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
@@ -1100,14 +1098,10 @@ extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
-extern void perf_callchain_user(struct perf_callchain_entry *entry,
- struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
- struct pt_regs *regs);
-
+extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
-static inline void
-perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
@@ -1143,9 +1137,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record,
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
-#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \
- PERF_RECORD_MISC_KERNEL)
-#define perf_instruction_pointer(regs) instruction_pointer(regs)
+# define perf_misc_flags(regs) \
+ (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
+# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif
extern int perf_output_begin(struct perf_output_handle *handle,
@@ -1180,9 +1174,9 @@ static inline void
perf_bp_event(struct perf_event *event, void *data) { }
static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
@@ -1195,23 +1189,22 @@ static inline void perf_event_disable(struct perf_event *event) { }
static inline void perf_event_task_tick(void) { }
#endif
-#define perf_output_put(handle, x) \
- perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
/*
* This has to have a higher priority than migration_notifier in sched.c.
*/
-#define perf_cpu_notifier(fn) \
-do { \
- static struct notifier_block fn##_nb __cpuinitdata = \
- { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
- fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
- (void *)(unsigned long)smp_processor_id()); \
- fn(&fn##_nb, (unsigned long)CPU_STARTING, \
- (void *)(unsigned long)smp_processor_id()); \
- fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
- (void *)(unsigned long)smp_processor_id()); \
- register_cpu_notifier(&fn##_nb); \
+#define perf_cpu_notifier(fn) \
+do { \
+ static struct notifier_block fn##_nb __cpuinitdata = \
+ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
+ fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
+ (void *)(unsigned long)smp_processor_id()); \
+ fn(&fn##_nb, (unsigned long)CPU_STARTING, \
+ (void *)(unsigned long)smp_processor_id()); \
+ fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
+ (void *)(unsigned long)smp_processor_id()); \
+ register_cpu_notifier(&fn##_nb); \
} while (0)
#endif /* __KERNEL__ */
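
Note: the perf hunks above replace the atomic_t counters and the old JUMP_LABEL()/COND_STMT() pattern with struct jump_label_key and static_branch() from the rewritten jump-label core. Sketched outside perf with made-up names (foo_active, foo_do_accounting), the general pattern looks like this; where jump labels are supported, the branch compiles to a NOP until jump_label_inc() flips it:

#include <linux/jump_label.h>

extern void foo_do_accounting(void);		/* hypothetical slow path */

struct jump_label_key foo_active;		/* zero-initialised: disabled */

static inline void foo_hit(void)
{
	if (static_branch(&foo_active))		/* patched live, no load+test */
		foo_do_accounting();
}

void foo_enable(void)  { jump_label_inc(&foo_active); }
void foo_disable(void) { jump_label_dec(&foo_active); }
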
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 744942c95fec..ede1a80e3358 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -150,9 +150,6 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr
struct resource *res, unsigned int n_res,
const void *data, size_t size);
-extern const struct dev_pm_ops * platform_bus_get_pm_ops(void);
-extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm);
-
/* early platform driver interface */
struct early_platform_driver {
const char *class_str;
@@ -205,4 +202,64 @@ static inline char *early_platform_driver_setup_func(void) \
}
#endif /* MODULE */
+#ifdef CONFIG_PM_SLEEP
+extern int platform_pm_prepare(struct device *dev);
+extern void platform_pm_complete(struct device *dev);
+#else
+#define platform_pm_prepare NULL
+#define platform_pm_complete NULL
+#endif
+
+#ifdef CONFIG_SUSPEND
+extern int platform_pm_suspend(struct device *dev);
+extern int platform_pm_suspend_noirq(struct device *dev);
+extern int platform_pm_resume(struct device *dev);
+extern int platform_pm_resume_noirq(struct device *dev);
+#else
+#define platform_pm_suspend NULL
+#define platform_pm_resume NULL
+#define platform_pm_suspend_noirq NULL
+#define platform_pm_resume_noirq NULL
+#endif
+
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+extern int platform_pm_freeze(struct device *dev);
+extern int platform_pm_freeze_noirq(struct device *dev);
+extern int platform_pm_thaw(struct device *dev);
+extern int platform_pm_thaw_noirq(struct device *dev);
+extern int platform_pm_poweroff(struct device *dev);
+extern int platform_pm_poweroff_noirq(struct device *dev);
+extern int platform_pm_restore(struct device *dev);
+extern int platform_pm_restore_noirq(struct device *dev);
+#else
+#define platform_pm_freeze NULL
+#define platform_pm_thaw NULL
+#define platform_pm_poweroff NULL
+#define platform_pm_restore NULL
+#define platform_pm_freeze_noirq NULL
+#define platform_pm_thaw_noirq NULL
+#define platform_pm_poweroff_noirq NULL
+#define platform_pm_restore_noirq NULL
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#define USE_PLATFORM_PM_SLEEP_OPS \
+ .prepare = platform_pm_prepare, \
+ .complete = platform_pm_complete, \
+ .suspend = platform_pm_suspend, \
+ .resume = platform_pm_resume, \
+ .freeze = platform_pm_freeze, \
+ .thaw = platform_pm_thaw, \
+ .poweroff = platform_pm_poweroff, \
+ .restore = platform_pm_restore, \
+ .suspend_noirq = platform_pm_suspend_noirq, \
+ .resume_noirq = platform_pm_resume_noirq, \
+ .freeze_noirq = platform_pm_freeze_noirq, \
+ .thaw_noirq = platform_pm_thaw_noirq, \
+ .poweroff_noirq = platform_pm_poweroff_noirq, \
+ .restore_noirq = platform_pm_restore_noirq,
+#else
+#define USE_PLATFORM_PM_SLEEP_OPS
+#endif
+
#endif /* _PLATFORM_DEVICE_H_ */
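
Note: exporting the platform_pm_* callbacks and wrapping them in USE_PLATFORM_PM_SLEEP_OPS lets code layered on the platform bus (a SoC power domain, for instance) reuse the stock sleep handling while adding its own runtime PM methods. A hedged sketch; the runtime callbacks are placeholders:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_runtime_suspend(struct device *dev) { return 0; /* gate clocks here */ }
static int foo_runtime_resume(struct device *dev)  { return 0; /* ungate clocks here */ }

static const struct dev_pm_ops foo_pd_pm_ops = {
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume  = foo_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS	/* expands to nothing when CONFIG_PM_SLEEP=n */
};
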
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 512e09177e57..3160648ccdda 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -460,6 +460,7 @@ struct dev_pm_info {
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
+ void *subsys_data; /* Owned by the subsystem. */
#endif
};
@@ -529,21 +530,17 @@ struct dev_power_domain {
*/
#ifdef CONFIG_PM_SLEEP
-#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
-extern int sysdev_suspend(pm_message_t state);
-extern int sysdev_resume(void);
-#else
-static inline int sysdev_suspend(pm_message_t state) { return 0; }
-static inline int sysdev_resume(void) { return 0; }
-#endif
-
extern void device_pm_lock(void);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
+extern void dpm_resume(pm_message_t state);
+extern void dpm_complete(pm_message_t state);
extern void device_pm_unlock(void);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
+extern int dpm_suspend(pm_message_t state);
+extern int dpm_prepare(pm_message_t state);
extern void __suspend_report_result(const char *function, void *fn, int ret);
@@ -553,6 +550,16 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
} while (0)
extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
+
+extern int pm_generic_prepare(struct device *dev);
+extern int pm_generic_suspend(struct device *dev);
+extern int pm_generic_resume(struct device *dev);
+extern int pm_generic_freeze(struct device *dev);
+extern int pm_generic_thaw(struct device *dev);
+extern int pm_generic_restore(struct device *dev);
+extern int pm_generic_poweroff(struct device *dev);
+extern void pm_generic_complete(struct device *dev);
+
#else /* !CONFIG_PM_SLEEP */
#define device_pm_lock() do {} while (0)
@@ -569,6 +576,15 @@ static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
return 0;
}
+
+#define pm_generic_prepare NULL
+#define pm_generic_suspend NULL
+#define pm_generic_resume NULL
+#define pm_generic_freeze NULL
+#define pm_generic_thaw NULL
+#define pm_generic_restore NULL
+#define pm_generic_poweroff NULL
+#define pm_generic_complete NULL
#endif /* !CONFIG_PM_SLEEP */
/* How to reorder dpm_list after device_move() */
@@ -579,11 +595,4 @@ enum dpm_order {
DPM_ORDER_DEV_LAST,
};
-extern int pm_generic_suspend(struct device *dev);
-extern int pm_generic_resume(struct device *dev);
-extern int pm_generic_freeze(struct device *dev);
-extern int pm_generic_thaw(struct device *dev);
-extern int pm_generic_restore(struct device *dev);
-extern int pm_generic_poweroff(struct device *dev);
-
#endif /* _LINUX_PM_H */
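
Note: with the pm_generic_* helpers now declared under CONFIG_PM_SLEEP (and defined to NULL otherwise), a bus or class whose policy is simply "invoke the driver's callback if it has one" can wire them straight into its dev_pm_ops. A minimal sketch with a made-up bus:

#include <linux/device.h>
#include <linux/pm.h>

static const struct dev_pm_ops foo_bus_pm_ops = {
	.prepare  = pm_generic_prepare,
	.complete = pm_generic_complete,
	.suspend  = pm_generic_suspend,
	.resume   = pm_generic_resume,
	.freeze   = pm_generic_freeze,
	.thaw     = pm_generic_thaw,
	.poweroff = pm_generic_poweroff,
	.restore  = pm_generic_restore,
};

struct bus_type foo_bus_type = {
	.name = "foo",
	.pm   = &foo_bus_pm_ops,
};
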
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 8de9aa6e7def..878cf84baeb1 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -245,4 +245,46 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
__pm_runtime_use_autosuspend(dev, false);
}
+struct pm_clk_notifier_block {
+ struct notifier_block nb;
+ struct dev_power_domain *pwr_domain;
+ char *con_ids[];
+};
+
+#ifdef CONFIG_PM_RUNTIME_CLK
+extern int pm_runtime_clk_init(struct device *dev);
+extern void pm_runtime_clk_destroy(struct device *dev);
+extern int pm_runtime_clk_add(struct device *dev, const char *con_id);
+extern void pm_runtime_clk_remove(struct device *dev, const char *con_id);
+extern int pm_runtime_clk_suspend(struct device *dev);
+extern int pm_runtime_clk_resume(struct device *dev);
+#else
+static inline int pm_runtime_clk_init(struct device *dev)
+{
+ return -EINVAL;
+}
+static inline void pm_runtime_clk_destroy(struct device *dev)
+{
+}
+static inline int pm_runtime_clk_add(struct device *dev, const char *con_id)
+{
+ return -EINVAL;
+}
+static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+{
+}
+#define pm_runtime_clock_suspend NULL
+#define pm_runtime_clock_resume NULL
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+extern void pm_runtime_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb);
+#else
+static inline void pm_runtime_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb)
+{
+}
+#endif
+
#endif
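
Note: the runtime-PM clock helpers and pm_clk_notifier_block let platform code attach per-device clocks as devices appear on a bus; SoC setup code registers the notifier once at boot. A hedged sketch (the connection IDs are illustrative, and real users typically also point .pwr_domain at a struct dev_power_domain):

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static struct pm_clk_notifier_block foo_clk_notifier = {
	/* NULL-terminated list of clock connection IDs to manage per device */
	.con_ids = { "fck", "ick", NULL },
};

static int __init foo_pm_init(void)
{
	pm_runtime_clk_add_notifier(&platform_bus_type, &foo_clk_notifier);
	return 0;
}
core_initcall(foo_pm_init);
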
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 838c1149251a..eaf4350c0f90 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -208,6 +208,8 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
struct proc_dir_entry *parent,const char *dest) {return NULL;}
static inline struct proc_dir_entry *proc_mkdir(const char *name,
struct proc_dir_entry *parent) {return NULL;}
+static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
+ mode_t mode, struct proc_dir_entry *parent) { return NULL; }
static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
mode_t mode, struct proc_dir_entry *base,
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index a1147e5dd245..9178d5cc0b01 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -189,6 +189,10 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
child->ptrace = current->ptrace;
__ptrace_link(child, current->parent);
}
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ atomic_set(&child->ptrace_bp_refcnt, 1);
+#endif
}
/**
@@ -350,6 +354,13 @@ extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
-#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+extern int ptrace_get_breakpoints(struct task_struct *tsk);
+extern void ptrace_put_breakpoints(struct task_struct *tsk);
+#else
+static inline void ptrace_put_breakpoints(struct task_struct *tsk) { }
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+#endif /* __KERNEL__ */
#endif
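
Note: ptrace_get_breakpoints()/ptrace_put_breakpoints() pin the tracee's breakpoint state (via the new ptrace_bp_refcnt) while arch ptrace code manipulates its debug registers, so a dying thread cannot free them underneath the writer. A hedged sketch of the calling pattern; foo_write_debugreg() stands in for the arch-specific work:

#include <linux/ptrace.h>
#include <linux/sched.h>

extern int foo_write_debugreg(struct task_struct *child, int n,
			      unsigned long val);	/* hypothetical */

static int foo_set_debugreg(struct task_struct *child, int n, unsigned long val)
{
	int ret;

	if (ptrace_get_breakpoints(child) < 0)
		return -ESRCH;		/* tracee is already exiting */

	ret = foo_write_debugreg(child, n, val);

	ptrace_put_breakpoints(child);
	return ret;
}
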
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 2dea94fc4402..e3beb315517a 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -253,7 +253,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
*/
#define list_for_each_entry_rcu(pos, head, member) \
for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
+ &pos->member != (head); \
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
@@ -270,7 +270,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
*/
#define list_for_each_continue_rcu(pos, head) \
for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
- prefetch((pos)->next), (pos) != (head); \
+ (pos) != (head); \
(pos) = rcu_dereference_raw(list_next_rcu(pos)))
/**
@@ -284,7 +284,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
*/
#define list_for_each_entry_continue_rcu(pos, head, member) \
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
+ &pos->member != (head); \
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
@@ -427,7 +427,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
#define __hlist_for_each_rcu(pos, head) \
for (pos = rcu_dereference(hlist_first_rcu(head)); \
- pos && ({ prefetch(pos->next); 1; }); \
+ pos; \
pos = rcu_dereference(hlist_next_rcu(pos)))
/**
@@ -443,7 +443,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
*/
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \
- pos && ({ prefetch(pos->next); 1; }) && \
+ pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(hlist_next_rcu(pos)))
@@ -460,7 +460,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
*/
#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \
for (pos = rcu_dereference_bh((head)->first); \
- pos && ({ prefetch(pos->next); 1; }) && \
+ pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_bh(pos->next))
@@ -472,7 +472,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
*/
#define hlist_for_each_entry_continue_rcu(tpos, pos, member) \
for (pos = rcu_dereference((pos)->next); \
- pos && ({ prefetch(pos->next); 1; }) && \
+ pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference(pos->next))
@@ -484,7 +484,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
*/
#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \
for (pos = rcu_dereference_bh((pos)->next); \
- pos && ({ prefetch(pos->next); 1; }) && \
+ pos && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_bh(pos->next))
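
Note: removing prefetch() from the RCU list iterators changes only the generated code; callers keep the usual rcu_read_lock()-protected traversal. A minimal reader sketch with made-up types:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct foo {
	int id;
	struct list_head list;		/* on foo_list, updates under a writer lock */
};

static LIST_HEAD(foo_list);

static bool foo_exists(int id)
{
	struct foo *f;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(f, &foo_list, list) {
		if (f->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
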
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 18d63cea2848..12211e1666e2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
struct nsproxy;
struct user_namespace;
@@ -731,10 +731,6 @@ struct sched_info {
/* timestamps */
unsigned long long last_arrival,/* when we last ran on a cpu */
last_queued; /* when we were last queued to run */
-#ifdef CONFIG_SCHEDSTATS
- /* BKL stats */
- unsigned int bkl_count;
-#endif
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -868,6 +864,7 @@ static inline int sd_power_saving_flags(void)
struct sched_group {
struct sched_group *next; /* Must be a circular list */
+ atomic_t ref;
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -882,9 +879,6 @@ struct sched_group {
* NOTE: this field is variable length. (Allocated dynamically
* by attaching extra space to the end of the structure,
* depending on how many CPUs the kernel has booted up with)
- *
- * It is also be embedded into static data structures at build
- * time. (See 'struct static_sched_group' in kernel/sched.c)
*/
unsigned long cpumask[0];
};
@@ -894,17 +888,6 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
return to_cpumask(sg->cpumask);
}
-enum sched_domain_level {
- SD_LV_NONE = 0,
- SD_LV_SIBLING,
- SD_LV_MC,
- SD_LV_BOOK,
- SD_LV_CPU,
- SD_LV_NODE,
- SD_LV_ALLNODES,
- SD_LV_MAX
-};
-
struct sched_domain_attr {
int relax_domain_level;
};
@@ -913,6 +896,8 @@ struct sched_domain_attr {
.relax_domain_level = -1, \
}
+extern int sched_domain_level_max;
+
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
@@ -930,7 +915,7 @@ struct sched_domain {
unsigned int forkexec_idx;
unsigned int smt_gain;
int flags; /* See SD_* */
- enum sched_domain_level level;
+ int level;
/* Runtime fields. */
unsigned long last_balance; /* init to jiffies. units in jiffies */
@@ -973,6 +958,10 @@ struct sched_domain {
#ifdef CONFIG_SCHED_DEBUG
char *name;
#endif
+ union {
+ void *private; /* used during construction */
+ struct rcu_head rcu; /* used during destruction */
+ };
unsigned int span_weight;
/*
@@ -981,9 +970,6 @@ struct sched_domain {
* NOTE: this field is variable length. (Allocated dynamically
* by attaching extra space to the end of the structure,
* depending on how many CPUs the kernel has booted up with)
- *
- * It is also be embedded into static data structures at build
- * time. (See 'struct static_sched_domain' in kernel/sched.c)
*/
unsigned long span[0];
};
@@ -1048,8 +1034,12 @@ struct sched_domain;
#define WF_FORK 0x02 /* child wakeup after fork */
#define ENQUEUE_WAKEUP 1
-#define ENQUEUE_WAKING 2
-#define ENQUEUE_HEAD 4
+#define ENQUEUE_HEAD 2
+#ifdef CONFIG_SMP
+#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */
+#else
+#define ENQUEUE_WAKING 0
+#endif
#define DEQUEUE_SLEEP 1
@@ -1067,12 +1057,11 @@ struct sched_class {
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
- int (*select_task_rq)(struct rq *rq, struct task_struct *p,
- int sd_flag, int flags);
+ int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
- void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+ void (*task_waking) (struct task_struct *task);
void (*task_woken) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
@@ -1197,13 +1186,11 @@ struct task_struct {
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
- int lock_depth; /* BKL lock depth */
-
#ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
- int oncpu;
-#endif
+ struct task_struct *wake_entry;
+ int on_cpu;
#endif
+ int on_rq;
int prio, static_prio, normal_prio;
unsigned int rt_priority;
@@ -1274,6 +1261,7 @@ struct task_struct {
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
+ unsigned sched_contributes_to_load:1;
pid_t pid;
pid_t tgid;
@@ -1537,6 +1525,9 @@ struct task_struct {
unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
} memcg_batch;
#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ atomic_t ptrace_bp_refcnt;
+#endif
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -2060,14 +2051,13 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
-extern void wake_up_new_task(struct task_struct *tsk,
- unsigned long clone_flags);
+extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
@@ -2192,8 +2182,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
+void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
+static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
long match_state)
{
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e98cd2e57194..06d69648fc86 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
unsigned ret;
repeat:
- ret = sl->sequence;
- smp_rmb();
+ ret = ACCESS_ONCE(sl->sequence);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
}
+ smp_rmb();
return ret;
}
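
Note: read_seqbegin() now samples the counter with ACCESS_ONCE() and only issues the smp_rmb() once an even (non-write) value is seen; readers are otherwise unchanged and still loop on read_seqretry(). A minimal reader/writer pair over made-up data:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(foo_lock);
static u64 foo_a, foo_b;

static u64 foo_read_sum(void)
{
	unsigned seq;
	u64 sum;

	do {
		seq = read_seqbegin(&foo_lock);
		sum = foo_a + foo_b;	/* consistent snapshot, or retry */
	} while (read_seqretry(&foo_lock, seq));

	return sum;
}

static void foo_update(u64 a, u64 b)
{
	write_seqlock(&foo_lock);	/* writers serialise on the internal spinlock */
	foo_a = a;
	foo_b = b;
	write_sequnlock(&foo_lock);
}
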
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 9659eff52ca2..045f72ab5dfd 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -404,7 +404,9 @@ extern bool ssb_is_sprom_available(struct ssb_bus *bus);
/* Set a fallback SPROM.
* See kdoc at the function definition for complete documentation. */
-extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom);
+extern int ssb_arch_register_fallback_sprom(
+ int (*sprom_callback)(struct ssb_bus *bus,
+ struct ssb_sprom *out));
/* Suspend a SSB bus.
* Call this from the parent bus suspend routine. */
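
Note: the SSB fallback-SPROM interface becomes callback based: rather than copying one ssb_sprom up front, the platform registers a function that fills the SPROM for the bus asking for it. A hedged board-code sketch (foo_board_sprom and the bus check are illustrative):

#include <linux/init.h>
#include <linux/ssb/ssb.h>
#include <linux/string.h>

static struct ssb_sprom foo_board_sprom;	/* filled from board data elsewhere */

static int foo_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
{
	/* only serve the onboard controller; let anything else fail normally */
	if (bus->bustype != SSB_BUSTYPE_PCI)
		return -ENOENT;

	memcpy(out, &foo_board_sprom, sizeof(*out));
	return 0;
}

static int __init foo_sprom_init(void)
{
	return ssb_arch_register_fallback_sprom(foo_get_fallback_sprom);
}
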
diff --git a/include/linux/string.h b/include/linux/string.h
index a716ee2a8adb..a176db2f2c85 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -123,6 +123,7 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
+extern int strtobool(const char *s, bool *res);
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
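
Note: strtobool() gives sysfs-style store handlers a single parser for "0/1/y/n/Y/N" input instead of each open-coding it. A hedged sketch of a device attribute store method (the attribute and foo_set_enabled() are made up):

#include <linux/device.h>
#include <linux/string.h>

extern void foo_set_enabled(struct device *dev, bool on);	/* hypothetical */

static ssize_t foo_enable_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	bool enable;

	if (strtobool(buf, &enable))
		return -EINVAL;		/* anything but 0/1/y/n/Y/N */

	foo_set_enabled(dev, enable);
	return count;
}
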
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h
index dfb078db8ebb..d35e783a598c 100644
--- a/include/linux/sysdev.h
+++ b/include/linux/sysdev.h
@@ -34,12 +34,6 @@ struct sysdev_class {
struct list_head drivers;
struct sysdev_class_attribute **attrs;
struct kset kset;
-#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
- /* Default operations for these types of devices */
- int (*shutdown)(struct sys_device *);
- int (*suspend)(struct sys_device *, pm_message_t state);
- int (*resume)(struct sys_device *);
-#endif
};
struct sysdev_class_attribute {
@@ -77,11 +71,6 @@ struct sysdev_driver {
struct list_head entry;
int (*add)(struct sys_device *);
int (*remove)(struct sys_device *);
-#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
- int (*shutdown)(struct sys_device *);
- int (*suspend)(struct sys_device *, pm_message_t state);
- int (*resume)(struct sys_device *);
-#endif
};
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 97c84a58efb8..d530a4460a0b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -29,7 +29,7 @@ struct tracepoint_func {
struct tracepoint {
const char *name; /* Tracepoint name */
- int state; /* State. */
+ struct jump_label_key key;
void (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs;
@@ -146,9 +146,7 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin,
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
- JUMP_LABEL(&__tracepoint_##name.state, do_trace); \
- return; \
-do_trace: \
+ if (static_branch(&__tracepoint_##name.key)) \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
@@ -176,14 +174,14 @@ do_trace: \
* structures, so we create an array of pointers that will be used for iteration
* on the tracepoints.
*/
-#define DEFINE_TRACE_FN(name, reg, unreg) \
- static const char __tpstrtab_##name[] \
- __attribute__((section("__tracepoints_strings"))) = #name; \
- struct tracepoint __tracepoint_##name \
- __attribute__((section("__tracepoints"))) = \
- { __tpstrtab_##name, 0, reg, unreg, NULL }; \
- static struct tracepoint * const __tracepoint_ptr_##name __used \
- __attribute__((section("__tracepoints_ptrs"))) = \
+#define DEFINE_TRACE_FN(name, reg, unreg) \
+ static const char __tpstrtab_##name[] \
+ __attribute__((section("__tracepoints_strings"))) = #name; \
+ struct tracepoint __tracepoint_##name \
+ __attribute__((section("__tracepoints"))) = \
+ { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\
+ static struct tracepoint * const __tracepoint_ptr_##name __used \
+ __attribute__((section("__tracepoints_ptrs"))) = \
&__tracepoint_##name;
#define DEFINE_TRACE(name) \
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 0e1855079fbb..605b0aa8d852 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -68,6 +68,7 @@ struct usbnet {
# define EVENT_RX_PAUSED 5
# define EVENT_DEV_WAKING 6
# define EVENT_DEV_ASLEEP 7
+# define EVENT_DEV_OPEN 8
};
static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 88bdd010d65d..2fa8d1341a0a 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -38,9 +38,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
return outer;
}
-#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
-#define INET_ECN_dontxmit(sk) \
- do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
+static inline void INET_ECN_xmit(struct sock *sk)
+{
+ inet_sk(sk)->tos |= INET_ECN_ECT_0;
+ if (inet6_sk(sk) != NULL)
+ inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
+static inline void INET_ECN_dontxmit(struct sock *sk)
+{
+ inet_sk(sk)->tos &= ~INET_ECN_MASK;
+ if (inet6_sk(sk) != NULL)
+ inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
+}
#define IP6_ECN_flow_init(label) do { \
(label) &= ~htonl(INET_ECN_MASK << 20); \
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index d516f00c8e0f..86aefed6140b 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -791,6 +791,7 @@ struct ip_vs_app {
/* IPVS in network namespace */
struct netns_ipvs {
int gen; /* Generation */
+ int enable; /* enable like nf_hooks do */
/*
* Hash table: for real service lookups
*/
@@ -1089,6 +1090,22 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}
+/*
+ * IPVS netns init & cleanup functions
+ */
+extern int __ip_vs_estimator_init(struct net *net);
+extern int __ip_vs_control_init(struct net *net);
+extern int __ip_vs_protocol_init(struct net *net);
+extern int __ip_vs_app_init(struct net *net);
+extern int __ip_vs_conn_init(struct net *net);
+extern int __ip_vs_sync_init(struct net *net);
+extern void __ip_vs_conn_cleanup(struct net *net);
+extern void __ip_vs_app_cleanup(struct net *net);
+extern void __ip_vs_protocol_cleanup(struct net *net);
+extern void __ip_vs_control_cleanup(struct net *net);
+extern void __ip_vs_estimator_cleanup(struct net *net);
+extern void __ip_vs_sync_cleanup(struct net *net);
+extern void __ip_vs_service_cleanup(struct net *net);
/*
* IPVS application functions
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 75b8e2968c9b..f57e7d46a453 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -199,7 +199,7 @@ struct llc_pdu_sn {
u8 ssap;
u8 ctrl_1;
u8 ctrl_2;
-};
+} __packed;
static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
{
@@ -211,7 +211,7 @@ struct llc_pdu_un {
u8 dsap;
u8 ssap;
u8 ctrl_1;
-};
+} __packed;
static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
{
@@ -359,7 +359,7 @@ struct llc_xid_info {
u8 fmt_id; /* always 0x81 for LLC */
u8 type; /* different if NULL/non-NULL LSAP */
u8 rw; /* sender receive window */
-};
+} __packed;
/**
* llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
@@ -415,7 +415,7 @@ struct llc_frmr_info {
u8 curr_ssv; /* current send state variable val */
u8 curr_rsv; /* current receive state variable */
u8 ind_bits; /* indicator bits set with macro */
-};
+} __packed;
extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 6ae4bc5ce8a7..20afeaa39395 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -324,6 +324,7 @@ struct xfrm_state_afinfo {
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
int (*output)(struct sk_buff *skb);
+ int (*output_finish)(struct sk_buff *skb);
int (*extract_input)(struct xfrm_state *x,
struct sk_buff *skb);
int (*extract_output)(struct xfrm_state *x,
@@ -1454,6 +1455,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm4_output(struct sk_buff *skb);
+extern int xfrm4_output_finish(struct sk_buff *skb);
extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm6_extract_header(struct sk_buff *skb);
@@ -1470,6 +1472,7 @@ extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr);
extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm6_output(struct sk_buff *skb);
+extern int xfrm6_output_finish(struct sk_buff *skb);
extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
u8 **prevhdr);
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index cbb822e8d791..2d0191c90f9e 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -46,18 +46,9 @@ enum iw_cm_event_type {
IW_CM_EVENT_CLOSE /* close complete */
};
-enum iw_cm_event_status {
- IW_CM_EVENT_STATUS_OK = 0, /* request successful */
- IW_CM_EVENT_STATUS_ACCEPTED = 0, /* connect request accepted */
- IW_CM_EVENT_STATUS_REJECTED, /* connect request rejected */
- IW_CM_EVENT_STATUS_TIMEOUT, /* the operation timed out */
- IW_CM_EVENT_STATUS_RESET, /* reset from remote peer */
- IW_CM_EVENT_STATUS_EINVAL, /* asynchronous failure for bad parm */
-};
-
struct iw_cm_event {
enum iw_cm_event_type event;
- enum iw_cm_event_status status;
+ int status;
struct sockaddr_in local_addr;
struct sockaddr_in remote_addr;
void *private_data;
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 4fae90304648..169f7a53fb0c 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -329,4 +329,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr);
*/
void rdma_set_service_type(struct rdma_cm_id *id, int tos);
+/**
+ * rdma_set_reuseaddr - Allow the reuse of local addresses when binding
+ * the rdma_cm_id.
+ * @id: Communication identifier to configure.
+ * @reuse: Value indicating if the bound address is reusable.
+ *
+ * Reuse must be set before an address is bound to the id.
+ */
+int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse);
+
#endif /* RDMA_CM_H */
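
Note: rdma_set_reuseaddr() is the RDMA-CM counterpart of SO_REUSEADDR and, as the kdoc says, must be applied before the id is bound. A hedged kernel-side sketch of a listener using it:

#include <rdma/rdma_cm.h>

static int foo_listen(struct rdma_cm_id *id, struct sockaddr *addr)
{
	int ret;

	ret = rdma_set_reuseaddr(id, 1);	/* before rdma_bind_addr() */
	if (ret)
		return ret;

	ret = rdma_bind_addr(id, addr);
	if (ret)
		return ret;

	return rdma_listen(id, 0);		/* backlog chosen arbitrarily here */
}
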
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
index 1d165022c02d..fc82c1896f75 100644
--- a/include/rdma/rdma_user_cm.h
+++ b/include/rdma/rdma_user_cm.h
@@ -221,8 +221,9 @@ enum {
/* Option details */
enum {
- RDMA_OPTION_ID_TOS = 0,
- RDMA_OPTION_IB_PATH = 1
+ RDMA_OPTION_ID_TOS = 0,
+ RDMA_OPTION_ID_REUSEADDR = 1,
+ RDMA_OPTION_IB_PATH = 1
};
struct rdma_ucm_set_option {
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 2d3ec5094685..dd82e02ddde3 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -169,6 +169,7 @@ struct scsi_device {
sdev_dev;
struct execute_work ew; /* used to get process context on put */
+ struct work_struct requeue_work;
struct scsi_dh_data *scsi_dh_data;
enum scsi_device_state sdev_state;
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h
index e3615c093741..9fe3a36646e9 100644
--- a/include/trace/events/gfpflags.h
+++ b/include/trace/events/gfpflags.h
@@ -10,6 +10,7 @@
*/
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
+ {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
{(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
{(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
{(unsigned long)GFP_USER, "GFP_USER"}, \
@@ -32,6 +33,9 @@
{(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
{(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
{(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
- {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \
+ {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
+ {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
+ {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
+ {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \
) : "GFP_NOWAIT"
diff --git a/include/xen/events.h b/include/xen/events.h
index f1b87ad48ac7..9af21e19545a 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -85,7 +85,8 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
/* Bind an MSI pirq to an irq. */
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
- int pirq, int vector, const char *name);
+ int pirq, int vector, const char *name,
+ domid_t domid);
#endif
/* De-allocates the above mentioned physical interrupt. */
@@ -94,4 +95,10 @@ int xen_destroy_irq(int irq);
/* Return irq from pirq */
int xen_irq_from_pirq(unsigned pirq);
+/* Return the pirq allocated to the irq. */
+int xen_pirq_from_irq(unsigned irq);
+
+/* Determine whether to ignore this IRQ if it is passed to a guest. */
+int xen_test_irq_shared(int irq);
+
#endif /* _XEN_EVENTS_H */