Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acoutput.h | 2
-rw-r--r--  include/acpi/acpixf.h | 6
-rw-r--r--  include/acpi/processor.h | 9
-rw-r--r--  include/asm-generic/qspinlock.h | 5
-rw-r--r--  include/asm-generic/qspinlock_types.h | 5
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 1
-rw-r--r--  include/dt-bindings/pinctrl/mt7623-pinfunc.h | 520
-rw-r--r--  include/kvm/arm_arch_timer.h | 5
-rw-r--r--  include/kvm/arm_pmu.h | 110
-rw-r--r--  include/kvm/arm_vgic.h | 8
-rw-r--r--  include/linux/atomic.h | 21
-rw-r--r--  include/linux/auto_dev-ioctl.h | 6
-rw-r--r--  include/linux/auto_fs.h | 10
-rw-r--r--  include/linux/cache.h | 14
-rw-r--r--  include/linux/clockchips.h | 4
-rw-r--r--  include/linux/clocksource.h | 45
-rw-r--r--  include/linux/compiler.h | 17
-rw-r--r--  include/linux/cpu.h | 27
-rw-r--r--  include/linux/cpufreq.h | 47
-rw-r--r--  include/linux/cpuhotplug.h | 93
-rw-r--r--  include/linux/dma-mapping.h | 25
-rw-r--r--  include/linux/fault-inject.h | 5
-rw-r--r--  include/linux/ftrace.h | 12
-rw-r--r--  include/linux/gfp.h | 12
-rw-r--r--  include/linux/init.h | 4
-rw-r--r--  include/linux/interrupt.h | 10
-rw-r--r--  include/linux/ioport.h | 33
-rw-r--r--  include/linux/irq.h | 27
-rw-r--r--  include/linux/irqchip/mips-gic.h | 3
-rw-r--r--  include/linux/irqdomain.h | 45
-rw-r--r--  include/linux/kvm_host.h | 5
-rw-r--r--  include/linux/latencytop.h | 3
-rw-r--r--  include/linux/leds.h | 8
-rw-r--r--  include/linux/lockdep.h | 2
-rw-r--r--  include/linux/memcontrol.h | 103
-rw-r--r--  include/linux/memory.h | 3
-rw-r--r--  include/linux/memory_hotplug.h | 7
-rw-r--r--  include/linux/mfd/max77686-private.h | 3
-rw-r--r--  include/linux/migrate.h | 6
-rw-r--r--  include/linux/mm.h | 42
-rw-r--r--  include/linux/mm_types.h | 22
-rw-r--r--  include/linux/mmdebug.h | 3
-rw-r--r--  include/linux/mmzone.h | 18
-rw-r--r--  include/linux/notifier.h | 2
-rw-r--r--  include/linux/page_ext.h | 1
-rw-r--r--  include/linux/page_owner.h | 50
-rw-r--r--  include/linux/pagemap.h | 3
-rw-r--r--  include/linux/perf_event.h | 7
-rw-r--r--  include/linux/platform_data/ntc_thermistor.h | 1
-rw-r--r--  include/linux/pm_domain.h | 13
-rw-r--r--  include/linux/pm_opp.h | 27
-rw-r--r--  include/linux/poison.h | 4
-rw-r--r--  include/linux/posix-timers.h | 3
-rw-r--r--  include/linux/pps_kernel.h | 17
-rw-r--r--  include/linux/ptp_clock_kernel.h | 8
-rw-r--r--  include/linux/pxa2xx_ssp.h | 1
-rw-r--r--  include/linux/rcupdate.h | 6
-rw-r--r--  include/linux/regmap.h | 107
-rw-r--r--  include/linux/regulator/act8865.h | 4
-rw-r--r--  include/linux/regulator/driver.h | 17
-rw-r--r--  include/linux/regulator/lp872x.h | 5
-rw-r--r--  include/linux/regulator/machine.h | 12
-rw-r--r--  include/linux/rtc.h | 4
-rw-r--r--  include/linux/sched.h | 34
-rw-r--r--  include/linux/sched/sysctl.h | 4
-rw-r--r--  include/linux/slab.h | 13
-rw-r--r--  include/linux/slab_def.h | 3
-rw-r--r--  include/linux/slub_def.h | 1
-rw-r--r--  include/linux/spi/spi.h | 145
-rw-r--r--  include/linux/srcu.h | 19
-rw-r--r--  include/linux/swait.h | 172
-rw-r--r--  include/linux/tick.h | 97
-rw-r--r--  include/linux/timekeeper_internal.h | 2
-rw-r--r--  include/linux/timekeeping.h | 58
-rw-r--r--  include/linux/trace_events.h | 10
-rw-r--r--  include/linux/tracepoint-defs.h | 14
-rw-r--r--  include/linux/wait.h | 2
-rw-r--r--  include/trace/events/btrfs.h | 2
-rw-r--r--  include/trace/events/compaction.h | 2
-rw-r--r--  include/trace/events/cpuhp.h | 66
-rw-r--r--  include/trace/events/gfpflags.h | 43
-rw-r--r--  include/trace/events/huge_memory.h | 2
-rw-r--r--  include/trace/events/kmem.h | 2
-rw-r--r--  include/trace/events/kvm.h | 9
-rw-r--r--  include/trace/events/mmflags.h | 164
-rw-r--r--  include/trace/events/power.h | 22
-rw-r--r--  include/trace/events/timer.h | 36
-rw-r--r--  include/trace/events/vmscan.h | 2
-rw-r--r--  include/uapi/linux/auto_fs.h | 21
-rw-r--r--  include/uapi/linux/auto_fs4.h | 17
-rw-r--r--  include/uapi/linux/kvm.h | 19
-rw-r--r--  include/uapi/linux/ptp_clock.h | 13
92 files changed, 2215 insertions, 427 deletions
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 5bfc61943f88..34f601e7b88d 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -262,7 +262,7 @@
#define ACPI_GET_FUNCTION_NAME _acpi_function_name
/*
- * The Name parameter should be the procedure name as a quoted string.
+ * The Name parameter should be the procedure name as a non-quoted string.
* The function name is also used by the function exit macros below.
* Note: (const char) is used to be compatible with the debug interfaces
* and macros such as __func__.
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index c96621e87c19..17556979dc79 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -897,11 +897,9 @@ ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
acpi_warning(const char *module_name,
u32 line_number,
const char *format, ...))
-ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
+ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
void ACPI_INTERNAL_VAR_XFACE
- acpi_info(const char *module_name,
- u32 line_number,
- const char *format, ...))
+ acpi_info(const char *format, ...))
ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
acpi_bios_error(const char *module_name,
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 07fb100bcc68..6f1805dd5d3c 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -9,6 +9,7 @@
#define ACPI_PROCESSOR_CLASS "processor"
#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
+#define ACPI_PROCESSOR_CONTAINER_HID "ACPI0010"
#define ACPI_PROCESSOR_BUSY_METRIC 10
@@ -394,14 +395,6 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
}
#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
-#if defined(CONFIG_PM_SLEEP) & defined(CONFIG_ACPI_PROCESSOR_IDLE)
-void acpi_processor_syscore_init(void);
-void acpi_processor_syscore_exit(void);
-#else
-static inline void acpi_processor_syscore_init(void) {}
-static inline void acpi_processor_syscore_exit(void) {}
-#endif
-
/* in processor_thermal.c */
int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 39e1cb201b8e..35a52a880b2f 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -120,11 +120,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
#endif
/*
- * Initializier
- */
-#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
-
-/*
* Remapping spinlock architecture specific functions to the corresponding
* queued spinlock functions.
*/
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 85f888e86761..034acd0c4956 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -33,6 +33,11 @@ typedef struct qspinlock {
} arch_spinlock_t;
/*
+ * Initializier
+ */
+#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
+
+/*
* Bitfields in the atomic value:
*
* When NR_CPUS < 16K
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c4bd0e2c173c..772c784ba763 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -256,6 +256,7 @@
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
+ *(.data..ro_after_init) /* Read only after init */ \
*(__vermagic) /* Kernel version magic */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
new file mode 100644
index 000000000000..2f00bdc42442
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
@@ -0,0 +1,520 @@
+#ifndef __DTS_MT7623_PINFUNC_H
+#define __DTS_MT7623_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDO (MTK_PIN_NO(0) | 1)
+#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDI (MTK_PIN_NO(0) | 2)
+
+#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDI (MTK_PIN_NO(1) | 1)
+#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDO (MTK_PIN_NO(1) | 2)
+
+#define MT7623_PIN_2_PWRAP_INT_FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT7623_PIN_2_PWRAP_INT_FUNC_PWRAP_INT (MTK_PIN_NO(2) | 1)
+
+#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_PWRAP_SPICK_I (MTK_PIN_NO(3) | 1)
+
+#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_PWRAP_SPICS_B_I (MTK_PIN_NO(4) | 1)
+
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1)
+
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1)
+
+#define MT7623_PIN_7_SPI1_CSN_FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1)
+
+#define MT7623_PIN_8_SPI1_MI_FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MI (MTK_PIN_NO(8) | 1)
+#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MO (MTK_PIN_NO(8) | 2)
+
+#define MT7623_PIN_9_SPI1_MO_FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MO (MTK_PIN_NO(9) | 1)
+#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MI (MTK_PIN_NO(9) | 2)
+
+#define MT7623_PIN_10_RTC32K_CK_FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT7623_PIN_10_RTC32K_CK_FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1)
+
+#define MT7623_PIN_11_WATCHDOG_FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT7623_PIN_11_WATCHDOG_FUNC_WATCHDOG (MTK_PIN_NO(11) | 1)
+
+#define MT7623_PIN_12_SRCLKENA_FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT7623_PIN_12_SRCLKENA_FUNC_SRCLKENA (MTK_PIN_NO(12) | 1)
+
+#define MT7623_PIN_13_SRCLKENAI_FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT7623_PIN_13_SRCLKENAI_FUNC_SRCLKENAI (MTK_PIN_NO(13) | 1)
+
+#define MT7623_PIN_14_GPIO14_FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT7623_PIN_14_GPIO14_FUNC_URXD2 (MTK_PIN_NO(14) | 1)
+#define MT7623_PIN_14_GPIO14_FUNC_UTXD2 (MTK_PIN_NO(14) | 2)
+
+#define MT7623_PIN_15_GPIO15_FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT7623_PIN_15_GPIO15_FUNC_UTXD2 (MTK_PIN_NO(15) | 1)
+#define MT7623_PIN_15_GPIO15_FUNC_URXD2 (MTK_PIN_NO(15) | 2)
+
+#define MT7623_PIN_18_PCM_CLK_FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT7623_PIN_18_PCM_CLK_FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1)
+#define MT7623_PIN_18_PCM_CLK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(18) | 6)
+
+#define MT7623_PIN_19_PCM_SYNC_FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_AP_PCM_SYNC (MTK_PIN_NO(19) | 6)
+
+#define MT7623_PIN_20_PCM_RX_FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT7623_PIN_20_PCM_RX_FUNC_PCM_RX (MTK_PIN_NO(20) | 1)
+#define MT7623_PIN_20_PCM_RX_FUNC_PCM_TX (MTK_PIN_NO(20) | 4)
+#define MT7623_PIN_20_PCM_RX_FUNC_AP_PCM_RX (MTK_PIN_NO(20) | 6)
+
+#define MT7623_PIN_21_PCM_TX_FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT7623_PIN_21_PCM_TX_FUNC_PCM_TX (MTK_PIN_NO(21) | 1)
+#define MT7623_PIN_21_PCM_TX_FUNC_PCM_RX (MTK_PIN_NO(21) | 4)
+#define MT7623_PIN_21_PCM_TX_FUNC_AP_PCM_TX (MTK_PIN_NO(21) | 6)
+
+#define MT7623_PIN_22_EINT0_FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT7623_PIN_22_EINT0_FUNC_UCTS0 (MTK_PIN_NO(22) | 1)
+#define MT7623_PIN_22_EINT0_FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 2)
+
+#define MT7623_PIN_23_EINT1_FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT7623_PIN_23_EINT1_FUNC_URTS0 (MTK_PIN_NO(23) | 1)
+#define MT7623_PIN_23_EINT1_FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 2)
+
+#define MT7623_PIN_24_EINT2_FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT7623_PIN_24_EINT2_FUNC_UCTS1 (MTK_PIN_NO(24) | 1)
+#define MT7623_PIN_24_EINT2_FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 2)
+
+#define MT7623_PIN_25_EINT3_FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT7623_PIN_25_EINT3_FUNC_URTS1 (MTK_PIN_NO(25) | 1)
+
+#define MT7623_PIN_26_EINT4_FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT7623_PIN_26_EINT4_FUNC_UCTS3 (MTK_PIN_NO(26) | 1)
+#define MT7623_PIN_26_EINT4_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6)
+
+#define MT7623_PIN_27_EINT5_FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT7623_PIN_27_EINT5_FUNC_URTS3 (MTK_PIN_NO(27) | 1)
+#define MT7623_PIN_27_EINT5_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6)
+
+#define MT7623_PIN_28_EINT6_FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT7623_PIN_28_EINT6_FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1)
+#define MT7623_PIN_28_EINT6_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6)
+
+#define MT7623_PIN_29_EINT7_FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT7623_PIN_29_EINT7_FUNC_IDDIG (MTK_PIN_NO(29) | 1)
+#define MT7623_PIN_29_EINT7_FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2)
+#define MT7623_PIN_29_EINT7_FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 6)
+
+#define MT7623_PIN_33_I2S1_DATA_FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_PCM_TX (MTK_PIN_NO(33) | 3)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_AP_PCM_TX (MTK_PIN_NO(33) | 6)
+
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(34) | 3)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_AP_PCM_RX (MTK_PIN_NO(34) | 6)
+
+#define MT7623_PIN_35_I2S1_BCK_FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT7623_PIN_35_I2S1_BCK_FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1)
+#define MT7623_PIN_35_I2S1_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3)
+#define MT7623_PIN_35_I2S1_BCK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(35) | 6)
+
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1)
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3)
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_AP_PCM_SYNC (MTK_PIN_NO(36) | 6)
+
+#define MT7623_PIN_37_I2S1_MCLK_FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT7623_PIN_37_I2S1_MCLK_FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1)
+
+#define MT7623_PIN_39_JTMS_FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT7623_PIN_39_JTMS_FUNC_JTMS (MTK_PIN_NO(39) | 1)
+
+#define MT7623_PIN_40_JTCK_FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT7623_PIN_40_JTCK_FUNC_JTCK (MTK_PIN_NO(40) | 1)
+
+#define MT7623_PIN_41_JTDI_FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT7623_PIN_41_JTDI_FUNC_JTDI (MTK_PIN_NO(41) | 1)
+
+#define MT7623_PIN_42_JTDO_FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT7623_PIN_42_JTDO_FUNC_JTDO (MTK_PIN_NO(42) | 1)
+
+#define MT7623_PIN_43_NCLE_FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT7623_PIN_43_NCLE_FUNC_NCLE (MTK_PIN_NO(43) | 1)
+#define MT7623_PIN_43_NCLE_FUNC_EXT_XCS2 (MTK_PIN_NO(43) | 2)
+
+#define MT7623_PIN_44_NCEB1_FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT7623_PIN_44_NCEB1_FUNC_NCEB1 (MTK_PIN_NO(44) | 1)
+#define MT7623_PIN_44_NCEB1_FUNC_IDDIG (MTK_PIN_NO(44) | 2)
+
+#define MT7623_PIN_45_NCEB0_FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT7623_PIN_45_NCEB0_FUNC_NCEB0 (MTK_PIN_NO(45) | 1)
+#define MT7623_PIN_45_NCEB0_FUNC_DRV_VBUS (MTK_PIN_NO(45) | 2)
+
+#define MT7623_PIN_46_IR_FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT7623_PIN_46_IR_FUNC_IR (MTK_PIN_NO(46) | 1)
+
+#define MT7623_PIN_47_NREB_FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT7623_PIN_47_NREB_FUNC_NREB (MTK_PIN_NO(47) | 1)
+
+#define MT7623_PIN_48_NRNB_FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT7623_PIN_48_NRNB_FUNC_NRNB (MTK_PIN_NO(48) | 1)
+
+#define MT7623_PIN_49_I2S0_DATA_FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1)
+#define MT7623_PIN_49_I2S0_DATA_FUNC_PCM_TX (MTK_PIN_NO(49) | 3)
+#define MT7623_PIN_49_I2S0_DATA_FUNC_AP_I2S_DO (MTK_PIN_NO(49) | 6)
+
+#define MT7623_PIN_53_SPI0_CSN_FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_SPI0_CS (MTK_PIN_NO(53) | 1)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_PWM1 (MTK_PIN_NO(53) | 5)
+
+#define MT7623_PIN_54_SPI0_CK_FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT7623_PIN_54_SPI0_CK_FUNC_SPI0_CK (MTK_PIN_NO(54) | 1)
+
+#define MT7623_PIN_55_SPI0_MI_FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MI (MTK_PIN_NO(55) | 1)
+#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MO (MTK_PIN_NO(55) | 2)
+#define MT7623_PIN_55_SPI0_MI_FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3)
+#define MT7623_PIN_55_SPI0_MI_FUNC_PWM2 (MTK_PIN_NO(55) | 5)
+
+#define MT7623_PIN_56_SPI0_MO_FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1)
+#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2)
+
+#define MT7623_PIN_60_WB_RSTB_FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT7623_PIN_60_WB_RSTB_FUNC_WB_RSTB (MTK_PIN_NO(60) | 1)
+
+#define MT7623_PIN_61_GPIO61_FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT7623_PIN_61_GPIO61_FUNC_TEST_FD (MTK_PIN_NO(61) | 1)
+
+#define MT7623_PIN_62_GPIO62_FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT7623_PIN_62_GPIO62_FUNC_TEST_FC (MTK_PIN_NO(62) | 1)
+
+#define MT7623_PIN_63_WB_SCLK_FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT7623_PIN_63_WB_SCLK_FUNC_WB_SCLK (MTK_PIN_NO(63) | 1)
+
+#define MT7623_PIN_64_WB_SDATA_FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT7623_PIN_64_WB_SDATA_FUNC_WB_SDATA (MTK_PIN_NO(64) | 1)
+
+#define MT7623_PIN_65_WB_SEN_FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT7623_PIN_65_WB_SEN_FUNC_WB_SEN (MTK_PIN_NO(65) | 1)
+
+#define MT7623_PIN_66_WB_CRTL0_FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT7623_PIN_66_WB_CRTL0_FUNC_WB_CRTL0 (MTK_PIN_NO(66) | 1)
+
+#define MT7623_PIN_67_WB_CRTL1_FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT7623_PIN_67_WB_CRTL1_FUNC_WB_CRTL1 (MTK_PIN_NO(67) | 1)
+
+#define MT7623_PIN_68_WB_CRTL2_FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT7623_PIN_68_WB_CRTL2_FUNC_WB_CRTL2 (MTK_PIN_NO(68) | 1)
+
+#define MT7623_PIN_69_WB_CRTL3_FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT7623_PIN_69_WB_CRTL3_FUNC_WB_CRTL3 (MTK_PIN_NO(69) | 1)
+
+#define MT7623_PIN_70_WB_CRTL4_FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT7623_PIN_70_WB_CRTL4_FUNC_WB_CRTL4 (MTK_PIN_NO(70) | 1)
+
+#define MT7623_PIN_71_WB_CRTL5_FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT7623_PIN_71_WB_CRTL5_FUNC_WB_CRTL5 (MTK_PIN_NO(71) | 1)
+
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_I2S0_DATA_IN (MTK_PIN_NO(72) | 1)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(72) | 3)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PWM0 (MTK_PIN_NO(72) | 4)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_DISP_PWM (MTK_PIN_NO(72) | 5)
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_AP_I2S_DI (MTK_PIN_NO(72) | 6)
+
+#define MT7623_PIN_73_I2S0_LRCK_FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT7623_PIN_73_I2S0_LRCK_FUNC_I2S0_LRCK (MTK_PIN_NO(73) | 1)
+#define MT7623_PIN_73_I2S0_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(73) | 3)
+#define MT7623_PIN_73_I2S0_LRCK_FUNC_AP_I2S_LRCK (MTK_PIN_NO(73) | 6)
+
+#define MT7623_PIN_74_I2S0_BCK_FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT7623_PIN_74_I2S0_BCK_FUNC_I2S0_BCK (MTK_PIN_NO(74) | 1)
+#define MT7623_PIN_74_I2S0_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(74) | 3)
+#define MT7623_PIN_74_I2S0_BCK_FUNC_AP_I2S_BCK (MTK_PIN_NO(74) | 6)
+
+#define MT7623_PIN_75_SDA0_FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT7623_PIN_75_SDA0_FUNC_SDA0 (MTK_PIN_NO(75) | 1)
+
+#define MT7623_PIN_76_SCL0_FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT7623_PIN_76_SCL0_FUNC_SCL0 (MTK_PIN_NO(76) | 1)
+
+#define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1)
+
+#define MT7623_PIN_84_DSI_TE_FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT7623_PIN_84_DSI_TE_FUNC_DSI_TE (MTK_PIN_NO(84) | 1)
+
+#define MT7623_PIN_95_MIPI_TCN_FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define MT7623_PIN_95_MIPI_TCN_FUNC_TCN (MTK_PIN_NO(95) | 1)
+
+#define MT7623_PIN_96_MIPI_TCP_FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define MT7623_PIN_96_MIPI_TCP_FUNC_TCP (MTK_PIN_NO(96) | 1)
+
+#define MT7623_PIN_97_MIPI_TDN1_FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define MT7623_PIN_97_MIPI_TDN1_FUNC_TDN1 (MTK_PIN_NO(97) | 1)
+
+#define MT7623_PIN_98_MIPI_TDP1_FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define MT7623_PIN_98_MIPI_TDP1_FUNC_TDP1 (MTK_PIN_NO(98) | 1)
+
+#define MT7623_PIN_99_MIPI_TDN0_FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define MT7623_PIN_99_MIPI_TDN0_FUNC_TDN0 (MTK_PIN_NO(99) | 1)
+
+#define MT7623_PIN_100_MIPI_TDP0_FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT7623_PIN_100_MIPI_TDP0_FUNC_TDP0 (MTK_PIN_NO(100) | 1)
+
+#define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1)
+#define MT7623_PIN_105_MSDC1_CMD_FUNC_SDA1 (MTK_PIN_NO(105) | 3)
+#define MT7623_PIN_105_MSDC1_CMD_FUNC_I2SOUT_BCK (MTK_PIN_NO(105) | 6)
+
+#define MT7623_PIN_106_MSDC1_CLK_FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT7623_PIN_106_MSDC1_CLK_FUNC_MSDC1_CLK (MTK_PIN_NO(106) | 1)
+#define MT7623_PIN_106_MSDC1_CLK_FUNC_SCL1 (MTK_PIN_NO(106) | 3)
+#define MT7623_PIN_106_MSDC1_CLK_FUNC_I2SOUT_LRCK (MTK_PIN_NO(106) | 6)
+
+#define MT7623_PIN_107_MSDC1_DAT0_FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT7623_PIN_107_MSDC1_DAT0_FUNC_MSDC1_DAT0 (MTK_PIN_NO(107) | 1)
+#define MT7623_PIN_107_MSDC1_DAT0_FUNC_UTXD0 (MTK_PIN_NO(107) | 5)
+#define MT7623_PIN_107_MSDC1_DAT0_FUNC_I2SOUT_DATA_OUT (MTK_PIN_NO(107) | 6)
+
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_MSDC1_DAT1 (MTK_PIN_NO(108) | 1)
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM0 (MTK_PIN_NO(108) | 3)
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_URXD0 (MTK_PIN_NO(108) | 5)
+#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM1 (MTK_PIN_NO(108) | 6)
+
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_MSDC1_DAT2 (MTK_PIN_NO(109) | 1)
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_SDA2 (MTK_PIN_NO(109) | 3)
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_UTXD1 (MTK_PIN_NO(109) | 5)
+#define MT7623_PIN_109_MSDC1_DAT2_FUNC_PWM2 (MTK_PIN_NO(109) | 6)
+
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_MSDC1_DAT3 (MTK_PIN_NO(110) | 1)
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_SCL2 (MTK_PIN_NO(110) | 3)
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_URXD1 (MTK_PIN_NO(110) | 5)
+#define MT7623_PIN_110_MSDC1_DAT3_FUNC_PWM3 (MTK_PIN_NO(110) | 6)
+
+#define MT7623_PIN_111_MSDC0_DAT7_FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT7623_PIN_111_MSDC0_DAT7_FUNC_MSDC0_DAT7 (MTK_PIN_NO(111) | 1)
+#define MT7623_PIN_111_MSDC0_DAT7_FUNC_NLD7 (MTK_PIN_NO(111) | 4)
+
+#define MT7623_PIN_112_MSDC0_DAT6_FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT7623_PIN_112_MSDC0_DAT6_FUNC_MSDC0_DAT6 (MTK_PIN_NO(112) | 1)
+#define MT7623_PIN_112_MSDC0_DAT6_FUNC_NLD6 (MTK_PIN_NO(112) | 4)
+
+#define MT7623_PIN_113_MSDC0_DAT5_FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT7623_PIN_113_MSDC0_DAT5_FUNC_MSDC0_DAT5 (MTK_PIN_NO(113) | 1)
+#define MT7623_PIN_113_MSDC0_DAT5_FUNC_NLD5 (MTK_PIN_NO(113) | 4)
+
+#define MT7623_PIN_114_MSDC0_DAT4_FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT7623_PIN_114_MSDC0_DAT4_FUNC_MSDC0_DAT4 (MTK_PIN_NO(114) | 1)
+#define MT7623_PIN_114_MSDC0_DAT4_FUNC_NLD4 (MTK_PIN_NO(114) | 4)
+
+#define MT7623_PIN_115_MSDC0_RSTB_FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT7623_PIN_115_MSDC0_RSTB_FUNC_MSDC0_RSTB (MTK_PIN_NO(115) | 1)
+#define MT7623_PIN_115_MSDC0_RSTB_FUNC_NLD8 (MTK_PIN_NO(115) | 4)
+
+#define MT7623_PIN_116_MSDC0_CMD_FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT7623_PIN_116_MSDC0_CMD_FUNC_MSDC0_CMD (MTK_PIN_NO(116) | 1)
+#define MT7623_PIN_116_MSDC0_CMD_FUNC_NALE (MTK_PIN_NO(116) | 4)
+
+#define MT7623_PIN_117_MSDC0_CLK_FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT7623_PIN_117_MSDC0_CLK_FUNC_MSDC0_CLK (MTK_PIN_NO(117) | 1)
+#define MT7623_PIN_117_MSDC0_CLK_FUNC_NWEB (MTK_PIN_NO(117) | 4)
+
+#define MT7623_PIN_118_MSDC0_DAT3_FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT7623_PIN_118_MSDC0_DAT3_FUNC_MSDC0_DAT3 (MTK_PIN_NO(118) | 1)
+#define MT7623_PIN_118_MSDC0_DAT3_FUNC_NLD3 (MTK_PIN_NO(118) | 4)
+
+#define MT7623_PIN_119_MSDC0_DAT2_FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT7623_PIN_119_MSDC0_DAT2_FUNC_MSDC0_DAT2 (MTK_PIN_NO(119) | 1)
+#define MT7623_PIN_119_MSDC0_DAT2_FUNC_NLD2 (MTK_PIN_NO(119) | 4)
+
+#define MT7623_PIN_120_MSDC0_DAT1_FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT7623_PIN_120_MSDC0_DAT1_FUNC_MSDC0_DAT1 (MTK_PIN_NO(120) | 1)
+#define MT7623_PIN_120_MSDC0_DAT1_FUNC_NLD1 (MTK_PIN_NO(120) | 4)
+
+#define MT7623_PIN_121_MSDC0_DAT0_FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT7623_PIN_121_MSDC0_DAT0_FUNC_MSDC0_DAT0 (MTK_PIN_NO(121) | 1)
+#define MT7623_PIN_121_MSDC0_DAT0_FUNC_NLD0 (MTK_PIN_NO(121) | 4)
+#define MT7623_PIN_121_MSDC0_DAT0_FUNC_WATCHDOG (MTK_PIN_NO(121) | 5)
+
+#define MT7623_PIN_122_GPIO122_FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT7623_PIN_122_GPIO122_FUNC_TEST (MTK_PIN_NO(122) | 1)
+#define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4)
+#define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5)
+
+#define MT7623_PIN_123_GPIO123_FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT7623_PIN_123_GPIO123_FUNC_TEST (MTK_PIN_NO(123) | 1)
+#define MT7623_PIN_123_GPIO123_FUNC_SCL2 (MTK_PIN_NO(123) | 4)
+#define MT7623_PIN_123_GPIO123_FUNC_UTXD0 (MTK_PIN_NO(123) | 5)
+
+#define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT7623_PIN_124_GPIO124_FUNC_TEST (MTK_PIN_NO(124) | 1)
+#define MT7623_PIN_124_GPIO124_FUNC_SDA1 (MTK_PIN_NO(124) | 4)
+#define MT7623_PIN_124_GPIO124_FUNC_PWM3 (MTK_PIN_NO(124) | 5)
+
+#define MT7623_PIN_125_GPIO125_FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT7623_PIN_125_GPIO125_FUNC_TEST (MTK_PIN_NO(125) | 1)
+#define MT7623_PIN_125_GPIO125_FUNC_SCL1 (MTK_PIN_NO(125) | 4)
+#define MT7623_PIN_125_GPIO125_FUNC_PWM4 (MTK_PIN_NO(125) | 5)
+
+#define MT7623_PIN_126_I2S0_MCLK_FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT7623_PIN_126_I2S0_MCLK_FUNC_I2S0_MCLK (MTK_PIN_NO(126) | 1)
+#define MT7623_PIN_126_I2S0_MCLK_FUNC_AP_I2S_MCLK (MTK_PIN_NO(126) | 6)
+
+#define MT7623_PIN_199_SPI1_CK_FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define MT7623_PIN_199_SPI1_CK_FUNC_SPI1_CK (MTK_PIN_NO(199) | 1)
+
+#define MT7623_PIN_200_URXD2_FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define MT7623_PIN_200_URXD2_FUNC_URXD2 (MTK_PIN_NO(200) | 6)
+
+#define MT7623_PIN_201_UTXD2_FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define MT7623_PIN_201_UTXD2_FUNC_UTXD2 (MTK_PIN_NO(201) | 6)
+
+#define MT7623_PIN_203_PWM0_FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+#define MT7623_PIN_203_PWM0_FUNC_PWM0 (MTK_PIN_NO(203) | 1)
+#define MT7623_PIN_203_PWM0_FUNC_DISP_PWM (MTK_PIN_NO(203) | 2)
+
+#define MT7623_PIN_204_PWM1_FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+#define MT7623_PIN_204_PWM1_FUNC_PWM1 (MTK_PIN_NO(204) | 1)
+
+#define MT7623_PIN_205_PWM2_FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+#define MT7623_PIN_205_PWM2_FUNC_PWM2 (MTK_PIN_NO(205) | 1)
+
+#define MT7623_PIN_206_PWM3_FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+#define MT7623_PIN_206_PWM3_FUNC_PWM3 (MTK_PIN_NO(206) | 1)
+
+#define MT7623_PIN_207_PWM4_FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+#define MT7623_PIN_207_PWM4_FUNC_PWM4 (MTK_PIN_NO(207) | 1)
+
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_AUD_EXT_CK1 (MTK_PIN_NO(208) | 1)
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PWM0 (MTK_PIN_NO(208) | 2)
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PCIE0_PERST_N (MTK_PIN_NO(208) | 3)
+#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_DISP_PWM (MTK_PIN_NO(208) | 5)
+
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_AUD_EXT_CK2 (MTK_PIN_NO(209) | 1)
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_MSDC1_WP (MTK_PIN_NO(209) | 2)
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PCIE1_PERST_N (MTK_PIN_NO(209) | 3)
+#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PWM1 (MTK_PIN_NO(209) | 5)
+
+#define MT7623_PIN_236_EXT_SDIO3_FUNC_GPIO236 (MTK_PIN_NO(236) | 0)
+#define MT7623_PIN_236_EXT_SDIO3_FUNC_EXT_SDIO3 (MTK_PIN_NO(236) | 1)
+#define MT7623_PIN_236_EXT_SDIO3_FUNC_IDDIG (MTK_PIN_NO(236) | 2)
+
+#define MT7623_PIN_237_EXT_SDIO2_FUNC_GPIO237 (MTK_PIN_NO(237) | 0)
+#define MT7623_PIN_237_EXT_SDIO2_FUNC_EXT_SDIO2 (MTK_PIN_NO(237) | 1)
+#define MT7623_PIN_237_EXT_SDIO2_FUNC_DRV_VBUS (MTK_PIN_NO(237) | 2)
+
+#define MT7623_PIN_238_EXT_SDIO1_FUNC_GPIO238 (MTK_PIN_NO(238) | 0)
+#define MT7623_PIN_238_EXT_SDIO1_FUNC_EXT_SDIO1 (MTK_PIN_NO(238) | 1)
+
+#define MT7623_PIN_239_EXT_SDIO0_FUNC_GPIO239 (MTK_PIN_NO(239) | 0)
+#define MT7623_PIN_239_EXT_SDIO0_FUNC_EXT_SDIO0 (MTK_PIN_NO(239) | 1)
+
+#define MT7623_PIN_240_EXT_XCS_FUNC_GPIO240 (MTK_PIN_NO(240) | 0)
+#define MT7623_PIN_240_EXT_XCS_FUNC_EXT_XCS (MTK_PIN_NO(240) | 1)
+
+#define MT7623_PIN_241_EXT_SCK_FUNC_GPIO241 (MTK_PIN_NO(241) | 0)
+#define MT7623_PIN_241_EXT_SCK_FUNC_EXT_SCK (MTK_PIN_NO(241) | 1)
+
+#define MT7623_PIN_242_URTS2_FUNC_GPIO242 (MTK_PIN_NO(242) | 0)
+#define MT7623_PIN_242_URTS2_FUNC_URTS2 (MTK_PIN_NO(242) | 1)
+#define MT7623_PIN_242_URTS2_FUNC_UTXD3 (MTK_PIN_NO(242) | 2)
+#define MT7623_PIN_242_URTS2_FUNC_URXD3 (MTK_PIN_NO(242) | 3)
+#define MT7623_PIN_242_URTS2_FUNC_SCL1 (MTK_PIN_NO(242) | 4)
+
+#define MT7623_PIN_243_UCTS2_FUNC_GPIO243 (MTK_PIN_NO(243) | 0)
+#define MT7623_PIN_243_UCTS2_FUNC_UCTS2 (MTK_PIN_NO(243) | 1)
+#define MT7623_PIN_243_UCTS2_FUNC_URXD3 (MTK_PIN_NO(243) | 2)
+#define MT7623_PIN_243_UCTS2_FUNC_UTXD3 (MTK_PIN_NO(243) | 3)
+#define MT7623_PIN_243_UCTS2_FUNC_SDA1 (MTK_PIN_NO(243) | 4)
+
+#define MT7623_PIN_250_GPIO250_FUNC_GPIO250 (MTK_PIN_NO(250) | 0)
+#define MT7623_PIN_250_GPIO250_FUNC_TEST_MD7 (MTK_PIN_NO(250) | 1)
+#define MT7623_PIN_250_GPIO250_FUNC_PCIE0_CLKREQ_N (MTK_PIN_NO(250) | 6)
+
+#define MT7623_PIN_251_GPIO251_FUNC_GPIO251 (MTK_PIN_NO(251) | 0)
+#define MT7623_PIN_251_GPIO251_FUNC_TEST_MD6 (MTK_PIN_NO(251) | 1)
+#define MT7623_PIN_251_GPIO251_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(251) | 6)
+
+#define MT7623_PIN_252_GPIO252_FUNC_GPIO252 (MTK_PIN_NO(252) | 0)
+#define MT7623_PIN_252_GPIO252_FUNC_TEST_MD5 (MTK_PIN_NO(252) | 1)
+#define MT7623_PIN_252_GPIO252_FUNC_PCIE1_CLKREQ_N (MTK_PIN_NO(252) | 6)
+
+#define MT7623_PIN_253_GPIO253_FUNC_GPIO253 (MTK_PIN_NO(253) | 0)
+#define MT7623_PIN_253_GPIO253_FUNC_TEST_MD4 (MTK_PIN_NO(253) | 1)
+#define MT7623_PIN_253_GPIO253_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(253) | 6)
+
+#define MT7623_PIN_254_GPIO254_FUNC_GPIO254 (MTK_PIN_NO(254) | 0)
+#define MT7623_PIN_254_GPIO254_FUNC_TEST_MD3 (MTK_PIN_NO(254) | 1)
+#define MT7623_PIN_254_GPIO254_FUNC_PCIE2_CLKREQ_N (MTK_PIN_NO(254) | 6)
+
+#define MT7623_PIN_255_GPIO255_FUNC_GPIO255 (MTK_PIN_NO(255) | 0)
+#define MT7623_PIN_255_GPIO255_FUNC_TEST_MD2 (MTK_PIN_NO(255) | 1)
+#define MT7623_PIN_255_GPIO255_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(255) | 6)
+
+#define MT7623_PIN_256_GPIO256_FUNC_GPIO256 (MTK_PIN_NO(256) | 0)
+#define MT7623_PIN_256_GPIO256_FUNC_TEST_MD1 (MTK_PIN_NO(256) | 1)
+
+#define MT7623_PIN_257_GPIO257_FUNC_GPIO257 (MTK_PIN_NO(257) | 0)
+#define MT7623_PIN_257_GPIO257_FUNC_TEST_MD0 (MTK_PIN_NO(257) | 1)
+
+#define MT7623_PIN_261_MSDC1_INS_FUNC_GPIO261 (MTK_PIN_NO(261) | 0)
+#define MT7623_PIN_261_MSDC1_INS_FUNC_MSDC1_INS (MTK_PIN_NO(261) | 1)
+
+#define MT7623_PIN_262_G2_TXEN_FUNC_GPIO262 (MTK_PIN_NO(262) | 0)
+#define MT7623_PIN_262_G2_TXEN_FUNC_G2_TXEN (MTK_PIN_NO(262) | 1)
+
+#define MT7623_PIN_263_G2_TXD3_FUNC_GPIO263 (MTK_PIN_NO(263) | 0)
+#define MT7623_PIN_263_G2_TXD3_FUNC_G2_TXD3 (MTK_PIN_NO(263) | 1)
+
+#define MT7623_PIN_264_G2_TXD2_FUNC_GPIO264 (MTK_PIN_NO(264) | 0)
+#define MT7623_PIN_264_G2_TXD2_FUNC_G2_TXD2 (MTK_PIN_NO(264) | 1)
+
+#define MT7623_PIN_265_G2_TXD1_FUNC_GPIO265 (MTK_PIN_NO(265) | 0)
+#define MT7623_PIN_265_G2_TXD1_FUNC_G2_TXD1 (MTK_PIN_NO(265) | 1)
+
+#define MT7623_PIN_266_G2_TXD0_FUNC_GPIO266 (MTK_PIN_NO(266) | 0)
+#define MT7623_PIN_266_G2_TXD0_FUNC_G2_TXD0 (MTK_PIN_NO(266) | 1)
+
+#define MT7623_PIN_267_G2_TXCLK_FUNC_GPIO267 (MTK_PIN_NO(267) | 0)
+#define MT7623_PIN_267_G2_TXCLK_FUNC_G2_TXC (MTK_PIN_NO(267) | 1)
+
+#define MT7623_PIN_268_G2_RXCLK_FUNC_GPIO268 (MTK_PIN_NO(268) | 0)
+#define MT7623_PIN_268_G2_RXCLK_FUNC_G2_RXC (MTK_PIN_NO(268) | 1)
+
+#define MT7623_PIN_269_G2_RXD0_FUNC_GPIO269 (MTK_PIN_NO(269) | 0)
+#define MT7623_PIN_269_G2_RXD0_FUNC_G2_RXD0 (MTK_PIN_NO(269) | 1)
+
+#define MT7623_PIN_270_G2_RXD1_FUNC_GPIO270 (MTK_PIN_NO(270) | 0)
+#define MT7623_PIN_270_G2_RXD1_FUNC_G2_RXD1 (MTK_PIN_NO(270) | 1)
+
+#define MT7623_PIN_271_G2_RXD2_FUNC_GPIO271 (MTK_PIN_NO(271) | 0)
+#define MT7623_PIN_271_G2_RXD2_FUNC_G2_RXD2 (MTK_PIN_NO(271) | 1)
+
+#define MT7623_PIN_272_G2_RXD3_FUNC_GPIO272 (MTK_PIN_NO(272) | 0)
+#define MT7623_PIN_272_G2_RXD3_FUNC_G2_RXD3 (MTK_PIN_NO(272) | 1)
+
+#define MT7623_PIN_274_G2_RXDV_FUNC_GPIO274 (MTK_PIN_NO(274) | 0)
+#define MT7623_PIN_274_G2_RXDV_FUNC_G2_RXDV (MTK_PIN_NO(274) | 1)
+
+#define MT7623_PIN_275_G2_MDC_FUNC_GPIO275 (MTK_PIN_NO(275) | 0)
+#define MT7623_PIN_275_G2_MDC_FUNC_MDC (MTK_PIN_NO(275) | 1)
+
+#define MT7623_PIN_276_G2_MDIO_FUNC_GPIO276 (MTK_PIN_NO(276) | 0)
+#define MT7623_PIN_276_G2_MDIO_FUNC_MDIO (MTK_PIN_NO(276) | 1)
+
+#define MT7623_PIN_278_JTAG_RESET_FUNC_GPIO278 (MTK_PIN_NO(278) | 0)
+#define MT7623_PIN_278_JTAG_RESET_FUNC_JTAG_RESET (MTK_PIN_NO(278) | 1)
+
+#endif /* __DTS_MT7623_PINFUNC_H */
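As an aside (not part of this diff): each macro above packs a pin number and a mux function index into one value for use in board device trees. Assuming the MTK_PIN_NO() encoding from <dt-bindings/pinctrl/mt65xx.h>, which is believed to shift the pin number into the high bits and keep the function index in the low bits, a value decodes roughly like this:

/* Illustrative decode only; the real helpers live in mt65xx.h. */
#define EXAMPLE_PIN_NO(v)	((v) >> 8)	/* e.g. 9 for MT7623_PIN_9_... */
#define EXAMPLE_PIN_FUNC(v)	((v) & 0xff)	/* e.g. 2 selects the SPI1_MI mux */
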
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 1800227af9d6..b651aed9dc6b 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -55,6 +55,9 @@ struct arch_timer_cpu {
/* VGIC mapping */
struct irq_phys_map *map;
+
+ /* Active IRQ state caching */
+ bool active_cleared_last;
};
int kvm_timer_hyp_init(void);
@@ -74,4 +77,6 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu);
void kvm_timer_schedule(struct kvm_vcpu *vcpu);
void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
+
#endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
new file mode 100644
index 000000000000..fe389ac31489
--- /dev/null
+++ b/include/kvm/arm_pmu.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Shannon Zhao <shannon.zhao@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM_KVM_PMU_H
+#define __ASM_ARM_KVM_PMU_H
+
+#ifdef CONFIG_KVM_ARM_PMU
+
+#include <linux/perf_event.h>
+#include <asm/perf_event.h>
+
+#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
+
+struct kvm_pmc {
+ u8 idx; /* index into the pmu->pmc array */
+ struct perf_event *perf_event;
+ u64 bitmask;
+};
+
+struct kvm_pmu {
+ int irq_num;
+ struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
+ bool ready;
+ bool irq_level;
+};
+
+#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready)
+#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
+u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+ u64 select_idx);
+bool kvm_arm_support_pmu_v3(void);
+int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+#else
+struct kvm_pmu {
+};
+
+#define kvm_arm_pmu_v3_ready(v) (false)
+#define kvm_arm_pmu_irq_initialized(v) (false)
+static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
+ u64 select_idx)
+{
+ return 0;
+}
+static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
+ u64 select_idx, u64 val) {}
+static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
+ u64 data, u64 select_idx) {}
+static inline bool kvm_arm_support_pmu_v3(void) { return false; }
+static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+#endif
+
+#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 13a3d537811b..281caf847fad 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -279,12 +279,6 @@ struct vgic_v2_cpu_if {
u32 vgic_lr[VGIC_V2_MAX_LRS];
};
-/*
- * LRs are stored in reverse order in memory. make sure we index them
- * correctly.
- */
-#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)
-
struct vgic_v3_cpu_if {
#ifdef CONFIG_KVM_ARM_VGIC_V3
u32 vgic_hcr;
@@ -321,6 +315,8 @@ struct vgic_cpu {
/* Protected by the distributor's irq_phys_map_lock */
struct list_head irq_phys_map_list;
+
+ u64 live_lrs;
};
#define LR_EMPTY 0xff
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 301de78d65f7..6c502cb13c95 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -548,6 +548,27 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
+/**
+ * fetch_or - perform *ptr |= mask and return old value of *ptr
+ * @ptr: pointer to value
+ * @mask: mask to OR on the value
+ *
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#ifndef fetch_or
+#define fetch_or(ptr, mask) \
+({ typeof(*(ptr)) __old, __val = *(ptr); \
+ for (;;) { \
+ __old = cmpxchg((ptr), __val, __val | (mask)); \
+ if (__old == __val) \
+ break; \
+ __val = __old; \
+ } \
+ __old; \
+})
+#endif
+
+
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
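A minimal usage sketch of the new fetch_or() helper (illustrative only; the flag word and bit below are made up): atomically OR a bit into a word and learn whether it was already set.

static unsigned long my_flags;			/* hypothetical flag word */
#define MY_FLAG_PENDING	(1UL << 0)

static bool my_set_pending(void)
{
	/* fetch_or() returns the value *before* the OR. */
	unsigned long old = fetch_or(&my_flags, MY_FLAG_PENDING);

	return old & MY_FLAG_PENDING;		/* true if already pending */
}
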
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 850f39b33e74..7caaf298f539 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -11,12 +11,7 @@
#define _LINUX_AUTO_DEV_IOCTL_H
#include <linux/auto_fs.h>
-
-#ifdef __KERNEL__
#include <linux/string.h>
-#else
-#include <string.h>
-#endif /* __KERNEL__ */
#define AUTOFS_DEVICE_NAME "autofs"
@@ -125,7 +120,6 @@ static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
in->size = sizeof(struct autofs_dev_ioctl);
in->ioctlfd = -1;
- return;
}
/*
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index fcd704d354c4..b4066bb89083 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -1,14 +1,10 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * linux/include/linux/auto_fs.h
- *
- * Copyright 1997 Transmeta Corporation - All Rights Reserved
+/*
+ * Copyright 1997 Transmeta Corporation - All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
+ */
#ifndef _LINUX_AUTO_FS_H
#define _LINUX_AUTO_FS_H
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 17e7e82d2aa7..1be04f8c563a 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -12,10 +12,24 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
+/*
+ * __read_mostly is used to keep rarely changing variables out of frequently
+ * updated cachelines. If an architecture doesn't support it, ignore the
+ * hint.
+ */
#ifndef __read_mostly
#define __read_mostly
#endif
+/*
+ * __ro_after_init is used to mark things that are read-only after init (i.e.
+ * after mark_rodata_ro() has been called). These are effectively read-only,
+ * but may get written to during init, so can't live in .rodata (via "const").
+ */
+#ifndef __ro_after_init
+#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+#endif
+
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
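A sketch of how the new annotation is intended to be used (the driver name is made up): a variable written only during early boot can be marked __ro_after_init so it becomes read-only once mark_rodata_ro() has run.

#include <linux/init.h>
#include <linux/cache.h>

/* Set once from the command line during boot, never written again. */
static int mydrv_fast_mode __ro_after_init;

static int __init mydrv_fast_setup(char *arg)
{
	mydrv_fast_mode = 1;
	return 1;
}
__setup("mydrv_fast", mydrv_fast_setup);
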
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index bdcf358dfce2..0d442e34c349 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -190,9 +190,9 @@ extern void clockevents_config_and_register(struct clock_event_device *dev,
extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
static inline void
-clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
+clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec)
{
- return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
+ return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec);
}
extern void clockevents_suspend(void);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 6013021a3b39..a307bf62974f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -118,6 +118,23 @@ struct clocksource {
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
+{
+ /* freq = cyc/from
+ * mult/2^shift = ns/cyc
+ * mult = ns/cyc * 2^shift
+ * mult = from/freq * 2^shift
+ * mult = from * 2^shift / freq
+ * mult = (from<<shift) / freq
+ */
+ u64 tmp = ((u64)from) << shift_constant;
+
+ tmp += freq/2; /* round for do_div */
+ do_div(tmp, freq);
+
+ return (u32)tmp;
+}
+
/**
* clocksource_khz2mult - calculates mult from khz and shift
* @khz: Clocksource frequency in KHz
@@ -128,19 +145,7 @@ struct clocksource {
*/
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
- /* khz = cyc/(Million ns)
- * mult/2^shift = ns/cyc
- * mult = ns/cyc * 2^shift
- * mult = 1Million/khz * 2^shift
- * mult = 1000000 * 2^shift / khz
- * mult = (1000000<<shift) / khz
- */
- u64 tmp = ((u64)1000000) << shift_constant;
-
- tmp += khz/2; /* round for do_div */
- do_div(tmp, khz);
-
- return (u32)tmp;
+ return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
}
/**
@@ -154,19 +159,7 @@ static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
*/
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
- /* hz = cyc/(Billion ns)
- * mult/2^shift = ns/cyc
- * mult = ns/cyc * 2^shift
- * mult = 1Billion/hz * 2^shift
- * mult = 1000000000 * 2^shift / hz
- * mult = (1000000000<<shift) / hz
- */
- u64 tmp = ((u64)1000000000) << shift_constant;
-
- tmp += hz/2; /* round for do_div */
- do_div(tmp, hz);
-
- return (u32)tmp;
+ return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
}
/**
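Worked numbers for the consolidated helper (illustrative, not from the patch): a 1 MHz clocksource with shift = 20 gets

	mult = clocksource_hz2mult(1000000, 20)
	     = (1000000000 << 20) / 1000000 = 1048576000

so the usual conversion ns = (cycles * mult) >> shift yields exactly 1000 ns per cycle, as expected for a 1 MHz clock.
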
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 48f5aab117ae..b5ff9881bef8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -20,12 +20,14 @@
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
-#else
+#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
-#endif
+#endif /* CONFIG_SPARSE_RCU_POINTER */
+# define __private __attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
-#else
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
# define __user
# define __kernel
# define __safe
@@ -44,7 +46,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __percpu
# define __rcu
# define __pmem
-#endif
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+#endif /* __CHECKER__ */
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
@@ -263,8 +267,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
- * compile-time warning.
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
+ * least two memcpy()s: one for the __builtin_memcpy() and then one for
+ * the macro doing the copy of variable - '__u' allocated on the stack.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
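A sketch of what the new __private/ACCESS_PRIVATE() pair is for (the structure and field names are made up): fields owned by core code are annotated __private, and the owner reaches them through ACCESS_PRIVATE() so that sparse can flag any direct access from outside.

struct my_irq_data {
	unsigned int	irq;
	void __private	*core_data;	/* core-internal, not for drivers */
};

static void core_set_data(struct my_irq_data *d, void *data)
{
	/* Only the owning code strips the annotation, and does so explicitly. */
	ACCESS_PRIVATE(d, core_data) = data;
}
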
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d2ca8c38f9c4..f9b1fab4388a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -16,6 +16,7 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
+#include <linux/cpuhotplug.h>
struct device;
struct device_node;
@@ -27,6 +28,9 @@ struct cpu {
struct device dev;
};
+extern void boot_cpu_init(void);
+extern void boot_cpu_state_init(void);
+
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
@@ -74,7 +78,7 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
- CPU_PRI_SMPBOOT = 9,
+
/* bring up workqueues before normal notifiers and down after */
CPU_PRI_WORKQUEUE_UP = 5,
CPU_PRI_WORKQUEUE_DOWN = -5,
@@ -97,9 +101,7 @@ enum {
* Called on the new cpu, just before
* enabling interrupts. Must not sleep,
* must not fail */
-#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached
- * idle loop. */
-#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
+#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly,
* perhaps due to preemption. */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
@@ -118,6 +120,7 @@ enum {
#ifdef CONFIG_SMP
+extern bool cpuhp_tasks_frozen;
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
#define cpu_notifier(fn, pri) { \
@@ -167,7 +170,6 @@ static inline void __unregister_cpu_notifier(struct notifier_block *nb)
}
#endif
-void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
@@ -177,6 +179,7 @@ extern void cpu_maps_update_done(void);
#define cpu_notifier_register_done cpu_maps_update_done
#else /* CONFIG_SMP */
+#define cpuhp_tasks_frozen 0
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
@@ -215,10 +218,6 @@ static inline void cpu_notifier_register_done(void)
{
}
-static inline void smpboot_thread_init(void)
-{
-}
-
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -265,11 +264,6 @@ static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */
-enum cpuhp_state {
- CPUHP_OFFLINE,
- CPUHP_ONLINE,
-};
-
void cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
@@ -280,14 +274,15 @@ void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
-DECLARE_PER_CPU(bool, cpu_dead_idle);
-
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
+void cpuhp_report_idle_dead(void);
+#else
+static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 88a4215125bc..718e8725de8a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -80,7 +80,6 @@ struct cpufreq_policy {
unsigned int last_policy; /* policy before unplug */
struct cpufreq_governor *governor; /* see below */
void *governor_data;
- bool governor_enabled; /* governor start/stop flag */
char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
struct work_struct update; /* if update_policy() needs to be
@@ -100,10 +99,6 @@ struct cpufreq_policy {
* - Any routine that will write to the policy structure and/or may take away
* the policy altogether (eg. CPU hotplug), will hold this lock in write
* mode before doing so.
- *
- * Additional rules:
- * - Lock should not be held across
- * __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
*/
struct rw_semaphore rwsem;
@@ -464,29 +459,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
-/* CPUFREQ DEFAULT GOVERNOR */
-/*
- * Performance governor is fallback governor if any other gov failed to auto
- * load due latency restrictions
- */
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
-extern struct cpufreq_governor cpufreq_gov_performance;
-#endif
-#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE)
-extern struct cpufreq_governor cpufreq_gov_powersave;
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
-extern struct cpufreq_governor cpufreq_gov_userspace;
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
-extern struct cpufreq_governor cpufreq_gov_ondemand;
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
-extern struct cpufreq_governor cpufreq_gov_conservative;
-#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
-#endif
+struct cpufreq_governor *cpufreq_default_governor(void);
+struct cpufreq_governor *cpufreq_fallback_governor(void);
/*********************************************************************
* FREQUENCY TABLE HELPERS *
@@ -525,16 +499,6 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
}
#endif
-static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos)
-{
- while ((*pos)->frequency != CPUFREQ_TABLE_END)
- if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID)
- return true;
- else
- (*pos)++;
- return false;
-}
-
/*
* cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -551,8 +515,11 @@ static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos)
* @table: the cpufreq_frequency_table * to iterate over.
*/
-#define cpufreq_for_each_valid_entry(pos, table) \
- for (pos = table; cpufreq_next_valid(&pos); pos++)
+#define cpufreq_for_each_valid_entry(pos, table) \
+ for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \
+ if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
+ continue; \
+ else
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
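Usage of the reworked iterator is unchanged for callers; a minimal sketch (the function and table are hypothetical) of walking a frequency table while the macro itself skips CPUFREQ_ENTRY_INVALID slots:

static unsigned int lowest_valid_freq(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int min = UINT_MAX;

	cpufreq_for_each_valid_entry(pos, table) {
		if (pos->frequency < min)
			min = pos->frequency;
	}

	return min;
}
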
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
new file mode 100644
index 000000000000..5d68e15e46b7
--- /dev/null
+++ b/include/linux/cpuhotplug.h
@@ -0,0 +1,93 @@
+#ifndef __CPUHOTPLUG_H
+#define __CPUHOTPLUG_H
+
+enum cpuhp_state {
+ CPUHP_OFFLINE,
+ CPUHP_CREATE_THREADS,
+ CPUHP_NOTIFY_PREPARE,
+ CPUHP_BRINGUP_CPU,
+ CPUHP_AP_IDLE_DEAD,
+ CPUHP_AP_OFFLINE,
+ CPUHP_AP_NOTIFY_STARTING,
+ CPUHP_AP_ONLINE,
+ CPUHP_TEARDOWN_CPU,
+ CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_SMPBOOT_THREADS,
+ CPUHP_AP_NOTIFY_ONLINE,
+ CPUHP_AP_ONLINE_DYN,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_ONLINE,
+};
+
+int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
+ int (*startup)(unsigned int cpu),
+ int (*teardown)(unsigned int cpu));
+
+/**
+ * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback (will be used in debug output)
+ * @startup: startup callback function
+ * @teardown: teardown callback function
+ *
+ * Installs the callback functions and invokes the startup callback on
+ * the present cpus which have already reached the @state.
+ */
+static inline int cpuhp_setup_state(enum cpuhp_state state,
+ const char *name,
+ int (*startup)(unsigned int cpu),
+ int (*teardown)(unsigned int cpu))
+{
+ return __cpuhp_setup_state(state, name, true, startup, teardown);
+}
+
+/**
+ * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
+ * callbacks
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback.
+ * @startup: startup callback function
+ * @teardown: teardown callback function
+ *
+ * Same as @cpuhp_setup_state except that no calls are executed are invoked
+ * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
+ */
+static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
+ const char *name,
+ int (*startup)(unsigned int cpu),
+ int (*teardown)(unsigned int cpu))
+{
+ return __cpuhp_setup_state(state, name, false, startup, teardown);
+}
+
+void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
+
+/**
+ * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
+ * @state: The state for which the calls are removed
+ *
+ * Removes the callback functions and invokes the teardown callback on
+ * the present cpus which have already reached the @state.
+ */
+static inline void cpuhp_remove_state(enum cpuhp_state state)
+{
+ __cpuhp_remove_state(state, true);
+}
+
+/**
+ * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
+ * teardown
+ * @state: The state for which the calls are removed
+ */
+static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
+{
+ __cpuhp_remove_state(state, false);
+}
+
+#ifdef CONFIG_SMP
+void cpuhp_online_idle(enum cpuhp_state state);
+#else
+static inline void cpuhp_online_idle(enum cpuhp_state state) { }
+#endif
+
+#endif
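A rough usage sketch of the new state machine interface (the driver and callback names are made up): a driver installs per-CPU online/offline callbacks on a dynamic application-processor state and lets the core call them for CPUs that are already up.

static int mydrv_cpu_online(unsigned int cpu)
{
	/* set up per-cpu resources for @cpu */
	return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	/* quiesce and release per-cpu resources for @cpu */
	return 0;
}

static int __init mydrv_init(void)
{
	int ret;

	/* The startup callback also runs for CPUs that are already online. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	return ret < 0 ? ret : 0;
}
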
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 728ef074602a..5e45cf930a3f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -641,31 +641,40 @@ static inline void dmam_release_declared_memory(struct device *dev)
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t gfp)
+static inline void *dma_alloc_wc(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t gfp)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}
+#ifndef dma_alloc_writecombine
+#define dma_alloc_writecombine dma_alloc_wc
+#endif
-static inline void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr)
+static inline void dma_free_wc(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}
+#ifndef dma_free_writecombine
+#define dma_free_writecombine dma_free_wc
+#endif
-static inline int dma_mmap_writecombine(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
+static inline int dma_mmap_wc(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
+#ifndef dma_mmap_writecombine
+#define dma_mmap_writecombine dma_mmap_wc
+#endif
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
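The renamed helpers keep the old calling convention; a sketch of a caller
allocating and freeing a write-combined buffer through the new short names,
where dev and size are assumed to come from the caller's context:

	void *cpu_addr;
	dma_addr_t dma_handle;

	cpu_addr = dma_alloc_wc(dev, size, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device ... */

	dma_free_wc(dev, size, cpu_addr, dma_handle);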
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 3159a7dba034..9f4956d8601c 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -62,10 +62,9 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
#endif /* CONFIG_FAULT_INJECTION */
#ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
+extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#else
-static inline bool should_failslab(size_t size, gfp_t gfpflags,
- unsigned long flags)
+static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
return false;
}
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index c2b340e23f62..6d9df3f7e334 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -713,6 +713,18 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+static inline unsigned long get_lock_parent_ip(void)
+{
+ unsigned long addr = CALLER_ADDR0;
+
+ if (!in_lock_functions(addr))
+ return addr;
+ addr = CALLER_ADDR1;
+ if (!in_lock_functions(addr))
+ return addr;
+ return CALLER_ADDR2;
+}
+
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
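get_lock_parent_ip() is meant for callers inside the locking/preempt
accounting path; a sketch of how such a caller might attribute an event to
the first return address outside the lock functions (the tracepoint call
shown is illustrative):

	/* skip over spinlock/rwlock wrappers when recording the call site */
	trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());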
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index af1f2b24bbe4..bb16dfeb917e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -9,6 +9,11 @@
struct vm_area_struct;
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
@@ -48,7 +53,6 @@ struct vm_area_struct;
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
@@ -515,13 +519,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void page_alloc_init_late(void);
-#else
-static inline void page_alloc_init_late(void)
-{
-}
-#endif
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
diff --git a/include/linux/init.h b/include/linux/init.h
index b449f378f995..aedb254abc37 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -142,6 +142,10 @@ void prepare_namespace(void);
void __init load_default_modules(void);
int __init init_rootfs(void);
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
extern void (*late_time_init)(void);
extern bool initcall_debug;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0e95fcc75b2a..358076eda364 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -125,6 +125,16 @@ struct irqaction {
extern irqreturn_t no_action(int cpl, void *dev_id);
+/*
+ * If a (PCI) device interrupt is not connected we set dev->irq to
+ * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
+ * can distinguish that case from other error returns.
+ *
+ * 0x80000000 is guaranteed to be outside the available range of interrupts
+ * and easy to distinguish from other possible incorrect values.
+ */
+#define IRQ_NOTCONNECTED (1U << 31)
+
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
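A sketch of how a driver might consume IRQ_NOTCONNECTED; the handler name and
the polling fallback are illustrative:

	ret = request_irq(pdev->irq, mydev_irq_handler, 0, "mydev", mydev);
	if (ret == -ENOTCONN) {
		/* dev->irq was IRQ_NOTCONNECTED: no interrupt line is wired */
		mydev->use_polling = true;
		ret = 0;
	}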
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 24bea087e7af..afb45597fb5f 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -20,6 +20,7 @@ struct resource {
resource_size_t end;
const char *name;
unsigned long flags;
+ unsigned long desc;
struct resource *parent, *sibling, *child;
};
@@ -49,12 +50,19 @@ struct resource {
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
+#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
+#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
+
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
+/* I/O resource extended types */
+#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM)
+
/* PnP IRQ specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
@@ -105,6 +113,22 @@ struct resource {
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
+/*
+ * I/O Resource Descriptors
+ *
+ * Descriptors are used by walk_iomem_res_desc() and region_intersects()
+ * for searching a specific resource range in the iomem table. Assign
+ * a new descriptor when a resource range supports the search interfaces.
+ * Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
+ */
+enum {
+ IORES_DESC_NONE = 0,
+ IORES_DESC_CRASH_KERNEL = 1,
+ IORES_DESC_ACPI_TABLES = 2,
+ IORES_DESC_ACPI_NV_STORAGE = 3,
+ IORES_DESC_PERSISTENT_MEMORY = 4,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
+};
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
@@ -113,6 +137,7 @@ struct resource {
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
+ .desc = IORES_DESC_NONE, \
}
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
@@ -170,6 +195,10 @@ static inline unsigned long resource_type(const struct resource *res)
{
return res->flags & IORESOURCE_TYPE_BITS;
}
+static inline unsigned long resource_ext_type(const struct resource *res)
+{
+ return res->flags & IORESOURCE_EXT_TYPE_BITS;
+}
/* True iff r1 completely contains r2 */
static inline bool resource_contains(struct resource *r1, struct resource *r2)
{
@@ -239,8 +268,8 @@ extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(u64, u64, void *));
extern int
-walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg,
- int (*func)(u64, u64, void *));
+walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
+ void *arg, int (*func)(u64, u64, void *));
/* True if any part of r1 overlaps r2 */
static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
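A sketch of walking only crash kernel ranges with the new descriptor-based
interface; the flags value is an assumption, callers pass whatever
IORESOURCE_* bits the range was registered with:

	static int count_range(u64 start, u64 end, void *arg)
	{
		(*(unsigned int *)arg)++;
		return 0;
	}

	unsigned int nr = 0;

	walk_iomem_res_desc(IORES_DESC_CRASH_KERNEL,
			    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
			    0, -1, &nr, count_range);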
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3c1c96786248..c4de62348ff2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -133,17 +133,23 @@ struct irq_domain;
* Use accessor functions to deal with it
* @node: node index useful for balancing
* @handler_data: per-IRQ data for the irq_chip methods
- * @affinity: IRQ affinity on SMP
+ * @affinity: IRQ affinity on SMP. If this is an IPI
+ * related irq, then this is the mask of the
+ * CPUs to which an IPI can be sent.
* @msi_desc: MSI descriptor
+ * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional.
*/
struct irq_common_data {
- unsigned int state_use_accessors;
+ unsigned int __private state_use_accessors;
#ifdef CONFIG_NUMA
unsigned int node;
#endif
void *handler_data;
struct msi_desc *msi_desc;
cpumask_var_t affinity;
+#ifdef CONFIG_GENERIC_IRQ_IPI
+ unsigned int ipi_offset;
+#endif
};
/**
@@ -208,7 +214,7 @@ enum {
IRQD_FORWARDED_TO_VCPU = (1 << 20),
};
-#define __irqd_to_state(d) ((d)->common->state_use_accessors)
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
@@ -299,6 +305,8 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}
+#undef __irqd_to_state
+
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
return d->hwirq;
@@ -341,6 +349,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_get_irqchip_state: return the internal state of an interrupt
* @irq_set_irqchip_state: set the internal state of a interrupt
* @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
+ * @ipi_send_single: send a single IPI to a destination cpu
+ * @ipi_send_mask: send an IPI to destination cpus in cpumask
* @flags: chip specific flags
*/
struct irq_chip {
@@ -385,6 +395,9 @@ struct irq_chip {
int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
+ void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
+ void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
+
unsigned long flags;
};
@@ -934,4 +947,12 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
return readl(gc->reg_base + reg_offset);
}
+/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
+#define INVALID_HWIRQ (~0UL)
+irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
+int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
+int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
+int ipi_send_single(unsigned int virq, unsigned int cpu);
+int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
+
#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index ce824db48d64..80f89e4a29ac 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -261,9 +261,6 @@ extern void gic_write_compare(cycle_t cnt);
extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
extern void gic_start_count(void);
extern void gic_stop_count(void);
-extern void gic_send_ipi(unsigned int intr);
-extern unsigned int plat_ipi_call_int_xlate(unsigned int);
-extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
extern int gic_get_c0_compare_int(void);
extern int gic_get_c0_perfcount_int(void);
extern int gic_get_c0_fdc_int(void);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 04579d9fbce4..ed48594e96d2 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -74,6 +74,7 @@ enum irq_domain_bus_token {
DOMAIN_BUS_PCI_MSI,
DOMAIN_BUS_PLATFORM_MSI,
DOMAIN_BUS_NEXUS,
+ DOMAIN_BUS_IPI,
};
/**
@@ -172,6 +173,12 @@ enum {
/* Core calls alloc/free recursive through the domain hierarchy. */
IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
+ /* Irq domain is an IPI domain with virq per cpu */
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
+
+ /* Irq domain is an IPI domain with single virq */
+ IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
@@ -206,6 +213,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
+extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
+ irq_hw_number_t hwirq, int node);
static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
{
@@ -335,6 +344,11 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
+/* IPI functions */
+unsigned int irq_reserve_ipi(struct irq_domain *domain,
+ const struct cpumask *dest);
+void irq_destroy_ipi(unsigned int irq);
+
/* V2 interfaces to support hierarchy IRQ domains. */
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq);
@@ -400,6 +414,22 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
}
+
+static inline bool irq_domain_is_ipi(struct irq_domain *domain)
+{
+ return domain->flags &
+ (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
+}
+
+static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU;
+}
+
+static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
+}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline void irq_domain_activate_irq(struct irq_data *data) { }
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -413,6 +443,21 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return false;
}
+
+static inline bool irq_domain_is_ipi(struct irq_domain *domain)
+{
+ return false;
+}
+
+static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
+{
+ return false;
+}
+
+static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
+{
+ return false;
+}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#else /* CONFIG_IRQ_DOMAIN */
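A sketch of the new IPI allocation and send path; ipi_domain and the target
masks are assumed to come from the irqchip driver, and handler registration
is omitted:

	unsigned int virq;

	virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);
	if (!virq)
		return -ENODEV;

	ipi_send_single(virq, cpu);		/* raise the IPI on one cpu */
	ipi_send_mask(virq, cpu_online_mask);	/* or on a set of cpus */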
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 861f690aa791..5276fe0916fc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -25,6 +25,7 @@
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
+#include <linux/swait.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -218,7 +219,7 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
unsigned char fpu_counter;
- wait_queue_head_t wq;
+ struct swait_queue_head wq;
struct pid *pid;
int sigset_active;
sigset_t sigset;
@@ -782,7 +783,7 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
}
#endif
-static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
return vcpu->arch.wqp;
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index e23121f9d82a..59ccab297ae0 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -37,6 +37,9 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
void clear_all_latency_tracing(struct task_struct *p);
+extern int sysctl_latencytop(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
#else
static inline void
diff --git a/include/linux/leds.h b/include/linux/leds.h
index bc1476fda96e..f203a8f89d30 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -39,6 +39,7 @@ struct led_classdev {
/* Lower 16 bits reflect status */
#define LED_SUSPENDED (1 << 0)
+#define LED_UNREGISTERING (1 << 1)
/* Upper 16 bits reflect control information */
#define LED_CORE_SUSPENDRESUME (1 << 16)
#define LED_BLINK_ONESHOT (1 << 17)
@@ -48,9 +49,12 @@ struct led_classdev {
#define LED_BLINK_DISABLE (1 << 21)
#define LED_SYSFS_DISABLE (1 << 22)
#define LED_DEV_CAP_FLASH (1 << 23)
+#define LED_HW_PLUGGABLE (1 << 24)
- /* Set LED brightness level */
- /* Must not sleep, use a workqueue if needed */
+ /* Set LED brightness level
+ * Must not sleep. Use brightness_set_blocking for drivers
+ * that can sleep while setting brightness.
+ */
void (*brightness_set)(struct led_classdev *led_cdev,
enum led_brightness brightness);
/*
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4dca42fd32f5..d026b190c530 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -261,7 +261,6 @@ struct held_lock {
/*
* Initialization, self-test and debugging-output methods:
*/
-extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
@@ -392,7 +391,6 @@ static inline void lockdep_on(void)
# define lockdep_set_current_reclaim_state(g) do { } while (0)
# define lockdep_clear_current_reclaim_state() do { } while (0)
# define lockdep_trace_alloc(g) do { } while (0)
-# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 792c8981e633..f0c4bec6565b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -28,6 +28,7 @@
#include <linux/eventfd.h>
#include <linux/mmzone.h>
#include <linux/writeback.h>
+#include <linux/page-flags.h>
struct mem_cgroup;
struct page;
@@ -89,6 +90,10 @@ enum mem_cgroup_events_target {
};
#ifdef CONFIG_MEMCG
+
+#define MEM_CGROUP_ID_SHIFT 16
+#define MEM_CGROUP_ID_MAX USHRT_MAX
+
struct mem_cgroup_stat_cpu {
long count[MEMCG_NR_STAT];
unsigned long events[MEMCG_NR_EVENTS];
@@ -265,6 +270,11 @@ struct mem_cgroup {
extern struct mem_cgroup *root_mem_cgroup;
+static inline bool mem_cgroup_disabled(void)
+{
+ return !cgroup_subsys_enabled(memory_cgrp_subsys);
+}
+
/**
* mem_cgroup_events - count memory events against a cgroup
* @memcg: the memory cgroup
@@ -291,7 +301,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
-void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage);
+void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
@@ -312,6 +322,28 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_disabled())
+ return 0;
+
+ return memcg->css.id;
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from an id
+ * @id: the id to look up
+ *
+ * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
+ */
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ struct cgroup_subsys_state *css;
+
+ css = css_from_id(id, &memory_cgrp_subsys);
+ return mem_cgroup_from_css(css);
+}
+
/**
* parent_mem_cgroup - find the accounting parent of a memcg
* @memcg: memcg whose parent to find
@@ -353,11 +385,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);
-static inline bool mem_cgroup_disabled(void)
-{
- return !cgroup_subsys_enabled(memory_cgrp_subsys);
-}
-
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
if (mem_cgroup_disabled())
@@ -429,36 +456,43 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
+void lock_page_memcg(struct page *page);
+void unlock_page_memcg(struct page *page);
/**
* mem_cgroup_update_page_stat - update page state statistics
- * @memcg: memcg to account against
+ * @page: the page
* @idx: page state item to account
* @val: number of pages (positive or negative)
*
- * See mem_cgroup_begin_page_stat() for locking requirements.
+ * The @page must be locked or the caller must use lock_page_memcg()
+ * to prevent double accounting when the page is concurrently being
+ * moved to another memcg:
+ *
+ * lock_page(page) or lock_page_memcg(page)
+ * if (TestClearPageState(page))
+ * mem_cgroup_update_page_stat(page, state, -1);
+ * unlock_page(page) or unlock_page_memcg(page)
*/
-static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx, int val)
{
- VM_BUG_ON(!rcu_read_lock_held());
+ VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
- if (memcg)
- this_cpu_add(memcg->stat->count[idx], val);
+ if (page->mem_cgroup)
+ this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
- mem_cgroup_update_page_stat(memcg, idx, 1);
+ mem_cgroup_update_page_stat(page, idx, 1);
}
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
- mem_cgroup_update_page_stat(memcg, idx, -1);
+ mem_cgroup_update_page_stat(page, idx, -1);
}
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -496,8 +530,17 @@ void mem_cgroup_split_huge_fixup(struct page *head);
#endif
#else /* CONFIG_MEMCG */
+
+#define MEM_CGROUP_ID_SHIFT 0
+#define MEM_CGROUP_ID_MAX 0
+
struct mem_cgroup;
+static inline bool mem_cgroup_disabled(void)
+{
+ return true;
+}
+
static inline void mem_cgroup_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx,
unsigned int nr)
@@ -539,7 +582,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
-static inline void mem_cgroup_replace_page(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}
@@ -580,9 +623,16 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
{
}
-static inline bool mem_cgroup_disabled(void)
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
- return true;
+ return 0;
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ WARN_ON_ONCE(id);
+ /* XXX: This should always return root_mem_cgroup */
+ return NULL;
}
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
@@ -613,12 +663,11 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
+static inline void lock_page_memcg(struct page *page)
{
- return NULL;
}
-static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
+static inline void unlock_page_memcg(struct page *page)
{
}
@@ -644,12 +693,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
}
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
}
@@ -765,7 +814,7 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
/*
- * helper for acessing a memcg's index. It will be used as an index in the
+ * helper for accessing a memcg's index. It will be used as an index in the
* child cache array in kmem_cache, and also to derive its name. This function
* will return -1 when this is not a kmem-limited memcg.
*/
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 8b8d8d12348e..82730adba950 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -109,6 +109,9 @@ extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
extern int register_new_memory(int, struct mem_section *);
+extern int memory_block_change_state(struct memory_block *mem,
+ unsigned long to_state,
+ unsigned long from_state_req);
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int unregister_memory_section(struct mem_section *);
#endif
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 43405992d027..adbef586e696 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -99,6 +99,8 @@ extern void __online_page_free(struct page *page);
extern int try_online_node(int nid);
+extern bool memhp_auto_online;
+
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size);
@@ -196,6 +198,9 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
+extern void set_zone_contiguous(struct zone *zone);
+extern void clear_zone_contiguous(struct zone *zone);
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Stub functions for when hotplug is off
@@ -267,7 +272,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
-extern int add_memory_resource(int nid, struct resource *resource);
+extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
bool for_device);
extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
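The extra add_memory_resource() argument selects whether the newly added
memory is onlined immediately; a sketch of a hotplug-path caller using the
memhp_auto_online policy flag exported above:

	ret = add_memory_resource(nid, res, memhp_auto_online);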
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index f5043490d67c..643dae777b43 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -437,14 +437,11 @@ enum max77686_irq {
struct max77686_dev {
struct device *dev;
struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */
- struct i2c_client *rtc; /* slave addr 0x0c */
unsigned long type;
struct regmap *regmap; /* regmap for mfd */
- struct regmap *rtc_regmap; /* regmap for rtc */
struct regmap_irq_chip_data *irq_data;
- struct regmap_irq_chip_data *rtc_irq_data;
int irq;
struct mutex irqlock;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index cac1c0904d5f..9b50325e4ddf 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -23,9 +23,13 @@ enum migrate_reason {
MR_SYSCALL, /* also applies to cpusets */
MR_MEMPOLICY_MBIND,
MR_NUMA_MISPLACED,
- MR_CMA
+ MR_CMA,
+ MR_TYPES
};
+/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
+extern char *migrate_reason_names[MR_TYPES];
+
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 516e14944339..dbf1eddab964 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -387,7 +387,8 @@ enum {
REGION_MIXED,
};
-int region_intersects(resource_size_t offset, size_t size, const char *type);
+int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
+ unsigned long desc);
/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
@@ -904,20 +905,11 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
{
return page->mem_cgroup;
}
-
-static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
-{
- page->mem_cgroup = memcg;
-}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
return NULL;
}
-
-static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
-{
-}
#endif
/*
@@ -1299,10 +1291,9 @@ int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping,
- struct mem_cgroup *memcg);
+void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
- struct mem_cgroup *memcg, struct bdi_writeback *wb);
+ struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void cancel_dirty_page(struct page *page);
@@ -2138,6 +2129,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
@@ -2175,6 +2168,17 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
+#ifdef CONFIG_PAGE_POISONING
+extern bool page_poisoning_enabled(void);
+extern void kernel_poison_pages(struct page *page, int numpages, int enable);
+extern bool page_is_poisoned(struct page *page);
+#else
+static inline bool page_poisoning_enabled(void) { return false; }
+static inline void kernel_poison_pages(struct page *page, int numpages,
+ int enable) { }
+static inline bool page_is_poisoned(struct page *page) { return false; }
+#endif
+
#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
@@ -2194,14 +2198,18 @@ kernel_map_pages(struct page *page, int numpages, int enable)
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
-#endif /* CONFIG_HIBERNATION */
-#else
+#endif /* CONFIG_HIBERNATION */
+#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
-#endif /* CONFIG_HIBERNATION */
-#endif
+#endif /* CONFIG_HIBERNATION */
+static inline bool debug_pagealloc_enabled(void)
+{
+ return false;
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
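A sketch of a fault handler using the new vm_insert_pfn_prot() to override
the VMA's default protection for one mapping; addr and pfn are assumed to
come from the handler's context:

	ret = vm_insert_pfn_prot(vma, addr, pfn,
				 pgprot_noncached(vma->vm_page_prot));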
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 624b78b848b8..944b2b37313b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -566,10 +566,26 @@ static inline void clear_tlb_flush_pending(struct mm_struct *mm)
}
#endif
-struct vm_special_mapping
-{
- const char *name;
+struct vm_fault;
+
+struct vm_special_mapping {
+ const char *name; /* The name, e.g. "[vdso]". */
+
+ /*
+ * If .fault is not provided, this points to a
+ * NULL-terminated array of pages that back the special mapping.
+ *
+ * This must not be NULL unless .fault is provided.
+ */
struct page **pages;
+
+ /*
+ * If non-NULL, then this is called to resolve page faults
+ * on the special mapping. If used, .pages is not checked.
+ */
+ int (*fault)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf);
};
enum tlb_flush_reason {
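A sketch of a special mapping that resolves faults through the new .fault
hook instead of a .pages array; the names mirror a vdso-style user but are
illustrative:

	static int special_fault(const struct vm_special_mapping *sm,
				 struct vm_area_struct *vma,
				 struct vm_fault *vmf)
	{
		/* look up and insert the page for vmf->pgoff here */
		return VM_FAULT_SIGBUS;
	}

	static const struct vm_special_mapping special_mapping = {
		.name	= "[mydrv]",
		.fault	= special_fault,
	};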
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 053824b0a412..de7be78c6f0e 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -9,8 +9,7 @@ struct vm_area_struct;
struct mm_struct;
extern void dump_page(struct page *page, const char *reason);
-extern void dump_page_badflags(struct page *page, const char *reason,
- unsigned long badflags);
+extern void __dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7b6c2cfee390..6de02ac378a0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,6 +63,9 @@ enum {
MIGRATE_TYPES
};
+/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
+extern char * const migratetype_names[MIGRATE_TYPES];
+
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
@@ -209,10 +212,12 @@ struct zone_reclaim_stat {
};
struct lruvec {
- struct list_head lists[NR_LRU_LISTS];
- struct zone_reclaim_stat reclaim_stat;
+ struct list_head lists[NR_LRU_LISTS];
+ struct zone_reclaim_stat reclaim_stat;
+ /* Evictions & activations on the inactive file list */
+ atomic_long_t inactive_age;
#ifdef CONFIG_MEMCG
- struct zone *zone;
+ struct zone *zone;
#endif
};
@@ -487,9 +492,6 @@ struct zone {
spinlock_t lru_lock;
struct lruvec lruvec;
- /* Evictions & activations on the inactive file list */
- atomic_long_t inactive_age;
-
/*
* When free pages are below this point, additional steps are taken
* when reading the number of free pages to avoid per-cpu counter
@@ -520,6 +522,8 @@ struct zone {
bool compact_blockskip_flush;
#endif
+ bool contiguous;
+
ZONE_PADDING(_pad3_)
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
@@ -758,6 +762,8 @@ static inline struct zone *lruvec_zone(struct lruvec *lruvec)
#endif
}
+extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);
+
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index d14a4c362465..4149868de4e6 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -47,6 +47,8 @@
* runtime initialization.
*/
+struct notifier_block;
+
typedef int (*notifier_fn_t)(struct notifier_block *nb,
unsigned long action, void *data);
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 17f118a82854..e1fe7cf5bddf 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -45,6 +45,7 @@ struct page_ext {
unsigned int order;
gfp_t gfp_mask;
unsigned int nr_entries;
+ int last_migrate_reason;
unsigned long trace_entries[8];
#endif
};
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index cacaabea8a09..46f1b939948c 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -1,38 +1,54 @@
#ifndef __LINUX_PAGE_OWNER_H
#define __LINUX_PAGE_OWNER_H
+#include <linux/jump_label.h>
+
#ifdef CONFIG_PAGE_OWNER
-extern bool page_owner_inited;
+extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned int order);
extern void __set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask);
extern gfp_t __get_page_owner_gfp(struct page *page);
+extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+extern void __dump_page_owner(struct page *page);
static inline void reset_page_owner(struct page *page, unsigned int order)
{
- if (likely(!page_owner_inited))
- return;
-
- __reset_page_owner(page, order);
+ if (static_branch_unlikely(&page_owner_inited))
+ __reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
- if (likely(!page_owner_inited))
- return;
-
- __set_page_owner(page, order, gfp_mask);
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner(page, order, gfp_mask);
}
static inline gfp_t get_page_owner_gfp(struct page *page)
{
- if (likely(!page_owner_inited))
+ if (static_branch_unlikely(&page_owner_inited))
+ return __get_page_owner_gfp(page);
+ else
return 0;
-
- return __get_page_owner_gfp(page);
+}
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __copy_page_owner(oldpage, newpage);
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner_migrate_reason(page, reason);
+}
+static inline void dump_page_owner(struct page *page)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __dump_page_owner(page);
}
#else
static inline void reset_page_owner(struct page *page, unsigned int order)
@@ -46,6 +62,14 @@ static inline gfp_t get_page_owner_gfp(struct page *page)
{
return 0;
}
-
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+}
+static inline void dump_page_owner(struct page *page)
+{
+}
#endif /* CONFIG_PAGE_OWNER */
#endif /* __LINUX_PAGE_OWNER_H */
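With page_owner_inited now a static key, the wrappers compile to a patched
no-op branch when page_owner is disabled; a sketch of the new migrate-reason
hook as a migration path might use it (MR_NUMA_MISPLACED is one of the enum
values from migrate.h above):

	/* remembered by page_owner and reported later by dump_page_owner() */
	set_page_owner_migrate_reason(newpage, MR_NUMA_MISPLACED);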
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 92395a0a7dc5..183b15ea052b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -663,8 +663,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow,
- struct mem_cgroup *memcg);
+extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f5c5a3fa2c81..79ec7bbf0155 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -468,6 +468,7 @@ struct perf_event {
int group_flags;
struct perf_event *group_leader;
struct pmu *pmu;
+ void *pmu_private;
enum perf_event_active_state state;
unsigned int attach_state;
@@ -1109,12 +1110,6 @@ static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
-extern bool perf_event_can_stop_tick(void);
-#else
-static inline bool perf_event_can_stop_tick(void) { return true; }
-#endif
-
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index aed170588b74..698d0d59db76 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -28,6 +28,7 @@ enum ntc_thermistor_type {
TYPE_NCPXXWL333,
TYPE_B57330V2103,
TYPE_NCPXXWF104,
+ TYPE_NCPXXXH103,
};
struct ntc_thermistor_platform_data {
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index db21d3995f7e..49cd8890b873 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -19,6 +19,8 @@
/* Defines used for the flags field in the struct generic_pm_domain */
#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
+#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */
+
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
GPD_STATE_POWER_OFF, /* PM domain is off */
@@ -37,6 +39,11 @@ struct gpd_dev_ops {
bool (*active_wakeup)(struct device *dev);
};
+struct genpd_power_state {
+ s64 power_off_latency_ns;
+ s64 power_on_latency_ns;
+};
+
struct generic_pm_domain {
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
@@ -54,9 +61,7 @@ struct generic_pm_domain {
unsigned int prepared_count; /* Suspend counter of prepared devices */
bool suspend_power_off; /* Power status before system suspend */
int (*power_off)(struct generic_pm_domain *domain);
- s64 power_off_latency_ns;
int (*power_on)(struct generic_pm_domain *domain);
- s64 power_on_latency_ns;
struct gpd_dev_ops dev_ops;
s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
bool max_off_time_changed;
@@ -66,6 +71,10 @@ struct generic_pm_domain {
void (*detach_dev)(struct generic_pm_domain *domain,
struct device *dev);
unsigned int flags; /* Bit field of configs for genpd */
+ struct genpd_power_state states[GENPD_MAX_NUM_STATES];
+ unsigned int state_count; /* number of states */
+ unsigned int state_idx; /* state that genpd will go to when off */
+
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 95403d2ccaf5..cccaf4a29e9f 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -34,6 +34,8 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
int dev_pm_opp_get_opp_count(struct device *dev);
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -60,6 +62,9 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
void dev_pm_opp_put_supported_hw(struct device *dev);
int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
void dev_pm_opp_put_prop_name(struct device *dev);
+int dev_pm_opp_set_regulator(struct device *dev, const char *name);
+void dev_pm_opp_put_regulator(struct device *dev);
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
#else
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
@@ -86,6 +91,16 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
return 0;
}
+static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+ return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+ return 0;
+}
+
static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
return NULL;
@@ -151,6 +166,18 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
+static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+ return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+
+static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 4a27153574e2..51334edec506 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -30,7 +30,11 @@
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
/********** mm/debug-pagealloc.c **********/
+#ifdef CONFIG_PAGE_POISONING_ZERO
+#define PAGE_POISON 0x00
+#else
#define PAGE_POISON 0xaa
+#endif
/********** mm/page_alloc.c ************/
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 907f3fd191ac..62d44c176071 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -128,9 +128,6 @@ void posix_cpu_timer_schedule(struct k_itimer *timer);
void run_posix_cpu_timers(struct task_struct *task);
void posix_cpu_timers_exit(struct task_struct *task);
void posix_cpu_timers_exit_group(struct task_struct *task);
-
-bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk);
-
void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
cputime_t *newval, cputime_t *oldval);
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 54bf1484d41f..35ac903956c7 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -111,22 +111,17 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
kt->nsec = ts.tv_nsec;
}
-#ifdef CONFIG_NTP_PPS
-
static inline void pps_get_ts(struct pps_event_time *ts)
{
- ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real);
-}
+ struct system_time_snapshot snap;
-#else /* CONFIG_NTP_PPS */
-
-static inline void pps_get_ts(struct pps_event_time *ts)
-{
- ktime_get_real_ts64(&ts->ts_real);
+ ktime_get_snapshot(&snap);
+ ts->ts_real = ktime_to_timespec64(snap.real);
+#ifdef CONFIG_NTP_PPS
+ ts->ts_raw = ktime_to_timespec64(snap.raw);
+#endif
}
-#endif /* CONFIG_NTP_PPS */
-
/* Subtract known time delay from PPS event time(s) */
static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
{
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index b8b73066d137..6b15e168148a 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -38,6 +38,7 @@ struct ptp_clock_request {
};
};
+struct system_device_crosststamp;
/**
 * struct ptp_clock_info - describes a PTP hardware clock
*
@@ -67,6 +68,11 @@ struct ptp_clock_request {
* @gettime64: Reads the current time from the hardware clock.
* parameter ts: Holds the result.
*
+ * @getcrosststamp: Reads the current time from the hardware clock and
+ * system clock simultaneously.
+ * parameter cts: Contains timestamp (device,system) pair,
+ * where system time is realtime and monotonic.
+ *
* @settime64: Set the current time on the hardware clock.
* parameter ts: Time value to set.
*
@@ -105,6 +111,8 @@ struct ptp_clock_info {
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*getcrosststamp)(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts);
int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
int (*enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index c2f2574ff61c..2a097d176ba9 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@ enum pxa_ssp_type {
QUARK_X1000_SSP,
LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
LPSS_BYT_SSP,
+ LPSS_BSW_SSP,
LPSS_SPT_SSP,
LPSS_BXT_SSP,
};
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 14e6f47ee16f..2657aff2725b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -332,9 +332,7 @@ void rcu_init(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
-struct notifier_block;
-int rcu_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu);
+void rcu_report_dead(unsigned int cpu);
#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
@@ -360,8 +358,6 @@ void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
-static inline void rcu_user_hooks_switch(struct task_struct *prev,
- struct task_struct *next) { }
#endif /* CONFIG_NO_HZ_FULL */
#ifdef CONFIG_RCU_NOCB_CPU
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 18394343f489..3dc08ce15426 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -65,6 +65,36 @@ struct reg_sequence {
unsigned int delay_us;
};
+#define regmap_update_bits(map, reg, mask, val) \
+ regmap_update_bits_base(map, reg, mask, val, NULL, false, false)
+#define regmap_update_bits_async(map, reg, mask, val)\
+ regmap_update_bits_base(map, reg, mask, val, NULL, true, false)
+#define regmap_update_bits_check(map, reg, mask, val, change)\
+ regmap_update_bits_base(map, reg, mask, val, change, false, false)
+#define regmap_update_bits_check_async(map, reg, mask, val, change)\
+ regmap_update_bits_base(map, reg, mask, val, change, true, false)
+
+#define regmap_write_bits(map, reg, mask, val) \
+ regmap_update_bits_base(map, reg, mask, val, NULL, false, true)
+
+#define regmap_field_write(field, val) \
+ regmap_field_update_bits_base(field, ~0, val, NULL, false, false)
+#define regmap_field_force_write(field, val) \
+ regmap_field_update_bits_base(field, ~0, val, NULL, false, true)
+#define regmap_field_update_bits(field, mask, val)\
+ regmap_field_update_bits_base(field, mask, val, NULL, false, false)
+#define regmap_field_force_update_bits(field, mask, val) \
+ regmap_field_update_bits_base(field, mask, val, NULL, false, true)
+
+#define regmap_fields_write(field, id, val) \
+ regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false)
+#define regmap_fields_force_write(field, id, val) \
+ regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true)
+#define regmap_fields_update_bits(field, id, mask, val)\
+ regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false)
+#define regmap_fields_force_update_bits(field, id, mask, val) \
+ regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true)
+
#ifdef CONFIG_REGMAP
enum regmap_endian {
@@ -162,7 +192,7 @@ typedef void (*regmap_unlock)(void *);
 * This field is a duplicate of a similar field in
 * 'struct regmap_bus' and serves the exact same purpose.
* Use it only for "no-bus" cases.
- * @max_register: Optional, specifies the maximum valid register index.
+ * @max_register: Optional, specifies the maximum valid register address.
* @wr_table: Optional, points to a struct regmap_access_table specifying
* valid ranges for write access.
* @rd_table: As above, for read access.
@@ -691,18 +721,9 @@ int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
-int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val);
-int regmap_write_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val);
-int regmap_update_bits_async(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val);
-int regmap_update_bits_check(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change);
-int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change);
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
int regmap_get_val_bytes(struct regmap *map);
int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
@@ -770,18 +791,14 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
int regmap_field_read(struct regmap_field *field, unsigned int *val);
-int regmap_field_write(struct regmap_field *field, unsigned int val);
-int regmap_field_update_bits(struct regmap_field *field,
- unsigned int mask, unsigned int val);
-
-int regmap_fields_write(struct regmap_field *field, unsigned int id,
- unsigned int val);
-int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
- unsigned int val);
+int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
int regmap_fields_read(struct regmap_field *field, unsigned int id,
unsigned int *val);
-int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
- unsigned int mask, unsigned int val);
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
/**
* Description of an IRQ for the generic regmap irq_chip.
@@ -868,6 +885,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data);
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
+
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+ struct regmap_irq_chip_data *data);
+
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data);
@@ -937,42 +962,26 @@ static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
return -EINVAL;
}
-static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- WARN_ONCE(1, "regmap API is disabled");
- return -EINVAL;
-}
-
-static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- WARN_ONCE(1, "regmap API is disabled");
- return -EINVAL;
-}
-
-static inline int regmap_update_bits_async(struct regmap *map,
- unsigned int reg,
- unsigned int mask, unsigned int val)
+static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
WARN_ONCE(1, "regmap API is disabled");
return -EINVAL;
}
-static inline int regmap_update_bits_check(struct regmap *map,
- unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change)
+static inline int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
WARN_ONCE(1, "regmap API is disabled");
return -EINVAL;
}
-static inline int regmap_update_bits_check_async(struct regmap *map,
- unsigned int reg,
- unsigned int mask,
- unsigned int val,
- bool *change)
+static inline int regmap_fields_update_bits_base(struct regmap_field *field,
+ unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
WARN_ONCE(1, "regmap API is disabled");
return -EINVAL;
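All of the removed bit-update entry points collapse into
regmap_update_bits_base(); a sketch showing the base call and its macro
equivalent, where the register and mask names are illustrative:

	bool changed;

	/* same effect as the former regmap_update_bits_check() */
	ret = regmap_update_bits_base(map, MYDEV_REG_CTRL, MYDEV_CTRL_EN_MASK,
				      MYDEV_CTRL_EN, &changed, false, false);

	/* or via the compatibility macro defined at the top of the header */
	ret = regmap_update_bits_check(map, MYDEV_REG_CTRL, MYDEV_CTRL_EN_MASK,
				       MYDEV_CTRL_EN, &changed);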
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
index 15fa8f2d35c9..2eb386017fa5 100644
--- a/include/linux/regulator/act8865.h
+++ b/include/linux/regulator/act8865.h
@@ -68,12 +68,12 @@ enum {
* act8865_regulator_data - regulator data
* @id: regulator id
* @name: regulator name
- * @platform_data: regulator init data
+ * @init_data: regulator init data
*/
struct act8865_regulator_data {
int id;
const char *name;
- struct regulator_init_data *platform_data;
+ struct regulator_init_data *init_data;
};
/**
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 16ac9e108806..cd271e89a7e6 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -93,6 +93,8 @@ struct regulator_linear_range {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
+ * @set_active_discharge: Set active discharge enable/disable of regulators.
+ *
* @set_mode: Set the configured operating mode for the regulator.
* @get_mode: Get the configured operating mode for the regulator.
* @get_status: Return actual (not as-configured) status of regulator, as a
@@ -149,6 +151,7 @@ struct regulator_ops {
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
int (*set_over_current_protection) (struct regulator_dev *);
+ int (*set_active_discharge) (struct regulator_dev *, bool enable);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
@@ -266,6 +269,14 @@ enum regulator_type {
* @bypass_mask: Mask for control when using regmap set_bypass
* @bypass_val_on: Enabling value for control when using regmap set_bypass
* @bypass_val_off: Disabling value for control when using regmap set_bypass
+ * @active_discharge_off: Disabling value for control when using regmap
+ * set_active_discharge
+ * @active_discharge_on: Enabling value for control when using regmap
+ * set_active_discharge
+ * @active_discharge_mask: Mask for control when using regmap
+ * set_active_discharge
+ * @active_discharge_reg: Register for control when using regmap
+ * set_active_discharge
*
* @enable_time: Time taken for initial enable of regulator (in uS).
* @off_on_delay: guard time (in uS), before re-enabling a regulator
@@ -315,6 +326,10 @@ struct regulator_desc {
unsigned int bypass_mask;
unsigned int bypass_val_on;
unsigned int bypass_val_off;
+ unsigned int active_discharge_on;
+ unsigned int active_discharge_off;
+ unsigned int active_discharge_mask;
+ unsigned int active_discharge_reg;
unsigned int enable_time;
@@ -447,6 +462,8 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable);
int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable);
+int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
+ bool enable);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
#endif
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
index 132e05c46661..6029279f4eed 100644
--- a/include/linux/regulator/lp872x.h
+++ b/include/linux/regulator/lp872x.h
@@ -18,6 +18,9 @@
#define LP872X_MAX_REGULATORS 9
+#define LP8720_ENABLE_DELAY 200
+#define LP8725_ENABLE_DELAY 30000
+
enum lp872x_regulator_id {
LP8720_ID_BASE,
LP8720_ID_LDO1 = LP8720_ID_BASE,
@@ -79,12 +82,14 @@ struct lp872x_regulator_data {
* @update_config : if LP872X_GENERAL_CFG register is updated, set true
* @regulator_data : platform regulator id and init data
* @dvs : dvs data for buck voltage control
+ * @enable_gpio : gpio pin number for enable control
*/
struct lp872x_platform_data {
u8 general_config;
bool update_config;
struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
struct lp872x_dvs *dvs;
+ int enable_gpio;
};
#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index a1067d0b3991..5d627c83a630 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -42,6 +42,13 @@ struct regulator;
#define REGULATOR_CHANGE_DRMS 0x10
#define REGULATOR_CHANGE_BYPASS 0x20
+/* Regulator active discharge flags */
+enum regulator_active_discharge {
+ REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
+ REGULATOR_ACTIVE_DISCHARGE_DISABLE,
+ REGULATOR_ACTIVE_DISCHARGE_ENABLE,
+};
+
/**
* struct regulator_state - regulator state during low power system states
*
@@ -100,6 +107,9 @@ struct regulator_state {
* @initial_state: Suspend state to set by default.
* @initial_mode: Mode to set at startup.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @active_discharge: Enable/disable active discharge. The enum
+ * regulator_active_discharge values are used for
+ * initialisation.
* @enable_time: Turn-on time of the rails (unit: microseconds)
*/
struct regulation_constraints {
@@ -140,6 +150,8 @@ struct regulation_constraints {
unsigned int ramp_delay;
unsigned int enable_time;
+ unsigned int active_discharge;
+
/* constraint flags */
unsigned always_on:1; /* regulator never off when system is on */
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
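
On the machine side, the new constraint is a plain field in regulation_constraints; a minimal sketch with illustrative values (leaving the field at REGULATOR_ACTIVE_DISCHARGE_DEFAULT keeps the hardware setting untouched):

static struct regulator_init_data my_ldo_init_data = {
	.constraints = {
		.name			= "my-ldo",
		.min_uV			= 1800000,
		.max_uV			= 1800000,
		.active_discharge	= REGULATOR_ACTIVE_DISCHARGE_ENABLE,
	},
};
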
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 3359f0422c6b..b693adac853b 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -89,6 +89,8 @@ struct rtc_class_ops {
int (*set_mmss)(struct device *, unsigned long secs);
int (*read_callback)(struct device *, int data);
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
+ int (*read_offset)(struct device *, long *offset);
+ int (*set_offset)(struct device *, long offset);
};
#define RTC_DEVICE_NAME_SIZE 20
@@ -208,6 +210,8 @@ void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
ktime_t expires, ktime_t period);
void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
+int rtc_read_offset(struct rtc_device *rtc, long *offset);
+int rtc_set_offset(struct rtc_device *rtc, long offset);
void rtc_timer_do_work(struct work_struct *work);
static inline bool is_leap_year(unsigned int year)
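
A driver whose hardware supports clock trimming hooks the two new callbacks; the interpretation of the offset value is left to the implementing driver. A hedged sketch with hypothetical helpers:

static int my_rtc_read_offset(struct device *dev, long *offset)
{
	struct my_rtc *rtc = dev_get_drvdata(dev);	/* hypothetical driver state */

	*offset = rtc->trim;			/* report the programmed correction */
	return 0;
}

static int my_rtc_set_offset(struct device *dev, long offset)
{
	struct my_rtc *rtc = dev_get_drvdata(dev);

	rtc->trim = offset;
	return my_rtc_write_trim(rtc);		/* hypothetical register write */
}

static const struct rtc_class_ops my_rtc_ops = {
	.read_time	= my_rtc_read_time,	/* usual callbacks, not shown */
	.set_time	= my_rtc_set_time,
	.read_offset	= my_rtc_read_offset,
	.set_offset	= my_rtc_set_offset,
};

Core or sysfs code can then call rtc_read_offset()/rtc_set_offset() on the registered struct rtc_device.
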
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a10494a94cc3..eb7f2f84009b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -182,8 +182,6 @@ extern void update_cpu_load_nohz(int active);
static inline void update_cpu_load_nohz(int active) { }
#endif
-extern unsigned long get_parent_ip(unsigned long addr);
-
extern void dump_cpu_task(int cpu);
struct seq_file;
@@ -719,6 +717,10 @@ struct signal_struct {
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
+#ifdef CONFIG_NO_HZ_FULL
+ unsigned long tick_dep_mask;
+#endif
+
struct list_head cpu_timers[3];
struct pid *tty_old_pgrp;
@@ -920,6 +922,10 @@ static inline int sched_info_on(void)
#endif
}
+#ifdef CONFIG_SCHEDSTATS
+void force_schedstat_enabled(void);
+#endif
+
enum cpu_idle_type {
CPU_IDLE,
CPU_NOT_IDLE,
@@ -1289,6 +1295,8 @@ struct sched_rt_entity {
unsigned long timeout;
unsigned long watchdog_stamp;
unsigned int time_slice;
+ unsigned short on_rq;
+ unsigned short on_list;
struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
@@ -1329,10 +1337,6 @@ struct sched_dl_entity {
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
- * @dl_new tells if a new instance arrived. If so we must
- * start executing it with full runtime and reset its absolute
- * deadline;
- *
* @dl_boosted tells if we are boosted due to DI. If so we are
* outside bandwidth enforcement mechanism (but only until we
* exit the critical section);
@@ -1340,7 +1344,7 @@ struct sched_dl_entity {
* @dl_yielded tells if task gave up the cpu before consuming
* all its available runtime during the last job.
*/
- int dl_throttled, dl_new, dl_boosted, dl_yielded;
+ int dl_throttled, dl_boosted, dl_yielded;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -1542,6 +1546,10 @@ struct task_struct {
VTIME_SYS,
} vtime_snap_whence;
#endif
+
+#ifdef CONFIG_NO_HZ_FULL
+ unsigned long tick_dep_mask;
+#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
u64 start_time; /* monotonic time in nsec */
u64 real_start_time; /* boot based time in nsec */
@@ -2356,10 +2364,7 @@ static inline void wake_up_nohz_cpu(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_FULL
-extern bool sched_can_stop_tick(void);
extern u64 scheduler_tick_max_deferment(void);
-#else
-static inline bool sched_can_stop_tick(void) { return false; }
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
@@ -3207,4 +3212,13 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
+#ifdef CONFIG_CPU_FREQ
+struct update_util_data {
+ void (*func)(struct update_util_data *data,
+ u64 time, unsigned long util, unsigned long max);
+};
+
+void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
+#endif /* CONFIG_CPU_FREQ */
+
#endif
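
The utilization hook is registered per CPU and invoked from scheduler context, so the callback must not sleep; after clearing the hook, an RCU-sched grace period should pass before the data is freed. A hedged governor-style sketch:

#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>

struct my_gov_cpu {
	struct update_util_data update_util;	/* must stay valid while registered */
};

static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_cpu_data);

static void my_gov_update(struct update_util_data *data, u64 time,
			  unsigned long util, unsigned long max)
{
	struct my_gov_cpu *cg = container_of(data, struct my_gov_cpu, update_util);

	/* A real governor would compare util/max here and kick a frequency change. */
	(void)cg;
}

static void my_gov_start(int cpu)
{
	struct my_gov_cpu *cg = &per_cpu(my_gov_cpu_data, cpu);

	cg->update_util.func = my_gov_update;
	cpufreq_set_update_util_data(cpu, &cg->update_util);
}

static void my_gov_stop(int cpu)
{
	cpufreq_set_update_util_data(cpu, NULL);
	synchronize_sched();	/* ensure no callback is still running */
}
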
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index c9e4731cf10b..4f080ab4f2cd 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -95,4 +95,8 @@ extern int sysctl_numa_balancing(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+extern int sysctl_schedstats(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
#endif /* _SCHED_SYSCTL_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3627d5c1bc47..e4b568738ca3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,7 +20,7 @@
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
-#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
+#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
@@ -314,7 +314,7 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment
void kmem_cache_free(struct kmem_cache *, void *);
/*
- * Bulk allocation and freeing operations. These are accellerated in an
+ * Bulk allocation and freeing operations. These are accelerated in an
* allocator specific way to avoid taking locks repeatedly or building
* metadata structures unnecessarily.
*
@@ -323,6 +323,15 @@ void kmem_cache_free(struct kmem_cache *, void *);
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+/*
+ * Caller must not use kfree_bulk() on memory not originally allocated
+ * by kmalloc(), because the SLOB allocator cannot handle this.
+ */
+static __always_inline void kfree_bulk(size_t size, void **p)
+{
+ kmem_cache_free_bulk(NULL, size, p);
+}
+
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
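
A short sketch of the intended pairing, allocating with kmalloc() and releasing everything in one call (the object count and size are arbitrary):

#include <linux/slab.h>

static int bulk_demo(void)
{
	void *objs[16];
	size_t i;

	for (i = 0; i < ARRAY_SIZE(objs); i++) {
		objs[i] = kmalloc(64, GFP_KERNEL);
		if (!objs[i]) {
			kfree_bulk(i, objs);	/* free what was already allocated */
			return -ENOMEM;
		}
	}

	/* All objects came from kmalloc(), so the NULL-cache bulk free is legal. */
	kfree_bulk(ARRAY_SIZE(objs), objs);
	return 0;
}
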
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cf139d3fa513..e878ba35ae91 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -60,6 +60,9 @@ struct kmem_cache {
atomic_t allocmiss;
atomic_t freehit;
atomic_t freemiss;
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+ atomic_t store_user_clean;
+#endif
/*
* If debugging is enabled, then the allocator can add additional
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index b7e57927f521..ac5143f95ee6 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -81,6 +81,7 @@ struct kmem_cache {
int reserved; /* Reserved bytes at the end of slabs */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
+ int red_left_pad; /* Left redzone padding size */
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 53be3a4c60cb..857a9a1d82b5 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -25,6 +25,7 @@
struct dma_chan;
struct spi_master;
struct spi_transfer;
+struct spi_flash_read_message;
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -53,6 +54,10 @@ extern struct bus_type spi_bus_type;
*
* @transfer_bytes_histo:
 *                 transfer bytes histogram
+ *
+ * @transfers_split_maxsize:
+ * number of transfers that have been split because of
+ * maxsize limit
*/
struct spi_statistics {
spinlock_t lock; /* lock for the whole structure */
@@ -72,6 +77,8 @@ struct spi_statistics {
#define SPI_STATISTICS_HISTO_SIZE 17
unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+
+ unsigned long transfers_split_maxsize;
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
@@ -303,6 +310,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @min_speed_hz: Lowest supported transfer speed
* @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
+ * @max_transfer_size: function that returns the max transfer size for
+ * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for SPI bus locking
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -361,6 +370,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @unprepare_message: undo any work done by prepare_message().
+ * @spi_flash_read: to support spi-controller hardware that provides an
+ *                  accelerated interface to read from flash devices.
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
@@ -369,6 +380,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @dma_rx: DMA receive channel
* @dummy_rx: dummy receive buffer for full-duplex devices
* @dummy_tx: dummy transmit buffer for full-duplex devices
+ * @fw_translate_cs: If the boot firmware uses a different numbering scheme
+ *                   than what Linux expects, this optional hook can be used
+ *                   to translate between the two.
*
* Each SPI master controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -513,6 +527,8 @@ struct spi_master {
struct spi_message *message);
int (*unprepare_message)(struct spi_master *master,
struct spi_message *message);
+ int (*spi_flash_read)(struct spi_device *spi,
+ struct spi_flash_read_message *msg);
/*
* These hooks are for drivers that use a generic implementation
@@ -537,6 +553,8 @@ struct spi_master {
/* dummy data for full duplex devices */
void *dummy_rx;
void *dummy_tx;
+
+ int (*fw_translate_cs)(struct spi_master *master, unsigned cs);
};
static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -582,6 +600,38 @@ extern void spi_unregister_master(struct spi_master *master);
extern struct spi_master *spi_busnum_to_master(u16 busnum);
+/*
+ * SPI resource management while processing a SPI message
+ */
+
+typedef void (*spi_res_release_t)(struct spi_master *master,
+ struct spi_message *msg,
+ void *res);
+
+/**
+ * struct spi_res - spi resource management structure
+ * @entry: list entry
+ * @release: release code called prior to freeing this resource
+ * @data: extra data allocated for the specific use-case
+ *
+ * This is based on ideas from devres, but focused on life-cycle
+ * management during spi_message processing.
+ */
+struct spi_res {
+ struct list_head entry;
+ spi_res_release_t release;
+ unsigned long long data[]; /* guarantee ull alignment */
+};
+
+extern void *spi_res_alloc(struct spi_device *spi,
+ spi_res_release_t release,
+ size_t size, gfp_t gfp);
+extern void spi_res_add(struct spi_message *message, void *res);
+extern void spi_res_free(void *res);
+
+extern void spi_res_release(struct spi_master *master,
+ struct spi_message *message);
+
/*---------------------------------------------------------------------------*/
/*
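
A hedged sketch of the intended life-cycle: helper code allocates a resource tied to a message, and the release callback runs when the core calls spi_res_release() once the message is finished (all names below are illustrative):

struct my_msg_fixup {				/* illustrative per-message state */
	struct spi_transfer *saved;
};

static void my_fixup_release(struct spi_master *master,
			     struct spi_message *msg, void *res)
{
	struct my_msg_fixup *fixup = res;

	/* Undo whatever was done to the message, e.g. restore fixup->saved. */
	(void)fixup;
}

static int my_prepare(struct spi_master *master, struct spi_message *msg)
{
	struct my_msg_fixup *fixup;

	fixup = spi_res_alloc(msg->spi, my_fixup_release,
			      sizeof(*fixup), GFP_KERNEL);
	if (!fixup)
		return -ENOMEM;

	spi_res_add(msg, fixup);	/* freed and released with the message */
	return 0;
}
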
@@ -720,6 +770,7 @@ struct spi_transfer {
* @status: zero for success, else negative errno
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
+ * @resources: for resource management when the spi message is processed
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
@@ -766,11 +817,15 @@ struct spi_message {
*/
struct list_head queue;
void *state;
+
+	/* list of spi_res resources when the spi message is processed */
+ struct list_head resources;
};
static inline void spi_message_init_no_memset(struct spi_message *m)
{
INIT_LIST_HEAD(&m->transfers);
+ INIT_LIST_HEAD(&m->resources);
}
static inline void spi_message_init(struct spi_message *m)
@@ -854,6 +909,60 @@ spi_max_transfer_size(struct spi_device *spi)
/*---------------------------------------------------------------------------*/
+/* SPI transfer replacement methods which make use of spi_res */
+
+struct spi_replaced_transfers;
+typedef void (*spi_replaced_release_t)(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_replaced_transfers *res);
+/**
+ * struct spi_replaced_transfers - structure describing the spi_transfer
+ * replacements that have occurred
+ * so that they can get reverted
+ * @release: some extra release code to get executed prior to
+ *           releasing this structure
+ * @extradata: pointer to some extra data if requested or NULL
+ * @replaced_transfers: transfers that have been replaced and which need
+ * to get restored
+ * @replaced_after: the transfer after which the @replaced_transfers
+ * are to get re-inserted
+ * @inserted: number of transfers inserted
+ * @inserted_transfers: array of spi_transfers of array-size @inserted,
+ * that have been replacing replaced_transfers
+ *
+ * Note: @extradata will point to @inserted_transfers[@inserted]
+ * if some extra allocation is requested, so alignment will be the same
+ * as for spi_transfers
+ */
+struct spi_replaced_transfers {
+ spi_replaced_release_t release;
+ void *extradata;
+ struct list_head replaced_transfers;
+ struct list_head *replaced_after;
+ size_t inserted;
+ struct spi_transfer inserted_transfers[];
+};
+
+extern struct spi_replaced_transfers *spi_replace_transfers(
+ struct spi_message *msg,
+ struct spi_transfer *xfer_first,
+ size_t remove,
+ size_t insert,
+ spi_replaced_release_t release,
+ size_t extradatasize,
+ gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
+/* SPI transfer transformation methods */
+
+extern int spi_split_transfers_maxsize(struct spi_master *master,
+ struct spi_message *msg,
+ size_t maxsize,
+ gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
/* All these synchronous SPI transfer routines are utilities layered
* over the core async transfer primitive. Here, "synchronous" means
* they will sleep uninterruptibly until the async transfer completes.
@@ -1019,6 +1128,42 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
return be16_to_cpu(result);
}
+/**
+ * struct spi_flash_read_message - flash specific information for
+ * spi-masters that provide accelerated flash read interfaces
+ * @buf: buffer to read data
+ * @from: offset within the flash from where data is to be read
+ * @len: length of data to be read
+ * @retlen: actual length of data read
+ * @read_opcode: read_opcode to be used to communicate with flash
+ * @addr_width: number of address bytes
+ * @dummy_bytes: number of dummy bytes
+ * @opcode_nbits: number of lines to send opcode
+ * @addr_nbits: number of lines to send address
+ * @data_nbits: number of lines for data
+ */
+struct spi_flash_read_message {
+ void *buf;
+ loff_t from;
+ size_t len;
+ size_t retlen;
+ u8 read_opcode;
+ u8 addr_width;
+ u8 dummy_bytes;
+ u8 opcode_nbits;
+ u8 addr_nbits;
+ u8 data_nbits;
+};
+
+/* SPI core interface for flash read support */
+static inline bool spi_flash_read_supported(struct spi_device *spi)
+{
+ return spi->master->spi_flash_read ? true : false;
+}
+
+int spi_flash_read(struct spi_device *spi,
+ struct spi_flash_read_message *msg);
+
/*---------------------------------------------------------------------------*/
/*
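
From the consumer side the accelerated path is optional and should be feature-tested first; a hedged sketch (the opcode and address width are illustrative):

static int read_via_controller(struct spi_device *spi, void *buf,
			       loff_t from, size_t len, size_t *retlen)
{
	struct spi_flash_read_message msg = {
		.buf		= buf,
		.from		= from,
		.len		= len,
		.read_opcode	= 0x03,	/* illustrative single-line read opcode */
		.addr_width	= 3,
		.dummy_bytes	= 0,
		.opcode_nbits	= 1,
		.addr_nbits	= 1,
		.data_nbits	= 1,
	};
	int ret;

	if (!spi_flash_read_supported(spi))
		return -EOPNOTSUPP;	/* fall back to regular spi_sync() transfers */

	ret = spi_flash_read(spi, &msg);
	if (!ret)
		*retlen = msg.retlen;
	return ret;
}
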
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index f5f80c5643ac..dc8eb63c6568 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,8 +99,23 @@ void process_srcu(struct work_struct *work);
}
/*
- * define and init a srcu struct at build time.
- * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique. These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function. If these rules are too
+ * restrictive, declare the srcu_struct manually. For example, in
+ * each file:
+ *
+ * static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ * init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
*/
#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
diff --git a/include/linux/swait.h b/include/linux/swait.h
new file mode 100644
index 000000000000..c1f9c62a8a50
--- /dev/null
+++ b/include/linux/swait.h
@@ -0,0 +1,172 @@
+#ifndef _LINUX_SWAIT_H
+#define _LINUX_SWAIT_H
+
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <asm/current.h>
+
+/*
+ * Simple wait queues
+ *
+ * While these are very similar to the other/complex wait queues (wait.h) the
+ * most important difference is that the simple waitqueue allows for
+ * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
+ * times.
+ *
+ * In order to make this so, we had to drop a fair number of features of the
+ * other waitqueue code; notably:
+ *
+ * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
+ * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
+ * sleeper state.
+ *
+ * - the exclusive mode; because this requires preserving the list order
+ * and this is hard.
+ *
+ * - custom wake functions; because you cannot give any guarantees about
+ * random code.
+ *
+ * As a side effect of this; the data structures are slimmer.
+ *
+ * One would recommend using this wait queue where possible.
+ */
+
+struct task_struct;
+
+struct swait_queue_head {
+ raw_spinlock_t lock;
+ struct list_head task_list;
+};
+
+struct swait_queue {
+ struct task_struct *task;
+ struct list_head task_list;
+};
+
+#define __SWAITQUEUE_INITIALIZER(name) { \
+ .task = current, \
+ .task_list = LIST_HEAD_INIT((name).task_list), \
+}
+
+#define DECLARE_SWAITQUEUE(name) \
+ struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)
+
+#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .task_list = LIST_HEAD_INIT((name).task_list), \
+}
+
+#define DECLARE_SWAIT_QUEUE_HEAD(name) \
+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)
+
+extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
+ struct lock_class_key *key);
+
+#define init_swait_queue_head(q) \
+ do { \
+ static struct lock_class_key __key; \
+ __init_swait_queue_head((q), #q, &__key); \
+ } while (0)
+
+#ifdef CONFIG_LOCKDEP
+# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
+ ({ init_swait_queue_head(&name); name; })
+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
+#else
+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
+ DECLARE_SWAIT_QUEUE_HEAD(name)
+#endif
+
+static inline int swait_active(struct swait_queue_head *q)
+{
+ return !list_empty(&q->task_list);
+}
+
+extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_all(struct swait_queue_head *q);
+extern void swake_up_locked(struct swait_queue_head *q);
+
+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
+
+extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
+extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+#define ___swait_event(wq, condition, state, ret, cmd) \
+({ \
+ struct swait_queue __wait; \
+ long __ret = ret; \
+ \
+ INIT_LIST_HEAD(&__wait.task_list); \
+ for (;;) { \
+ long __int = prepare_to_swait_event(&wq, &__wait, state);\
+ \
+ if (condition) \
+ break; \
+ \
+ if (___wait_is_interruptible(state) && __int) { \
+ __ret = __int; \
+ break; \
+ } \
+ \
+ cmd; \
+ } \
+ finish_swait(&wq, &__wait); \
+ __ret; \
+})
+
+#define __swait_event(wq, condition) \
+ (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
+ schedule())
+
+#define swait_event(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __swait_event(wq, condition); \
+} while (0)
+
+#define __swait_event_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_UNINTERRUPTIBLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define swait_event_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_timeout(wq, condition, timeout); \
+ __ret; \
+})
+
+#define __swait_event_interruptible(wq, condition) \
+ ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
+ schedule())
+
+#define swait_event_interruptible(wq, condition) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __ret = __swait_event_interruptible(wq, condition); \
+ __ret; \
+})
+
+#define __swait_event_interruptible_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define swait_event_interruptible_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_interruptible_timeout(wq, \
+ condition, timeout); \
+ __ret; \
+})
+
+#endif /* _LINUX_SWAIT_H */
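
Usage mirrors the ordinary waitqueue API, minus the dropped features; a minimal sketch (real code must still synchronize updates of the condition appropriately):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
static bool my_done;

static int my_waiter(void)
{
	/* Sleeps interruptibly until my_done becomes true. */
	return swait_event_interruptible(my_wq, my_done);
}

static void my_completer(void)
{
	my_done = true;
	swake_up(&my_wq);	/* wakes one waiter; swake_up_all() wakes them all */
}
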
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 97fd4e543846..21f73649a4dc 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -97,6 +97,19 @@ static inline void tick_broadcast_exit(void)
tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}
+enum tick_dep_bits {
+ TICK_DEP_BIT_POSIX_TIMER = 0,
+ TICK_DEP_BIT_PERF_EVENTS = 1,
+ TICK_DEP_BIT_SCHED = 2,
+ TICK_DEP_BIT_CLOCK_UNSTABLE = 3
+};
+
+#define TICK_DEP_MASK_NONE 0
+#define TICK_DEP_MASK_POSIX_TIMER (1 << TICK_DEP_BIT_POSIX_TIMER)
+#define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS)
+#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
+#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
+
#ifdef CONFIG_NO_HZ_COMMON
extern int tick_nohz_enabled;
extern int tick_nohz_tick_stopped(void);
@@ -154,9 +167,73 @@ static inline int housekeeping_any_cpu(void)
return cpumask_any_and(housekeeping_mask, cpu_online_mask);
}
-extern void tick_nohz_full_kick(void);
+extern void tick_nohz_dep_set(enum tick_dep_bits bit);
+extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
+extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
+extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
+extern void tick_nohz_dep_set_task(struct task_struct *tsk,
+ enum tick_dep_bits bit);
+extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
+ enum tick_dep_bits bit);
+extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit);
+extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit);
+
+/*
+ * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
+ * on top of static keys.
+ */
+static inline void tick_dep_set(enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_set(bit);
+}
+
+static inline void tick_dep_clear(enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_clear(bit);
+}
+
+static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_cpu(cpu))
+ tick_nohz_dep_set_cpu(cpu, bit);
+}
+
+static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_cpu(cpu))
+ tick_nohz_dep_clear_cpu(cpu, bit);
+}
+
+static inline void tick_dep_set_task(struct task_struct *tsk,
+ enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_set_task(tsk, bit);
+}
+static inline void tick_dep_clear_task(struct task_struct *tsk,
+ enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_clear_task(tsk, bit);
+}
+static inline void tick_dep_set_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_set_signal(signal, bit);
+}
+static inline void tick_dep_clear_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit)
+{
+ if (tick_nohz_full_enabled())
+ tick_nohz_dep_clear_signal(signal, bit);
+}
+
extern void tick_nohz_full_kick_cpu(int cpu);
-extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(void);
#else
static inline int housekeeping_any_cpu(void)
@@ -166,9 +243,21 @@ static inline int housekeeping_any_cpu(void)
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+
+static inline void tick_dep_set(enum tick_dep_bits bit) { }
+static inline void tick_dep_clear(enum tick_dep_bits bit) { }
+static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_dep_set_task(struct task_struct *tsk,
+ enum tick_dep_bits bit) { }
+static inline void tick_dep_clear_task(struct task_struct *tsk,
+ enum tick_dep_bits bit) { }
+static inline void tick_dep_set_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit) { }
+static inline void tick_dep_clear_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit) { }
+
static inline void tick_nohz_full_kick_cpu(int cpu) { }
-static inline void tick_nohz_full_kick(void) { }
-static inline void tick_nohz_full_kick_all(void) { }
static inline void __tick_nohz_task_switch(void) { }
#endif
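
The dependency bits replace the old unconditional kick interfaces: a subsystem sets a bit while it needs the tick and clears it afterwards, and the wrappers compile away when full NO_HZ is not enabled. For example (a hedged fragment assuming <linux/tick.h> is included; the CPU number is illustrative):

/* Keep the tick alive on CPU 3 while perf events depend on it ... */
tick_dep_set_cpu(3, TICK_DEP_BIT_PERF_EVENTS);
/* ... and allow it to stop again once the dependency goes away. */
tick_dep_clear_cpu(3, TICK_DEP_BIT_PERF_EVENTS);

/* Per-task and per-signal (process-wide) variants exist as well: */
tick_dep_set_task(current, TICK_DEP_BIT_POSIX_TIMER);
tick_dep_clear_task(current, TICK_DEP_BIT_POSIX_TIMER);
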
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 25247220b4b7..e88005459035 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -50,6 +50,7 @@ struct tk_read_base {
* @offs_tai: Offset clock monotonic -> clock tai
* @tai_offset: The current UTC to TAI offset in seconds
* @clock_was_set_seq: The sequence number of clock was set events
+ * @cs_was_changed_seq: The sequence number of clocksource change events
* @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
* @raw_time: Monotonic raw base time in timespec64 format
* @cycle_interval: Number of clock cycles in one NTP interval
@@ -91,6 +92,7 @@ struct timekeeper {
ktime_t offs_tai;
s32 tai_offset;
unsigned int clock_was_set_seq;
+ u8 cs_was_changed_seq;
ktime_t next_leap_ktime;
struct timespec64 raw_time;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index ec89d846324c..96f37bee3bc1 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -267,6 +267,64 @@ extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
struct timespec64 *ts_real);
/*
+ * struct system_time_snapshot - simultaneous raw/real time capture with
+ * counter value
+ * @cycles: Clocksource counter value to produce the system times
+ * @real: Realtime system time
+ * @raw: Monotonic raw system time
+ * @clock_was_set_seq: The sequence number of clock was set events
+ * @cs_was_changed_seq: The sequence number of clocksource change events
+ */
+struct system_time_snapshot {
+ cycle_t cycles;
+ ktime_t real;
+ ktime_t raw;
+ unsigned int clock_was_set_seq;
+ u8 cs_was_changed_seq;
+};
+
+/*
+ * struct system_device_crosststamp - system/device cross-timestamp
+ *				      (synchronized capture)
+ * @device: Device time
+ * @sys_realtime: Realtime simultaneous with device time
+ * @sys_monoraw: Monotonic raw simultaneous with device time
+ */
+struct system_device_crosststamp {
+ ktime_t device;
+ ktime_t sys_realtime;
+ ktime_t sys_monoraw;
+};
+
+/*
+ * struct system_counterval_t - system counter value with the pointer to the
+ * corresponding clocksource
+ * @cycles: System counter value
+ * @cs: Clocksource corresponding to system counter value. Used by
+ *	timekeeping code to verify comparability of two cycle values
+ */
+struct system_counterval_t {
+ cycle_t cycles;
+ struct clocksource *cs;
+};
+
+/*
+ * Get cross timestamp between system clock and device clock
+ */
+extern int get_device_system_crosststamp(
+ int (*get_time_fn)(ktime_t *device_time,
+ struct system_counterval_t *system_counterval,
+ void *ctx),
+ void *ctx,
+ struct system_time_snapshot *history,
+ struct system_device_crosststamp *xtstamp);
+
+/*
+ * Simultaneously snapshot realtime and monotonic raw clocks
+ */
+extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
+
+/*
* Persistent clock related interfaces
*/
extern int persistent_clock_is_local;
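
A device driver with hardware cross-timestamping supplies a callback that captures its own clock together with the system counter; a hedged sketch in which the device type and hardware accessors are hypothetical:

static int my_get_time_fn(ktime_t *device_time,
			  struct system_counterval_t *system_counterval,
			  void *ctx)
{
	struct my_dev *dev = ctx;			/* hypothetical device */

	/* Latch the device clock and the system counter as close together
	 * as the hardware allows, then report both. */
	*device_time = my_dev_read_clock(dev);		/* hypothetical */
	*system_counterval = my_dev_read_counter(dev);	/* hypothetical */
	return 0;
}

static int my_crosststamp(struct my_dev *dev,
			  struct system_device_crosststamp *xtstamp)
{
	return get_device_system_crosststamp(my_get_time_fn, dev,
					     NULL /* no snapshot history */,
					     xtstamp);
}
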
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 925730bc9fc1..705df7db4482 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -15,16 +15,6 @@ struct tracer;
struct dentry;
struct bpf_prog;
-struct trace_print_flags {
- unsigned long mask;
- const char *name;
-};
-
-struct trace_print_flags_u64 {
- unsigned long long mask;
- const char *name;
-};
-
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
unsigned long flags,
const struct trace_print_flags *flag_array);
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index e1ee97c713bf..4ac89acb6136 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -3,13 +3,23 @@
/*
* File can be included directly by headers who only want to access
- * tracepoint->key to guard out of line trace calls. Otherwise
- * linux/tracepoint.h should be used.
+ * tracepoint->key to guard out of line trace calls, or the definition of
+ * trace_print_flags{_u64}. Otherwise linux/tracepoint.h should be used.
*/
#include <linux/atomic.h>
#include <linux/static_key.h>
+struct trace_print_flags {
+ unsigned long mask;
+ const char *name;
+};
+
+struct trace_print_flags_u64 {
+ unsigned long long mask;
+ const char *name;
+};
+
struct tracepoint_func {
void *func;
void *data;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index ae71a769b89e..27d7a0ab5da3 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -338,7 +338,7 @@ do { \
schedule(); try_to_freeze())
/**
- * wait_event - sleep (or freeze) until a condition gets true
+ * wait_event_freezable - sleep (or freeze) until a condition gets true
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index d866f21efbbf..677807f29a1c 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -6,7 +6,7 @@
#include <linux/writeback.h>
#include <linux/tracepoint.h>
-#include <trace/events/gfpflags.h>
+#include <trace/events/mmflags.h>
struct btrfs_root;
struct btrfs_fs_info;
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c92d1e1cbad9..111e5666e5eb 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/tracepoint.h>
-#include <trace/events/gfpflags.h>
+#include <trace/events/mmflags.h>
#define COMPACTION_STATUS \
EM( COMPACT_DEFERRED, "deferred") \
diff --git a/include/trace/events/cpuhp.h b/include/trace/events/cpuhp.h
new file mode 100644
index 000000000000..a72bd93ec7e5
--- /dev/null
+++ b/include/trace/events/cpuhp.h
@@ -0,0 +1,66 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpuhp
+
+#if !defined(_TRACE_CPUHP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUHP_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cpuhp_enter,
+
+ TP_PROTO(unsigned int cpu,
+ int target,
+ int idx,
+ int (*fun)(unsigned int)),
+
+ TP_ARGS(cpu, target, idx, fun),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, cpu )
+ __field( int, target )
+ __field( int, idx )
+ __field( void *, fun )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->target = target;
+ __entry->idx = idx;
+ __entry->fun = fun;
+ ),
+
+ TP_printk("cpu: %04u target: %3d step: %3d (%pf)",
+ __entry->cpu, __entry->target, __entry->idx, __entry->fun)
+);
+
+TRACE_EVENT(cpuhp_exit,
+
+ TP_PROTO(unsigned int cpu,
+ int state,
+ int idx,
+ int ret),
+
+ TP_ARGS(cpu, state, idx, ret),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, cpu )
+ __field( int, state )
+ __field( int, idx )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->state = state;
+ __entry->idx = idx;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(" cpu: %04u state: %3d step: %3d ret: %d",
+ __entry->cpu, __entry->state, __entry->idx, __entry->ret)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h
deleted file mode 100644
index dde6bf092c8a..000000000000
--- a/include/trace/events/gfpflags.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * The order of these masks is important. Matching masks will be seen
- * first and the left over flags will end up showing by themselves.
- *
- * For example, if we have GFP_KERNEL before GFP_USER we wil get:
- *
- * GFP_KERNEL|GFP_HARDWALL
- *
- * Thus most bits set go first.
- */
-#define show_gfp_flags(flags) \
- (flags) ? __print_flags(flags, "|", \
- {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
- {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
- {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
- {(unsigned long)GFP_USER, "GFP_USER"}, \
- {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
- {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
- {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
- {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
- {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
- {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \
- {(unsigned long)__GFP_ATOMIC, "GFP_ATOMIC"}, \
- {(unsigned long)__GFP_IO, "GFP_IO"}, \
- {(unsigned long)__GFP_COLD, "GFP_COLD"}, \
- {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \
- {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \
- {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \
- {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \
- {(unsigned long)__GFP_COMP, "GFP_COMP"}, \
- {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \
- {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \
- {(unsigned long)__GFP_MEMALLOC, "GFP_MEMALLOC"}, \
- {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
- {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
- {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
- {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
- {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
- {(unsigned long)__GFP_DIRECT_RECLAIM, "GFP_DIRECT_RECLAIM"}, \
- {(unsigned long)__GFP_KSWAPD_RECLAIM, "GFP_KSWAPD_RECLAIM"}, \
- {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \
- ) : "GFP_NOWAIT"
-
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 47c6212d8f3c..551ba4acde4d 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -6,8 +6,6 @@
#include <linux/tracepoint.h>
-#include <trace/events/gfpflags.h>
-
#define SCAN_STATUS \
EM( SCAN_FAIL, "failed") \
EM( SCAN_SUCCEED, "succeeded") \
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f7554fd7fc62..ca7217389067 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include <trace/events/gfpflags.h>
+#include <trace/events/mmflags.h>
DECLARE_EVENT_CLASS(kmem_alloc,
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index d6f83222a6a1..aa69253ecc7d 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -359,14 +359,15 @@ TRACE_EVENT(
#endif
TRACE_EVENT(kvm_halt_poll_ns,
- TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
+ TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
+ unsigned int old),
TP_ARGS(grow, vcpu_id, new, old),
TP_STRUCT__entry(
__field(bool, grow)
__field(unsigned int, vcpu_id)
- __field(int, new)
- __field(int, old)
+ __field(unsigned int, new)
+ __field(unsigned int, old)
),
TP_fast_assign(
@@ -376,7 +377,7 @@ TRACE_EVENT(kvm_halt_poll_ns,
__entry->old = old;
),
- TP_printk("vcpu %u: halt_poll_ns %d (%s %d)",
+ TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
__entry->vcpu_id,
__entry->new,
__entry->grow ? "grow" : "shrink",
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
new file mode 100644
index 000000000000..a849185c82f0
--- /dev/null
+++ b/include/trace/events/mmflags.h
@@ -0,0 +1,164 @@
+/*
+ * The order of these masks is important. Matching masks will be seen
+ * first and the left over flags will end up showing by themselves.
+ *
+ * For example, if we have GFP_KERNEL before GFP_USER we will get:
+ *
+ * GFP_KERNEL|GFP_HARDWALL
+ *
+ * Thus most bits set go first.
+ */
+
+#define __def_gfpflag_names \
+ {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
+ {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\
+ {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
+ {(unsigned long)GFP_USER, "GFP_USER"}, \
+ {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
+ {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
+ {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
+ {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
+ {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
+ {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
+ {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
+ {(unsigned long)GFP_DMA, "GFP_DMA"}, \
+ {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
+ {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
+ {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
+ {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
+ {(unsigned long)__GFP_IO, "__GFP_IO"}, \
+ {(unsigned long)__GFP_FS, "__GFP_FS"}, \
+ {(unsigned long)__GFP_COLD, "__GFP_COLD"}, \
+ {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
+ {(unsigned long)__GFP_REPEAT, "__GFP_REPEAT"}, \
+ {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
+ {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
+ {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
+ {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
+ {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
+ {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
+ {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
+ {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
+ {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
+ {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
+ {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
+ {(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \
+ {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
+ {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
+ {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
+ {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
+ {(unsigned long)__GFP_OTHER_NODE, "__GFP_OTHER_NODE"} \
+
+#define show_gfp_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ __def_gfpflag_names \
+ ) : "none"
+
+#ifdef CONFIG_MMU
+#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_MLOCK(flag,string)
+#endif
+
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
+#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_UNCACHED(flag,string)
+#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_HWPOISON(flag,string)
+#endif
+
+#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_IDLE(flag,string)
+#endif
+
+#define __def_pageflag_names \
+ {1UL << PG_locked, "locked" }, \
+ {1UL << PG_error, "error" }, \
+ {1UL << PG_referenced, "referenced" }, \
+ {1UL << PG_uptodate, "uptodate" }, \
+ {1UL << PG_dirty, "dirty" }, \
+ {1UL << PG_lru, "lru" }, \
+ {1UL << PG_active, "active" }, \
+ {1UL << PG_slab, "slab" }, \
+ {1UL << PG_owner_priv_1, "owner_priv_1" }, \
+ {1UL << PG_arch_1, "arch_1" }, \
+ {1UL << PG_reserved, "reserved" }, \
+ {1UL << PG_private, "private" }, \
+ {1UL << PG_private_2, "private_2" }, \
+ {1UL << PG_writeback, "writeback" }, \
+ {1UL << PG_head, "head" }, \
+ {1UL << PG_swapcache, "swapcache" }, \
+ {1UL << PG_mappedtodisk, "mappedtodisk" }, \
+ {1UL << PG_reclaim, "reclaim" }, \
+ {1UL << PG_swapbacked, "swapbacked" }, \
+ {1UL << PG_unevictable, "unevictable" } \
+IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
+IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
+IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
+IF_HAVE_PG_IDLE(PG_young, "young" ) \
+IF_HAVE_PG_IDLE(PG_idle, "idle" )
+
+#define show_page_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ __def_pageflag_names \
+ ) : "none"
+
+#if defined(CONFIG_X86)
+#define __VM_ARCH_SPECIFIC {VM_PAT, "pat" }
+#elif defined(CONFIG_PPC)
+#define __VM_ARCH_SPECIFIC {VM_SAO, "sao" }
+#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64)
+#define __VM_ARCH_SPECIFIC {VM_GROWSUP, "growsup" }
+#elif !defined(CONFIG_MMU)
+#define __VM_ARCH_SPECIFIC {VM_MAPPED_COPY,"mappedcopy" }
+#else
+#define __VM_ARCH_SPECIFIC {VM_ARCH_1, "arch_1" }
+#endif
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name },
+#else
+#define IF_HAVE_VM_SOFTDIRTY(flag,name)
+#endif
+
+#define __def_vmaflag_names \
+ {VM_READ, "read" }, \
+ {VM_WRITE, "write" }, \
+ {VM_EXEC, "exec" }, \
+ {VM_SHARED, "shared" }, \
+ {VM_MAYREAD, "mayread" }, \
+ {VM_MAYWRITE, "maywrite" }, \
+ {VM_MAYEXEC, "mayexec" }, \
+ {VM_MAYSHARE, "mayshare" }, \
+ {VM_GROWSDOWN, "growsdown" }, \
+ {VM_PFNMAP, "pfnmap" }, \
+ {VM_DENYWRITE, "denywrite" }, \
+ {VM_LOCKONFAULT, "lockonfault" }, \
+ {VM_LOCKED, "locked" }, \
+ {VM_IO, "io" }, \
+ {VM_SEQ_READ, "seqread" }, \
+ {VM_RAND_READ, "randread" }, \
+ {VM_DONTCOPY, "dontcopy" }, \
+ {VM_DONTEXPAND, "dontexpand" }, \
+ {VM_ACCOUNT, "account" }, \
+ {VM_NORESERVE, "noreserve" }, \
+ {VM_HUGETLB, "hugetlb" }, \
+ __VM_ARCH_SPECIFIC , \
+ {VM_DONTDUMP, "dontdump" }, \
+IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
+ {VM_MIXEDMAP, "mixedmap" }, \
+ {VM_HUGEPAGE, "hugepage" }, \
+ {VM_NOHUGEPAGE, "nohugepage" }, \
+ {VM_MERGEABLE, "mergeable" } \
+
+#define show_vma_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ __def_vmaflag_names \
+ ) : "none"
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 284244ebfe8d..19e50300ce7d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -38,6 +38,28 @@ DEFINE_EVENT(cpu, cpu_idle,
TP_ARGS(state, cpu_id)
);
+TRACE_EVENT(powernv_throttle,
+
+ TP_PROTO(int chip_id, const char *reason, int pmax),
+
+ TP_ARGS(chip_id, reason, pmax),
+
+ TP_STRUCT__entry(
+ __field(int, chip_id)
+ __string(reason, reason)
+ __field(int, pmax)
+ ),
+
+ TP_fast_assign(
+ __entry->chip_id = chip_id;
+ __assign_str(reason, reason);
+ __entry->pmax = pmax;
+ ),
+
+ TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
+ __entry->pmax, __get_str(reason))
+);
+
TRACE_EVENT(pstate_sample,
TP_PROTO(u32 core_busy,
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 073b9ac245ba..51440131d337 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -328,23 +328,49 @@ TRACE_EVENT(itimer_expire,
);
#ifdef CONFIG_NO_HZ_COMMON
+
+#define TICK_DEP_NAMES \
+ tick_dep_name(NONE) \
+ tick_dep_name(POSIX_TIMER) \
+ tick_dep_name(PERF_EVENTS) \
+ tick_dep_name(SCHED) \
+ tick_dep_name_end(CLOCK_UNSTABLE)
+
+#undef tick_dep_name
+#undef tick_dep_name_end
+
+#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+
+TICK_DEP_NAMES
+
+#undef tick_dep_name
+#undef tick_dep_name_end
+
+#define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
+#define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep }
+
+#define show_tick_dep_name(val) \
+ __print_symbolic(val, TICK_DEP_NAMES)
+
TRACE_EVENT(tick_stop,
- TP_PROTO(int success, char *error_msg),
+ TP_PROTO(int success, int dependency),
- TP_ARGS(success, error_msg),
+ TP_ARGS(success, dependency),
TP_STRUCT__entry(
__field( int , success )
- __string( msg, error_msg )
+ __field( int , dependency )
),
TP_fast_assign(
__entry->success = success;
- __assign_str(msg, error_msg);
+ __entry->dependency = dependency;
),
- TP_printk("success=%s msg=%s", __entry->success ? "yes" : "no", __get_str(msg))
+ TP_printk("success=%d dependency=%s", __entry->success, \
+ show_tick_dep_name(__entry->dependency))
);
#endif
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 31763dd8db1c..0101ef37f1ee 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -8,7 +8,7 @@
#include <linux/tracepoint.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
-#include <trace/events/gfpflags.h>
+#include <trace/events/mmflags.h>
#define RECLAIM_WB_ANON 0x0001u
#define RECLAIM_WB_FILE 0x0002u
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index bb991dfe134f..9175a1b4dc69 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -1,7 +1,4 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * linux/include/linux/auto_fs.h
- *
+/*
* Copyright 1997 Transmeta Corporation - All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
@@ -51,7 +48,7 @@ struct autofs_packet_hdr {
struct autofs_packet_missing {
struct autofs_packet_hdr hdr;
- autofs_wqt_t wait_queue_token;
+ autofs_wqt_t wait_queue_token;
int len;
char name[NAME_MAX+1];
};
@@ -63,12 +60,12 @@ struct autofs_packet_expire {
char name[NAME_MAX+1];
};
-#define AUTOFS_IOC_READY _IO(0x93,0x60)
-#define AUTOFS_IOC_FAIL _IO(0x93,0x61)
-#define AUTOFS_IOC_CATATONIC _IO(0x93,0x62)
-#define AUTOFS_IOC_PROTOVER _IOR(0x93,0x63,int)
-#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,compat_ulong_t)
-#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93,0x64,unsigned long)
-#define AUTOFS_IOC_EXPIRE _IOR(0x93,0x65,struct autofs_packet_expire)
+#define AUTOFS_IOC_READY _IO(0x93, 0x60)
+#define AUTOFS_IOC_FAIL _IO(0x93, 0x61)
+#define AUTOFS_IOC_CATATONIC _IO(0x93, 0x62)
+#define AUTOFS_IOC_PROTOVER _IOR(0x93, 0x63, int)
+#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93, 0x64, compat_ulong_t)
+#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93, 0x64, unsigned long)
+#define AUTOFS_IOC_EXPIRE _IOR(0x93, 0x65, struct autofs_packet_expire)
#endif /* _UAPI_LINUX_AUTO_FS_H */
diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h
index e02982fa2953..8f8f1bdcca8c 100644
--- a/include/uapi/linux/auto_fs4.h
+++ b/include/uapi/linux/auto_fs4.h
@@ -1,6 +1,4 @@
-/* -*- c -*-
- * linux/include/linux/auto_fs4.h
- *
+/*
* Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
*
* This file is part of the Linux kernel and is made available under
@@ -38,7 +36,6 @@
static inline void set_autofs_type_indirect(unsigned int *type)
{
*type = AUTOFS_TYPE_INDIRECT;
- return;
}
static inline unsigned int autofs_type_indirect(unsigned int type)
@@ -49,7 +46,6 @@ static inline unsigned int autofs_type_indirect(unsigned int type)
static inline void set_autofs_type_direct(unsigned int *type)
{
*type = AUTOFS_TYPE_DIRECT;
- return;
}
static inline unsigned int autofs_type_direct(unsigned int type)
@@ -60,7 +56,6 @@ static inline unsigned int autofs_type_direct(unsigned int type)
static inline void set_autofs_type_offset(unsigned int *type)
{
*type = AUTOFS_TYPE_OFFSET;
- return;
}
static inline unsigned int autofs_type_offset(unsigned int type)
@@ -81,7 +76,6 @@ static inline unsigned int autofs_type_trigger(unsigned int type)
static inline void set_autofs_type_any(unsigned int *type)
{
*type = AUTOFS_TYPE_ANY;
- return;
}
static inline unsigned int autofs_type_any(unsigned int type)
@@ -114,7 +108,7 @@ enum autofs_notify {
/* v4 multi expire (via pipe) */
struct autofs_packet_expire_multi {
struct autofs_packet_hdr hdr;
- autofs_wqt_t wait_queue_token;
+ autofs_wqt_t wait_queue_token;
int len;
char name[NAME_MAX+1];
};
@@ -154,11 +148,10 @@ union autofs_v5_packet_union {
autofs_packet_expire_direct_t expire_direct;
};
-#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int)
+#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93, 0x66, int)
#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI
#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI
-#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int)
-#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93,0x70,int)
-
+#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93, 0x67, int)
+#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93, 0x70, int)
#endif /* _LINUX_AUTO_FS4_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 9da905157cee..a7f1f8032ec1 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -157,6 +157,7 @@ struct kvm_s390_skeys {
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
+#define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
union {
struct {
@@ -165,6 +166,11 @@ struct kvm_hyperv_exit {
__u64 evt_page;
__u64 msg_page;
} synic;
+ struct {
+ __u64 input;
+ __u64 result;
+ __u64 params[2];
+ } hcall;
} u;
};
@@ -541,7 +547,13 @@ struct kvm_s390_pgm_info {
__u8 exc_access_id;
__u8 per_access_id;
__u8 op_access_id;
- __u8 pad[3];
+#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01
+#define KVM_S390_PGM_FLAGS_ILC_0 0x02
+#define KVM_S390_PGM_FLAGS_ILC_1 0x04
+#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06
+#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08
+ __u8 flags;
+ __u8 pad[2];
};
struct kvm_s390_prefix_info {
@@ -850,6 +862,9 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
#define KVM_CAP_HYPERV_SYNIC 123
#define KVM_CAP_S390_RI 124
+#define KVM_CAP_SPAPR_TCE_64 125
+#define KVM_CAP_ARM_PMU_V3 126
+#define KVM_CAP_VCPU_ATTRIBUTES 127
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1142,6 +1157,8 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PPC_ALLOC_HTAB */
#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32)
#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
+#define KVM_CREATE_SPAPR_TCE_64 _IOW(KVMIO, 0xa8, \
+ struct kvm_create_spapr_tce_64)
/* Available with KVM_CAP_RMA */
#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
/* Available with KVM_CAP_PPC_HTAB_FD */
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index f0b7bfe5da92..ac6dded80ffa 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -51,7 +51,9 @@ struct ptp_clock_caps {
int n_per_out; /* Number of programmable periodic signals. */
int pps; /* Whether the clock supports a PPS callback. */
int n_pins; /* Number of input/output pins. */
- int rsv[14]; /* Reserved for future use. */
+ /* Whether the clock supports precise system-device cross timestamps */
+ int cross_timestamping;
+ int rsv[13]; /* Reserved for future use. */
};
struct ptp_extts_request {
@@ -81,6 +83,13 @@ struct ptp_sys_offset {
struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
};
+struct ptp_sys_offset_precise {
+ struct ptp_clock_time device;
+ struct ptp_clock_time sys_realtime;
+ struct ptp_clock_time sys_monoraw;
+ unsigned int rsv[4]; /* Reserved for future use. */
+};
+
enum ptp_pin_function {
PTP_PF_NONE,
PTP_PF_EXTTS,
@@ -124,6 +133,8 @@ struct ptp_pin_desc {
#define PTP_SYS_OFFSET _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset)
#define PTP_PIN_GETFUNC _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc)
#define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
+#define PTP_SYS_OFFSET_PRECISE \
+ _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
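
From userspace the new ioctl should only be issued when the driver advertises the capability; a hedged sketch against an already opened /dev/ptpN descriptor fd:

struct ptp_clock_caps caps;
struct ptp_sys_offset_precise precise = { 0 };

if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps) == 0 &&
    caps.cross_timestamping &&
    ioctl(fd, PTP_SYS_OFFSET_PRECISE, &precise) == 0) {
	/* precise.device, precise.sys_realtime and precise.sys_monoraw now
	 * hold one simultaneous device/system snapshot. */
}
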
struct ptp_extts_event {
struct ptp_clock_time t; /* Time event occurred. */