Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/common/sharpsl_pm.c               |   9
-rw-r--r--  arch/arm/mach-at91/pm.c                    |   7
-rw-r--r--  arch/arm/mach-omap1/pm.c                   |  29
-rw-r--r--  arch/arm/mach-omap2/pm.c                   |  35
-rw-r--r--  arch/arm/mach-pnx4008/pm.c                 |   6
-rw-r--r--  arch/arm/mach-pxa/pm.c                     |   4
-rw-r--r--  arch/arm/mach-pxa/pxa25x.c                 |   4
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c                 |   2
-rw-r--r--  arch/arm/mach-sa1100/pm.c                  |   6
-rw-r--r--  arch/arm/nwfpe/fpopcode.h                  |   6
-rw-r--r--  arch/arm/plat-s3c24xx/pm.c                 |   6
-rw-r--r--  arch/blackfin/mach-common/pm.c             |  59
-rw-r--r--  arch/ia64/kernel/time.c                    |   5
-rw-r--r--  arch/ia64/sn/kernel/xpnet.c                |  13
-rw-r--r--  arch/powerpc/configs/pmac32_defconfig      |   1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c          |   1
-rw-r--r--  arch/powerpc/kernel/time.c                 |  32
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200_pm.c  |  34
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pm.c   |  15
-rw-r--r--  arch/sh/boards/hp6xx/pm.c                  |   6
-rw-r--r--  arch/x86/ia32/ia32_binfmt.c                |   1
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S           |  12
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S           |  32
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c      |  66
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c   |  10
-rw-r--r--  arch/x86/kernel/mce_64.c                   |  39
-rw-r--r--  arch/x86/kernel/msr.c                      |  35
-rw-r--r--  arch/x86/kernel/suspend_64.c               | 101
-rw-r--r--  arch/x86/kernel/suspend_asm_64.S           |  49
-rw-r--r--  arch/x86/kernel/vsyscall_64.c              |  23
-rw-r--r--  arch/x86_64/Kconfig                        |   5
31 files changed, 371 insertions, 282 deletions
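
Most of the ARM, Blackfin, powerpc and SH hunks below convert platform code from the old pm_ops/pm_set_ops() suspend interface to platform_suspend_ops/suspend_set_ops(): the suspend_state_t argument disappears from .prepare and .finish, .finish no longer returns a value, and state selection goes through .valid (plus an optional .set_target). A minimal sketch of the new registration pattern, using only API names visible in these diffs (the foo_* identifiers are placeholders, not part of the patch):

#include <linux/suspend.h>

/* placeholder enter hook; the real ones live in the per-machine pm.c files */
static int foo_pm_enter(suspend_state_t state)
{
	/* only called for states that .valid accepted, e.g. PM_SUSPEND_MEM */
	return 0;
}

static struct platform_suspend_ops foo_pm_ops = {
	.valid	= suspend_valid_only_mem,	/* advertise mem sleep only */
	.enter	= foo_pm_enter,
};

static int __init foo_pm_init(void)
{
	suspend_set_ops(&foo_pm_ops);		/* replaces the old pm_set_ops() */
	return 0;
}
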
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 111a7fa5deb..5bba5255b11 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/apm-emulation.h>
+#include <linux/suspend.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
@@ -765,9 +766,9 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
info->battery_life = sharpsl_pm.battstat.mainbat_percent;
}
-static struct pm_ops sharpsl_pm_ops = {
+static struct platform_suspend_ops sharpsl_pm_ops = {
.enter = corgi_pxa_pm_enter,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
static int __init sharpsl_pm_probe(struct platform_device *pdev)
@@ -799,7 +800,7 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev)
apm_get_power_status = sharpsl_apm_get_power_status;
- pm_set_ops(&sharpsl_pm_ops);
+ suspend_set_ops(&sharpsl_pm_ops);
mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
@@ -808,7 +809,7 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev)
static int sharpsl_pm_remove(struct platform_device *pdev)
{
- pm_set_ops(NULL);
+ suspend_set_ops(NULL);
device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
device_remove_file(&pdev->dev, &dev_attr_battery_voltage);
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index ddf9184d561..98cb6148291 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -10,10 +10,9 @@
* (at your option) any later version.
*/
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
-#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
@@ -199,7 +198,7 @@ error:
}
-static struct pm_ops at91_pm_ops ={
+static struct platform_suspend_ops at91_pm_ops ={
.valid = at91_pm_valid_state,
.set_target = at91_pm_set_target,
.enter = at91_pm_enter,
@@ -220,7 +219,7 @@ static int __init at91_pm_init(void)
/* Disable SDRAM low-power mode. Cannot be used with self-refresh. */
at91_sys_write(AT91_SDRAMC_LPR, 0);
- pm_set_ops(&at91_pm_ops);
+ suspend_set_ops(&at91_pm_ops);
return 0;
}
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 089b8208de0..3bf01e28df3 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -35,10 +35,9 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
-#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
@@ -600,27 +599,15 @@ static void (*saved_idle)(void) = NULL;
/*
* omap_pm_prepare - Do preliminary suspend work.
- * @state: suspend state we're entering.
*
*/
-static int omap_pm_prepare(suspend_state_t state)
+static int omap_pm_prepare(void)
{
- int error = 0;
-
/* We cannot sleep in idle until we have resumed */
saved_idle = pm_idle;
pm_idle = NULL;
- switch (state)
- {
- case PM_SUSPEND_STANDBY:
- case PM_SUSPEND_MEM:
- break;
- default:
- return -EINVAL;
- }
-
- return error;
+ return 0;
}
@@ -648,16 +635,14 @@ static int omap_pm_enter(suspend_state_t state)
/**
* omap_pm_finish - Finish up suspend sequence.
- * @state: State we're coming out of.
*
* This is called after we wake back up (or if entering the sleep state
* failed).
*/
-static int omap_pm_finish(suspend_state_t state)
+static void omap_pm_finish(void)
{
pm_idle = saved_idle;
- return 0;
}
@@ -674,11 +659,11 @@ static struct irqaction omap_wakeup_irq = {
-static struct pm_ops omap_pm_ops ={
+static struct platform_suspend_ops omap_pm_ops ={
.prepare = omap_pm_prepare,
.enter = omap_pm_enter,
.finish = omap_pm_finish,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
static int __init omap_pm_init(void)
@@ -735,7 +720,7 @@ static int __init omap_pm_init(void)
else if (cpu_is_omap16xx())
omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3);
- pm_set_ops(&omap_pm_ops);
+ suspend_set_ops(&omap_pm_ops);
#if defined(DEBUG) && defined(CONFIG_PROC_FS)
omap_pm_init_proc();
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 6f4a5436d0c..baf7d82b458 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -16,10 +16,9 @@
* published by the Free Software Foundation.
*/
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
-#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
@@ -71,28 +70,12 @@ void omap2_pm_idle(void)
local_irq_enable();
}
-static int omap2_pm_prepare(suspend_state_t state)
+static int omap2_pm_prepare(void)
{
- int error = 0;
-
/* We cannot sleep in idle until we have resumed */
saved_idle = pm_idle;
pm_idle = NULL;
-
- switch (state)
- {
- case PM_SUSPEND_STANDBY:
- case PM_SUSPEND_MEM:
- break;
-
- case PM_SUSPEND_DISK:
- return -ENOTSUPP;
-
- default:
- return -EINVAL;
- }
-
- return error;
+ return 0;
}
#define INT0_WAKE_MASK (OMAP_IRQ_BIT(INT_24XX_GPIO_BANK1) | \
@@ -353,9 +336,6 @@ static int omap2_pm_enter(suspend_state_t state)
case PM_SUSPEND_MEM:
ret = omap2_pm_suspend();
break;
- case PM_SUSPEND_DISK:
- ret = -ENOTSUPP;
- break;
default:
ret = -EINVAL;
}
@@ -363,17 +343,16 @@ static int omap2_pm_enter(suspend_state_t state)
return ret;
}
-static int omap2_pm_finish(suspend_state_t state)
+static void omap2_pm_finish(void)
{
pm_idle = saved_idle;
- return 0;
}
-static struct pm_ops omap_pm_ops = {
+static struct platform_suspend_ops omap_pm_ops = {
.prepare = omap2_pm_prepare,
.enter = omap2_pm_enter,
.finish = omap2_pm_finish,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
int __init omap2_pm_init(void)
@@ -397,7 +376,7 @@ int __init omap2_pm_init(void)
omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
omap24xx_cpu_suspend_sz);
- pm_set_ops(&omap_pm_ops);
+ suspend_set_ops(&omap_pm_ops);
pm_idle = omap2_pm_idle;
pmdomain_init();
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index 2a137f33f75..40116d25434 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -15,7 +15,7 @@
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/clk.h>
@@ -117,7 +117,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
(state == PM_SUSPEND_MEM);
}
-static struct pm_ops pnx4008_pm_ops = {
+static struct platform_suspend_ops pnx4008_pm_ops = {
.enter = pnx4008_pm_enter,
.valid = pnx4008_pm_valid,
};
@@ -146,7 +146,7 @@ static int __init pnx4008_pm_init(void)
return -ENOMEM;
}
- pm_set_ops(&pnx4008_pm_ops);
+ suspend_set_ops(&pnx4008_pm_ops);
return 0;
}
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index b59a81a8e7d..a941c71c7d0 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -86,7 +86,7 @@ static int pxa_pm_valid(suspend_state_t state)
return -EINVAL;
}
-static struct pm_ops pxa_pm_ops = {
+static struct platform_suspend_ops pxa_pm_ops = {
.valid = pxa_pm_valid,
.enter = pxa_pm_enter,
};
@@ -104,7 +104,7 @@ static int __init pxa_pm_init(void)
return -ENOMEM;
}
- pm_set_ops(&pxa_pm_ops);
+ suspend_set_ops(&pxa_pm_ops);
return 0;
}
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 0d6a72504ca..dcd81f8d083 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -20,7 +20,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <asm/hardware.h>
#include <asm/arch/irqs.h>
@@ -215,7 +215,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state)
static struct pxa_cpu_pm_fns pxa25x_cpu_pm_fns = {
.save_size = SLEEP_SAVE_SIZE,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
.save = pxa25x_cpu_pm_save,
.restore = pxa25x_cpu_pm_restore,
.enter = pxa25x_cpu_pm_enter,
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 2d7fc39732e..d0f2b597db1 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/hardware.h>
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 01a37d3c072..246c573e725 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -122,14 +122,14 @@ unsigned long sleep_phys_sp(void *sp)
return virt_to_phys(sp);
}
-static struct pm_ops sa11x0_pm_ops = {
+static struct platform_suspend_ops sa11x0_pm_ops = {
.enter = sa11x0_pm_enter,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
static int __init sa11x0_pm_init(void)
{
- pm_set_ops(&sa11x0_pm_ops);
+ suspend_set_ops(&sa11x0_pm_ops);
return 0;
}
diff --git a/arch/arm/nwfpe/fpopcode.h b/arch/arm/nwfpe/fpopcode.h
index ec78e3517fc..0090b19bbe6 100644
--- a/arch/arm/nwfpe/fpopcode.h
+++ b/arch/arm/nwfpe/fpopcode.h
@@ -369,20 +369,20 @@ TABLE 5
#define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5)
#ifdef CONFIG_FPE_NWFPE_XP
-static inline __attribute_pure__ floatx80 getExtendedConstant(const unsigned int nIndex)
+static inline floatx80 __pure getExtendedConstant(const unsigned int nIndex)
{
extern const floatx80 floatx80Constant[];
return floatx80Constant[nIndex];
}
#endif
-static inline __attribute_pure__ float64 getDoubleConstant(const unsigned int nIndex)
+static inline float64 __pure getDoubleConstant(const unsigned int nIndex)
{
extern const float64 float64Constant[];
return float64Constant[nIndex];
}
-static inline __attribute_pure__ float32 getSingleConstant(const unsigned int nIndex)
+static inline float32 __pure getSingleConstant(const unsigned int nIndex)
{
extern const float32 float32Constant[];
return float32Constant[nIndex];
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/plat-s3c24xx/pm.c
index eab1850616d..4fdb3117744 100644
--- a/arch/arm/plat-s3c24xx/pm.c
+++ b/arch/arm/plat-s3c24xx/pm.c
@@ -612,9 +612,9 @@ static int s3c2410_pm_enter(suspend_state_t state)
return 0;
}
-static struct pm_ops s3c2410_pm_ops = {
+static struct platform_suspend_ops s3c2410_pm_ops = {
.enter = s3c2410_pm_enter,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
/* s3c2410_pm_init
@@ -628,6 +628,6 @@ int __init s3c2410_pm_init(void)
{
printk("S3C2410 Power Management, (c) 2004 Simtec Electronics\n");
- pm_set_ops(&s3c2410_pm_ops);
+ suspend_set_ops(&s3c2410_pm_ops);
return 0;
}
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index b1030272220..dac51fb06f2 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -32,7 +32,7 @@
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/io.h>
@@ -89,28 +89,15 @@ void bfin_pm_suspend_standby_enter(void)
#endif /* CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR */
}
-
/*
- * bfin_pm_prepare - Do preliminary suspend work.
- * @state: suspend state we're entering.
+ * bfin_pm_valid - Tell the PM core that we only support the standby sleep
+ * state
+ * @state: suspend state we're checking.
*
*/
-static int bfin_pm_prepare(suspend_state_t state)
+static int bfin_pm_valid(suspend_state_t state)
{
- int error = 0;
-
- switch (state) {
- case PM_SUSPEND_STANDBY:
- break;
-
- case PM_SUSPEND_MEM:
- return -ENOTSUPP;
-
- default:
- return -EINVAL;
- }
-
- return error;
+ return (state == PM_SUSPEND_STANDBY);
}
/*
@@ -135,44 +122,14 @@ static int bfin_pm_enter(suspend_state_t state)
return 0;
}
-/*
- * bfin_pm_finish - Finish up suspend sequence.
- * @state: State we're coming out of.
- *
- * This is called after we wake back up (or if entering the sleep state
- * failed).
- */
-static int bfin_pm_finish(suspend_state_t state)
-{
- switch (state) {
- case PM_SUSPEND_STANDBY:
- break;
-
- case PM_SUSPEND_MEM:
- return -ENOTSUPP;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int bfin_pm_valid(suspend_state_t state)
-{
- return (state == PM_SUSPEND_STANDBY);
-}
-
-struct pm_ops bfin_pm_ops = {
- .prepare = bfin_pm_prepare,
+struct platform_suspend_ops bfin_pm_ops = {
.enter = bfin_pm_enter,
- .finish = bfin_pm_finish,
.valid = bfin_pm_valid,
};
static int __init bfin_pm_init(void)
{
- pm_set_ops(&bfin_pm_ops);
+ suspend_set_ops(&bfin_pm_ops);
return 0;
}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 98cfc90cab1..2bb84214e5f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -371,6 +371,11 @@ ia64_setup_printk_clock(void)
ia64_printk_clock = ia64_itc_printk_clock;
}
+/* IA64 doesn't cache the timezone */
+void update_vsyscall_tz(void)
+{
+}
+
void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
unsigned long flags;
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
index e58fcadff2e..a5df672d839 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/arch/ia64/sn/kernel/xpnet.c
@@ -269,8 +269,9 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
skb->protocol = eth_type_trans(skb, xpnet_device);
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
- "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
+ dev_dbg(xpnet, "passing skb to network layer\n"
+ KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+ "skb->end=0x%p skb->len=%d\n",
(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
skb_end_pointer(skb), skb->len);
@@ -576,10 +577,10 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
msg->buf_pa = __pa(start_addr);
- dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
- "0x%lx, msg->size=%u, msg->leadin_ignore=%u, "
- "msg->tailout_ignore=%u\n", dest_partid,
- XPC_NET_CHANNEL, msg->buf_pa, msg->size,
+ dev_dbg(xpnet, "sending XPC message to %d:%d\n"
+ KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, "
+ "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
+ dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
msg->leadin_ignore, msg->tailout_ignore);
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 95b823b60c9..8e5988c4a16 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -209,7 +209,6 @@ CONFIG_PM=y
# CONFIG_PM_LEGACY is not set
CONFIG_PM_DEBUG=y
# CONFIG_PM_VERBOSE is not set
-# CONFIG_DISABLE_CONSOLE_SUSPEND is not set
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_HIBERNATION=y
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0ae5d57b936..2c8e756d19a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -141,6 +141,7 @@ int main(void)
DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
+ DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 863a5d6d9b1..9eb3284deac 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -212,23 +212,44 @@ static u64 read_purr(void)
}
/*
+ * Read the SPURR on systems that have it, otherwise the purr
+ */
+static u64 read_spurr(u64 purr)
+{
+ if (cpu_has_feature(CPU_FTR_SPURR))
+ return mfspr(SPRN_SPURR);
+ return purr;
+}
+
+/*
* Account time for a transition between system, hard irq
* or soft irq state.
*/
void account_system_vtime(struct task_struct *tsk)
{
- u64 now, delta;
+ u64 now, nowscaled, delta, deltascaled;
unsigned long flags;
local_irq_save(flags);
now = read_purr();
delta = now - get_paca()->startpurr;
get_paca()->startpurr = now;
+ nowscaled = read_spurr(now);
+ deltascaled = nowscaled - get_paca()->startspurr;
+ get_paca()->startspurr = nowscaled;
if (!in_interrupt()) {
+ /* deltascaled includes both user and system time.
+ * Hence scale it based on the purr ratio to estimate
+ * the system time */
+ deltascaled = deltascaled * get_paca()->system_time /
+ (get_paca()->system_time + get_paca()->user_time);
delta += get_paca()->system_time;
get_paca()->system_time = 0;
}
account_system_time(tsk, 0, delta);
+ get_paca()->purrdelta = delta;
+ account_system_time_scaled(tsk, deltascaled);
+ get_paca()->spurrdelta = deltascaled;
local_irq_restore(flags);
}
@@ -240,11 +261,17 @@ void account_system_vtime(struct task_struct *tsk)
*/
void account_process_vtime(struct task_struct *tsk)
{
- cputime_t utime;
+ cputime_t utime, utimescaled;
utime = get_paca()->user_time;
get_paca()->user_time = 0;
account_user_time(tsk, utime);
+
+ /* Estimate the scaled utime by scaling the real utime based
+ * on the last spurr to purr ratio */
+ utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta;
+ get_paca()->spurrdelta = get_paca()->purrdelta = 0;
+ account_user_time_scaled(tsk, utimescaled);
}
static void account_process_time(struct pt_regs *regs)
@@ -266,6 +293,7 @@ struct cpu_purr_data {
int initialized; /* thread is running */
u64 tb; /* last TB value read */
u64 purr; /* last PURR value read */
+ u64 spurr; /* last SPURR value read */
};
/*
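
The powerpc/kernel/time.c hunks above add SPURR-based scaled time accounting: the SPURR delta over an accounting interval covers both user and system time, so it is apportioned to system time in the ratio system_time / (system_time + user_time), and the per-process scaled user time is later estimated from the saved spurrdelta/purrdelta ratio. A standalone sketch of that first scaling step, with made-up numbers purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long deltascaled = 800;	/* SPURR ticks over the interval */
	unsigned long long system_time = 600;	/* accumulated PURR system time  */
	unsigned long long user_time   = 400;	/* accumulated PURR user time    */

	/* apportion the SPURR delta to system time by the PURR ratio,
	 * mirroring account_system_vtime() in the hunk above */
	deltascaled = deltascaled * system_time / (system_time + user_time);
	printf("scaled system time charged: %llu\n", deltascaled);	/* 480 */
	return 0;
}
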
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index f26afcd4175..ffa14aff524 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -1,5 +1,5 @@
#include <linux/init.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
@@ -18,6 +18,8 @@ static void __iomem *sram;
static const int sram_size = 0x4000; /* 16 kBytes */
static void __iomem *mbar;
+static suspend_state_t lite5200_pm_target_state;
+
static int lite5200_pm_valid(suspend_state_t state)
{
switch (state) {
@@ -29,13 +31,22 @@ static int lite5200_pm_valid(suspend_state_t state)
}
}
-static int lite5200_pm_prepare(suspend_state_t state)
+static int lite5200_pm_set_target(suspend_state_t state)
+{
+ if (lite5200_pm_valid(state)) {
+ lite5200_pm_target_state = state;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int lite5200_pm_prepare(void)
{
/* deep sleep? let mpc52xx code handle that */
- if (state == PM_SUSPEND_STANDBY)
- return mpc52xx_pm_prepare(state);
+ if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
+ return mpc52xx_pm_prepare();
- if (state != PM_SUSPEND_MEM)
+ if (lite5200_pm_target_state != PM_SUSPEND_MEM)
return -EINVAL;
/* map registers */
@@ -190,17 +201,16 @@ static int lite5200_pm_enter(suspend_state_t state)
return 0;
}
-static int lite5200_pm_finish(suspend_state_t state)
+static void lite5200_pm_finish(void)
{
/* deep sleep? let mpc52xx code handle that */
- if (state == PM_SUSPEND_STANDBY) {
- return mpc52xx_pm_finish(state);
- }
- return 0;
+ if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
+ mpc52xx_pm_finish();
}
-static struct pm_ops lite5200_pm_ops = {
+static struct platform_suspend_ops lite5200_pm_ops = {
.valid = lite5200_pm_valid,
+ .set_target = lite5200_pm_set_target,
.prepare = lite5200_pm_prepare,
.enter = lite5200_pm_enter,
.finish = lite5200_pm_finish,
@@ -208,6 +218,6 @@ static struct pm_ops lite5200_pm_ops = {
int __init lite5200_pm_init(void)
{
- pm_set_ops(&lite5200_pm_ops);
+ suspend_set_ops(&lite5200_pm_ops);
return 0;
}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index ee2e7639c63..7ffa7babf25 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -1,5 +1,5 @@
#include <linux/init.h>
-#include <linux/pm.h>
+#include <linux/suspend.h>
#include <linux/io.h>
#include <asm/time.h>
#include <asm/cacheflush.h>
@@ -57,11 +57,8 @@ int mpc52xx_set_wakeup_gpio(u8 pin, u8 level)
return 0;
}
-int mpc52xx_pm_prepare(suspend_state_t state)
+int mpc52xx_pm_prepare(void)
{
- if (state != PM_SUSPEND_STANDBY)
- return -EINVAL;
-
/* map the whole register space */
mbar = mpc52xx_find_and_map("mpc5200");
if (!mbar) {
@@ -166,18 +163,16 @@ int mpc52xx_pm_enter(suspend_state_t state)
return 0;
}
-int mpc52xx_pm_finish(suspend_state_t state)
+void mpc52xx_pm_finish(void)
{
/* call board resume code */
if (mpc52xx_suspend.board_resume_finish)
mpc52xx_suspend.board_resume_finish(mbar);
iounmap(mbar);
-
- return 0;
}
-static struct pm_ops mpc52xx_pm_ops = {
+static struct platform_suspend_ops mpc52xx_pm_ops = {
.valid = mpc52xx_pm_valid,
.prepare = mpc52xx_pm_prepare,
.enter = mpc52xx_pm_enter,
@@ -186,6 +181,6 @@ static struct pm_ops mpc52xx_pm_ops = {
int __init mpc52xx_pm_init(void)
{
- pm_set_ops(&mpc52xx_pm_ops);
+ suspend_set_ops(&mpc52xx_pm_ops);
return 0;
}
diff --git a/arch/sh/boards/hp6xx/pm.c b/arch/sh/boards/hp6xx/pm.c
index 8143d1b948e..d22f6eac9cc 100644
--- a/arch/sh/boards/hp6xx/pm.c
+++ b/arch/sh/boards/hp6xx/pm.c
@@ -67,14 +67,14 @@ static int hp6x0_pm_enter(suspend_state_t state)
return 0;
}
-static struct pm_ops hp6x0_pm_ops = {
+static struct platform_suspend_ops hp6x0_pm_ops = {
.enter = hp6x0_pm_enter,
- .valid = pm_valid_only_mem,
+ .valid = suspend_valid_only_mem,
};
static int __init hp6x0_pm_init(void)
{
- pm_set_ops(&hp6x0_pm_ops);
+ suspend_set_ops(&hp6x0_pm_ops);
return 0;
}
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
index 118b9f9ff49..5027650eb27 100644
--- a/arch/x86/ia32/ia32_binfmt.c
+++ b/arch/x86/ia32/ia32_binfmt.c
@@ -289,7 +289,6 @@ static void elf32_init(struct pt_regs *regs)
static ctl_table abi_table2[] = {
{
- .ctl_name = 99,
.procname = "vsyscall32",
.data = &sysctl_vsyscall32,
.maxlen = sizeof(int),
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index f22ba8534d2..a97313b1270 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -11,7 +11,7 @@
#
# If physical address of wakeup_code is 0x12345, BIOS should call us with
# cs = 0x1234, eip = 0x05
-#
+#
#define BEEP \
inb $97, %al; \
@@ -52,7 +52,6 @@ wakeup_code:
BEEP
1:
mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board
- movw $0x0e00 + 'S', %fs:(0x12)
pushl $0 # Kill any dangerous flags
popfl
@@ -90,9 +89,6 @@ wakeup_code:
# make sure %cr4 is set correctly (features, etc)
movl real_save_cr4 - wakeup_code, %eax
movl %eax, %cr4
- movw $0xb800, %ax
- movw %ax,%fs
- movw $0x0e00 + 'i', %fs:(0x12)
# need a gdt -- use lgdtl to force 32-bit operands, in case
# the GDT is located past 16 megabytes.
@@ -102,8 +98,6 @@ wakeup_code:
movl %eax, %cr0
jmp 1f
1:
- movw $0x0e00 + 'n', %fs:(0x14)
-
movl real_magic - wakeup_code, %eax
cmpl $0x12345678, %eax
jne bogus_real_magic
@@ -122,13 +116,11 @@ real_save_cr4: .long 0
real_magic: .long 0
video_mode: .long 0
realmode_flags: .long 0
-beep_flags: .long 0
real_efer_save_restore: .long 0
real_save_efer_edx: .long 0
real_save_efer_eax: .long 0
bogus_real_magic:
- movw $0x0e00 + 'B', %fs:(0x12)
jmp bogus_real_magic
/* This code uses an extended set of video mode numbers. These include:
@@ -194,7 +186,6 @@ wakeup_pmode_return:
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
- movw $0x0e00 + 'u', 0xb8016
# reload the gdt, as we need the full 32 bit address
lgdt saved_gdt
@@ -218,7 +209,6 @@ wakeup_pmode_return:
jmp *%eax
bogus_magic:
- movw $0x0e00 + 'B', 0xb8018
jmp bogus_magic
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8b4357e1efe..55608ec2ed7 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -41,7 +41,6 @@ wakeup_code:
# Running in *copy* of this code, somewhere in low 1MB.
- movb $0xa1, %al ; outb %al, $0x80
cli
cld
# setup data segment
@@ -65,11 +64,6 @@ wakeup_code:
cmpl $0x12345678, %eax
jne bogus_real_magic
- call verify_cpu # Verify the cpu supports long
- # mode
- testl %eax, %eax
- jnz no_longmode
-
testl $1, realmode_flags - wakeup_code
jz 1f
lcall $0xc000,$3
@@ -84,12 +78,6 @@ wakeup_code:
call mode_set
1:
- movw $0xb800, %ax
- movw %ax,%fs
- movw $0x0e00 + 'L', %fs:(0x10)
-
- movb $0xa2, %al ; outb %al, $0x80
-
mov %ds, %ax # Find 32bit wakeup_code addr
movzx %ax, %esi # (Convert %ds:gdt to a liner ptr)
shll $4, %esi
@@ -117,14 +105,10 @@ wakeup_32_vector:
.code32
wakeup_32:
# Running in this code, but at low address; paging is not yet turned on.
- movb $0xa5, %al ; outb %al, $0x80
movl $__KERNEL_DS, %eax
movl %eax, %ds
- movw $0x0e00 + 'i', %ds:(0xb8012)
- movb $0xa8, %al ; outb %al, $0x80;
-
/*
* Prepare for entering 64bits mode
*/
@@ -200,16 +184,11 @@ wakeup_long64:
*/
lgdt cpu_gdt_descr
- movw $0x0e00 + 'n', %ds:(0xb8014)
- movb $0xa9, %al ; outb %al, $0x80
-
movq saved_magic, %rax
movq $0x123456789abcdef0, %rdx
cmpq %rdx, %rax
jne bogus_64_magic
- movw $0x0e00 + 'u', %ds:(0xb8016)
-
nop
nop
movw $__KERNEL_DS, %ax
@@ -220,13 +199,11 @@ wakeup_long64:
movw %ax, %gs
movq saved_rsp, %rsp
- movw $0x0e00 + 'x', %ds:(0xb8018)
movq saved_rbx, %rbx
movq saved_rdi, %rdi
movq saved_rsi, %rsi
movq saved_rbp, %rbp
- movw $0x0e00 + '!', %ds:(0xb801a)
movq saved_rip, %rax
jmp *%rax
@@ -256,21 +233,12 @@ realmode_flags: .quad 0
.code16
bogus_real_magic:
- movb $0xba,%al ; outb %al,$0x80
jmp bogus_real_magic
.code64
bogus_64_magic:
- movb $0xb3,%al ; outb %al,$0x80
jmp bogus_64_magic
-.code16
-no_longmode:
- movb $0xbc,%al ; outb %al,$0x80
- jmp no_longmode
-
-#include "../verify_cpu_64.S"
-
/* This code uses an extended set of video mode numbers. These include:
* Aliases for standard modes
* NORMAL_VGA (-1)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1826395ebee..297a2411694 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,6 +499,11 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {
static void free_cache_attributes(unsigned int cpu)
{
+ int i;
+
+ for (i = 0; i < num_cache_leaves; i++)
+ cache_remove_shared_cpu_map(cpu, i);
+
kfree(cpuid4_info[cpu]);
cpuid4_info[cpu] = NULL;
}
@@ -506,8 +511,8 @@ static void free_cache_attributes(unsigned int cpu)
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
struct _cpuid4_info *this_leaf;
- unsigned long j;
- int retval;
+ unsigned long j;
+ int retval;
cpumask_t oldmask;
if (num_cache_leaves == 0)
@@ -524,19 +529,26 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
goto out;
/* Do cpuid and store the results */
- retval = 0;
for (j = 0; j < num_cache_leaves; j++) {
this_leaf = CPUID4_INFO_IDX(cpu, j);
retval = cpuid4_cache_lookup(j, this_leaf);
- if (unlikely(retval < 0))
+ if (unlikely(retval < 0)) {
+ int i;
+
+ for (i = 0; i < j; i++)
+ cache_remove_shared_cpu_map(cpu, i);
break;
+ }
cache_shared_cpu_map_setup(cpu, j);
}
set_cpus_allowed(current, oldmask);
out:
- if (retval)
- free_cache_attributes(cpu);
+ if (retval) {
+ kfree(cpuid4_info[cpu]);
+ cpuid4_info[cpu] = NULL;
+ }
+
return retval;
}
@@ -669,7 +681,7 @@ static struct kobj_type ktype_percpu_entry = {
.sysfs_ops = &sysfs_ops,
};
-static void cpuid4_cache_sysfs_exit(unsigned int cpu)
+static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
kfree(cache_kobject[cpu]);
kfree(index_kobject[cpu]);
@@ -680,13 +692,14 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
+ int err;
if (num_cache_leaves == 0)
return -ENOENT;
- detect_cache_attributes(cpu);
- if (cpuid4_info[cpu] == NULL)
- return -ENOENT;
+ err = detect_cache_attributes(cpu);
+ if (err)
+ return err;
/* Allocate all required memory */
cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
@@ -705,13 +718,15 @@ err_out:
return -ENOMEM;
}
+static cpumask_t cache_dev_map = CPU_MASK_NONE;
+
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i, j;
struct _index_kobject *this_object;
- int retval = 0;
+ int retval;
retval = cpuid4_cache_sysfs_init(cpu);
if (unlikely(retval < 0))
@@ -721,6 +736,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
kobject_set_name(cache_kobject[cpu], "%s", "cache");
cache_kobject[cpu]->ktype = &ktype_percpu_entry;
retval = kobject_register(cache_kobject[cpu]);
+ if (retval < 0) {
+ cpuid4_cache_sysfs_exit(cpu);
+ return retval;
+ }
for (i = 0; i < num_cache_leaves; i++) {
this_object = INDEX_KOBJECT_PTR(cpu,i);
@@ -740,6 +759,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
break;
}
}
+ if (!retval)
+ cpu_set(cpu, cache_dev_map);
+
return retval;
}
@@ -750,13 +772,14 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
if (cpuid4_info[cpu] == NULL)
return;
- for (i = 0; i < num_cache_leaves; i++) {
- cache_remove_shared_cpu_map(cpu, i);
+ if (!cpu_isset(cpu, cache_dev_map))
+ return;
+ cpu_clear(cpu, cache_dev_map);
+
+ for (i = 0; i < num_cache_leaves; i++)
kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
- }
kobject_unregister(cache_kobject[cpu]);
cpuid4_cache_sysfs_exit(cpu);
- return;
}
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
@@ -781,7 +804,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
- .notifier_call = cacheinfo_cpu_callback,
+ .notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
@@ -791,14 +814,15 @@ static int __cpuinit cache_sysfs_init(void)
if (num_cache_leaves == 0)
return 0;
- register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-
for_each_online_cpu(i) {
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+ int err;
+ struct sys_device *sys_dev = get_cpu_sysdev(i);
- cache_add_dev(sys_dev);
+ err = cache_add_dev(sys_dev);
+ if (err)
+ return err;
}
-
+ register_hotcpu_notifier(&cacheinfo_cpu_notifier);
return 0;
}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 494d320d909..24885be5c48 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -131,17 +131,19 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
struct sys_device *sys_dev;
- int err;
+ int err = 0;
sys_dev = get_cpu_sysdev(cpu);
switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
mutex_lock(&therm_cpu_lock);
err = thermal_throttle_add_dev(sys_dev);
mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
mutex_lock(&therm_cpu_lock);
@@ -149,7 +151,7 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
mutex_unlock(&therm_cpu_lock);
break;
}
- return NOTIFY_OK;
+ return err ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 8ca8f864896..66e6b797b2c 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -802,16 +802,29 @@ static __cpuinit int mce_create_device(unsigned int cpu)
if (!mce_available(&cpu_data[cpu]))
return -EIO;
+ memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
per_cpu(device_mce,cpu).id = cpu;
per_cpu(device_mce,cpu).cls = &mce_sysclass;
err = sysdev_register(&per_cpu(device_mce,cpu));
+ if (err)
+ return err;
+
+ for (i = 0; mce_attributes[i]; i++) {
+ err = sysdev_create_file(&per_cpu(device_mce,cpu),
+ mce_attributes[i]);
+ if (err)
+ goto error;
+ }
- if (!err) {
- for (i = 0; mce_attributes[i]; i++)
- sysdev_create_file(&per_cpu(device_mce,cpu),
- mce_attributes[i]);
+ return 0;
+error:
+ while (i--) {
+ sysdev_remove_file(&per_cpu(device_mce,cpu),
+ mce_attributes[i]);
}
+ sysdev_unregister(&per_cpu(device_mce,cpu));
+
return err;
}
@@ -823,7 +836,6 @@ static void mce_remove_device(unsigned int cpu)
sysdev_remove_file(&per_cpu(device_mce,cpu),
mce_attributes[i]);
sysdev_unregister(&per_cpu(device_mce,cpu));
- memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -831,18 +843,21 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ int err = 0;
switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- mce_create_device(cpu);
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ err = mce_create_device(cpu);
break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
mce_remove_device(cpu);
break;
}
- return NOTIFY_OK;
+ return err ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block mce_cpu_notifier = {
@@ -857,9 +872,13 @@ static __init int mce_init_device(void)
if (!mce_available(&boot_cpu_data))
return -EIO;
err = sysdev_class_register(&mce_sysclass);
+ if (err)
+ return err;
for_each_online_cpu(i) {
- mce_create_device(i);
+ err = mce_create_device(i);
+ if (err)
+ return err;
}
register_hotcpu_notifier(&mce_cpu_notifier);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index df85c9c1360..e18e516cf54 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -133,37 +133,42 @@ static const struct file_operations msr_fops = {
.open = msr_open,
};
-static int __cpuinit msr_device_create(int i)
+static int __cpuinit msr_device_create(int cpu)
{
- int err = 0;
struct device *dev;
- dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), "msr%d",i);
- if (IS_ERR(dev))
- err = PTR_ERR(dev);
- return err;
+ dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu),
+ "msr%d", cpu);
+ return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+
+static void msr_device_destroy(int cpu)
+{
+ device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
}
static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ int err = 0;
switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- msr_device_create(cpu);
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ err = msr_device_create(cpu);
break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ msr_device_destroy(cpu);
break;
}
- return NOTIFY_OK;
+ return err ? NOTIFY_BAD : NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
-{
+static struct notifier_block __cpuinitdata msr_class_cpu_notifier = {
.notifier_call = msr_class_cpu_callback,
};
@@ -196,7 +201,7 @@ static int __init msr_init(void)
out_class:
i = 0;
for_each_online_cpu(i)
- device_destroy(msr_class, MKDEV(MSR_MAJOR, i));
+ msr_device_destroy(i);
class_destroy(msr_class);
out_chrdev:
unregister_chrdev(MSR_MAJOR, "cpu/msr");
@@ -208,7 +213,7 @@ static void __exit msr_exit(void)
{
int cpu = 0;
for_each_online_cpu(cpu)
- device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ msr_device_destroy(cpu);
class_destroy(msr_class);
unregister_chrdev(MSR_MAJOR, "cpu/msr");
unregister_hotcpu_notifier(&msr_class_cpu_notifier);
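
The therm_throt.c, mce_64.c and msr.c changes above all apply the same CPU-hotplug fix: per-CPU devices are created at CPU_UP_PREPARE(_FROZEN) instead of CPU_ONLINE, torn down on CPU_UP_CANCELED as well as CPU_DEAD, and a creation failure is reported as NOTIFY_BAD so the bring-up is cancelled. A condensed sketch of that pattern (foo_device_create/destroy are placeholders standing in for the drivers' real setup and teardown):

#include <linux/cpu.h>
#include <linux/notifier.h>

/* placeholder per-CPU setup/teardown; the real drivers register sysfs
 * files, character devices, etc. here */
static int foo_device_create(unsigned int cpu) { return 0; }
static void foo_device_destroy(unsigned int cpu) { }

static int __cpuinit foo_cpu_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = foo_device_create(cpu);	/* may fail */
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		foo_device_destroy(cpu);	/* undo the CPU_UP_PREPARE work */
		break;
	}
	/* NOTIFY_BAD tells the hotplug core to cancel the CPU bring-up */
	return err ? NOTIFY_BAD : NOTIFY_OK;
}
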
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
index 573c0a6e0ac..f8fafe527ff 100644
--- a/arch/x86/kernel/suspend_64.c
+++ b/arch/x86/kernel/suspend_64.c
@@ -150,8 +150,22 @@ void fix_processor_context(void)
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);
+/*
+ * Address to jump to in the last phase of restore in order to get to the image
+ * kernel's text (this value is passed in the image header).
+ */
+unsigned long restore_jump_address;
+
+/*
+ * Value of the cr3 register from before the hibernation (this value is passed
+ * in the image header).
+ */
+unsigned long restore_cr3;
+
pgd_t *temp_level4_pgt;
+void *relocated_restore_code;
+
static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
long i, j;
@@ -175,7 +189,7 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en
if (paddr >= end)
break;
- pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr;
+ pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
pe &= __supported_pte_mask;
set_pmd(pmd, __pmd(pe));
}
@@ -183,25 +197,42 @@ static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long en
return 0;
}
+static int res_kernel_text_pud_init(pud_t *pud, unsigned long start)
+{
+ pmd_t *pmd;
+ unsigned long paddr;
+
+ pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pmd)
+ return -ENOMEM;
+ set_pud(pud + pud_index(start), __pud(__pa(pmd) | _KERNPG_TABLE));
+ for (paddr = 0; paddr < KERNEL_TEXT_SIZE; pmd++, paddr += PMD_SIZE) {
+ unsigned long pe;
+
+ pe = __PAGE_KERNEL_LARGE_EXEC | _PAGE_GLOBAL | paddr;
+ pe &= __supported_pte_mask;
+ set_pmd(pmd, __pmd(pe));
+ }
+
+ return 0;
+}
+
static int set_up_temporary_mappings(void)
{
unsigned long start, end, next;
+ pud_t *pud;
int error;
temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!temp_level4_pgt)
return -ENOMEM;
- /* It is safe to reuse the original kernel mapping */
- set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
- init_level4_pgt[pgd_index(__START_KERNEL_map)]);
-
/* Set up the direct mapping from scratch */
start = (unsigned long)pfn_to_kaddr(0);
end = (unsigned long)pfn_to_kaddr(end_pfn);
for (; start < end; start = next) {
- pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
next = start + PGDIR_SIZE;
@@ -212,7 +243,17 @@ static int set_up_temporary_mappings(void)
set_pgd(temp_level4_pgt + pgd_index(start),
mk_kernel_pgd(__pa(pud)));
}
- return 0;
+
+ /* Set up the kernel text mapping from scratch */
+ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!pud)
+ return -ENOMEM;
+ error = res_kernel_text_pud_init(pud, __START_KERNEL_map);
+ if (!error)
+ set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
+ __pgd(__pa(pud) | _PAGE_TABLE));
+
+ return error;
}
int swsusp_arch_resume(void)
@@ -222,6 +263,13 @@ int swsusp_arch_resume(void)
/* We have got enough memory and from now on we cannot recover */
if ((error = set_up_temporary_mappings()))
return error;
+
+ relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
+ if (!relocated_restore_code)
+ return -ENOMEM;
+ memcpy(relocated_restore_code, &core_restore_code,
+ &restore_registers - &core_restore_code);
+
restore_image();
return 0;
}
@@ -236,4 +284,43 @@ int pfn_is_nosave(unsigned long pfn)
unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
+
+struct restore_data_record {
+ unsigned long jump_address;
+ unsigned long cr3;
+ unsigned long magic;
+};
+
+#define RESTORE_MAGIC 0x0123456789ABCDEFUL
+
+/**
+ * arch_hibernation_header_save - populate the architecture specific part
+ * of a hibernation image header
+ * @addr: address to save the data at
+ */
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+ struct restore_data_record *rdr = addr;
+
+ if (max_size < sizeof(struct restore_data_record))
+ return -EOVERFLOW;
+ rdr->jump_address = restore_jump_address;
+ rdr->cr3 = restore_cr3;
+ rdr->magic = RESTORE_MAGIC;
+ return 0;
+}
+
+/**
+ * arch_hibernation_header_restore - read the architecture specific data
+ * from the hibernation image header
+ * @addr: address to read the data from
+ */
+int arch_hibernation_header_restore(void *addr)
+{
+ struct restore_data_record *rdr = addr;
+
+ restore_jump_address = rdr->jump_address;
+ restore_cr3 = rdr->cr3;
+ return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
+}
#endif /* CONFIG_HIBERNATION */
diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S
index 16d183f67bc..48344b666d2 100644
--- a/arch/x86/kernel/suspend_asm_64.S
+++ b/arch/x86/kernel/suspend_asm_64.S
@@ -2,8 +2,8 @@
*
* Distribute under GPLv2.
*
- * swsusp_arch_resume may not use any stack, nor any variable that is
- * not "NoSave" during copying pages:
+ * swsusp_arch_resume must not use any stack or any nonlocal variables while
+ * copying pages:
*
* Its rewriting one kernel image with another. What is stack in "old"
* image could very well be data page in "new" image, and overwriting
@@ -36,6 +36,13 @@ ENTRY(swsusp_arch_suspend)
movq %r15, saved_context_r15(%rip)
pushfq ; popq saved_context_eflags(%rip)
+ /* save the address of restore_registers */
+ movq $restore_registers, %rax
+ movq %rax, restore_jump_address(%rip)
+ /* save cr3 */
+ movq %cr3, %rax
+ movq %rax, restore_cr3(%rip)
+
call swsusp_save
ret
@@ -54,7 +61,17 @@ ENTRY(restore_image)
movq %rcx, %cr3;
movq %rax, %cr4; # turn PGE back on
+ /* prepare to jump to the image kernel */
+ movq restore_jump_address(%rip), %rax
+ movq restore_cr3(%rip), %rbx
+
+ /* prepare to copy image data to their original locations */
movq restore_pblist(%rip), %rdx
+ movq relocated_restore_code(%rip), %rcx
+ jmpq *%rcx
+
+ /* code below has been relocated to a safe page */
+ENTRY(core_restore_code)
loop:
testq %rdx, %rdx
jz done
@@ -62,7 +79,7 @@ loop:
/* get addresses from the pbe and copy the page */
movq pbe_address(%rdx), %rsi
movq pbe_orig_address(%rdx), %rdi
- movq $512, %rcx
+ movq $(PAGE_SIZE >> 3), %rcx
rep
movsq
@@ -70,10 +87,22 @@ loop:
movq pbe_next(%rdx), %rdx
jmp loop
done:
+ /* jump to the restore_registers address from the image header */
+ jmpq *%rax
+ /*
+ * NOTE: This assumes that the boot kernel's text mapping covers the
+ * image kernel's page containing restore_registers and the address of
+ * this page is the same as in the image kernel's text mapping (it
+ * should always be true, because the text mapping is linear, starting
+ * from 0, and is supposed to cover the entire kernel text for every
+ * kernel).
+ *
+ * code below belongs to the image kernel
+ */
+
+ENTRY(restore_registers)
/* go back to the original page tables */
- movq $(init_level4_pgt - __START_KERNEL_map), %rax
- addq phys_base(%rip), %rax
- movq %rax, %cr3
+ movq %rbx, %cr3
/* Flush TLB, including "global" things (vmalloc) */
movq mmu_cr4_features(%rip), %rax
@@ -84,12 +113,9 @@ done:
movq %rcx, %cr3
movq %rax, %cr4; # turn PGE back on
- movl $24, %eax
- movl %eax, %ds
-
movq saved_context_esp(%rip), %rsp
movq saved_context_ebp(%rip), %rbp
- /* Don't restore %rax, it must be 0 anyway */
+ /* restore GPRs (we don't restore %rax, it must be 0 anyway) */
movq saved_context_ebx(%rip), %rbx
movq saved_context_ecx(%rip), %rcx
movq saved_context_edx(%rip), %rdx
@@ -107,4 +133,7 @@ done:
xorq %rax, %rax
+ /* tell the hibernation core that we've just restored the memory */
+ movq %rax, in_suspend(%rip)
+
ret
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 8a67e282cb5..585541ca1a7 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -64,6 +64,16 @@ struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
.sysctl_enabled = 1,
};
+void update_vsyscall_tz(void)
+{
+ unsigned long flags;
+
+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ /* sys_tz has changed */
+ vsyscall_gtod_data.sys_tz = sys_tz;
+ write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+}
+
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
unsigned long flags;
@@ -77,7 +87,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
vsyscall_gtod_data.clock.shift = clock->shift;
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
- vsyscall_gtod_data.sys_tz = sys_tz;
vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
@@ -163,7 +172,7 @@ time_t __vsyscall(1) vtime(time_t *t)
if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
return time_syscall(t);
- vgettimeofday(&tv, 0);
+ vgettimeofday(&tv, NULL);
result = tv.tv_sec;
if (t)
*t = result;
@@ -257,18 +266,10 @@ out:
return ret;
}
-static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen)
-{
- return -ENOSYS;
-}
-
static ctl_table kernel_table2[] = {
- { .ctl_name = 99, .procname = "vsyscall64",
+ { .procname = "vsyscall64",
.data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
.mode = 0644,
- .strategy = vsyscall_sysctl_nostrat,
.proc_handler = vsyscall_sysctl_change },
{}
};
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 43fafe9e9c0..78cb68f2ebb 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -716,6 +716,11 @@ menu "Power management options"
source kernel/power/Kconfig
+config ARCH_HIBERNATION_HEADER
+ bool
+ depends on HIBERNATION
+ default y
+
source "drivers/acpi/Kconfig"
source "arch/x86/kernel/cpufreq/Kconfig"