| author | Becky Bruce <bgill@freescale.com> | 2006-04-18 14:29:34 -0500 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2006-04-21 22:33:22 +1000 |
| commit | ea1e847cc202e805769c3c46ba5e5c53714068a1 (patch) | |
| tree | 4f35175c166f42a5b839af5dfe7e39155666b1fc /arch/ppc | |
| parent | 23b2527d5eae89841eb66b46e80ec91980493dda (diff) | |
[PATCH] ppc: Fix powersave code on arch/ppc
Fix asm-offsets.c and entry.S to work with the new power-save code.
Changes made in arch/powerpc need to exist in arch/ppc as well, since
the idle code is shared by both ppc and powerpc.
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
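
For context on the asm-offsets.c half of the change: the TI_* constants are generated, not hand-written. The file is compiled to assembly only, and each DEFINE() emits a marker line that the build rewrites into an assembler #define, which is how entry.S can name thread_info offsets such as TI_LOCAL_FLAGS. Below is a minimal standalone sketch of that mechanism; the thread_info layout shown is an abbreviated stand-in, not the kernel's full struct.

```c
/* Minimal sketch of the asm-offsets mechanism; the struct below is an
 * abbreviated stand-in for the kernel's thread_info. */
#include <stddef.h>

struct thread_info {
	unsigned long flags;        /* TIF_* bits, may be set by other CPUs */
	unsigned long local_flags;  /* TLF_* bits, touched only by this CPU */
	int cpu;
};

/* Same trick the kernel's asm-offsets.c uses: emit "->SYM value expr" into
 * the generated assembly, where a build script rewrites it as
 * "#define SYM value" for use from .S files. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
	return 0;
}
```

Compiling this with `gcc -S sketch.c -o -` shows the `->TI_LOCAL_FLAGS` marker line with the computed offset; the file is never assembled, only scanned. The one-line addition in this patch simply makes that offset available to arch/ppc's entry.S as well.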
Diffstat (limited to 'arch/ppc')
| -rw-r--r-- | arch/ppc/kernel/asm-offsets.c | 1 |
|---|---|---|
| -rw-r--r-- | arch/ppc/kernel/entry.S | 33 |

2 files changed, 17 insertions, 17 deletions
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
index 77e4dc780f8..cc7c4aea939 100644
--- a/arch/ppc/kernel/asm-offsets.c
+++ b/arch/ppc/kernel/asm-offsets.c
@@ -134,6 +134,7 @@ main(void)
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 5891ecbdc70..1adc9145516 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -128,29 +128,26 @@ transfer_to_handler:
 	stw	r12,4(r11)
 #endif
 	b	3f
+
 2:	/* if from kernel, check interrupted DOZE/NAP mode and
 	 * check for stack overflow
 	 */
+	lwz	r9,THREAD_INFO-THREAD(r12)
+	cmplw	r1,r9			/* if r1 <= current->thread_info */
+	ble-	stack_ovf		/* then the kernel stack overflowed */
+5:
 #ifdef CONFIG_6xx
-	mfspr	r11,SPRN_HID0
-	mtcr	r11
-BEGIN_FTR_SECTION
-	bt-	8,4f			/* Check DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
-	bt-	9,4f			/* Check NAP */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+	tophys(r9,r9)			/* check local flags */
+	lwz	r12,TI_LOCAL_FLAGS(r9)
+	mtcrf	0x01,r12
+	bt-	31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
 	.globl transfer_to_handler_cont
 transfer_to_handler_cont:
-	lwz	r11,THREAD_INFO-THREAD(r12)
-	cmplw	r1,r11			/* if r1 <= current->thread_info */
-	ble-	stack_ovf		/* then the kernel stack overflowed */
 3:
 	mflr	r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
-	FIX_SRR1(r10,r12)
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r10
 	mtlr	r9
@@ -158,7 +155,9 @@ transfer_to_handler_cont:
 	RFI				/* jump to handler, enable MMU */
 
 #ifdef CONFIG_6xx
-4:	b	power_save_6xx_restore
+4:	rlwinm	r12,r12,0,~_TLF_NAPPING
+	stw	r12,TI_LOCAL_FLAGS(r9)
+	b	power_save_6xx_restore
 #endif
 
 /*
@@ -167,10 +166,10 @@ transfer_to_handler_cont:
  */
 stack_ovf:
 	/* sometimes we use a statically-allocated stack, which is OK. */
-	lis	r11,_end@h
-	ori	r11,r11,_end@l
-	cmplw	r1,r11
-	ble	3b			/* r1 <= &_end is OK */
+	lis	r12,_end@h
+	ori	r12,r12,_end@l
+	cmplw	r1,r12
+	ble	5b			/* r1 <= &_end is OK */
 	SAVE_NVGPRS(r11)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	lis	r1,init_thread_union@ha
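
The entry.S half of the patch implements a handshake with the shared idle loop: the 6xx power-save path sets TLF_NAPPING in thread_info->local_flags before napping, and on the wakeup exception transfer_to_handler now tests that bit, clears it, and branches to power_save_6xx_restore instead of resuming the interrupted context. The following is a rough C-level sketch of that handshake, assuming the real logic lives in PowerPC assembly; only local_flags and TLF_NAPPING correspond to real kernel names, the rest is illustrative.

```c
/* Hedged sketch of the nap handshake this patch wires up; the real code is
 * PowerPC assembly (the idle code and entry.S).  Names other than
 * local_flags and TLF_NAPPING are illustrative only. */
#include <stdio.h>

#define TLF_NAPPING	0
#define _TLF_NAPPING	(1UL << TLF_NAPPING)

struct thread_info_sketch {
	unsigned long local_flags;	/* per-CPU private flags, no atomics needed */
};

/* Idle side: announce the nap, then drop into low-power mode until an
 * exception wakes the core. */
static void idle_nap(struct thread_info_sketch *ti)
{
	ti->local_flags |= _TLF_NAPPING;
	/* ...enter doze/nap; execution resumes in the exception path... */
}

/* Exception-entry side, mirroring the new entry.S code: if the interrupted
 * context was napping, clear the flag and tell the caller to resume through
 * the power-save restore path rather than the saved context. */
static int woke_from_nap(struct thread_info_sketch *ti)
{
	if (ti->local_flags & _TLF_NAPPING) {
		ti->local_flags &= ~_TLF_NAPPING;  /* the rlwinm/stw pair in entry.S */
		return 1;                          /* branch to power_save_6xx_restore */
	}
	return 0;
}

int main(void)
{
	struct thread_info_sketch ti = { 0 };

	idle_nap(&ti);
	printf("woke from nap: %d\n", woke_from_nap(&ti));	/* prints 1 */
	printf("woke from nap: %d\n", woke_from_nap(&ti));	/* prints 0 */
	return 0;
}
```

Because local_flags is only ever modified by its own CPU, the lwz/rlwinm/stw sequence in entry.S needs no atomic update; that is the point of keeping these bits separate from the ordinary flags word, and it is also why TI_LOCAL_FLAGS must resolve to the offset of local_flags rather than flags.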