| author | Paul Mackerras <paulus@samba.org> | 2002-05-10 12:56:24 +1000 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2002-05-10 12:56:24 +1000 |
| commit | 4e416e9812ca9fe32ea061026bd30e9470fa9421 (patch) | |
| tree | f201f87313f53271d9baefb14181c6d2cf8f9f0a /arch/ppc/kernel/entry.S | |
| parent | bb3ffc11438abe20c39265cf83c9e01340c6970b (diff) | |
PPC32: This changeset adds preemptible kernel support for ppc32
and also streamlines the exception entry/exit code by not saving
all the GPRs on the common exceptions (system call, external
interrupt and decrementer).
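The preemption half of the change centers on the new `ret_from_except`/`resume_kernel` path in the diff below: interrupts are hard-disabled before the thread flags are tested, and a return to kernel mode may now reschedule, but only when the preempt count is zero and `_TIF_NEED_RESCHED` is set. A minimal C sketch of that exit-path policy follows; the flag values, types, and helper names are illustrative stand-ins, not the kernel's real definitions.

```c
#include <stdint.h>

#define MSR_PR            0x4000u    /* MSR "problem state" bit: set = user mode */
#define _TIF_SIGPENDING   (1u << 2)  /* illustrative flag bits */
#define _TIF_NEED_RESCHED (1u << 3)

struct thread_info { uint32_t flags; int preempt_count; };
struct pt_regs     { uint32_t msr; };

/* stand-ins for the primitives the assembly uses */
static void hard_irq_disable(void) { /* MTMSRD(MSR_KERNEL), MSR_EE clear */ }
static void do_work(struct thread_info *ti) { ti->flags = 0; /* schedule()/do_signal() */ }

void ret_from_except(struct pt_regs *regs, struct thread_info *ti)
{
    hard_irq_disable();     /* flags can't change between the test and the RFI */
    if (regs->msr & MSR_PR) {
        /* returning to user mode: handle pending signals and rescheduling */
        if (ti->flags & (_TIF_SIGPENDING | _TIF_NEED_RESCHED))
            do_work(ti);
    } else if (ti->preempt_count == 0 && (ti->flags & _TIF_NEED_RESCHED)) {
        /* CONFIG_PREEMPT: returning to kernel mode with preemption allowed */
        do_work(ti);
    }
    /* fall through to "restore:", which reloads the GPRs and does the RFI */
}
```

Hard-disabling interrupts before the test closes the race in which a flag is set after the check but before the return from the exception.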
Diffstat (limited to 'arch/ppc/kernel/entry.S')
| -rw-r--r-- | arch/ppc/kernel/entry.S | 726 |
1 file changed, 466 insertions, 260 deletions
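The "not saving all the GPRs" half works by tagging each exception frame: in the diff below, the low bit of the saved TRAP word stays set while r13-r31 have not been written to the frame, and any path that needs the full register set (`ppc_fork`, `ppc_sigsuspend`, traced syscalls, signal delivery, `transfer_to_handler_full`) runs `SAVE_NVGPRS` and clears the bit. A hedged C model of that convention, using an illustrative `pt_regs` layout rather than the real one:

```c
#include <stdint.h>

struct pt_regs {
    uint32_t gpr[32];   /* saved general-purpose registers */
    uint32_t trap;      /* exception vector; LSB = "r13-r31 not saved" */
};

/* Make sure the frame holds the non-volatile registers before anything
 * (ptrace, signal delivery) inspects or restores them. */
void ensure_full_regs(struct pt_regs *regs, const uint32_t live_nvgprs[19])
{
    if (regs->trap & 1u) {                     /* partial frame? */
        for (int i = 13; i <= 31; i++)         /* SAVE_NVGPRS(r1) */
            regs->gpr[i] = live_nvgprs[i - 13];
        regs->trap &= ~1u;                     /* clear LSB: full set saved */
    }
}
```

The common fast exceptions (system call, external interrupt, decrementer) never pay for the 19 extra stores unless one of these slow paths is actually taken.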
```diff
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 8a5f26447161..1414c7e02b91 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -46,45 +46,42 @@
  * This code finishes saving the registers to the exception frame
  * and jumps to the appropriate handler for the exception, turning
  * on address translation.
+ * Note that we rely on the caller having set cr0.eq iff the exception
+ * occurred in kernel mode (i.e. MSR:PR = 0).
  */
+	.globl	transfer_to_handler_full
+transfer_to_handler_full:
+	SAVE_NVGPRS(r11)
+	/* fall through */
+
 	.globl	transfer_to_handler
 transfer_to_handler:
-	stw	r22,_NIP(r21)
-	stw	r23,_MSR(r21)
-	SAVE_4GPRS(8, r21)
-	SAVE_8GPRS(12, r21)
-	SAVE_8GPRS(24, r21)
-	andi.	r23,r23,MSR_PR
-	mfspr	r23,SPRG3
-	addi	r2,r23,-THREAD		/* set r2 to current */
-	tovirt(r2,r2)
+	stw	r2,GPR2(r11)
+	stw	r12,_NIP(r11)
+	stw	r9,_MSR(r11)
+	mfctr	r12
+	mfspr	r2,XER
+	stw	r12,_CTR(r11)
+	stw	r2,_XER(r11)
+	mfspr	r12,SPRG3
+	addi	r2,r12,-THREAD
+	tovirt(r2,r2)			/* set r2 to current */
 	beq	2f			/* if from user, fix up THREAD.regs */
-	addi	r24,r1,STACK_FRAME_OVERHEAD
-	stw	r24,PT_REGS(r23)
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-	mfspr	r22,SPRN_VRSAVE		/* if G4, save vrsave register value */
-	stw	r22,THREAD_VRSAVE(r23)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
+	addi	r11,r1,STACK_FRAME_OVERHEAD
+	stw	r11,PT_REGS(r12)
 	b	3f
 2:	/* if from kernel, check for stack overflow */
-	lwz	r22,THREAD_INFO-THREAD(r23)
-	cmplw	r1,r22			/* if r1 <= current->thread_info */
+	lwz	r11,THREAD_INFO-THREAD(r12)
+	cmplw	r1,r11			/* if r1 <= current->thread_info */
 	ble-	stack_ovf		/* then the kernel stack overflowed */
 3:
-	mflr	r23
-	andi.	r24,r23,0x3f00		/* get vector offset */
-	stw	r24,TRAP(r21)
-	li	r22,0
-	stw	r22,RESULT(r21)
-	mtspr	SPRG2,r22		/* r1 is now kernel sp */
-	lwz	r24,0(r23)		/* virtual address of handler */
-	lwz	r23,4(r23)		/* where to go when done */
-	FIX_SRR1(r20,r22)
-	mtspr	SRR0,r24
-	mtspr	SRR1,r20
-	mtlr	r23
+	mflr	r9
+	lwz	r11,0(r9)		/* virtual address of handler */
+	lwz	r9,4(r9)		/* where to go when done */
+	FIX_SRR1(r10,r12)
+	mtspr	SRR0,r11
+	mtspr	SRR1,r10
+	mtlr	r9
 	SYNC
 	RFI				/* jump to handler, enable MMU */
@@ -93,108 +90,63 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  * and call StackOverflow(regs), which should not return.
  */
 stack_ovf:
+	SAVE_NVGPRS(r11)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
+	tovirt(r2,r2)			/* set r2 to current */
 	lis	r1,init_thread_union@ha
 	addi	r1,r1,init_thread_union@l
 	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
-	lis	r24,StackOverflow@ha
-	addi	r24,r24,StackOverflow@l
-	li	r20,MSR_KERNEL
-	FIX_SRR1(r20,r22)
-	mtspr	SRR0,r24
-	mtspr	SRR1,r20
+	lis	r9,StackOverflow@ha
+	addi	r9,r9,StackOverflow@l
+	li	r10,MSR_KERNEL
+	FIX_SRR1(r10,r12)
+	mtspr	SRR0,r9
+	mtspr	SRR1,r10
 	SYNC
 	RFI
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef SHOW_SYSCALLS_TASK
-	.data
-show_syscalls_task:
-	.long	-1
-#endif
-
 /*
  * Handle a system call.
  */
-	.text
 	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
 	.stabs	"entry.S",N_SO,0,0,0f
 0:
 _GLOBAL(DoSyscall)
 	stw	r0,THREAD+LAST_SYSCALL(r2)
+	stw	r3,ORIG_GPR3(r1)
+	li	r12,0
+	stw	r12,RESULT(r1)
 	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
-	lis	r10,0x1000
-	andc	r11,r11,r10
+	rlwinm	r11,r11,0,4,2
 	stw	r11,_CCR(r1)
 #ifdef SHOW_SYSCALLS
-#ifdef SHOW_SYSCALLS_TASK
-	lis	r31,show_syscalls_task@ha
-	lwz	r31,show_syscalls_task@l(r31)
-	cmp	0,r2,r31
-	bne	1f
-#endif
-	lis	r3,7f@ha
-	addi	r3,r3,7f@l
-	lwz	r4,GPR0(r1)
-	lwz	r5,GPR3(r1)
-	lwz	r6,GPR4(r1)
-	lwz	r7,GPR5(r1)
-	lwz	r8,GPR6(r1)
-	lwz	r9,GPR7(r1)
-	bl	printk
-	lis	r3,77f@ha
-	addi	r3,r3,77f@l
-	lwz	r4,GPR8(r1)
-	lwz	r5,GPR9(r1)
-	mr	r6,r2
-	bl	printk
-	lwz	r0,GPR0(r1)
-	lwz	r3,GPR3(r1)
-	lwz	r4,GPR4(r1)
-	lwz	r5,GPR5(r1)
-	lwz	r6,GPR6(r1)
-	lwz	r7,GPR7(r1)
-	lwz	r8,GPR8(r1)
-1:
+	bl	do_show_syscall
 #endif /* SHOW_SYSCALLS */
-	cmpi	0,r0,0x7777	/* Special case for 'sys_sigreturn' */
-	beq-	10f
-	cmpi	0,r0,0x6666	/* Special case for 'sys_rt_sigreturn' */
-	beq-	16f
+	cmpli	0,r0,NR_syscalls
+	bge-	66f
 	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
 	lwz	r10,TI_FLAGS(r10)
 	andi.	r10,r10,_TIF_SYSCALL_TRACE
-	bne-	50f
-	cmpli	0,r0,NR_syscalls
-	bge-	66f
+	bne-	syscall_dotrace
+syscall_dotrace_cont:
 	lis	r10,sys_call_table@h
 	ori	r10,r10,sys_call_table@l
 	slwi	r0,r0,2
 	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
-	cmpi	0,r10,0
-	beq-	66f
 	mtlr	r10
 	addi	r9,r1,STACK_FRAME_OVERHEAD
 	blrl			/* Call handler */
-	.globl	ret_from_syscall_1
-ret_from_syscall_1:
-20:	stw	r3,RESULT(r1)	/* Save result */
+	.globl	ret_from_syscall
+ret_from_syscall:
 #ifdef SHOW_SYSCALLS
-#ifdef SHOW_SYSCALLS_TASK
-	cmp	0,r2,r31
-	bne	91f
+	bl	do_show_syscall_exit
 #endif
-	mr	r4,r3
-	lis	r3,79f@ha
-	addi	r3,r3,79f@l
-	bl	printk
-	lwz	r3,RESULT(r1)
-91:
-#endif
-	li	r10,-_LAST_ERRNO
-	cmpl	0,r3,r10
-	blt	30f
+	mr	r6,r3
+	li	r11,-_LAST_ERRNO
+	cmpl	0,r3,r11
+	blt+	30f
 	neg	r3,r3
 	cmpi	0,r3,ERESTARTNOHAND
 	bne	22f
@@ -202,24 +154,50 @@ ret_from_syscall_1:
 22:	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
 	oris	r10,r10,0x1000
 	stw	r10,_CCR(r1)
-30:	stw	r3,GPR3(r1)	/* Update return value */
-	b	ret_from_except
+
+	/* disable interrupts so current_thread_info()->flags can't change */
+30:	li	r10,MSR_KERNEL	/* doesn't include MSR_EE */
+	SYNC
+	MTMSRD(r10)
+	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
+	lwz	r9,TI_FLAGS(r12)
+	andi.	r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+	bne-	syscall_exit_work
+syscall_exit_cont:
+	PPC405_ERR77(0,r1)
+	stwcx.	r0,0,r1		/* to clear the reservation */
+	lwz	r4,_LINK(r1)
+	lwz	r5,_CCR(r1)
+	mtlr	r4
+	mtcr	r5
+	lwz	r7,_NIP(r1)
+	lwz	r8,_MSR(r1)
+	FIX_SRR1(r8, r0)
+	lwz	r2,GPR2(r1)
+	lwz	r1,GPR1(r1)
+	mtspr	SRR0,r7
+	mtspr	SRR1,r8
+	SYNC
+	RFI
+
 66:	li	r3,ENOSYS
 	b	22b
-/* sys_sigreturn */
-10:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	sys_sigreturn
-	cmpi	0,r3,0		/* Check for restarted system call */
-	bge	ret_from_except
-	b	20b
-/* sys_rt_sigreturn */
-16:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	sys_rt_sigreturn
-	cmpi	0,r3,0		/* Check for restarted system call */
-	bge	ret_from_except
-	b	20b
+
+	.globl	ret_from_fork
+ret_from_fork:
+	REST_NVGPRS(r1)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+	bl	schedule_tail
+#endif
+	li	r3,0
+	b	ret_from_syscall
+
 /* Traced system call support */
-50:	bl	do_syscall_trace
+syscall_dotrace:
+	SAVE_NVGPRS(r1)
+	li	r0,0xc00
+	stw	r0,TRAP(r1)
+	bl	do_syscall_trace
 	lwz	r0,GPR0(r1)	/* Restore original registers */
 	lwz	r3,GPR3(r1)
 	lwz	r4,GPR4(r1)
@@ -227,43 +205,167 @@ ret_from_syscall_1:
 	lwz	r6,GPR6(r1)
 	lwz	r7,GPR7(r1)
 	lwz	r8,GPR8(r1)
-	lwz	r9,GPR9(r1)
-	cmpli	0,r0,NR_syscalls
-	bge-	66f
-	lis	r10,sys_call_table@h
-	ori	r10,r10,sys_call_table@l
-	slwi	r0,r0,2
-	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
-	cmpi	0,r10,0
-	beq-	66f
-	mtlr	r10
-	addi	r9,r1,STACK_FRAME_OVERHEAD
-	blrl			/* Call handler */
-	.globl	ret_from_syscall_2
-ret_from_syscall_2:
-	stw	r3,RESULT(r1)	/* Save result */
-	stw	r3,GPR0(r1)	/* temporary gross hack to make strace work */
-	li	r10,-_LAST_ERRNO
-	cmpl	0,r3,r10
-	blt	60f
-	neg	r3,r3
-	cmpi	0,r3,ERESTARTNOHAND
-	bne	52f
-	li	r3,EINTR
-52:	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
-	oris	r10,r10,0x1000
-	stw	r10,_CCR(r1)
-60:	stw	r3,GPR3(r1)	/* Update return value */
+	REST_NVGPRS(r1)
+	b	syscall_dotrace_cont
+
+syscall_exit_work:
+	stw	r6,RESULT(r1)	/* Save result */
+	stw	r3,GPR3(r1)	/* Update return value */
+	andi.	r0,r9,_TIF_SYSCALL_TRACE
+	beq	5f
+	stw	r6,GPR0(r1)	/* temporary gross hack to make strace work */
+	ori	r10,r10,MSR_EE
+	SYNC
+	MTMSRD(r10)		/* re-enable interrupts */
+	lwz	r4,TRAP(r1)
+	andi.	r4,r4,1
+	beq	4f
+	SAVE_NVGPRS(r1)
+	li	r4,0xc00
+	stw	r4,TRAP(r1)
+4:
 	bl	do_syscall_trace
-	b	ret_from_except
-66:	li	r3,ENOSYS
-	b	52b
+	REST_NVGPRS(r1)
+2:
+	lwz	r3,GPR3(r1)
+	li	r10,MSR_KERNEL	/* doesn't include MSR_EE */
+	SYNC
+	MTMSRD(r10)		/* disable interrupts again */
+	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
+	lwz	r9,TI_FLAGS(r12)
+5:
+	andi.	r0,r9,_TIF_NEED_RESCHED
+	bne	1f
+	lwz	r5,_MSR(r1)
+	andi.	r5,r5,MSR_PR
+	beq	syscall_exit_cont
+	andi.	r0,r9,_TIF_SIGPENDING
+	beq	syscall_exit_cont
+	ori	r10,r10,MSR_EE
+	SYNC
+	MTMSRD(r10)		/* re-enable interrupts */
+	b	syscall_do_signal
+1:
+	ori	r10,r10,MSR_EE
+	SYNC
+	MTMSRD(r10)		/* re-enable interrupts */
+	bl	schedule
+	b	2b
+
 #ifdef SHOW_SYSCALLS
+do_show_syscall:
+#ifdef SHOW_SYSCALLS_TASK
+	lis	r11,show_syscalls_task@ha
+	lwz	r11,show_syscalls_task@l(r11)
+	cmp	0,r2,r11
+	bnelr
+#endif
+	stw	r31,GPR31(r1)
+	mflr	r31
+	lis	r3,7f@ha
+	addi	r3,r3,7f@l
+	lwz	r4,GPR0(r1)
+	lwz	r5,GPR3(r1)
+	lwz	r6,GPR4(r1)
+	lwz	r7,GPR5(r1)
+	lwz	r8,GPR6(r1)
+	lwz	r9,GPR7(r1)
+	bl	printk
+	lis	r3,77f@ha
+	addi	r3,r3,77f@l
+	lwz	r4,GPR8(r1)
+	mr	r5,r2
+	bl	printk
+	lwz	r0,GPR0(r1)
+	lwz	r3,GPR3(r1)
+	lwz	r4,GPR4(r1)
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	mtlr	r31
+	lwz	r31,GPR31(r1)
+	blr
+
+do_show_syscall_exit:
+#ifdef SHOW_SYSCALLS_TASK
+	lis	r11,show_syscalls_task@ha
+	lwz	r11,show_syscalls_task@l(r11)
+	cmp	0,r2,r11
+	bnelr
+#endif
+	stw	r31,GPR31(r1)
+	mflr	r31
+	stw	r3,RESULT(r1)	/* Save result */
+	mr	r4,r3
+	lis	r3,79f@ha
+	addi	r3,r3,79f@l
+	bl	printk
+	lwz	r3,RESULT(r1)
+	mtlr	r31
+	lwz	r31,GPR31(r1)
+	blr
+
 7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
-77:	.string	"%x, %x), current=%p\n"
+77:	.string	"%x), current=%p\n"
 79:	.string	" -> %x\n"
 	.align	2,0
+
+#ifdef SHOW_SYSCALLS_TASK
+	.data
+	.globl	show_syscalls_task
+show_syscalls_task:
+	.long	-1
+	.text
 #endif
+#endif /* SHOW_SYSCALLS */
+
+/*
+ * The sigsuspend and rt_sigsuspend system calls can call do_signal
+ * and thus put the process into the stopped state where we might
+ * want to examine its user state with ptrace.  Therefore we need
+ * to save all the nonvolatile registers (r13 - r31) before calling
+ * the C code.
+ */
+	.globl	ppc_sigsuspend
+ppc_sigsuspend:
+	SAVE_NVGPRS(r1)
+	lwz	r0,TRAP(r1)
+	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
+	stw	r0,TRAP(r1)		/* register set saved */
+	b	sys_sigsuspend
+
+	.globl	ppc_rt_sigsuspend
+ppc_rt_sigsuspend:
+	SAVE_NVGPRS(r1)
+	lwz	r0,TRAP(r1)
+	rlwinm	r0,r0,0,0,30
+	stw	r0,TRAP(r1)
+	b	sys_rt_sigsuspend
+
+	.globl	ppc_fork
+ppc_fork:
+	SAVE_NVGPRS(r1)
+	lwz	r0,TRAP(r1)
+	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
+	stw	r0,TRAP(r1)		/* register set saved */
+	b	sys_fork
+
+	.globl	ppc_vfork
+ppc_vfork:
+	SAVE_NVGPRS(r1)
+	lwz	r0,TRAP(r1)
+	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
+	stw	r0,TRAP(r1)		/* register set saved */
+	b	sys_vfork
+
+	.globl	ppc_clone
+ppc_clone:
+	SAVE_NVGPRS(r1)
+	lwz	r0,TRAP(r1)
+	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
+	stw	r0,TRAP(r1)		/* register set saved */
+	b	sys_clone
 
 /*
  * This routine switches between two different tasks.  The process
@@ -290,25 +392,26 @@ _GLOBAL(_switch)
 	stwu	r1,-INT_FRAME_SIZE(r1)
 	mflr	r0
 	stw	r0,INT_FRAME_SIZE+4(r1)
-	/* r3-r13 are caller saved -- Cort */
-	SAVE_8GPRS(14, r1)
-	SAVE_10GPRS(22, r1)
+	/* r3-r12 are caller saved -- Cort */
+	SAVE_NVGPRS(r1)
 	stw	r0,_NIP(r1)	/* Return to switch caller */
-	mfmsr	r22
+	mfmsr	r11
 	li	r0,MSR_FP	/* Disable floating-point */
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
 	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
+	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
+	stw	r12,THREAD+THREAD_VRSAVE(r2)
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
-	and.	r0,r0,r22	/* FP or altivec enabled? */
+	and.	r0,r0,r11	/* FP or altivec enabled? */
 	beq+	1f
-	andc	r22,r22,r0
-	mtmsr	r22
+	andc	r11,r11,r0
+	MTMSRD(r11)
 	isync
-1:	stw	r22,_MSR(r1)
-	mfcr	r20
-	stw	r20,_CCR(r1)
+1:	stw	r11,_MSR(r1)
+	mfcr	r10
+	stw	r10,_CCR(r1)
 	stw	r1,KSP(r3)	/* Set old stack pointer */
 
 	tophys(r0,r4)
@@ -318,148 +421,247 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	/* save the old current 'last' for return value */
 	mr	r3,r2
 	addi	r2,r4,-THREAD	/* Update current */
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+	lwz	r0,THREAD+THREAD_VRSAVE(r2)
+	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
-	/* r3-r13 are destroyed -- Cort */
-	REST_2GPRS(14, r1)
-	REST_8GPRS(16, r1)
-	REST_8GPRS(24, r1)
+	/* r3-r12 are destroyed -- Cort */
+	REST_NVGPRS(r1)
 
 	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
 	mtlr	r4
 	addi	r1,r1,INT_FRAME_SIZE
 	blr
 
-	.globl	ret_from_fork
-ret_from_fork:
-#if CONFIG_SMP || CONFIG_PREEMPT
-	bl	schedule_tail
-#endif
-	rlwinm	r3,r1,0,0,18
-	lwz	r3,TI_FLAGS(r3)
-	andi.	r0,r3,_TIF_SYSCALL_TRACE
+	.globl	sigreturn_exit
+sigreturn_exit:
+	subi	r1,r3,STACK_FRAME_OVERHEAD
+	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
+	lwz	r9,TI_FLAGS(r12)
+	andi.	r0,r9,_TIF_SYSCALL_TRACE
 	bnel-	do_syscall_trace
-	b	ret_from_except
+	/* fall through */
+
+	.globl	ret_from_except_full
+ret_from_except_full:
+	REST_NVGPRS(r1)
+	/* fall through */
 
-	.globl	ret_from_intercept
-ret_from_intercept:
-	/*
-	 * We may be returning from RTL and cannot do the normal checks
-	 * -- Cort
-	 */
-	cmpi	0,r3,0
-	beq	restore
 	.globl	ret_from_except
 ret_from_except:
-	REST_10GPRS(13,r1)
-	REST_8GPRS(23,r1)
-	REST_GPR(31,r1)
-
 	/* Hard-disable interrupts so that current_thread_info()->flags
 	 * can't change between when we test it and when we return
 	 * from the interrupt. */
-recheck:
-	mfmsr	r10
-	rlwinm	r0,r10,0,17,15	/* clear MSR_EE in r0 */
-#ifdef CONFIG_4xx
-	rlwinm	r0,r0,0,23,21	/* clear MSR_DE in r0 */
-#endif
+	li	r10,MSR_KERNEL	/* doesn't include EE */
 	SYNC			/* Some chip revs have problems here... */
-	mtmsr	r0		/* Update machine state */
+	MTMSRD(r10)		/* disable interrupts */
 
 	lwz	r3,_MSR(r1)	/* Returning to user mode? */
 	andi.	r3,r3,MSR_PR
-	beq+	restore		/* if not, just restore regs and return */
+	beq	resume_kernel
 
 	/* Check current_thread_info()->flags */
-	rlwinm	r3,r1,0,0,18
-	lwz	r3,TI_FLAGS(r3)
-	andi.	r0,r3,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+	rlwinm	r9,r1,0,0,18
+	lwz	r9,TI_FLAGS(r9)
+	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
 	bne	do_work
 
-	.globl	ret_to_user_hook
-ret_to_user_hook:
-	nop
-
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-	lwz	r0,THREAD+THREAD_VRSAVE(r2)
-	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-
-	addi	r0,r1,INT_FRAME_SIZE	/* size of frame */
-	stw	r0,THREAD+KSP(r2)	/* save kernel stack pointer */
+#ifdef CONFIG_PREEMPT
+	b	restore
 
-#ifndef CONFIG_PPC_ISERIES
-	tophys(r8,r1)
-	CLR_TOP32(r8)
-	mtspr	SPRG2,r8	/* phys exception stack pointer */
-#else /* CONFIG_PPC_ISERIES */
-	mfspr	r2,SPRG1	/* Get Paca address */
-	stw	r1,PACAKSAVE(r2) /* save exception stack pointer */
-#endif /* CONFIG_PPC_ISERIES */
+resume_kernel:
+	rlwinm	r9,r1,0,0,18	/* check current_thread_info->preempt_count */
+	lwz	r3,TI_PREEMPT(r9)
+	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
+	bne	restore
+	lwz	r9,TI_FLAGS(r9)
+	andi.	r0,r9,_TIF_NEED_RESCHED
+	bne	do_work
+#else
+resume_kernel:
+#endif /* CONFIG_PREEMPT */
 
 	/* interrupts are hard-disabled at this point */
 restore:
-	REST_8GPRS(4, r1)
-	REST_GPR(12, r1)
-	lwz	r3,_XER(r1)
-	mtspr	XER,r3
+	lwz	r0,GPR0(r1)
+	lwz	r2,GPR2(r1)
+	REST_4GPRS(3, r1)
+	REST_2GPRS(7, r1)
+
+	lwz	r10,_XER(r1)
+	lwz	r11,_CTR(r1)
+	mtspr	XER,r10
+	mtctr	r11
 
 	PPC405_ERR77(0,r1)
 	stwcx.	r0,0,r1		/* to clear the reservation */
 
-	lwz	r3,_CTR(r1)
-	lwz	r0,_LINK(r1)
-	mtctr	r3
-	mtlr	r0
+#ifndef CONFIG_4xx
+	lwz	r9,_MSR(r1)
+	andi.	r10,r9,MSR_RI	/* check if this exception occurred */
+	beql	nonrecoverable	/* at a bad place (MSR:RI = 0) */
 
-	lwz	r0,_MSR(r1)
-	lwz	r3,_CCR(r1)
-	FIX_SRR1(r0,r2)
-	lwz	r2,_NIP(r1)
-	mtcrf	0xFF,r3
+	lwz	r10,_CCR(r1)
+	lwz	r11,_LINK(r1)
+	mtcrf	0xFF,r10
+	mtlr	r11
 
 	/*
-	 * We can't afford to take an exception between setting SRR0/1
-	 * and the rfi.  Since GPR0(r1) .. GPR3(r1) are in the same cache
-	 * line, loading r3 here should mean that we should have a HPTE
-	 * (for classic PPC) or TLB entry (for 4xx/8xx) for that cache
-	 * line, even if it isn't covered by a BAT register.
-	 * In addition, the cache line itself will be in L1 cache.
-	 * There is still the possibility of the HPTE getting evicted
-	 * on SMP systems.
+	 * Once we put values in SRR0 and SRR1, we are in a state
+	 * where exceptions are not recoverable, since taking an
+	 * exception will trash SRR0 and SRR1.  Therefore we clear the
+	 * MSR:RI bit to indicate this.  If we do take an exception,
+	 * we can't return to the point of the exception but we
+	 * can restart the exception exit path at the label
+	 * exc_exit_restart below. -- paulus
 	 */
-	lwz	r3,GPR3(r1)
-
-	mtspr	SRR1,r0
-	mtspr	SRR0,r2
-	lwz	r0,GPR0(r1)
-	lwz	r2,GPR2(r1)
+	li	r10,MSR_KERNEL & ~MSR_RI
+	SYNC
+	MTMSRD(r10)		/* clear the RI bit */
+	.globl exc_exit_restart
+exc_exit_restart:
+	lwz	r9,_MSR(r1)
+	lwz	r12,_NIP(r1)
+	FIX_SRR1(r9,r10)
+	mtspr	SRR0,r12
+	mtspr	SRR1,r9
+	REST_4GPRS(9, r1)
 	lwz	r1,GPR1(r1)
+	.globl exc_exit_restart_end
+exc_exit_restart_end:
 	SYNC
-	PPC405_ERR77_SYNC
 	RFI
+
+#else /* CONFIG_4xx */
+	/*
+	 * This is a bit different on 4xx because 4xx doesn't have
+	 * the RI bit in the MSR, and because we have critical
+	 * exceptions, for which we need to restore SRR0 and SRR1
+	 * and then use SRR2/SRR3 to return from the exception.
+	 * The TLB miss handler checks if we have interrupted
+	 * the exception exit path and restarts it if so.
+	 */
+	lwz	r10,TRAP(r1)		/* check for critical exception */
+	lwz	r11,_LINK(r1)
+	andi.	r10,r10,2
+	mtlr	r11
+	lwz	r10,_CCR(r1)
+	bne	crit_exc_exit
+	mtcrf	0xff,r10
+	REST_2GPRS(9, r1)
+	.globl exc_exit_restart
+exc_exit_restart:
+	lwz	r11,_NIP(r1)
+	lwz	r12,_MSR(r1)
+exc_exit_start:
+	mtspr	SRR0,r11
+	mtspr	SRR1,r12
+	REST_2GPRS(11, r1)
+	lwz	r1,GPR1(r1)
+	.globl exc_exit_restart_end
+exc_exit_restart_end:
+	PPC405_ERR77_SYNC
+	rfi
+crit_exc_exit:
+	mtcrf	0xff,r10
+	/* avoid any possible TLB misses here by turning off MSR.DR, we
+	 * assume the instructions here are mapped by a pinned TLB entry */
+	li	r10,MSR_IR
+	mtmsr	r10
+	isync
+	tophys(r1, r1)
+	lwz	r9,_SRR0(r1)
+	lwz	r10,_SRR1(r1)
+	mtspr	SRR0,r9
+	lwz	r11,_NIP(r1)
+	mtspr	SRR1,r10
+	lwz	r12,_MSR(r1)
+	mtspr	SRR2,r11
+	mtspr	SRR3,r12
+	REST_4GPRS(9, r1)
+	lwz	r1,GPR1(r1)
+	PPC405_ERR77_SYNC
+	rfci
+#endif /* CONFIG_4xx */
+
+recheck:
+	li	r10,MSR_KERNEL
+	SYNC
+	MTMSRD(r10)		/* disable interrupts */
+	rlwinm	r9,r1,0,0,18
+	lwz	r9,TI_FLAGS(r9)
+#ifdef CONFIG_PREEMPT
+	lwz	r0,_MSR(r1)
+	li	r11,_TIF_NEED_RESCHED
+	/* move MSR_PR bit down to TIF_SIGPENDING (0x4) bit */
+	rlwimi	r11,r0,18+TIF_SIGPENDING,31-TIF_SIGPENDING,31-TIF_SIGPENDING
+	and.	r0,r9,r11
+#else /* CONFIG_PREEMPT */
+	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+#endif /* CONFIG_PREEMPT */
+	beq	restore
 
 do_work:
 	ori	r10,r10,MSR_EE
 	SYNC
-	mtmsr	r10		/* hard-enable interrupts */
-	andi.	r0,r3,_TIF_NEED_RESCHED
+	MTMSRD(r10)		/* hard-enable interrupts */
+	andi.	r0,r9,_TIF_NEED_RESCHED
 	beq	1f
 	bl	schedule
 	b	recheck
 1:
-	andi.	r0,r3,_TIF_SIGPENDING
+syscall_do_signal:
+	/* save r13-r31 in the exception frame, if not already done */
+	lwz	r3,TRAP(r1)
+	andi.	r0,r3,1
 	beq	2f
-	li	r3,0
+	SAVE_NVGPRS(r1)
+	rlwinm	r3,r3,0,0,30
+	stw	r3,TRAP(r1)
+2:	li	r3,0
 	addi	r4,r1,STACK_FRAME_OVERHEAD
 	bl	do_signal
+	REST_NVGPRS(r1)
 	b	recheck
-2:
-	/* nobody uses the TIF_NOTIFY_RESUME bit yet */
-	b	recheck
+
+/*
+ * We come here when we are at the end of handling an exception
+ * that occurred at a place where taking an exception will lose
+ * state information, such as the contents of SRR0 and SRR1.
+ */
+nonrecoverable:
+	lis	r10,exc_exit_restart_end@ha
+	addi	r10,r10,exc_exit_restart_end@l
+	cmplw	r12,r10
+	bge	3f
+	lis	r11,exc_exit_restart@ha
+	addi	r11,r11,exc_exit_restart@l
+	cmplw	r12,r11
+	blt	3f
+	lis	r10,ee_restarts@ha
+	lwz	r12,ee_restarts@l(r10)
+	addi	r12,r12,1
+	stw	r12,ee_restarts@l(r10)
+	mr	r12,r11		/* restart at exc_exit_restart */
+	blr
+3:	/* OK, we can't recover, kill this process */
+	lwz	r3,TRAP(r1)
+	andi.	r0,r3,1
+	beq	4f
+	SAVE_NVGPRS(r1)
+	rlwinm	r3,r3,0,0,30
+	stw	r3,TRAP(r1)
+4:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	nonrecoverable_exception
+	/* shouldn't return */
+	b	4b
+
+	.comm	ee_restarts,4
 
 /*
  * PROM code for specific machines follows.  Put it
@@ -472,39 +674,43 @@ do_work:
  * called with the MMU off.
  */
 _GLOBAL(enter_rtas)
+	stwu	r1,-INT_FRAME_SIZE(r1)
 	mflr	r0
-	stw	r0,20(r1)
+	stw	r0,INT_FRAME_SIZE+4(r1)
 	lis	r4,rtas_data@ha
 	lwz	r4,rtas_data@l(r4)
 	lis	r6,1f@ha	/* physical return address for rtas */
 	addi	r6,r6,1f@l
-	addis	r6,r6,-KERNELBASE@h
-	subi	r7,r1,INT_FRAME_SIZE
-	addis	r7,r7,-KERNELBASE@h
+	tophys(r6,r6)
+	tophys(r7,r1)
 	lis	r8,rtas_entry@ha
 	lwz	r8,rtas_entry@l(r8)
 	mfmsr	r9
	stw	r9,8(r1)
-	li	r0,0
-	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_FE0|MSR_FE1
-	andc	r0,r9,r0
-	li	r10,MSR_IR|MSR_DR|MSR_FP
-	andc	r9,r0,r10
+	li	r0,MSR_KERNEL
 	SYNC			/* disable interrupts so SRR0/1 */
-	mtmsr	r0		/* don't get trashed */
+	MTMSRD(r0)		/* don't get trashed */
+	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
 	mtlr	r6
 	CLR_TOP32(r7)
 	mtspr	SPRG2,r7
 	mtspr	SRR0,r8
 	mtspr	SRR1,r9
 	RFI
-1:	addis	r9,r1,-KERNELBASE@h
-	lwz	r8,20(r9)	/* get return address */
+1:	tophys(r9,r1)
+	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
 	lwz	r9,8(r9)	/* original msr value */
 	FIX_SRR1(r9,r0)
+	addi	r1,r1,INT_FRAME_SIZE
 	li	r0,0
 	mtspr	SPRG2,r0
 	mtspr	SRR0,r8
 	mtspr	SRR1,r9
 	RFI			/* return to caller */
+
+	.globl	machine_check_in_rtas
+machine_check_in_rtas:
+	twi	31,0,0
+	/* XXX load up BATs and panic */
+
 #endif /* CONFIG_ALL_PPC */
```
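For reference, the `ret_from_syscall` path in the diff implements the ppc32 user-level syscall ABI: a handler return value falling in the last `_LAST_ERRNO` negative integers is an error, so it is negated into a positive errno in r3 and the summary-overflow (SO) bit of CR0 is set (the `oris r10,r10,0x1000` on the saved CR image) for the C library to branch on. Below is a hedged C model of that mapping; the `_LAST_ERRNO` and `ERESTARTNOHAND` values are assumed from kernels of that era, not taken from this patch.

```c
#include <stdint.h>

#define _LAST_ERRNO    516          /* assumed: highest errno used by ppc32 then */
#define ERESTARTNOHAND 514          /* assumed: kernel-internal restart code */
#define EINTR          4
#define CR0_SO         0x10000000u  /* SO bit of CR0 in the saved CR word */

struct user_ret { uint32_t r3; uint32_t ccr; };

/* Map a syscall handler's return value to the (r3, CR) pair userspace sees. */
struct user_ret map_syscall_return(int32_t ret, uint32_t ccr)
{
    struct user_ret out = { (uint32_t)ret, ccr & ~CR0_SO };
    if (ret < 0 && ret >= -_LAST_ERRNO) {   /* the "cmpl 0,r3,r11" range test */
        int32_t err = -ret;                 /* "neg r3,r3" */
        if (err == ERESTARTNOHAND)          /* nothing to restart: become EINTR */
            err = EINTR;
        out.r3  = (uint32_t)err;            /* positive errno in r3 */
        out.ccr |= CR0_SO;                  /* CR0.SO marks the failure */
    }
    return out;
}
```

Userspace syscall stubs then test CR0.SO to decide whether r3 holds a result or an errno.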
