Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--   arch/sh/kernel/cpu/fpu.c        2
-rw-r--r--   arch/sh/kernel/entry-common.S   6
-rw-r--r--   arch/sh/kernel/irq.c           57
-rw-r--r--   arch/sh/kernel/process_32.c     6
-rw-r--r--   arch/sh/kernel/process_64.c     4
5 files changed, 29 insertions, 46 deletions
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index f8f7af51c128..4e332244ea75 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -44,7 +44,7 @@ void __fpu_state_restore(void)
 
 	restore_fpu(tsk);
 	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }
 
 void fpu_state_restore(struct pt_regs *regs)
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 9b6e4beeb296..ca46834294b7 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -108,7 +108,7 @@ need_resched:
 	and	#(0xf0>>1), r0		! interrupts off (exception path)?
 	cmp/eq	#(0xf0>>1), r0
 	bt	noresched
-	mov.l	3f, r0
+	mov.l	1f, r0
 	jsr	@r0			! call preempt_schedule_irq
 	 nop
 	bra	need_resched
@@ -119,9 +119,7 @@ noresched:
 	 nop
 
 	.align 2
-1:	.long	PREEMPT_ACTIVE
-2:	.long	schedule
-3:	.long	preempt_schedule_irq
+1:	.long	preempt_schedule_irq
 #endif
 
 ENTRY(resume_userspace)
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 063af10ff3c1..0833736afa32 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,47 +149,32 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_sp = current_stack_pointer;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
-
-		__asm__ __volatile__ (
-			"mov	r15, r9		\n"
-			"jsr	@%0		\n"
-			/* switch to the softirq stack */
-			" mov	%1, r15		\n"
-			/* restore the thread stack */
-			"mov	r9, r15		\n"
-			: /* no outputs */
-			: "r" (__do_softirq), "r" (isp)
-			: "memory", "r0", "r1", "r2", "r3", "r4",
-			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
-		);
-
-		/*
-		 * Shouldn't happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
-
-	local_irq_restore(flags);
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_sp = current_stack_pointer;
+
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+	__asm__ __volatile__ (
+		"mov	r15, r9		\n"
+		"jsr	@%0		\n"
+		/* switch to the softirq stack */
+		" mov	%1, r15		\n"
+		/* restore the thread stack */
+		"mov	r9, r15		\n"
+		: /* no outputs */
+		: "r" (__do_softirq), "r" (isp)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+	);
 }
 #else
 static inline void handle_one_irq(unsigned int irq)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index ebd3933005b4..2885fc9d9dcd 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
 		ti->addr_limit = KERNEL_DS;
 		ti->status &= ~TS_USEDFPU;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		return 0;
 	}
 	*childregs = *current_pt_regs();
@@ -189,7 +189,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
@@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		__fpu_state_restore();
 
 	return prev;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 174d124b419e..e2062e643341 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -374,7 +374,7 @@ asmlinkage void ret_from_kernel_thread(void);
 int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long arg, struct task_struct *p)
 {
-	struct pt_regs *childregs, *regs = current_pt_regs();
+	struct pt_regs *childregs;
 
 #ifdef CONFIG_SH_FPU
 	/* can't happen for a kernel thread */
@@ -393,7 +393,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->regs[2] = (unsigned long)arg;
-		childregs->regs[3] = (unsigned long)fn;
+		childregs->regs[3] = (unsigned long)usp;
 		childregs->sr = (1 << 30); /* not user_mode */
 		childregs->sr |= SR_FD; /* Invalidate FPU flag */
 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
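Note on the irq.c hunk: the in_interrupt() and local_softirq_pending() checks and the irq save/restore are not lost, they move into the generic softirq code, which calls the arch-provided do_softirq_own_stack() only when softirqs are actually pending. A simplified sketch of that generic caller (it lives in kernel/softirq.c, not in this diff) shows why the arch hook above can shrink to just the stack switch:

	/*
	 * Simplified sketch of the generic do_softirq() after the
	 * do_softirq_own_stack() conversion; details abridged from
	 * kernel/softirq.c of this era.
	 */
	asmlinkage void do_softirq(void)
	{
		__u32 pending;
		unsigned long flags;

		if (in_interrupt())		/* check formerly done per-arch */
			return;

		local_irq_save(flags);
		pending = local_softirq_pending();
		if (pending)
			do_softirq_own_stack();	/* arch hook, as in irq.c above */
		local_irq_restore(flags);
	}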
