summaryrefslogtreecommitdiff
path: root/arch/ppc64/kernel
diff options
context:
space:
mode:
authorAndrew Morton <akpm@osdl.org>2004-01-18 18:22:39 -0800
committerLinus Torvalds <torvalds@home.osdl.org>2004-01-18 18:22:39 -0800
commit04879b04bf500585d6b015e440be3ee7bdaf9cd4 (patch)
tree1025214cac9a033304d965d16272725e07207c82 /arch/ppc64/kernel
parentd4c6e4e119d3cd89b80321c7abb4914d33b594d1 (diff)
[PATCH] ppc64: VMX (Altivec) support & signal32 rework, from Ben Herrenschmidt
From: Anton Blanchard <anton@samba.org> VMX (Altivec) support & signal32 rework, from Ben Herrenschmidt
Diffstat (limited to 'arch/ppc64/kernel')
-rw-r--r--arch/ppc64/kernel/asm-offsets.c6
-rw-r--r--arch/ppc64/kernel/cputable.c7
-rw-r--r--arch/ppc64/kernel/entry.S31
-rw-r--r--arch/ppc64/kernel/head.S176
-rw-r--r--arch/ppc64/kernel/misc.S39
-rw-r--r--arch/ppc64/kernel/ppc_ksyms.c4
-rw-r--r--arch/ppc64/kernel/process.c105
-rw-r--r--arch/ppc64/kernel/setup.c5
-rw-r--r--arch/ppc64/kernel/signal.c186
-rw-r--r--arch/ppc64/kernel/signal32.c1128
-rw-r--r--arch/ppc64/kernel/sys_ppc32.c20
-rw-r--r--arch/ppc64/kernel/traps.c19
12 files changed, 1019 insertions, 707 deletions
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index 69c672faf9e6..38bcdcac3c3b 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -56,6 +56,12 @@ int main(void)
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
DEFINE(KSP, offsetof(struct thread_struct, ksp));
+#ifdef CONFIG_ALTIVEC
+ DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
+ DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
+ DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
+ DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
+#endif /* CONFIG_ALTIVEC */
DEFINE(MM, offsetof(struct task_struct, mm));
/* naca */
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
index e44ab2ed31db..04fde4359be2 100644
--- a/arch/ppc64/kernel/cputable.c
+++ b/arch/ppc64/kernel/cputable.c
@@ -21,6 +21,13 @@
struct cpu_spec* cur_cpu_spec = NULL;
+/* NOTE:
+ * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
+ * the responsibility of the appropriate CPU save/restore functions to
+ * eventually copy these settings over. Those save/restore aren't yet
+ * part of the cputable though. That has to be fixed for both ppc32
+ * and ppc64
+ */
extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
index dc70aff394d7..84f41f686d6e 100644
--- a/arch/ppc64/kernel/entry.S
+++ b/arch/ppc64/kernel/entry.S
@@ -29,6 +29,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
+#include <asm/cputable.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
@@ -211,6 +212,15 @@ _GLOBAL(ret_from_syscall_2)
.align 2,0
#endif
+
+_GLOBAL(ppc32_swapcontext)
+ bl .sys32_swapcontext
+ b 80f
+
+_GLOBAL(ppc64_swapcontext)
+ bl .sys_swapcontext
+ b 80f
+
_GLOBAL(ppc32_sigreturn)
bl .sys32_sigreturn
b 80f
@@ -261,10 +271,17 @@ _GLOBAL(_switch)
SAVE_10GPRS(22, r1)
mflr r20 /* Return to switch caller */
mfmsr r22
- andi. r21, r22, MSR_FP
+ li r0, MSR_FP
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ oris r0,r0,MSR_VEC@h /* Disable altivec */
+ mfspr r24,SPRN_VRSAVE /* save vrsave register value */
+ std r24,THREAD_VRSAVE(r3)
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+ and. r0,r0,r22
beq+ 1f
- li r6,MSR_FP /* Disable floating-point */
- andc r22,r22,r6
+ andc r22,r22,r0
mtmsrd r22
isync
1: std r20,_NIP(r1)
@@ -278,6 +295,14 @@ _GLOBAL(_switch)
ld r1,KSP(r4) /* Load new stack pointer */
ld r6,_CCR(r1)
mtcrf 0xFF,r6
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ ld r0,THREAD_VRSAVE(r4)
+ mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 5c79d9d8332f..4d0a387291d2 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -391,9 +391,34 @@ __start_interrupts:
STD_EXCEPTION_PSERIES( 0xc00, SystemCall )
STD_EXCEPTION_PSERIES( 0xd00, SingleStep )
STD_EXCEPTION_PSERIES( 0xe00, Trap_0e )
- STD_EXCEPTION_PSERIES( 0xf00, PerformanceMonitor )
+
+ /* We need to deal with the Altivec unavailable exception
+ * here which is at 0xf20, thus in the middle of the
+ * prolog code of the PerformanceMonitor one. A little
+ * trickery is thus necessary
+ */
+ . = 0xf00
+ b .PerformanceMonitor_Pseries
+ . = 0xf20
+ b .AltivecUnavailable_Pseries
+
STD_EXCEPTION_PSERIES( 0x1300, InstructionBreakpoint )
+ STD_EXCEPTION_PSERIES( 0x1700, AltivecAssist )
+ /* Here are the "moved" performance monitor and
+ * altivec unavailable exceptions
+ */
+ . = 0x3000
+ .globl PerformanceMonitor_Pseries;
+.PerformanceMonitor_Pseries:
+ EXCEPTION_PROLOG_PSERIES(0xf00, PerformanceMonitor_common)
+
+ . = 0x3100
+ .globl AltivecUnavailable_Pseries;
+.AltivecUnavailable_Pseries:
+ EXCEPTION_PROLOG_PSERIES(0xf20, AltivecUnavailable_common)
+
+
/* Space for the naca. Architected to be located at real address
* NACA_PHYS_ADDR. Various tools rely on this location being fixed.
* The first dword of the naca is required by iSeries LPAR to
@@ -580,7 +605,11 @@ __end_stab:
STD_EXCEPTION_COMMON( 0xe00, Trap_0e, .UnknownException )
STD_EXCEPTION_COMMON( 0xf00, PerformanceMonitor, .PerformanceMonitorException )
STD_EXCEPTION_COMMON(0x1300, InstructionBreakpoint, .InstructionBreakpointException )
-
+#ifdef CONFIG_ALTIVEC
+ STD_EXCEPTION_COMMON(0x1700, AltivecAssist, .AltivecAssistException )
+#else
+ STD_EXCEPTION_COMMON(0x1700, AltivecAssist, .UnknownException )
+#endif
/*
* Return from an exception which is handled without calling
* save_remaining_regs. The caller is assumed to have done
@@ -755,6 +784,23 @@ FPUnavailable_common:
bl .KernelFPUnavailableException
BUG_OPCODE
+ .globl AltivecUnavailable_common
+AltivecUnavailable_common:
+ EXCEPTION_PROLOG_COMMON
+#ifdef CONFIG_ALTIVEC
+ bne .load_up_altivec /* if from user, just load it up */
+#endif
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ DO_COPY_EE()
+ li r6,0xf20
+ bl .save_remaining_regs
+#ifdef CONFIG_ALTIVEC
+ bl .KernelAltivecUnavailableException
+#else
+ bl .UnknownException
+#endif
+ BUG_OPCODE
+
.globl SystemCall_common
SystemCall_common:
EXCEPTION_PROLOG_COMMON
@@ -1483,6 +1529,126 @@ _GLOBAL(giveup_fpu)
#endif /* CONFIG_SMP */
blr
+
+#ifdef CONFIG_ALTIVEC
+
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ * On entry: r13 == 'current' && last_task_used_altivec != 'current'
+ */
+_STATIC(load_up_altivec)
+ mfmsr r5 /* grab the current MSR */
+ oris r5,r5,MSR_VEC@h
+ mtmsrd r5 /* enable use of VMX now */
+ isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_altivec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+ LOADBASE(r3,last_task_used_altivec)
+ ld r4,last_task_used_altivec@l(r3)
+ cmpi 0,r4,0
+ beq 1f
+ /* Save VMX state to last_task_used_altivec's THREAD struct */
+ addi r4,r4,THREAD
+ SAVE_32VRS(0,r5,r4)
+ mfvscr vr0
+ li r10,THREAD_VSCR
+ stvx vr0,r10,r4
+ /* Disable VMX for last_task_used_altivec */
+ ld r5,PT_REGS(r4)
+ ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r20,MSR_VEC@h
+ andc r4,r4,r20
+ std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+ /* Hack: if we get an altivec unavailable trap with VRSAVE
+ * set to all zeros, we assume this is a broken application
+ * that fails to set it properly, and thus we switch it to
+ * all 1's
+ */
+ mfspr r4,SPRN_VRSAVE
+ cmpi 0,r4,0
+ bne+ 1f
+ li r4,-1
+ mtspr SPRN_VRSAVE,r4
+1:
+ /* enable use of VMX after return */
+ ld r4,PACACURRENT(r13)
+ addi r5,r4,THREAD /* Get THREAD */
+ oris r23,r23,MSR_VEC@h
+ li r4,1
+ li r10,THREAD_VSCR
+ stw r4,THREAD_USED_VR(r5)
+ lvx vr0,r10,r5
+ REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+ /* Update last_task_used_math to 'current' */
+ subi r4,r5,THREAD /* Back to 'current' */
+ std r4,last_task_used_altivec@l(r3)
+#endif /* CONFIG_SMP */
+ /* restore registers and return */
+ b fast_exception_return
+
+/*
+ * disable_kernel_altivec()
+ * Disable the VMX.
+ */
+_GLOBAL(disable_kernel_altivec)
+ mfmsr r3
+ rldicl r0,r3,(63-MSR_VEC_LG),1
+ rldicl r3,r0,(MSR_VEC_LG+1),0
+ mtmsrd r3 /* disable use of VMX now */
+ isync
+ blr
+
+/*
+ * giveup_altivec(tsk)
+ * Disable VMX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ */
+_GLOBAL(giveup_altivec)
+ mfmsr r5
+ oris r5,r5,MSR_VEC@h
+ mtmsrd r5 /* enable use of VMX now */
+ isync
+ cmpi 0,r3,0
+ beqlr- /* if no previous owner, done */
+ addi r3,r3,THREAD /* want THREAD of task */
+ ld r5,PT_REGS(r3)
+ cmpi 0,r5,0
+ SAVE_32VRS(0,r4,r3)
+ mfvscr vr0
+ li r4,THREAD_VSCR
+ stvx vr0,r4,r3
+ beq 1f
+ ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r3,MSR_VEC@h
+	andc	r4,r4,r3		/* disable VMX for previous task */
+ std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+ li r5,0
+ LOADBASE(r4,last_task_used_altivec)
+ std r5,last_task_used_altivec@l(r4)
+#endif /* CONFIG_SMP */
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#ifdef CONFIG_SMP
/*
* This function is called after the master CPU has released the
@@ -1784,6 +1950,12 @@ _STATIC(start_here_common)
addi r2,r2,0x4000
addi r2,r2,0x4000
+	/* Apply the CPU-specific fixups (nop out sections not relevant
+	 * to this CPU)
+	 */
+ li r3,0
+ bl .do_cpu_ftr_fixups
+
/* setup the systemcfg pointer */
LOADADDR(r9,systemcfg)
SET_REG_TO_CONST(r8, SYSTEMCFG_VIRT_ADDR)
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 0747b16b819f..775777ceab22 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -418,7 +418,7 @@ _GLOBAL(cvt_df)
blr
/*
- * identify_cpu,
+ * identify_cpu and calls setup_cpu
* In: r3 = base of the cpu_specs array
* r4 = address of cur_cpu_spec
* r5 = relocation offset
@@ -434,9 +434,17 @@ _GLOBAL(identify_cpu)
addi r3,r3,CPU_SPEC_ENTRY_SIZE
b 1b
1:
- add r3,r3,r5
- std r3,0(r4)
- blr
+ add r0,r3,r5
+ std r0,0(r4)
+ ld r4,CPU_SPEC_SETUP(r3)
+ sub r4,r4,r5
+ ld r4,0(r4)
+ sub r4,r4,r5
+ mtctr r4
+ /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
+ mr r4,r3
+ mr r3,r5
+ bctr
/*
* do_cpu_ftr_fixups - goes through the list of CPU feature fixups
@@ -486,25 +494,6 @@ _GLOBAL(do_cpu_ftr_fixups)
isync
b 1b
-/*
- * call_setup_cpu - call the setup_cpu function for this cpu
- * r3 = data offset
- *
- * Setup function is called with:
- * r3 = data offset
- * r4 = ptr to CPU spec (relocated)
- */
-_GLOBAL(call_setup_cpu)
- LOADADDR(r4, cur_cpu_spec)
- sub r4,r4,r3
- lwz r4,0(r4) # load pointer to cpu_spec
- sub r4,r4,r3 # relocate
- lwz r6,CPU_SPEC_SETUP(r4) # load function pointer
- sub r6,r6,r3
- mtctr r6
- bctr
-
-
/*
* Create a kernel thread
@@ -823,7 +812,7 @@ _GLOBAL(sys_call_table32)
.llong .compat_clock_gettime
.llong .compat_clock_getres
.llong .compat_clock_nanosleep
- .llong .sys_ni_syscall
+ .llong .ppc32_swapcontext
.llong .sys32_tgkill /* 250 */
.llong .sys32_utimes
.llong .compat_statfs64
@@ -1082,7 +1071,7 @@ _GLOBAL(sys_call_table)
.llong .sys_clock_gettime
.llong .sys_clock_getres
.llong .sys_clock_nanosleep
- .llong .sys_ni_syscall
+ .llong .ppc64_swapcontext
.llong .sys_tgkill /* 250 */
.llong .sys_utimes
.llong .sys_statfs64
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
index 1affcc338287..cc01d78363c4 100644
--- a/arch/ppc64/kernel/ppc_ksyms.c
+++ b/arch/ppc64/kernel/ppc_ksyms.c
@@ -164,7 +164,9 @@ EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(flush_instruction_cache);
EXPORT_SYMBOL(_get_PVR);
EXPORT_SYMBOL(giveup_fpu);
-EXPORT_SYMBOL(enable_kernel_fp);
+#ifdef CONFIG_ALTIVEC
+EXPORT_SYMBOL(giveup_altivec);
+#endif
EXPORT_SYMBOL(flush_icache_range);
EXPORT_SYMBOL(flush_icache_user_range);
EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/ppc64/kernel/process.c b/arch/ppc64/kernel/process.c
index 0b16f7615ad5..041827788def 100644
--- a/arch/ppc64/kernel/process.c
+++ b/arch/ppc64/kernel/process.c
@@ -50,7 +50,10 @@
#include <asm/cputable.h>
#include <asm/sections.h>
+#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
+struct task_struct *last_task_used_altivec = NULL;
+#endif
struct mm_struct ioremap_mm = { pgd : ioremap_dir
,page_table_lock : SPIN_LOCK_UNLOCKED };
@@ -58,8 +61,7 @@ struct mm_struct ioremap_mm = { pgd : ioremap_dir
char *sysmap = NULL;
unsigned long sysmap_size = 0;
-void
-enable_kernel_fp(void)
+void enable_kernel_fp(void)
{
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
@@ -70,6 +72,7 @@ enable_kernel_fp(void)
giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
+EXPORT_SYMBOL(enable_kernel_fp);
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
@@ -85,6 +88,31 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
return 1;
}
+#ifdef CONFIG_ALTIVEC
+
+void enable_kernel_altivec(void)
+{
+#ifdef CONFIG_SMP
+ if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
+ giveup_altivec(current);
+ else
+ giveup_altivec(NULL); /* just enables FP for kernel */
+#else
+ giveup_altivec(last_task_used_altivec);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_altivec);
+
+int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+ return 1;
+}
+
+#endif /* CONFIG_ALTIVEC */
+
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
@@ -104,8 +132,20 @@ struct task_struct *__switch_to(struct task_struct *prev,
*/
if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
giveup_fpu(prev);
+#ifdef CONFIG_ALTIVEC
+ if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
+ giveup_altivec(prev);
+#endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */
+#if defined(CONFIG_ALTIVEC) && !defined(CONFIG_SMP)
+	/* Avoid the trap. On smp this never happens since
+ * we don't set last_task_used_altivec -- Cort
+ */
+ if (new->thread.regs && last_task_used_altivec == new)
+ new->thread.regs->msr |= MSR_VEC;
+#endif /* CONFIG_ALTIVEC */
+
new_thread = &new->thread;
old_thread = &current->thread;
@@ -158,8 +198,14 @@ void show_regs(struct pt_regs * regs)
void exit_thread(void)
{
+#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+ if (last_task_used_altivec == current)
+ last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#endif /* CONFIG_SMP */
}
void flush_thread(void)
@@ -169,8 +215,14 @@ void flush_thread(void)
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
+#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+ if (last_task_used_altivec == current)
+ last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#endif /* CONFIG_SMP */
}
void
@@ -178,6 +230,25 @@ release_thread(struct task_struct *t)
{
}
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+
+ if (regs == NULL)
+ return;
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
+#ifdef CONFIG_ALTIVEC
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+#endif /* CONFIG_ALTIVEC */
+}
+
/*
* Copy a thread..
*/
@@ -268,9 +339,25 @@ void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
regs->gpr[1] = sp;
regs->gpr[2] = toc;
regs->msr = MSR_USER64;
+#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = 0;
+#endif /* CONFIG_SMP */
+ memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
+#ifdef CONFIG_ALTIVEC
+#ifndef CONFIG_SMP
+ if (last_task_used_altivec == current)
+ last_task_used_altivec = 0;
+#endif /* CONFIG_SMP */
+ memset(current->thread.vr, 0, sizeof(current->thread.vr));
+ current->thread.vscr.u[0] = 0;
+ current->thread.vscr.u[1] = 0;
+ current->thread.vscr.u[2] = 0;
+ current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
+ current->thread.vrsave = 0;
+ current->thread.used_vr = 0;
+#endif /* CONFIG_ALTIVEC */
}
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
@@ -314,9 +401,6 @@ int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3,
}
}
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-
return do_fork(clone_flags & ~CLONE_IDLETASK, p2, regs, 0,
(int *)parent_tidptr, (int *)child_tidptr);
}
@@ -325,9 +409,6 @@ int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-
return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}
@@ -335,9 +416,6 @@ int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0,
NULL, NULL);
}
@@ -355,7 +433,10 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
goto out;
if (regs->msr & MSR_FP)
giveup_fpu(current);
-
+#ifdef CONFIG_ALTIVEC
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+#endif /* CONFIG_ALTIVEC */
error = do_execve(filename, (char **) a1, (char **) a2, regs);
if (error == 0)
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index 6efa418bdb61..236b063b058d 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -312,6 +312,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
else
seq_printf(m, "unknown (%08x)", pvr);
+#ifdef CONFIG_ALTIVEC
+ if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
+ seq_printf(m, ", altivec supported");
+#endif /* CONFIG_ALTIVEC */
+
seq_printf(m, "\n");
#ifdef CONFIG_PPC_PSERIES
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
index f6b0cba9cd57..f5f336f5feb5 100644
--- a/arch/ppc64/kernel/signal.c
+++ b/arch/ppc64/kernel/signal.c
@@ -114,19 +114,49 @@ long sys_sigaltstack(const stack_t *uss, stack_t *uoss, unsigned long r5,
* Set up the sigcontext for the signal frame.
*/
-static int
-setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
int signr, sigset_t *set, unsigned long handler)
{
+ /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
+ * process never used altivec yet (MSR_VEC is zero in pt_regs of
+ * the context). This is very important because we must ensure we
+ * don't lose the VRSAVE content that may have been set prior to
+ * the process doing its first vector operation
+	 * Userland shall check AT_HWCAP to know whether it can rely on the
+ * v_regs pointer or not
+ */
+#ifdef CONFIG_ALTIVEC
+ elf_vrreg_t *v_regs = (elf_vrreg_t *)(((unsigned long)sc->vmx_reserve) & ~0xful);
+#endif
int err = 0;
if (regs->msr & MSR_FP)
giveup_fpu(current);
- current->thread.saved_msr = regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1);
- regs->msr = current->thread.saved_msr | current->thread.fpexc_mode;
- current->thread.saved_softe = regs->softe;
+	/* Make sure signal doesn't get spurious FP exceptions */
+ current->thread.fpscr = 0;
+#ifdef CONFIG_ALTIVEC
+ err |= __put_user(v_regs, &sc->v_regs);
+
+ /* save altivec registers */
+ if (current->thread.used_vr) {
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
+ err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
+		/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_regs
+ * contains valid data.
+ */
+ regs->msr |= MSR_VEC;
+ }
+ /* We always copy to/from vrsave, it's 0 if we don't have or don't
+ * use altivec.
+ */
+ err |= __put_user(current->thread.vrsave, (u32 *)&v_regs[33]);
+#else /* CONFIG_ALTIVEC */
+ err |= __put_user(0, &sc->v_regs);
+#endif /* CONFIG_ALTIVEC */
err |= __put_user(&sc->gp_regs, &sc->regs);
err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
@@ -135,9 +165,6 @@ setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
if (set != NULL)
err |= __put_user(set->sig[0], &sc->oldmask);
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
- current->thread.fpscr = 0;
-
return err;
}
@@ -145,23 +172,42 @@ setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
* Restore the sigcontext from the signal frame.
*/
-static int
-restore_sigcontext(struct pt_regs *regs, sigset_t *set, struct sigcontext *sc)
+static int restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, struct sigcontext *sc)
{
+#ifdef CONFIG_ALTIVEC
+ elf_vrreg_t *v_regs;
+#endif
unsigned int err = 0;
+ unsigned long save_r13;
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-
+ /* If this is not a signal return, we preserve the TLS in r13 */
+ if (!sig)
+ save_r13 = regs->gpr[13];
err |= __copy_from_user(regs, &sc->gp_regs, GP_REGS_SIZE);
+ if (!sig)
+ regs->gpr[13] = save_r13;
err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
- current->thread.fpexc_mode = regs->msr & (MSR_FE0 | MSR_FE1);
if (set != NULL)
err |= __get_user(set->sig[0], &sc->oldmask);
- /* Don't allow the signal handler to change these modulo FE{0,1} */
- regs->msr = current->thread.saved_msr & ~(MSR_FP | MSR_FE0 | MSR_FE1);
- regs->softe = current->thread.saved_softe;
+#ifdef CONFIG_ALTIVEC
+ err |= __get_user(v_regs, &sc->v_regs);
+ if (err)
+ return err;
+ /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
+ if (v_regs != 0 && (regs->msr & MSR_VEC) != 0)
+ err |= __copy_from_user(current->thread.vr, v_regs, 33 * sizeof(vector128));
+ else if (current->thread.used_vr)
+ memset(&current->thread.vr, 0, 33);
+ /* Always get VRSAVE back */
+ if (v_regs != 0)
+ err |= __get_user(current->thread.vrsave, (u32 *)&v_regs[33]);
+ else
+ current->thread.vrsave = 0;
+#endif /* CONFIG_ALTIVEC */
+
+ /* Force reload of FP/VEC */
+ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
return err;
}
@@ -169,8 +215,8 @@ restore_sigcontext(struct pt_regs *regs, sigset_t *set, struct sigcontext *sc)
/*
* Allocate space for the signal frame
*/
-static inline void *
-get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+static inline void * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ size_t frame_size)
{
unsigned long newsp;
@@ -185,8 +231,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
return (void *)((newsp - frame_size) & -8ul);
}
-static int
-setup_trampoline(unsigned int syscall, unsigned int *tramp)
+/*
+ * Setup the trampoline code on the stack
+ */
+static int setup_trampoline(unsigned int syscall, unsigned int *tramp)
{
int i, err = 0;
@@ -209,6 +257,72 @@ setup_trampoline(unsigned int syscall, unsigned int *tramp)
}
/*
+ * Restore the user process's signal mask (also used by signal32.c)
+ */
+void restore_sigmask(sigset_t *set)
+{
+ sigdelsetmask(set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = *set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+
+/*
+ * Handle {get,set,swap}_context operations
+ */
+int sys_swapcontext(struct ucontext __user *old_ctx,
+ struct ucontext __user *new_ctx,
+ long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
+{
+ unsigned char tmp;
+ sigset_t set;
+
+ /* Context size is for future use. Right now, we only make sure
+ * we are passed something we understand
+ */
+ if (ctx_size < sizeof(struct ucontext))
+ return -EINVAL;
+
+ if (old_ctx != NULL) {
+ if (verify_area(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
+ || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0)
+ || __copy_to_user(&old_ctx->uc_sigmask,
+ &current->blocked, sizeof(sigset_t)))
+ return -EFAULT;
+ }
+ if (new_ctx == NULL)
+ return 0;
+ if (verify_area(VERIFY_READ, new_ctx, sizeof(*new_ctx))
+ || __get_user(tmp, (u8 *) new_ctx)
+ || __get_user(tmp, (u8 *) (new_ctx + 1) - 1))
+ return -EFAULT;
+
+ /*
+ * If we get a fault copying the context into the kernel's
+ * image of the user's registers, we can't just return -EFAULT
+ * because the user's registers will be corrupted. For instance
+ * the NIP value may have been updated but not some of the
+ * other registers. Given that we have done the verify_area
+ * and successfully read the first and last bytes of the region
+ * above, this should only happen in an out-of-memory situation
+ * or if another thread unmaps the region containing the context.
+ * We kill the task with a SIGSEGV in this situation.
+ */
+
+ if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
+ do_exit(SIGSEGV);
+ restore_sigmask(&set);
+ if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
+ do_exit(SIGSEGV);
+
+ /* This returns like rt_sigreturn */
+ return 0;
+}
+
+
+/*
* Do a signal return; undo the signal stack.
*/
@@ -218,7 +332,6 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
{
struct ucontext *uc = (struct ucontext *)regs->gpr[1];
sigset_t set;
- stack_t st;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -228,20 +341,14 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- if (restore_sigcontext(regs, NULL, &uc->uc_mcontext))
+ restore_sigmask(&set);
+ if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
goto badframe;
- if (__copy_from_user(&st, &uc->uc_stack, sizeof(st)))
- goto badframe;
- /* This function sets back the stack flags into
- the current task structure. */
- sys_sigaltstack(&st, NULL, 0, 0, 0, 0, regs);
+ /* do_sigaltstack expects a __user pointer and won't modify
+ * what's in there anyway
+ */
+ do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]);
return regs->result;
@@ -253,8 +360,7 @@ badframe:
do_exit(SIGSEGV);
}
-static void
-setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
+static void setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
/* Handler is *really* a pointer to the function descriptor for
@@ -332,9 +438,8 @@ badframe:
/*
* OK, we're invoking a handler
*/
-static void
-handle_signal(unsigned long sig, struct k_sigaction *ka,
- siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
/* Set up Signal Frame */
setup_rt_frame(sig, ka, info, oldset, regs);
@@ -352,8 +457,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
return;
}
-static inline void
-syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
+static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
switch ((int)regs->result) {
case -ERESTART_RESTARTBLOCK:
diff --git a/arch/ppc64/kernel/signal32.c b/arch/ppc64/kernel/signal32.c
index c463afd598e8..8bcf79b2e195 100644
--- a/arch/ppc64/kernel/signal32.c
+++ b/arch/ppc64/kernel/signal32.c
@@ -32,71 +32,207 @@
#define DEBUG_SIG 0
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-/*
- * These are the flags in the MSR that the user is allowed to change
- * by modifying the saved value of the MSR on the stack. SE and BE
- * should not be in this list since gdb may want to change these. I.e,
- * you should be able to step out of a signal handler to see what
- * instruction executes next after the signal handler completes.
- * Alternately, if you stepped into a signal handler, you should be
- * able to continue 'til the next breakpoint from within the signal
- * handler, even if the handler returns.
- */
-#if 0
-#define MSR_USERCHANGE (MSR_FE0 | MSR_FE1)
-#else
+
+#define GP_REGS_SIZE32 min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
+
/*
- * glibc tries to set FE0/FE1 via a signal handler. Since it only ever
- * sets both bits and this is the default setting we now disable this
 * behaviour. This is done to ensure the new prctl which alters FE0/FE1 does
 * not get overridden by glibc. Setting and clearing FE0/FE1 via signal
- * handler has always been bogus since load_up_fpu used to set FE0/FE1
- * unconditionally.
+ * When we have signals to deliver, we set up on the
+ * user stack, going down from the original stack pointer:
+ * a sigregs32 struct
+ * a sigcontext32 struct
+ * a gap of __SIGNAL_FRAMESIZE32 bytes
+ *
+ * Each of these things must be a multiple of 16 bytes in size.
+ *
*/
-#define MSR_USERCHANGE 0
-#endif
-
struct sigregs32 {
- /*
- * the gp_regs array is 32 bit representation of the pt_regs
- * structure that was stored on the kernel stack during the
- * system call that was interrupted for the signal.
- *
- * Note that the entire pt_regs regs structure will fit in
- * the gp_regs structure because the ELF_NREG value is 48 for
- * PPC and the pt_regs structure contains 44 registers
- */
- elf_gregset_t32 gp_regs;
- double fp_regs[ELF_NFPREG];
- unsigned int tramp[2];
+ struct mcontext32 mctx; /* all the register values */
/*
* Programs using the rs6000/xcoff abi can save up to 19 gp
* regs and 18 fp regs below sp before decrementing it.
*/
- int abigap[56];
+ int abigap[56];
};
+/* We use the mc_pad field for the signal return trampoline. */
+#define tramp mc_pad
-struct rt_sigframe_32 {
+/*
+ * When we have rt signals to deliver, we set up on the
+ * user stack, going down from the original stack pointer:
+ * one rt_sigframe32 struct (siginfo + ucontext + ABI gap)
+ * a gap of __SIGNAL_FRAMESIZE32+16 bytes
+ * (the +16 is to get the siginfo and ucontext32 in the same
+ * positions as in older kernels).
+ *
+ * Each of these things must be a multiple of 16 bytes in size.
+ *
+ */
+struct rt_sigframe32 {
+ struct compat_siginfo info;
+ struct ucontext32 uc;
/*
- * Unused space at start of frame to allow for storing of
- * stack pointers
+ * Programs using the rs6000/xcoff abi can save up to 19 gp
+ * regs and 18 fp regs below sp before decrementing it.
*/
- unsigned long _unused;
- /*
- * This is a 32 bit pointer in user address space
- * it is a pointer to the siginfo stucture in the rt stack frame
+ int abigap[56];
+};
+
+
+/*
+ * Common utility functions used by signal and context support
+ *
+ */
+
+/*
+ * Restore the user process's signal mask
+ * (implemented in signal.c)
+ */
+extern void restore_sigmask(sigset_t *set);
+
+/*
+ * Functions for flipping sigsets (thanks to brain dead generic
+ * implementation that makes things simple for little endian only
+ */
+static inline void compat_from_sigset(compat_sigset_t *compat, sigset_t *set)
+{
+ switch (_NSIG_WORDS) {
+ case 4: compat->sig[5] = set->sig[3] & 0xffffffffull ;
+ compat->sig[7] = set->sig[3] >> 32;
+ case 3: compat->sig[4] = set->sig[2] & 0xffffffffull ;
+ compat->sig[5] = set->sig[2] >> 32;
+ case 2: compat->sig[2] = set->sig[1] & 0xffffffffull ;
+ compat->sig[3] = set->sig[1] >> 32;
+ case 1: compat->sig[0] = set->sig[0] & 0xffffffffull ;
+ compat->sig[1] = set->sig[0] >> 32;
+ }
+}
+
+static inline void sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
+{
+ switch (_NSIG_WORDS) {
+ case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
+ case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
+ case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
+ case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
+ }
+}
+
+
+/*
+ * Save the current user registers on the user stack.
+ * We only save the altivec registers if the process has used
+ * altivec instructions at some point.
+ */
+static int save_user_regs(struct pt_regs *regs, struct mcontext32 *frame, int sigret)
+{
+ elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
+ int i, err = 0;
+
+ /* Make sure floating point registers are stored in regs */
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
+
+ /* save general and floating-point registers */
+ for (i = 0; i <= PT_RESULT; i ++)
+ err |= __put_user((unsigned int)gregs[i], &frame->mc_gregs[i]);
+ err |= __copy_to_user(&frame->mc_fregs, current->thread.fpr,
+ ELF_NFPREG * sizeof(double));
+ if (err)
+ return 1;
+
+ current->thread.fpscr = 0; /* turn off all fp exceptions */
+
+#ifdef CONFIG_ALTIVEC
+ /* save altivec registers */
+ if (current->thread.used_vr) {
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+ ELF_NVRREG32 * sizeof(vector128)))
+ return 1;
+ /* set MSR_VEC in the saved MSR value to indicate that
+ frame->mc_vregs contains valid data */
+ if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
+ return 1;
+ }
+ /* else assert((regs->msr & MSR_VEC) == 0) */
+
+ /* We always copy to/from vrsave, it's 0 if we don't have or don't
+ * use altivec. Since VSCR only contains 32 bits saved in the least
+ * significant bits of a vector, we "cheat" and stuff VRSAVE in the
+ * most significant bits of that same vector. --BenH
*/
- u32 pinfo;
+ if (__put_user(current->thread.vrsave, (u32 *)&frame->mc_vregs[32]))
+ return 1;
+#endif /* CONFIG_ALTIVEC */
+
+ if (sigret) {
+ /* Set up the sigreturn trampoline: li r0,sigret; sc */
+ if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
+ || __put_user(0x44000002UL, &frame->tramp[1]))
+ return 1;
+ flush_icache_range((unsigned long) &frame->tramp[0],
+ (unsigned long) &frame->tramp[2]);
+ }
+
+ return 0;
+}
+
+/*
+ * Restore the current user register values from the user stack,
+ * (except for MSR).
+ */
+static int restore_user_regs(struct pt_regs *regs, struct mcontext32 __user *sr, int sig)
+{
+ elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
+ int i, err = 0;
+ unsigned int save_r2;
+#ifdef CONFIG_ALTIVEC
+ unsigned long msr;
+#endif
+
/*
- * This is a 32 bit pointer in user address space
- * it is a pointer to the user context in the rt stack frame
+ * restore general registers but not including MSR. Also take
+ * care of keeping r2 (TLS) intact if not a signal
*/
- u32 puc;
- struct compat_siginfo info;
- struct ucontext32 uc;
-};
+ if (!sig)
+ save_r2 = (unsigned int)regs->gpr[2];
+ for (i = 0; i < PT_MSR; i ++)
+ err |= __get_user(gregs[i], &sr->mc_gregs[i]);
+ for (i ++; i <= PT_RESULT; i ++)
+ err |= __get_user(gregs[i], &sr->mc_gregs[i]);
+ if (!sig)
+ regs->gpr[2] = (unsigned long) save_r2;
+ if (err)
+ return 1;
+
+ /* force the process to reload the FP registers from
+ current->thread when it next does FP instructions */
+ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
+ sizeof(sr->mc_fregs)))
+ return 1;
+
+#ifdef CONFIG_ALTIVEC
+ /* force the process to reload the altivec registers from
+ current->thread when it next does altivec instructions */
+ regs->msr &= ~MSR_VEC;
+ if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
+ /* restore altivec registers from the stack */
+ if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+ sizeof(sr->mc_vregs)))
+ return 1;
+ } else if (current->thread.used_vr)
+ memset(&current->thread.vr, 0, ELF_NVRREG32 * sizeof(vector128));
+
+ /* Always get VRSAVE back */
+ if (__get_user(current->thread.vrsave, (u32 *)&sr->mc_vregs[32]))
+ return 1;
+#endif /* CONFIG_ALTIVEC */
+ return 0;
+}
/*
@@ -181,209 +317,6 @@ long sys32_sigaction(int sig, struct old_sigaction32 *act,
}
-/*
- * When we have signals to deliver, we set up on the
- * user stack, going down from the original stack pointer:
- * a sigregs struct
- * one or more sigcontext structs
- * a gap of __SIGNAL_FRAMESIZE32 bytes
- *
- * Each of these things must be a multiple of 16 bytes in size.
- *
- */
-
-
-/*
- * Do a signal return; undo the signal stack.
- */
-long sys32_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7, unsigned long r8,
- struct pt_regs *regs)
-{
- struct sigcontext32 *sc, sigctx;
- struct sigregs32 *sr;
- int ret;
- elf_gregset_t32 saved_regs; /* an array of ELF_NGREG unsigned ints (32 bits) */
- sigset_t set;
- int i;
-
- sc = (struct sigcontext32 *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
- if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
- goto badframe;
-
- /*
- * Note that PPC32 puts the upper 32 bits of the sigmask in the
- * unused part of the signal stackframe
- */
- set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- if (regs->msr & MSR_FP )
- giveup_fpu(current);
- /* Last stacked signal - restore registers */
- sr = (struct sigregs32*)(u64)sigctx.regs;
- /*
- * copy the 32 bit register values off the user stack
- * into the 32 bit register area
- */
- if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
- goto badframe;
- /*
- * The saved reg structure in the frame is an elf_grepset_t32,
- * it is a 32 bit register save of the registers in the
- * pt_regs structure that was stored on the kernel stack
- * during the system call when the system call was interrupted
- * for the signal. Only 32 bits are saved because the
- * sigcontext contains a pointer to the regs and the sig
- * context address is passed as a pointer to the signal
- * handler.
- *
- * The entries in the elf_grepset have the same index as the
- * elements in the pt_regs structure.
- */
- saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
- | (saved_regs[PT_MSR] & MSR_USERCHANGE);
- /*
- * Register 2 is the kernel toc - should be reset on
- * any calls into the kernel
- */
- for (i = 0; i < 32; i++)
- regs->gpr[i] = (u64)(saved_regs[i]) & 0xFFFFFFFF;
-
- /*
- * restore the non gpr registers
- */
- regs->msr = (u64)(saved_regs[PT_MSR]) & 0xFFFFFFFF;
- /*
- * Insure that the interrupt mode is 64 bit, during 32 bit
- * execution. (This is necessary because we only saved
- * lower 32 bits of msr.)
- */
- regs->msr = regs->msr | MSR_ISF; /* When this thread is interrupted it should run in 64 bit mode. */
-
- regs->nip = (u64)(saved_regs[PT_NIP]) & 0xFFFFFFFF;
- regs->orig_gpr3 = (u64)(saved_regs[PT_ORIG_R3]) & 0xFFFFFFFF;
- regs->ctr = (u64)(saved_regs[PT_CTR]) & 0xFFFFFFFF;
- regs->link = (u64)(saved_regs[PT_LNK]) & 0xFFFFFFFF;
- regs->xer = (u64)(saved_regs[PT_XER]) & 0xFFFFFFFF;
- regs->ccr = (u64)(saved_regs[PT_CCR]) & 0xFFFFFFFF;
- /* regs->softe is left unchanged (like the MSR.EE bit) */
- /******************************************************/
- /* the DAR and the DSISR are only relevant during a */
- /* data or instruction storage interrupt. The value */
- /* will be set to zero. */
- /******************************************************/
- regs->dar = 0;
- regs->dsisr = 0;
- regs->result = (u64)(saved_regs[PT_RESULT]) & 0xFFFFFFFF;
-
- if (copy_from_user(current->thread.fpr, &sr->fp_regs,
- sizeof(sr->fp_regs)))
- goto badframe;
-
- ret = regs->result;
- return ret;
-
-badframe:
- do_exit(SIGSEGV);
-}
-
-/*
- * Set up a signal frame.
- */
-static void setup_frame32(struct pt_regs *regs, struct sigregs32 *frame,
- unsigned int newsp)
-{
- struct sigcontext32 *sc = (struct sigcontext32 *)(u64)newsp;
- int i;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
- goto badframe;
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-
- /*
- * Copy the register contents for the pt_regs structure on the
- * kernel stack to the elf_gregset_t32 structure on the user
- * stack. This is a copy of 64 bit register values to 32 bit
- * register values. The high order 32 bits of the 64 bit
- * registers are not needed since a 32 bit application is
- * running and the saved registers are the contents of the
- * user registers at the time of a system call.
- *
- * The values saved on the user stack will be restored into
- * the registers during the signal return processing
- */
- for (i = 0; i < 32; i++) {
- if (__put_user((u32)regs->gpr[i], &frame->gp_regs[i]))
- goto badframe;
- }
-
- /*
- * Copy the non gpr registers to the user stack
- */
- if (__put_user((u32)regs->gpr[PT_NIP], &frame->gp_regs[PT_NIP])
- || __put_user((u32)regs->gpr[PT_MSR], &frame->gp_regs[PT_MSR])
- || __put_user((u32)regs->gpr[PT_ORIG_R3], &frame->gp_regs[PT_ORIG_R3])
- || __put_user((u32)regs->gpr[PT_CTR], &frame->gp_regs[PT_CTR])
- || __put_user((u32)regs->gpr[PT_LNK], &frame->gp_regs[PT_LNK])
- || __put_user((u32)regs->gpr[PT_XER], &frame->gp_regs[PT_XER])
- || __put_user((u32)regs->gpr[PT_CCR], &frame->gp_regs[PT_CCR])
-#if 0
- || __put_user((u32)regs->gpr[PT_MQ], &frame->gp_regs[PT_MQ])
-#endif
- || __put_user((u32)regs->gpr[PT_RESULT], &frame->gp_regs[PT_RESULT]))
- goto badframe;
-
-
- /*
- * Now copy the floating point registers onto the user stack
- *
- * Also set up so on the completion of the signal handler, the
- * sys_sigreturn will get control to reset the stack
- */
- if (__copy_to_user(&frame->fp_regs, current->thread.fpr,
- ELF_NFPREG * sizeof(double))
- /* li r0, __NR_sigreturn */
- || __put_user(0x38000000U + __NR_sigreturn, &frame->tramp[0])
- /* sc */
- || __put_user(0x44000002U, &frame->tramp[1]))
- goto badframe;
- flush_icache_range((unsigned long)&frame->tramp[0],
- (unsigned long)&frame->tramp[2]);
- current->thread.fpscr = 0; /* turn off all fp exceptions */
-
- newsp -= __SIGNAL_FRAMESIZE32;
- if (put_user(regs->gpr[1], (u32*)(u64)newsp)
- || get_user(regs->nip, &sc->handler)
- || get_user(regs->gpr[3], &sc->signal))
- goto badframe;
-
- regs->gpr[1] = newsp & 0xFFFFFFFF;
- /*
- * first parameter to the signal handler is the signal number
- * - the value is in gpr3
- * second parameter to the signal handler is the sigcontext
- * - set the value into gpr4
- */
- regs->gpr[4] = (unsigned long) sc;
- regs->link = (unsigned long) frame->tramp;
- return;
-
-badframe:
-#if DEBUG_SIG
- printk("badframe in setup_frame32, regs=%p frame=%p newsp=%lx\n",
- regs, frame, newsp);
-#endif
- do_exit(SIGSEGV);
-}
-
/*
* Start of RT signal support
@@ -405,115 +338,6 @@ badframe:
* siginfo32to64
*/
-/*
- * This code executes after the rt signal handler in 32 bit mode has
- * completed and returned
- */
-long sys32_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7, unsigned long r8,
- struct pt_regs * regs)
-{
- struct rt_sigframe_32 *rt_sf;
- struct sigcontext32 sigctx;
- struct sigregs32 *sr;
- int ret;
- elf_gregset_t32 saved_regs; /* an array of 32 bit register values */
- sigset_t set;
- stack_t st;
- int i;
- mm_segment_t old_fs;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- /* Adjust the inputted reg1 to point to the first rt signal frame */
- rt_sf = (struct rt_sigframe_32 *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
- /* Copy the information from the user stack */
- if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx))
- || copy_from_user(&set, &rt_sf->uc.uc_sigmask, sizeof(set))
- || copy_from_user(&st,&rt_sf->uc.uc_stack, sizeof(st)))
- goto badframe;
-
- /*
- * Unblock the signal that was processed
- * After a signal handler runs -
- * if the signal is blockable - the signal will be unblocked
- * (sigkill and sigstop are not blockable)
- */
- sigdelsetmask(&set, ~_BLOCKABLE);
- /* update the current based on the sigmask found in the rt_stackframe */
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- /* If currently owning the floating point - give them up */
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
- /*
- * Set to point to the next rt_sigframe - this is used to
- * determine whether this is the last signal to process
- */
- sr = (struct sigregs32 *)(u64)sigctx.regs;
- if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
- goto badframe;
- /*
- * The saved reg structure in the frame is an elf_grepset_t32,
- * it is a 32 bit register save of the registers in the
- * pt_regs structure that was stored on the kernel stack
- * during the system call when the system call was interrupted
- * for the signal. Only 32 bits are saved because the
- * sigcontext contains a pointer to the regs and the sig
- * context address is passed as a pointer to the signal handler
- *
- * The entries in the elf_grepset have the same index as
- * the elements in the pt_regs structure.
- */
- saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
- | (saved_regs[PT_MSR] & MSR_USERCHANGE);
- /*
- * Register 2 is the kernel toc - should be reset on any
- * calls into the kernel
- */
- for (i = 0; i < 32; i++)
- regs->gpr[i] = (u64)(saved_regs[i]) & 0xFFFFFFFF;
- /*
- * restore the non gpr registers
- */
- regs->msr = (u64)(saved_regs[PT_MSR]) & 0xFFFFFFFF;
- regs->nip = (u64)(saved_regs[PT_NIP]) & 0xFFFFFFFF;
- regs->orig_gpr3 = (u64)(saved_regs[PT_ORIG_R3]) & 0xFFFFFFFF;
- regs->ctr = (u64)(saved_regs[PT_CTR]) & 0xFFFFFFFF;
- regs->link = (u64)(saved_regs[PT_LNK]) & 0xFFFFFFFF;
- regs->xer = (u64)(saved_regs[PT_XER]) & 0xFFFFFFFF;
- regs->ccr = (u64)(saved_regs[PT_CCR]) & 0xFFFFFFFF;
- /* regs->softe is left unchanged (like MSR.EE) */
- /*
- * the DAR and the DSISR are only relevant during a
- * data or instruction storage interrupt. The value
- * will be set to zero.
- */
- regs->dar = 0;
- regs->dsisr = 0;
- regs->result = (u64)(saved_regs[PT_RESULT]) & 0xFFFFFFFF;
- if (copy_from_user(current->thread.fpr, &sr->fp_regs,
- sizeof(sr->fp_regs)))
- goto badframe;
- /* This function sets back the stack flags into
- the current task structure. */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- do_sigaltstack(&st, NULL, regs->gpr[1]);
- set_fs(old_fs);
-
- ret = regs->result;
- return ret;
-
- badframe:
- do_exit(SIGSEGV);
-}
-
-
long sys32_rt_sigaction(int sig, const struct sigaction32 *act,
struct sigaction32 *oact, size_t sigsetsize)
@@ -530,16 +354,7 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 *act,
ret = get_user((long)new_ka.sa.sa_handler, &act->sa_handler);
ret |= __copy_from_user(&set32, &act->sa_mask,
sizeof(compat_sigset_t));
- switch (_NSIG_WORDS) {
- case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
- | (((long)set32.sig[7]) << 32);
- case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
- | (((long)set32.sig[5]) << 32);
- case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
- | (((long)set32.sig[3]) << 32);
- case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
- | (((long)set32.sig[1]) << 32);
- }
+ sigset_from_compat(&new_ka.sa.sa_mask, &set32);
ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
if (ret)
return -EFAULT;
@@ -547,20 +362,7 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 *act,
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
- switch (_NSIG_WORDS) {
- case 4:
- set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
- set32.sig[6] = old_ka.sa.sa_mask.sig[3];
- case 3:
- set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
- set32.sig[4] = old_ka.sa.sa_mask.sig[2];
- case 2:
- set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
- set32.sig[2] = old_ka.sa.sa_mask.sig[1];
- case 1:
- set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
- set32.sig[0] = old_ka.sa.sa_mask.sig[0];
- }
+ compat_from_sigset(&set32, &old_ka.sa.sa_mask);
ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
ret |= __copy_to_user(&oact->sa_mask, &set32,
sizeof(compat_sigset_t));
@@ -586,14 +388,8 @@ long sys32_rt_sigprocmask(u32 how, compat_sigset_t *set,
if (set) {
if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
- return -EFAULT;
-
- switch (_NSIG_WORDS) {
- case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
- case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
- case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
- case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
- }
+ return -EFAULT;
+ sigset_from_compat(&s, &s32);
}
set_fs(KERNEL_DS);
@@ -603,12 +399,7 @@ long sys32_rt_sigprocmask(u32 how, compat_sigset_t *set,
if (ret)
return ret;
if (oset) {
- switch (_NSIG_WORDS) {
- case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
- case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
- case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
- case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
- }
+ compat_from_sigset(&s32, &s);
if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
return -EFAULT;
}
@@ -626,12 +417,7 @@ long sys32_rt_sigpending(compat_sigset_t *set, compat_size_t sigsetsize)
ret = sys_rt_sigpending(&s, sigsetsize);
set_fs(old_fs);
if (!ret) {
- switch (_NSIG_WORDS) {
- case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
- case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
- case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
- case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
- }
+ compat_from_sigset(&s32, &s);
if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
return -EFAULT;
}
@@ -693,12 +479,7 @@ long sys32_rt_sigtimedwait(compat_sigset_t *uthese, compat_siginfo_t *uinfo,
if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
return -EFAULT;
- switch (_NSIG_WORDS) {
- case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
- case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
- case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
- case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
- }
+ sigset_from_compat(&s, &s32);
if (uts && get_compat_timespec(&t, uts))
return -EFAULT;
set_fs(KERNEL_DS);
@@ -793,15 +574,9 @@ int sys32_rt_sigsuspend(compat_sigset_t* unewset, size_t sigsetsize, int p3,
* Swap the 2 words of the 64-bit sigset_t (they are stored
* in the "wrong" endian in 32-bit user storage).
*/
- switch (_NSIG_WORDS) {
- case 4: newset.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
- case 3: newset.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
- case 2: newset.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
- case 1: newset.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
- }
+ sigset_from_compat(&newset, &s32);
sigdelsetmask(&newset, ~_BLOCKABLE);
-
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->blocked = newset;
@@ -827,237 +602,324 @@ int sys32_rt_sigsuspend(compat_sigset_t* unewset, size_t sigsetsize, int p3,
}
}
-
/*
- * Set up a rt signal frame.
+ * Start Alternate signal stack support
+ *
+ * System Calls
+ * sigaltatck sys32_sigaltstack
*/
-static void setup_rt_frame32(struct pt_regs *regs, struct sigregs32 *frame,
- unsigned int newsp)
+
+int sys32_sigaltstack(u32 newstack, u32 oldstack, int r5,
+ int r6, int r7, int r8, struct pt_regs *regs)
{
- unsigned int copyreg4, copyreg5;
- struct rt_sigframe_32 * rt_sf = (struct rt_sigframe_32 *) (u64)newsp;
- int i;
-
- if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
- goto badframe;
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
+ stack_t uss, uoss;
+ int ret;
+ mm_segment_t old_fs;
+ unsigned long sp;
/*
- * Copy the register contents for the pt_regs structure on the
- * kernel stack to the elf_gregset_t32 structure on the user
- * stack. This is a copy of 64 bit register values to 32 bit
- * register values. The high order 32 bits of the 64 bit
- * registers are not needed since a 32 bit application is
- * running and the saved registers are the contents of the
- * user registers at the time of a system call.
- *
- * The values saved on the user stack will be restored into
- * the registers during the signal return processing
+ * set sp to the user stack on entry to the system call
+ * the system call router sets R9 to the saved registers
*/
- for (i = 0; i < 32; i++) {
- if (__put_user((u32)regs->gpr[i], &frame->gp_regs[i]))
- goto badframe;
- }
+ sp = regs->gpr[1];
- /*
- * Copy the non gpr registers to the user stack
- */
- if (__put_user((u32)regs->gpr[PT_NIP], &frame->gp_regs[PT_NIP])
- || __put_user((u32)regs->gpr[PT_MSR], &frame->gp_regs[PT_MSR])
- || __put_user((u32)regs->gpr[PT_ORIG_R3], &frame->gp_regs[PT_ORIG_R3])
- || __put_user((u32)regs->gpr[PT_CTR], &frame->gp_regs[PT_CTR])
- || __put_user((u32)regs->gpr[PT_LNK], &frame->gp_regs[PT_LNK])
- || __put_user((u32)regs->gpr[PT_XER], &frame->gp_regs[PT_XER])
- || __put_user((u32)regs->gpr[PT_CCR], &frame->gp_regs[PT_CCR])
- || __put_user((u32)regs->gpr[PT_RESULT], &frame->gp_regs[PT_RESULT]))
- goto badframe;
+ /* Put new stack info in local 64 bit stack struct */
+ if (newstack &&
+ (get_user((long)uss.ss_sp,
+ &((stack_32_t *)(long)newstack)->ss_sp) ||
+ __get_user(uss.ss_flags,
+ &((stack_32_t *)(long)newstack)->ss_flags) ||
+ __get_user(uss.ss_size,
+ &((stack_32_t *)(long)newstack)->ss_size)))
+ return -EFAULT;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = do_sigaltstack(newstack ? &uss : NULL, oldstack ? &uoss : NULL,
+ sp);
+ set_fs(old_fs);
+ /* Copy the stack information to the user output buffer */
+ if (!ret && oldstack &&
+ (put_user((long)uoss.ss_sp,
+ &((stack_32_t *)(long)oldstack)->ss_sp) ||
+ __put_user(uoss.ss_flags,
+ &((stack_32_t *)(long)oldstack)->ss_flags) ||
+ __put_user(uoss.ss_size,
+ &((stack_32_t *)(long)oldstack)->ss_size)))
+ return -EFAULT;
+ return ret;
+}
- /*
- * Now copy the floating point registers onto the user stack
- *
- * Also set up so on the completion of the signal handler, the
- * sys_sigreturn will get control to reset the stack
- */
- if (__copy_to_user(&frame->fp_regs, current->thread.fpr,
- ELF_NFPREG * sizeof(double))
- || __put_user(0x38000000U + __NR_rt_sigreturn, &frame->tramp[0]) /* li r0, __NR_rt_sigreturn */
- || __put_user(0x44000002U, &frame->tramp[1])) /* sc */
- goto badframe;
+/*
+ * Set up a signal frame for a "real-time" signal handler
+ * (one which gets siginfo).
+ */
+static void handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset,
+ struct pt_regs * regs, unsigned long newsp)
+{
+ struct rt_sigframe32 __user *rt_sf;
+ struct mcontext32 __user *frame;
+ unsigned long origsp = newsp;
+ compat_sigset_t c_oldset;
- flush_icache_range((unsigned long) &frame->tramp[0],
- (unsigned long) &frame->tramp[2]);
- current->thread.fpscr = 0; /* turn off all fp exceptions */
+ /* Set up Signal Frame */
+ /* Put a Real Time Context onto stack */
+ newsp -= sizeof(*rt_sf);
+ rt_sf = (struct rt_sigframe32 __user *)newsp;
- /*
- * Retrieve rt_sigframe from stack and
- * set up registers for signal handler
- */
- newsp -= __SIGNAL_FRAMESIZE32;
-
+ /* create a stack frame for the caller of the handler */
+ newsp -= __SIGNAL_FRAMESIZE32 + 16;
+
+ if (verify_area(VERIFY_WRITE, (void __user *)newsp, origsp - newsp))
+ goto badframe;
+
+ compat_from_sigset(&c_oldset, oldset);
+
+ /* Put the siginfo & fill in most of the ucontext */
+ if (copy_siginfo_to_user32(&rt_sf->info, info)
+ || __put_user(0, &rt_sf->uc.uc_flags)
+ || __put_user(0, &rt_sf->uc.uc_link)
+ || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
+ || __put_user(sas_ss_flags(regs->gpr[1]),
+ &rt_sf->uc.uc_stack.ss_flags)
+ || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
+ || __put_user((u32)(u64)&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
+ || __copy_to_user(&rt_sf->uc.uc_sigmask, &c_oldset, sizeof(c_oldset)))
+ goto badframe;
- if (put_user((u32)(regs->gpr[1]), (unsigned int *)(u64)newsp)
- || get_user(regs->nip, &rt_sf->uc.uc_mcontext.handler)
- || get_user(regs->gpr[3], &rt_sf->uc.uc_mcontext.signal)
- || get_user(copyreg4, &rt_sf->pinfo)
- || get_user(copyreg5, &rt_sf->puc))
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ if (save_user_regs(regs, frame, __NR_rt_sigreturn))
goto badframe;
- regs->gpr[4] = copyreg4;
- regs->gpr[5] = copyreg5;
- regs->gpr[1] = newsp;
+ if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
+ goto badframe;
+ regs->gpr[1] = (unsigned long) newsp;
+ regs->gpr[3] = sig;
+ regs->gpr[4] = (unsigned long) &rt_sf->info;
+ regs->gpr[5] = (unsigned long) &rt_sf->uc;
regs->gpr[6] = (unsigned long) rt_sf;
+ regs->nip = (unsigned long) ka->sa.sa_handler;
regs->link = (unsigned long) frame->tramp;
+ regs->trap = 0;
return;
badframe:
#if DEBUG_SIG
- printk("badframe in setup_frame32, regs=%p frame=%p newsp=%lx\n",
+ printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
regs, frame, newsp);
#endif
- do_exit(SIGSEGV);
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
}
+static long do_setcontext32(struct ucontext32 __user *ucp, struct pt_regs *regs, int sig)
+{
+ compat_sigset_t c_set;
+ sigset_t set;
+ u32 mcp;
+
+ if (__copy_from_user(&c_set, &ucp->uc_sigmask, sizeof(c_set))
+ || __get_user(mcp, &ucp->uc_regs))
+ return -EFAULT;
+ sigset_from_compat(&set, &c_set);
+ restore_sigmask(&set);
+ if (restore_user_regs(regs, (struct mcontext32 *)(u64)mcp, sig))
+ return -EFAULT;
+
+ return 0;
+}
/*
- * OK, we're invoking a handler
+ * Handle {get,set,swap}_context operations for 32 bits processes
*/
-static void handle_signal32(unsigned long sig, siginfo_t *info,
- sigset_t *oldset, struct pt_regs * regs, unsigned int *newspp,
- unsigned int frame)
+
+long sys32_swapcontext(struct ucontext32 __user *old_ctx,
+ struct ucontext32 __user *new_ctx,
+ int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
- struct sigcontext32 *sc;
- struct rt_sigframe_32 *rt_sf;
- struct k_sigaction *ka = &current->sighand->action[sig-1];
-
- if (regs->trap == 0x0C00 /* System Call! */
- && ((int)regs->result == -ERESTARTNOHAND ||
- (int)regs->result == -ERESTART_RESTARTBLOCK ||
- ((int)regs->result == -ERESTARTSYS &&
- !(ka->sa.sa_flags & SA_RESTART)))) {
- if ((int)regs->result == -ERESTART_RESTARTBLOCK)
- current_thread_info()->restart_block.fn
- = do_no_restart_syscall;
- regs->result = -EINTR;
+ unsigned char tmp;
+ compat_sigset_t c_set;
+
+ /* Context size is for future use. Right now, we only make sure
+ * we are passed something we understand
+ */
+ if (ctx_size < sizeof(struct ucontext32))
+ return -EINVAL;
+
+ if (old_ctx != NULL) {
+ compat_from_sigset(&c_set, &current->blocked);
+ if (verify_area(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
+ || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
+ || __copy_to_user(&old_ctx->uc_sigmask, &c_set, sizeof(c_set))
+ || __put_user((u32)(u64)&old_ctx->uc_mcontext, &old_ctx->uc_regs))
+ return -EFAULT;
}
+ if (new_ctx == NULL)
+ return 0;
+ if (verify_area(VERIFY_READ, new_ctx, sizeof(*new_ctx))
+ || __get_user(tmp, (u8 *) new_ctx)
+ || __get_user(tmp, (u8 *) (new_ctx + 1) - 1))
+ return -EFAULT;
/*
- * Set up the signal frame
- * Determine if a real time frame and a siginfo is required
+ * If we get a fault copying the context into the kernel's
+ * image of the user's registers, we can't just return -EFAULT
+ * because the user's registers will be corrupted. For instance
+ * the NIP value may have been updated but not some of the
+ * other registers. Given that we have done the verify_area
+ * and successfully read the first and last bytes of the region
+ * above, this should only happen in an out-of-memory situation
+ * or if another thread unmaps the region containing the context.
+ * We kill the task with a SIGSEGV in this situation.
*/
- if (ka->sa.sa_flags & SA_SIGINFO) {
- *newspp -= sizeof(*rt_sf);
- rt_sf = (struct rt_sigframe_32 *)(u64)(*newspp);
- if (verify_area(VERIFY_WRITE, rt_sf, sizeof(*rt_sf)))
- goto badframe;
- if (__put_user((u32)(u64)ka->sa.sa_handler,
- &rt_sf->uc.uc_mcontext.handler)
- || __put_user((u32)(u64)&rt_sf->info, &rt_sf->pinfo)
- || __put_user((u32)(u64)&rt_sf->uc, &rt_sf->puc)
- /* put the siginfo on the user stack */
- || copy_siginfo_to_user32(&rt_sf->info, info)
- /* set the ucontext on the user stack */
- || __put_user(0, &rt_sf->uc.uc_flags)
- || __put_user(0, &rt_sf->uc.uc_link)
- || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
- || __put_user(sas_ss_flags(regs->gpr[1]),
- &rt_sf->uc.uc_stack.ss_flags)
- || __put_user(current->sas_ss_size,
- &rt_sf->uc.uc_stack.ss_size)
- || __copy_to_user(&rt_sf->uc.uc_sigmask,
- oldset, sizeof(*oldset))
- /* point the mcontext.regs to the pramble register frame */
- || __put_user(frame, &rt_sf->uc.uc_mcontext.regs)
- || __put_user(sig,&rt_sf->uc.uc_mcontext.signal))
- goto badframe;
- } else {
- /* Put a sigcontext on the stack */
- *newspp -= sizeof(*sc);
- sc = (struct sigcontext32 *)(u64)*newspp;
- if (verify_area(VERIFY_WRITE, sc, sizeof(*sc)))
- goto badframe;
- /*
- * Note the upper 32 bits of the signal mask are stored
- * in the unused part of the signal stack frame
- */
- if (__put_user((u32)(u64)ka->sa.sa_handler, &sc->handler)
- || __put_user(oldset->sig[0], &sc->oldmask)
- || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
- || __put_user((unsigned int)frame, &sc->regs)
- || __put_user(sig, &sc->signal))
- goto badframe;
- }
+ if (do_setcontext32(new_ctx, regs, 0))
+ do_exit(SIGSEGV);
- if (ka->sa.sa_flags & SA_ONESHOT)
- ka->sa.sa_handler = SIG_DFL;
+ return 0;
+}
+
+long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe32 __user *rt_sf;
+ int ret;
+
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ rt_sf = (struct rt_sigframe32 __user *)
+ (regs->gpr[1] + __SIGNAL_FRAMESIZE32 + 16);
+ if (verify_area(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
+ goto bad;
+ if (do_setcontext32(&rt_sf->uc, regs, 1))
+ goto bad;
+
+ /*
+ * It's not clear whether or why it is desirable to save the
+ * sigaltstack setting on signal delivery and restore it on
+ * signal return. But other architectures do this and we have
+ * always done it up until now so it is probably better not to
+ * change it. -- paulus
+ * We use the sys32_ version that does the 32/64 bits conversion
+ * and takes userland pointer directly. What about error checking ?
+ * nobody does any...
+ */
+ sys32_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
+
+ regs->result &= 0xFFFFFFFF;
+ ret = regs->result;
+
+ return ret;
+
+ bad:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+
+/*
+ * OK, we're invoking a handler
+ */
+static void handle_signal32(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset,
+ struct pt_regs * regs, unsigned long newsp)
+{
+ struct sigcontext32 __user *sc;
+ struct sigregs32 __user *frame;
+ unsigned long origsp = newsp;
+
+ /* Set up Signal Frame */
+ newsp -= sizeof(struct sigregs32);
+ frame = (struct sigregs32 __user *) newsp;
+
+ /* Put a sigcontext on the stack */
+ newsp -= sizeof(*sc);
+ sc = (struct sigcontext32 __user *) newsp;
+
+ /* create a stack frame for the caller of the handler */
+ newsp -= __SIGNAL_FRAMESIZE32;
+
+ if (verify_area(VERIFY_WRITE, (void *) newsp, origsp - newsp))
+ goto badframe;
+
+#if _NSIG != 64
+#error "Please adjust handle_signal32()"
+#endif
+ if (__put_user((u32)(u64)ka->sa.sa_handler, &sc->handler)
+ || __put_user(oldset->sig[0], &sc->oldmask)
+ || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
+ || __put_user((u32)(u64)frame, &sc->regs)
+ || __put_user(sig, &sc->signal))
+ goto badframe;
+
+ if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
+ goto badframe;
+
+ if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
+ goto badframe;
+ regs->gpr[1] = (unsigned long) newsp;
+ regs->gpr[3] = sig;
+ regs->gpr[4] = (unsigned long) sc;
+ regs->nip = (unsigned long) ka->sa.sa_handler;
+ regs->link = (unsigned long) frame->mctx.tramp;
+ regs->trap = 0;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
return;
badframe:
#if DEBUG_SIG
- printk("badframe in handle_signal32, regs=%p frame=%lx newsp=%lx\n",
+ printk("badframe in handle_signal, regs=%p frame=%x newsp=%x\n",
regs, frame, *newspp);
- printk("sc=%p sig=%d ka=%p info=%p oldset=%p\n", sc, sig, ka, info, oldset);
#endif
- do_exit(SIGSEGV);
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
}
-
/*
- * Start Alternate signal stack support
- *
- * System Calls
- * sigaltatck sys32_sigaltstack
+ * Do a signal return; undo the signal stack.
*/
-
-int sys32_sigaltstack(u32 newstack, u32 oldstack, int p3,
- int p4, int p6, int p7, struct pt_regs *regs)
+long sys32_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
+ struct pt_regs *regs)
{
- stack_t uss, uoss;
+ struct sigcontext32 __user *sc;
+ struct sigcontext32 sigctx;
+ struct mcontext32 __user *sr;
+ sigset_t set;
int ret;
- mm_segment_t old_fs;
- unsigned long sp;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ sc = (struct sigcontext32 __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
+ if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
+ goto badframe;
/*
- * set sp to the user stack on entry to the system call
- * the system call router sets R9 to the saved registers
+ * Note that PPC32 puts the upper 32 bits of the sigmask in the
+ * unused part of the signal stackframe
*/
- sp = regs->gpr[1];
+ set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
+ restore_sigmask(&set);
- /* Put new stack info in local 64 bit stack struct */
- if (newstack &&
- (get_user((long)uss.ss_sp,
- &((stack_32_t *)(long)newstack)->ss_sp) ||
- __get_user(uss.ss_flags,
- &((stack_32_t *)(long)newstack)->ss_flags) ||
- __get_user(uss.ss_size,
- &((stack_32_t *)(long)newstack)->ss_size)))
- return -EFAULT;
+ sr = (struct mcontext32 *)(u64)sigctx.regs;
+ if (verify_area(VERIFY_READ, sr, sizeof(*sr))
+ || restore_user_regs(regs, sr, 1))
+ goto badframe;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = do_sigaltstack(newstack ? &uss : NULL, oldstack ? &uoss : NULL,
- sp);
- set_fs(old_fs);
- /* Copy the stack information to the user output buffer */
- if (!ret && oldstack &&
- (put_user((long)uoss.ss_sp,
- &((stack_32_t *)(long)oldstack)->ss_sp) ||
- __put_user(uoss.ss_flags,
- &((stack_32_t *)(long)oldstack)->ss_flags) ||
- __put_user(uoss.ss_size,
- &((stack_32_t *)(long)oldstack)->ss_size)))
- return -EFAULT;
+ regs->result &= 0xFFFFFFFF;
+ ret = regs->result;
return ret;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
}
@@ -1082,7 +944,7 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
siginfo_t info;
struct k_sigaction *ka;
unsigned int frame, newsp;
- int signr;
+ int signr, ret;
if (!oldset)
oldset = &current->blocked;
@@ -1090,40 +952,60 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
newsp = frame = 0;
signr = get_signal_to_deliver(&info, regs, NULL);
- if (signr > 0) {
- ka = &current->sighand->action[signr-1];
- if ((ka->sa.sa_flags & SA_ONSTACK)
- && (!on_sig_stack(regs->gpr[1])))
- newsp = (current->sas_ss_sp + current->sas_ss_size);
- else
- newsp = regs->gpr[1];
- newsp = frame = newsp - sizeof(struct sigregs32);
-
- /* Whee! Actually deliver the signal. */
- handle_signal32(signr, &info, oldset, regs, &newsp, frame);
- }
- if (regs->trap == 0x0C00) { /* System Call! */
- if ((int)regs->result == -ERESTARTNOHAND ||
- (int)regs->result == -ERESTARTSYS ||
- (int)regs->result == -ERESTARTNOINTR) {
- regs->gpr[3] = regs->orig_gpr3;
- regs->nip -= 4; /* Back up & retry system call */
- regs->result = 0;
- } else if ((int)regs->result == -ERESTART_RESTARTBLOCK) {
- regs->gpr[0] = __NR_restart_syscall;
- regs->nip -= 4;
+ ka = (signr == 0)? NULL: &current->sighand->action[signr-1];
+
+ if (regs->trap == 0x0C00 /* System Call! */
+ && regs->ccr & 0x10000000 /* error signalled */
+ && ((ret = regs->gpr[3]) == ERESTARTSYS
+ || ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
+ || ret == ERESTART_RESTARTBLOCK)) {
+
+ if (signr > 0
+ && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
+ || (ret == ERESTARTSYS
+ && !(ka->sa.sa_flags & SA_RESTART)))) {
+ /* make the system call return an EINTR error */
+ regs->result = -EINTR;
+ regs->gpr[3] = EINTR;
+ /* note that the cr0.SO bit is already set */
+ } else {
+ regs->nip -= 4; /* Back up & retry system call */
regs->result = 0;
+ regs->trap = 0;
+ if (ret == ERESTART_RESTARTBLOCK)
+ regs->gpr[0] = __NR_restart_syscall;
+ else
+ regs->gpr[3] = regs->orig_gpr3;
}
}
- if (newsp == frame)
+ if (signr == 0)
return 0; /* no signals delivered */
- /* Invoke correct stack setup routine */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
+ && (!on_sig_stack(regs->gpr[1])))
+ newsp = (current->sas_ss_sp + current->sas_ss_size);
+ else
+ newsp = regs->gpr[1];
+ newsp &= ~0xfUL;
+
+ /* Whee! Actually deliver the signal. */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame32(regs, (struct sigregs32*)(u64)frame, newsp);
+ handle_rt_signal32(signr, ka, &info, oldset, regs, newsp);
else
- setup_frame32(regs, (struct sigregs32*)(u64)frame, newsp);
+ handle_signal32(signr, ka, &info, oldset, regs, newsp);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked, signr);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+
return 1;
}
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
index b436b9951ba2..e0caa436821b 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/ppc64/kernel/sys_ppc32.c
@@ -2106,6 +2106,10 @@ long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2,
goto out;
if (regs->msr & MSR_FP)
giveup_fpu(current);
+#ifdef CONFIG_ALTIVEC
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+#endif /* CONFIG_ALTIVEC */
error = do_execve32(filename, (u32*) a1, (u32*) a2, regs);
@@ -2126,9 +2130,25 @@ void start_thread32(struct pt_regs* regs, unsigned long nip, unsigned long sp)
regs->nip = nip;
regs->gpr[1] = sp;
regs->msr = MSR_USER32;
+#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = 0;
+#endif /* CONFIG_SMP */
current->thread.fpscr = 0;
+ memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
+#ifdef CONFIG_ALTIVEC
+#ifndef CONFIG_SMP
+ if (last_task_used_altivec == current)
+ last_task_used_altivec = 0;
+#endif /* CONFIG_SMP */
+ memset(current->thread.vr, 0, sizeof(current->thread.vr));
+ current->thread.vscr.u[0] = 0;
+ current->thread.vscr.u[1] = 0;
+ current->thread.vscr.u[2] = 0;
+ current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
+ current->thread.vrsave = 0;
+ current->thread.used_vr = 0;
+#endif /* CONFIG_ALTIVEC */
}
extern asmlinkage int sys_prctl(int option, unsigned long arg2, unsigned long arg3,
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
index 7e9c0aee1e13..bcd9c9cc1cee 100644
--- a/arch/ppc64/kernel/traps.c
+++ b/arch/ppc64/kernel/traps.c
@@ -420,6 +420,14 @@ KernelFPUnavailableException(struct pt_regs *regs)
}
void
+KernelAltivecUnavailableException(struct pt_regs *regs)
+{
+ printk("Illegal VMX/Altivec used in kernel (task=0x%p, "
+ "pc=0x%016lx, trap=0x%lx)\n", current, regs->nip, regs->trap);
+ panic("Unrecoverable VMX/Altivec Unavailable Exception in Kernel");
+}
+
+void
SingleStepException(struct pt_regs *regs)
{
siginfo_t info;
@@ -488,6 +496,17 @@ AlignmentException(struct pt_regs *regs)
_exception(SIGBUS, &info, regs);
}
+#ifdef CONFIG_ALTIVEC
+void
+AltivecAssistException(struct pt_regs *regs)
+{
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ /* XXX quick hack for now: set the non-Java bit in the VSCR */
+ current->thread.vscr.u[3] |= 0x10000;
+}
+#endif /* CONFIG_ALTIVEC */
+
void __init trap_init(void)
{
}