summaryrefslogtreecommitdiff
path: root/arch/ppc64/kernel/process.c
diff options
context:
space:
mode:
authorAndrew Morton <akpm@osdl.org>2004-01-18 18:22:39 -0800
committerLinus Torvalds <torvalds@home.osdl.org>2004-01-18 18:22:39 -0800
commit04879b04bf500585d6b015e440be3ee7bdaf9cd4 (patch)
tree1025214cac9a033304d965d16272725e07207c82 /arch/ppc64/kernel/process.c
parentd4c6e4e119d3cd89b80321c7abb4914d33b594d1 (diff)
[PATCH] ppc64: VMX (Altivec) support & signal32 rework, from Ben Herrenschmidt
From: Anton Blanchard <anton@samba.org> VMX (Altivec) support & signal32 rework, from Ben Herrenschmidt
Diffstat (limited to 'arch/ppc64/kernel/process.c')
-rw-r--r--arch/ppc64/kernel/process.c105
1 files changed, 93 insertions, 12 deletions
diff --git a/arch/ppc64/kernel/process.c b/arch/ppc64/kernel/process.c
index 0b16f7615ad5..041827788def 100644
--- a/arch/ppc64/kernel/process.c
+++ b/arch/ppc64/kernel/process.c
@@ -50,7 +50,10 @@
#include <asm/cputable.h>
#include <asm/sections.h>
+#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
+struct task_struct *last_task_used_altivec = NULL;
+#endif
struct mm_struct ioremap_mm = { pgd : ioremap_dir
,page_table_lock : SPIN_LOCK_UNLOCKED };
@@ -58,8 +61,7 @@ struct mm_struct ioremap_mm = { pgd : ioremap_dir
char *sysmap = NULL;
unsigned long sysmap_size = 0;
-void
-enable_kernel_fp(void)
+void enable_kernel_fp(void)
{
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
@@ -70,6 +72,7 @@ enable_kernel_fp(void)
giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
+EXPORT_SYMBOL(enable_kernel_fp);
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
@@ -85,6 +88,31 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
return 1;
}
+#ifdef CONFIG_ALTIVEC
+
+void enable_kernel_altivec(void)
+{
+#ifdef CONFIG_SMP
+ if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
+ giveup_altivec(current);
+ else
+		giveup_altivec(NULL);	/* just enables Altivec for kernel */
+#else
+ giveup_altivec(last_task_used_altivec);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_altivec);
+
+int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+ return 1;
+}
+
+#endif /* CONFIG_ALTIVEC */
+
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
@@ -104,8 +132,20 @@ struct task_struct *__switch_to(struct task_struct *prev,
*/
if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
giveup_fpu(prev);
+#ifdef CONFIG_ALTIVEC
+ if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
+ giveup_altivec(prev);
+#endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */
+#if defined(CONFIG_ALTIVEC) && !defined(CONFIG_SMP)
+	/* Avoid the trap. On smp this never happens since
+ * we don't set last_task_used_altivec -- Cort
+ */
+ if (new->thread.regs && last_task_used_altivec == new)
+ new->thread.regs->msr |= MSR_VEC;
+#endif /* CONFIG_ALTIVEC */
+
new_thread = &new->thread;
old_thread = &current->thread;
@@ -158,8 +198,14 @@ void show_regs(struct pt_regs * regs)
void exit_thread(void)
{
+#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+ if (last_task_used_altivec == current)
+ last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#endif /* CONFIG_SMP */
}
void flush_thread(void)
@@ -169,8 +215,14 @@ void flush_thread(void)
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
+#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+ if (last_task_used_altivec == current)
+ last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#endif /* CONFIG_SMP */
}
void
@@ -178,6 +230,25 @@ release_thread(struct task_struct *t)
{
}
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+
+ if (regs == NULL)
+ return;
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
+#ifdef CONFIG_ALTIVEC
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+#endif /* CONFIG_ALTIVEC */
+}
+
/*
* Copy a thread..
*/
@@ -268,9 +339,25 @@ void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
regs->gpr[1] = sp;
regs->gpr[2] = toc;
regs->msr = MSR_USER64;
+#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = 0;
+#endif /* CONFIG_SMP */
+ memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
+#ifdef CONFIG_ALTIVEC
+#ifndef CONFIG_SMP
+ if (last_task_used_altivec == current)
+ last_task_used_altivec = 0;
+#endif /* CONFIG_SMP */
+ memset(current->thread.vr, 0, sizeof(current->thread.vr));
+ current->thread.vscr.u[0] = 0;
+ current->thread.vscr.u[1] = 0;
+ current->thread.vscr.u[2] = 0;
+ current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
+ current->thread.vrsave = 0;
+ current->thread.used_vr = 0;
+#endif /* CONFIG_ALTIVEC */
}
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
@@ -314,9 +401,6 @@ int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3,
}
}
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-
return do_fork(clone_flags & ~CLONE_IDLETASK, p2, regs, 0,
(int *)parent_tidptr, (int *)child_tidptr);
}
@@ -325,9 +409,6 @@ int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-
return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}
@@ -335,9 +416,6 @@ int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0,
NULL, NULL);
}
@@ -355,7 +433,10 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
goto out;
if (regs->msr & MSR_FP)
giveup_fpu(current);
-
+#ifdef CONFIG_ALTIVEC
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+#endif /* CONFIG_ALTIVEC */
error = do_execve(filename, (char **) a1, (char **) a2, regs);
if (error == 0)