author     Paul Mackerras <paulus@tango.paulus.ozlabs.org>   2002-02-16 20:14:24 +1100
committer  Paul Mackerras <paulus@tango.paulus.ozlabs.org>   2002-02-16 20:14:24 +1100
commit     03aed178cf27d8b67697883ae698372b662f3841 (patch)
tree       ac279d66664a024b29c7b117cce6c573f50d0b69 /arch
parent     bff60e8b905ef243391391558e62a4826e86e207 (diff)
PPC fixes for SMP; also fix the stack overflow detection, remove
various bits of cruft, and remove the third argument to switch_to.
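The old transfer_to_handler code removed below (from head.S, head_4xx.S and head_8xx.S) detected overflow by checking whether r1 fell inside [current, current + TASK_STRUCT_SIZE), which no longer works once the kernel stack is not part of the task_struct. A minimal C sketch of what the new check in entry.S does, assuming the 2.5-style layout in which struct thread_info sits at the bottom of the THREAD_SIZE kernel stack (sketch only, not code from this commit):

/*
 * Sketch of the test now done on the exception-from-kernel path of
 * transfer_to_handler: the stack has overflowed once the stack
 * pointer has come down to the thread_info at the stack base.
 */
static inline int kernel_stack_overflowed(unsigned long sp,
                                          struct thread_info *ti)
{
        return sp <= (unsigned long)ti;  /* "cmplw r1,r22; ble- stack_ovf" */
}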
Diffstat (limited to 'arch')
-rw-r--r--  arch/ppc/4xx_io/Config.in       16
-rw-r--r--  arch/ppc/config.in              29
-rw-r--r--  arch/ppc/kernel/entry.S         66
-rw-r--r--  arch/ppc/kernel/head.S          77
-rw-r--r--  arch/ppc/kernel/head_4xx.S      81
-rw-r--r--  arch/ppc/kernel/head_8xx.S      58
-rw-r--r--  arch/ppc/kernel/iSeries_head.S  19
-rw-r--r--  arch/ppc/kernel/irq.c            9
-rw-r--r--  arch/ppc/kernel/misc.S           7
-rw-r--r--  arch/ppc/kernel/mk_defs.c        8
-rw-r--r--  arch/ppc/kernel/process.c       12
-rw-r--r--  arch/ppc/kernel/setup.c          2
-rw-r--r--  arch/ppc/kernel/smp.c           78
-rw-r--r--  arch/ppc/lib/locks.c            14
-rw-r--r--  arch/ppc/mm/hashtable.S         13
-rw-r--r--  arch/ppc/mm/init.c              46
16 files changed, 181 insertions, 354 deletions
diff --git a/arch/ppc/4xx_io/Config.in b/arch/ppc/4xx_io/Config.in
deleted file mode 100644
index e9ac9bb95d07..000000000000
--- a/arch/ppc/4xx_io/Config.in
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# MPC4xx driver options
-#
-mainmenu_option next_comment
-
-comment 'MPC4xx Driver Options'
-
-if [ "$CONFIG_STB03xxx" = "y" ]; then
- bool 'STB IR Keyboard' CONFIG_STB_KB
- bool 'SICC Serial port' CONFIG_SERIAL_SICC
- if [ "$CONFIG_SERIAL_SICC" = "y" -a "$CONFIG_UART0_TTYS1" = "y" ]; then
- define_bool CONFIG_UART1_DFLT_CONSOLE y
- define_bool CONFIG_SERIAL_SICC_CONSOLE y
- fi
-fi
-endmenu
diff --git a/arch/ppc/config.in b/arch/ppc/config.in
index 3ad3dec12539..02ccbb6bf388 100644
--- a/arch/ppc/config.in
+++ b/arch/ppc/config.in
@@ -294,6 +294,11 @@ if [ "$CONFIG_ADVANCED_OPTIONS" = "y" ]; then
fi
fi
+if [ "$CONFIG_ALL_PPC" = "y" ]; then
+ bool 'Support for ISA-bus hardware' CONFIG_ISA
+else
+ define_bool CONFIG_ISA n
+fi
define_bool CONFIG_EISA n
define_bool CONFIG_SBUS n
@@ -322,12 +327,6 @@ else
fi
fi
-if [ "$CONFIG_ALL_PPC" = "y" ]; then
- bool 'Support for ISA-bus hardware' CONFIG_ISA
-else
- define_bool CONFIG_ISA n
-fi
-
# only elf supported, a.out is not -- Cort
if [ "$CONFIG_PROC_FS" = "y" ]; then
define_bool CONFIG_KCORE_ELF y
@@ -588,8 +587,18 @@ if [ "$CONFIG_8260" = "y" ]; then
source arch/ppc/8260_io/Config.in
fi
-if [ "$CONFIG_4xx" = "y" ]; then
-source arch/ppc/4xx_io/Config.in
+if [ "$CONFIG_4xx" = "y"]; then
+ mainmenu_option next_comment
+ comment 'IBM 4xx options'
+ if [ "$CONFIG_STB03xxx" = "y" ]; then
+ bool 'STB IR Keyboard' CONFIG_STB_KB
+ bool 'SICC Serial port' CONFIG_SERIAL_SICC
+ if [ "$CONFIG_SERIAL_SICC" = "y" -a "$CONFIG_UART0_TTYS1" = "y" ]; then
+ define_bool CONFIG_UART1_DFLT_CONSOLE y
+ define_bool CONFIG_SERIAL_SICC_CONSOLE y
+ fi
+ fi
+ endmenu
fi
source drivers/usb/Config.in
@@ -598,6 +607,8 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
+source lib/Config.in
+
mainmenu_option next_comment
comment 'Kernel hacking'
@@ -629,5 +640,3 @@ if [ "$CONFIG_MCPN765" = "y" -o "$CONFIG_SANDPOINT" = "y" \
bool 'Support for early boot texts over serial port' CONFIG_SERIAL_TEXT_DEBUG
fi
endmenu
-
-source lib/Config.in
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 35e5c9c1363f..2bd06ecca9f6 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -41,6 +41,72 @@
#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK
+#ifndef CONFIG_PPC_ISERIES /* iSeries version is in iSeries_head.S */
+/*
+ * This code finishes saving the registers to the exception frame
+ * and jumps to the appropriate handler for the exception, turning
+ * on address translation.
+ */
+ .globl transfer_to_handler
+transfer_to_handler:
+ stw r22,_NIP(r21)
+ stw r23,_MSR(r21)
+ SAVE_4GPRS(8, r21)
+ SAVE_8GPRS(12, r21)
+ SAVE_8GPRS(24, r21)
+ andi. r23,r23,MSR_PR
+ mfspr r23,SPRG3
+ addi r2,r23,-THREAD /* set r2 to current */
+ tovirt(r2,r2)
+ beq 2f /* if from user, fix up THREAD.regs */
+ addi r24,r1,STACK_FRAME_OVERHEAD
+ stw r24,PT_REGS(r23)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ mfspr r22,SPRN_VRSAVE /* if G4, save vrsave register value */
+ stw r22,THREAD_VRSAVE(r23)
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+ b 3f
+2: /* if from kernel, check for stack overflow */
+ lwz r22,THREAD_INFO-THREAD(r23)
+ cmplw r1,r22 /* if r1 <= current->thread_info */
+ ble- stack_ovf /* then the kernel stack overflowed */
+3:
+ mflr r23
+ andi. r24,r23,0x3f00 /* get vector offset */
+ stw r24,TRAP(r21)
+ li r22,0
+ stw r22,RESULT(r21)
+ mtspr SPRG2,r22 /* r1 is now kernel sp */
+ lwz r24,0(r23) /* virtual address of handler */
+ lwz r23,4(r23) /* where to go when done */
+ FIX_SRR1(r20,r22)
+ mtspr SRR0,r24
+ mtspr SRR1,r20
+ mtlr r23
+ SYNC
+ RFI /* jump to handler, enable MMU */
+
+/*
+ * On kernel stack overflow, load up an initial stack pointer
+ * and call StackOverflow(regs), which should not return.
+ */
+stack_ovf:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lis r1,init_thread_union@ha
+ addi r1,r1,init_thread_union@l
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ lis r24,StackOverflow@ha
+ addi r24,r24,StackOverflow@l
+ li r20,MSR_KERNEL
+ FIX_SRR1(r20,r22)
+ mtspr SRR0,r24
+ mtspr SRR1,r20
+ SYNC
+ RFI
+#endif /* CONFIG_PPC_ISERIES */
+
#ifdef SHOW_SYSCALLS_TASK
.data
show_syscalls_task:
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 0cf9e3027900..cb60c3343fed 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -735,69 +735,6 @@ InstructionSegment:
#endif /* CONFIG_PPC64BRIDGE */
/*
- * This code finishes saving the registers to the exception frame
- * and jumps to the appropriate handler for the exception, turning
- * on address translation.
- */
- .globl transfer_to_handler
-transfer_to_handler:
- stw r22,_NIP(r21)
- stw r23,_MSR(r21)
- SAVE_4GPRS(8, r21)
- SAVE_8GPRS(12, r21)
- SAVE_8GPRS(24, r21)
- andi. r23,r23,MSR_PR
- mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
- beq 2f
- addi r24,r1,STACK_FRAME_OVERHEAD
- stw r24,PT_REGS(r23)
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- mfspr r22,SPRN_VRSAVE /* if G4, save vrsave register value */
- stw r22,THREAD_VRSAVE(r23)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-2: addi r2,r23,-THREAD /* set r2 to current */
- tovirt(r2,r2)
- mflr r23
- andi. r24,r23,0x3f00 /* get vector offset */
- stw r24,TRAP(r21)
- li r22,0
- stw r22,RESULT(r21)
- mtspr SPRG2,r22 /* r1 is now kernel sp */
- addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
- cmplw 0,r1,r2
- cmplw 1,r1,r24
- crand 1,1,4
- bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
- lwz r24,0(r23) /* virtual address of handler */
- lwz r23,4(r23) /* where to go when done */
- FIX_SRR1(r20,r22)
- mtspr SRR0,r24
- mtspr SRR1,r20
- mtlr r23
- SYNC
- RFI /* jump to handler, enable MMU */
-
-/*
- * On kernel stack overflow, load up an initial stack pointer
- * and call StackOverflow(regs), which should not return.
- */
-stack_ovf:
- addi r3,r1,STACK_FRAME_OVERHEAD
- lis r1,init_thread_union@ha
- addi r1,r1,init_thread_union@l
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
- lis r24,StackOverflow@ha
- addi r24,r24,StackOverflow@l
- li r20,MSR_KERNEL
- FIX_SRR1(r20,r22)
- mtspr SRR0,r24
- mtspr SRR1,r20
- SYNC
- RFI
-
-/*
* This task wants to use the FPU now.
* On UP, disable FP for the task which had the FPU previously,
* and save its floating-point registers in its thread_struct.
@@ -1221,15 +1158,15 @@ __secondary_start:
bl identify_cpu
bl call_setup_cpu /* Call setup_cpu for this CPU */
- /* get current */
- lis r2,current_set@h
- ori r2,r2,current_set@l
- tophys(r2,r2)
- slwi r24,r24,2 /* get current_set[cpu#] */
- lwzx r2,r2,r24
+ /* get current_thread_info and current */
+ lis r1,secondary_ti@ha
+ tophys(r1,r1)
+ lwz r1,secondary_ti@l(r1)
+ tophys(r2,r1)
+ lwz r2,TI_TASK(r2)
/* stack */
- addi r1,r2,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
li r0,0
tophys(r3,r1)
stw r0,0(r3)
diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S
index f34425bfedfc..f7e8ded84d12 100644
--- a/arch/ppc/kernel/head_4xx.S
+++ b/arch/ppc/kernel/head_4xx.S
@@ -826,87 +826,6 @@ finish_tlb_load:
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
-/* This code finishes saving the registers to the exception frame
- * and jumps to the appropriate handler for the exception, turning
- * on address translation.
- */
-_GLOBAL(transfer_to_handler)
- stw r22,_NIP(r21) /* Save the faulting IP on the stack */
- stw r23,_MSR(r21) /* Save the exception MSR on stack */
- SAVE_4GPRS(8, r21) /* Save r8 through r11 on the stack */
- SAVE_8GPRS(12, r21) /* Save r12 through r19 on the stack */
- SAVE_8GPRS(24, r21) /* Save r24 through r31 on the stack */
- andi. r23,r23,MSR_PR /* Is this from user space? */
- mfspr r23,SPRN_SPRG3 /* If from user, fix up THREAD.regs */
- beq 2f /* No, it is from the kernel; branch. */
- addi r24,r1,STACK_FRAME_OVERHEAD
- stw r24,PT_REGS(r23)
-2: addi r2,r23,-THREAD /* Set r2 to current thread */
- tovirt(r2,r2)
- mflr r23
- andi. r24,r23,0x3f00 /* Get vector offset */
- stw r24,TRAP(r21)
- li r22,RESULT
- /* No need to put an erratum #77 workaround here
- because interrupts are currently disabled */
- stwcx. r22,r22,r21 /* Clear the reservation */
- li r22,0
- stw r22,RESULT(r21)
- mtspr SPRN_SPRG2,r22 /* r1 is now the kernel stack pointer */
- addi r24,r2,TASK_STRUCT_SIZE /* Check for kernel stack overflow */
- cmplw cr0,r1,r2
- cmplw cr1,r1,r24
- crand cr1,cr1,cr4
- bgt- stack_ovf /* If r2 < r1 < r2 + TASK_STRUCT_SIZE */
- lwz r24,0(r23) /* Virtual address of the handler */
- lwz r23,4(r23) /* Handler return pointer */
- cmpwi cr0,r7,STND_EXC /* What type of exception is this? */
- bne 3f /* It is a critical exception... */
-
- /* Standard exception jump path
- */
-
- /* We have to recover r7 from the register save stack.
- * It was used to indicate standard/critical exception. In
- * the case of a standard exception that is the system call
- * trap, it may have originally contained one of the syscall
- * parameters and we have to get it back now.
- */
- lwz r7,GPR7(r21)
- mtspr SPRN_SRR0,r24 /* Set up the instruction pointer */
- mtspr SPRN_SRR1,r20 /* Set up the machine state register */
- mtlr r23 /* Set up the return pointer */
- SYNC
- /* We shouldn't need a 405 erratum #77 workaround here, because we're not
- * actually returning to the interrupted instruction yet. */
- rfi
-
- /* Critical exception jump path
- */
-
-3: mtspr SPRN_SRR2,r24 /* Set up the instruction pointer */
- mtspr SPRN_SRR3,r20 /* Set up the machine state register */
- mtlr r23 /* Set up the return pointer */
- SYNC
- rfci
-
-/* On kernel stack overlow, load up an initial stack pointer and call
- * StackOverflow(regs), which should NOT return.
- */
-
-stack_ovf:
- addi r3,r1,STACK_FRAME_OVERHEAD
- lis r1,init_thread_union@ha
- addi r1,r1,init_thread_union@l
- addi r1,r1,THREAD_SIZE - STACK_FRAME_OVERHEAD
- lis r24,StackOverflow@ha
- addi r24,r24,StackOverflow@l
- li r20,MSR_KERNEL
- mtspr SPRN_SRR0,r24
- mtspr SPRN_SRR1,r20
- SYNC
- rfi
-
/* extern void giveup_altivec(struct task_struct *prev)
*
* The PowerPC 4xx family of processors do not have AltiVec capabilities, so
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
index 7c0f14b0f012..8d9b0d851dc7 100644
--- a/arch/ppc/kernel/head_8xx.S
+++ b/arch/ppc/kernel/head_8xx.S
@@ -637,63 +637,6 @@ DataTLBError:
. = 0x2000
-/*
- * This code finishes saving the registers to the exception frame
- * and jumps to the appropriate handler for the exception, turning
- * on address translation.
- */
- .globl transfer_to_handler
-transfer_to_handler:
- stw r22,_NIP(r21)
- lis r22,MSR_POW@h
- andc r23,r23,r22
- stw r23,_MSR(r21)
- SAVE_4GPRS(8, r21)
- SAVE_8GPRS(12, r21)
- SAVE_8GPRS(24, r21)
- andi. r23,r23,MSR_PR
- mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
- beq 2f
- addi r24,r1,STACK_FRAME_OVERHEAD
- stw r24,PT_REGS(r23)
-2: addi r2,r23,-THREAD /* set r2 to current */
- tovirt(r2,r2)
- mflr r23
- andi. r24,r23,0x3f00 /* get vector offset */
- stw r24,TRAP(r21)
- li r22,0
- stw r22,RESULT(r21)
- mtspr SPRG2,r22 /* r1 is now kernel sp */
- addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
- cmplw 0,r1,r2
- cmplw 1,r1,r24
- crand 1,1,4
- bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
- lwz r24,0(r23) /* virtual address of handler */
- lwz r23,4(r23) /* where to go when done */
- mtspr SRR0,r24
- mtspr SRR1,r20
- mtlr r23
- SYNC
- rfi /* jump to handler, enable MMU */
-
-/*
- * On kernel stack overflow, load up an initial stack pointer
- * and call StackOverflow(regs), which should not return.
- */
-stack_ovf:
- addi r3,r1,STACK_FRAME_OVERHEAD
- lis r1,init_thread_union@ha
- addi r1,r1,init_thread_union@l
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
- lis r24,StackOverflow@ha
- addi r24,r24,StackOverflow@l
- li r20,MSR_KERNEL
- mtspr SRR0,r24
- mtspr SRR1,r20
- SYNC
- rfi
-
.globl giveup_fpu
giveup_fpu:
blr
@@ -707,7 +650,6 @@ _GLOBAL(__setup_cpu_8xx)
* This is where the main kernel code starts.
*/
start_here:
-
/* ptr to current */
lis r2,init_task@h
ori r2,r2,init_task@l
diff --git a/arch/ppc/kernel/iSeries_head.S b/arch/ppc/kernel/iSeries_head.S
index a1d84d6a6c01..a191f75ae4ea 100644
--- a/arch/ppc/kernel/iSeries_head.S
+++ b/arch/ppc/kernel/iSeries_head.S
@@ -531,13 +531,17 @@ transfer_to_handler:
SAVE_GPR(31, r1)
andi. r23,r23,MSR_PR
- mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
- beq 2f
+ mfspr r23,SPRG3
+ addi r2,r23,-THREAD /* set r2 to current */
+ beq 2f /* if from user, fix up THREAD.regs */
addi r24,r1,STACK_FRAME_OVERHEAD
stw r24,PT_REGS(r23)
-2: addi r2,r23,-THREAD /* set r2 to current */
- li r22,RESULT
- stwcx. r22,r22,r1 /* to clear the reservation */
+ b 3f
+2: /* if from kernel, check for stack overflow */
+ lwz r22,THREAD_INFO(r2)
+ cmplw r1,r22 /* if r1 <= current->thread_info */
+ ble- stack_ovf /* then the kernel stack overflowed */
+3:
li r22,0
stw r22,RESULT(r1)
mfspr r23,SPRG1 /* Get Paca address */
@@ -545,11 +549,6 @@ transfer_to_handler:
mflr r23
andi. r24,r23,0x3f00 /* get vector offset */
stw r24,TRAP(r1)
- addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
- cmplw 0,r1,r2
- cmplw 1,r1,r24
- crand 1,1,4
- bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
lwz r24,0(r23) /* virtual address of handler */
lwz r23,4(r23) /* where to go when done */
li r20,MSR_KERNEL
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
index 5c5e37aabb0e..3d98d3fc7926 100644
--- a/arch/ppc/kernel/irq.c
+++ b/arch/ppc/kernel/irq.c
@@ -586,8 +586,6 @@ atomic_t global_bh_count;
static void show(char * str)
{
- int i;
- unsigned long *stack;
int cpu = smp_processor_id();
printk("\n%s, CPU %d:\n", str, cpu);
@@ -598,13 +596,6 @@ static void show(char * str)
atomic_read(&global_bh_count),
local_bh_count(0),
local_bh_count(1));
- stack = (unsigned long *) &str;
- for (i = 40; i ; i--) {
- unsigned long x = *++stack;
- if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
- printk("<[%08lx]> ", x);
- }
- }
}
static inline void wait_on_bh(void)
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 6e01a5c36d52..155e510dd3d0 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -25,6 +25,7 @@
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
+#include <asm/thread_info.h>
#include "ppc_defs.h"
.text
@@ -375,7 +376,8 @@ _GLOBAL(_tlbia)
SYNC
lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l
- lwz r8,CPU(r2)
+ rlwinm r8,r1,0,0,18
+ lwz r8,TI_CPU(r8)
oris r8,r8,10
10: lwarx r7,0,r9
cmpi 0,r7,0
@@ -420,7 +422,8 @@ _GLOBAL(_tlbie)
SYNC
lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l
- lwz r8,CPU(r2)
+ rlwinm r8,r1,0,0,18
+ lwz r8,TI_CPU(r8)
oris r8,r8,11
10: lwarx r7,0,r9
cmpi 0,r7,0
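The lwz r8,CPU(r2) -> rlwinm/lwz r8,TI_CPU(r8) pattern in the two hunks above (and in the matching mm/hashtable.S hunks further down) replaces the old per-task cpu field with the one in thread_info, located directly from the stack pointer. A C sketch of what those two instructions compute, assuming the 8 KB PPC32 THREAD_SIZE implied by the mask (illustrative only, not code from the patch):

/*
 * rlwinm r8,r1,0,0,18 keeps the upper 19 bits of the stack pointer,
 * i.e. clears the low 13 bits, rounding r1 down to the 8 KB-aligned
 * thread_info at the base of the kernel stack; TI_CPU is then the
 * offset of the cpu field within struct thread_info.
 */
static inline int cpu_from_stack_pointer(unsigned long sp)
{
        struct thread_info *ti = (struct thread_info *)(sp & ~0x1fffUL);
        return ti->cpu;          /* lwz r8,TI_CPU(r8) */
}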
diff --git a/arch/ppc/kernel/mk_defs.c b/arch/ppc/kernel/mk_defs.c
index a1c86e3ca362..a896dc476689 100644
--- a/arch/ppc/kernel/mk_defs.c
+++ b/arch/ppc/kernel/mk_defs.c
@@ -42,19 +42,13 @@
int
main(void)
{
- DEFINE(THREAD_SIZE, THREAD_SIZE);
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(STATE, offsetof(struct task_struct, state));
DEFINE(THREAD, offsetof(struct task_struct, thread));
+ DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
DEFINE(MM, offsetof(struct task_struct, mm));
- DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
- DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
- DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 05c09c56564e..df58f091cb5e 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -197,9 +197,7 @@ dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
return 1;
}
-void
-_switch_to(struct task_struct *prev, struct task_struct *new,
- struct task_struct **last)
+void switch_to(struct task_struct *prev, struct task_struct *new)
{
struct thread_struct *new_thread, *old_thread;
unsigned long s;
@@ -221,7 +219,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
* every switch, just a save.
* -- Cort
*/
- if ( prev->thread.regs && (prev->thread.regs->msr & MSR_FP) )
+ if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
/*
@@ -240,8 +238,6 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
#endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */
- current_set[smp_processor_id()] = new;
-
/* Avoid the trap. On smp this never happens since
* we don't set last_task_used_altivec -- Cort
*/
@@ -249,7 +245,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
new->thread.regs->msr |= MSR_VEC;
new_thread = &new->thread;
old_thread = &current->thread;
- *last = _switch(old_thread, new_thread);
+ _switch(old_thread, new_thread);
__restore_flags(s);
}
@@ -282,7 +278,7 @@ void show_regs(struct pt_regs * regs)
#endif
#ifdef CONFIG_SMP
- printk(" CPU: %d", current->processor);
+ printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */
printk("\n");
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 496e82920f50..2ccc28038858 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -162,7 +162,7 @@ int show_cpuinfo(struct seq_file *m, void *v)
return 0;
pvr = cpu_data[i].pvr;
lpj = cpu_data[i].loops_per_jiffy;
- seq_printf(m, "processor\t: %lu\n", i);
+ seq_printf(m, "processor\t: %d\n", i);
#else
pvr = mfspr(PVR);
lpj = loops_per_jiffy;
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 221fee33e251..fddffbe59d83 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -37,6 +37,7 @@
#include <asm/smp.h>
#include <asm/residual.h>
#include <asm/time.h>
+#include <asm/thread_info.h>
int smp_threads_ready;
volatile int smp_commenced;
@@ -49,11 +50,12 @@ atomic_t ipi_sent;
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
-cycles_t cacheflush_time;
+unsigned long cache_decay_ticks;
static int max_cpus __initdata = NR_CPUS;
unsigned long cpu_online_map;
int smp_hw_index[NR_CPUS];
static struct smp_ops_t *smp_ops;
+struct thread_info *secondary_ti;
/* all cpu mappings are 1-1 -- Cort */
volatile unsigned long cpu_callin_map[NR_CPUS];
@@ -66,6 +68,8 @@ int start_secondary(void *);
extern int cpu_idle(void *unused);
void smp_call_function_interrupt(void);
void smp_message_pass(int target, int msg, unsigned long data, int wait);
+static int __smp_call_function(void (*func) (void *info), void *info,
+ int wait, int target);
#ifdef CONFIG_PPC_ISERIES
extern void smp_iSeries_space_timers( unsigned nr );
@@ -108,7 +112,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
smp_call_function_interrupt();
break;
case PPC_MSG_RESCHEDULE:
- current->work.need_resched = 1;
+ set_need_resched();
break;
case PPC_MSG_INVALIDATE_TLB:
_tlbia();
@@ -192,8 +196,8 @@ static struct call_data_struct {
* in the system.
*/
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
- int wait)
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
/*
* [SUMMARY] Run a function on all other CPUs.
* <func> The function to run. This must be fast and non-blocking.
@@ -207,12 +211,23 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
* hardware interrupt handler, you may call it from a bottom half handler.
*/
{
+ if (smp_num_cpus <= 1)
+ return 0;
+ return __smp_call_function(func, info, wait, MSG_ALL_BUT_SELF);
+}
+
+static int __smp_call_function(void (*func) (void *info), void *info,
+ int wait, int target)
+{
struct call_data_struct data;
- int ret = -1, cpus = smp_num_cpus-1;
+ int ret = -1;
int timeout;
+ int ncpus = 1;
- if (!cpus)
- return 0;
+ if (target == MSG_ALL_BUT_SELF)
+ ncpus = smp_num_cpus - 1;
+ else if (target == MSG_ALL)
+ ncpus = smp_num_cpus;
data.func = func;
data.info = info;
@@ -224,11 +239,11 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
spin_lock_bh(&call_lock);
call_data = &data;
/* Send a message to all other CPUs and wait for them to respond */
- smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION, 0, 0);
+ smp_message_pass(target, PPC_MSG_CALL_FUNCTION, 0, 0);
/* Wait for response */
timeout = 1000000;
- while (atomic_read(&data.started) != cpus) {
+ while (atomic_read(&data.started) != ncpus) {
if (--timeout == 0) {
printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
smp_processor_id(), atomic_read(&data.started));
@@ -240,7 +255,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
if (wait) {
timeout = 1000000;
- while (atomic_read(&data.finished) != cpus) {
+ while (atomic_read(&data.finished) != ncpus) {
if (--timeout == 0) {
printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
@@ -276,9 +291,28 @@ void smp_call_function_interrupt(void)
atomic_inc(&call_data->finished);
}
+/*
+ * Task migration callback.
+ */
+void smp_task_migration_interrupt(void *new_task)
+{
+ task_t *p;
+
+ p = new_task;
+ sched_task_migrated(p);
+}
+
+/*
+ * This function sends a 'task migration' IPI to another CPU.
+ * Must be called from syscall contexts, with interrupts *enabled*.
+ */
+void smp_migrate_task(int cpu, task_t *p)
+{
+ __smp_call_function(smp_task_migration_interrupt, p, 0, cpu);
+}
+
void __init smp_boot_cpus(void)
{
- extern struct task_struct *current_set[NR_CPUS];
int i, cpu_nr;
struct task_struct *p;
@@ -292,7 +326,6 @@ void __init smp_boot_cpus(void)
* cpu 0, the master -- Cort
*/
cpu_callin_map[0] = 1;
- current->cpu = 0;
for (i = 0; i < NR_CPUS; i++) {
prof_counter[i] = 1;
@@ -300,10 +333,9 @@ void __init smp_boot_cpus(void)
}
/*
- * XXX very rough, assumes 20 bus cycles to read a cache line,
- * timebase increments every 4 bus cycles, 32kB L1 data cache.
+ * XXX very rough.
*/
- cacheflush_time = 5 * 1024;
+ cache_decay_ticks = HZ/100;
smp_ops = ppc_md.smp_ops;
if (smp_ops == NULL) {
@@ -311,7 +343,7 @@ void __init smp_boot_cpus(void)
return;
}
- /* Probe arch for CPUs */
+ /* Probe platform for CPUs */
cpu_nr = smp_ops->probe();
/*
@@ -338,9 +370,8 @@ void __init smp_boot_cpus(void)
init_idle(p, i);
unhash_process(p);
- p->cpu = i;
- p->cpus_allowed = 1 << i; /* we schedule the first task manually */
- current_set[i] = p;
+ secondary_ti = p->thread_info;
+ p->thread_info->cpu = i;
/*
* There was a cache flush loop here to flush the cache
@@ -357,11 +388,10 @@ void __init smp_boot_cpus(void)
* use this value that I found through experimentation.
* -- Cort
*/
- for ( c = 1000; c && !cpu_callin_map[i] ; c-- )
+ for (c = 1000; c && !cpu_callin_map[i]; c--)
udelay(100);
- if ( cpu_callin_map[i] )
- {
+ if (cpu_callin_map[i]) {
char buf[32];
sprintf(buf, "found cpu %d", i);
if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
@@ -488,7 +518,7 @@ void __init smp_commence(void)
void __init smp_callin(void)
{
- int cpu = current->processor;
+ int cpu = smp_processor_id();
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
@@ -505,7 +535,7 @@ void __init smp_callin(void)
*/
cpu_online_map |= 1UL << smp_processor_id();
- while(!smp_commenced)
+ while (!smp_commenced)
barrier();
/* see smp_commence for more info */
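The smp.c changes above split the old broadcast-only smp_call_function() into a thin wrapper around __smp_call_function(), which takes an explicit message target so a single CPU can be addressed; smp_migrate_task() uses that path to deliver the new task-migration IPI. A rough sketch of the resulting call paths (the caller is hypothetical; only the called functions come from the diff):

/* Illustrative caller, not part of the patch. */
static void example_callers(void (*func)(void *), void *info,
                            int cpu, task_t *p)
{
        /* Broadcast, as before: run func on every other CPU and wait. */
        smp_call_function(func, info, 0 /* nonatomic, unused */, 1 /* wait */);
        /* internally: __smp_call_function(func, info, 1, MSG_ALL_BUT_SELF) */

        /* New directed path: tell CPU 'cpu' to pick up migrated task p.
         * Per the comment in smp.c, call from syscall context with
         * interrupts enabled. */
        smp_migrate_task(cpu, p);
        /* internally: __smp_call_function(smp_task_migration_interrupt,
         *                                 p, 0, cpu) */
}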
diff --git a/arch/ppc/lib/locks.c b/arch/ppc/lib/locks.c
index 1960cd7c0817..ab3a87e3cdfb 100644
--- a/arch/ppc/lib/locks.c
+++ b/arch/ppc/lib/locks.c
@@ -48,7 +48,7 @@ static unsigned long __spin_trylock(volatile unsigned long *lock)
return ret;
}
-void _spin_lock(spinlock_t *lock)
+void _raw_spin_lock(spinlock_t *lock)
{
int cpu = smp_processor_id();
unsigned int stuck = INIT_STUCK;
@@ -69,7 +69,7 @@ void _spin_lock(spinlock_t *lock)
lock->owner_cpu = cpu;
}
-int spin_trylock(spinlock_t *lock)
+int _raw_spin_trylock(spinlock_t *lock)
{
if (__spin_trylock(&lock->lock))
return 0;
@@ -78,7 +78,7 @@ int spin_trylock(spinlock_t *lock)
return 1;
}
-void _spin_unlock(spinlock_t *lp)
+void _raw_spin_unlock(spinlock_t *lp)
{
if ( !lp->lock )
printk("_spin_unlock(%p): no lock cpu %d curr PC %p %s/%d\n",
@@ -99,7 +99,7 @@ void _spin_unlock(spinlock_t *lp)
* with the high bit (sign) being the "write" bit.
* -- Cort
*/
-void _read_lock(rwlock_t *rw)
+void _raw_read_lock(rwlock_t *rw)
{
unsigned long stuck = INIT_STUCK;
int cpu = smp_processor_id();
@@ -126,7 +126,7 @@ again:
wmb();
}
-void _read_unlock(rwlock_t *rw)
+void _raw_read_unlock(rwlock_t *rw)
{
if ( rw->lock == 0 )
printk("_read_unlock(): %s/%d (nip %08lX) lock %lx\n",
@@ -136,7 +136,7 @@ void _read_unlock(rwlock_t *rw)
atomic_dec((atomic_t *) &(rw)->lock);
}
-void _write_lock(rwlock_t *rw)
+void _raw_write_lock(rwlock_t *rw)
{
unsigned long stuck = INIT_STUCK;
int cpu = smp_processor_id();
@@ -176,7 +176,7 @@ again:
wmb();
}
-void _write_unlock(rwlock_t *rw)
+void _raw_write_unlock(rwlock_t *rw)
{
if ( !(rw->lock & (1<<31)) )
printk("_write_lock(): %s/%d (nip %08lX) lock %lx\n",
diff --git a/arch/ppc/mm/hashtable.S b/arch/ppc/mm/hashtable.S
index 2b934dc136c0..889588bcf2cc 100644
--- a/arch/ppc/mm/hashtable.S
+++ b/arch/ppc/mm/hashtable.S
@@ -32,6 +32,7 @@
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
+#include <asm/thread_info.h>
#include <kernel/ppc_defs.h>
#ifdef CONFIG_SMP
@@ -63,9 +64,7 @@ hash_page:
#ifdef CONFIG_SMP
addis r2,r7,hash_table_lock@h
ori r2,r2,hash_table_lock@l
- mfspr r5,SPRG3
- lwz r0,CPU-THREAD(r5)
- oris r0,r0,0x0fff
+ lis r0,0x0fff
b 10f
11: lwz r6,0(r2)
cmpwi 0,r6,0
@@ -215,8 +214,9 @@ _GLOBAL(add_hash_page)
#ifdef CONFIG_SMP
lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l
- lwz r8,CPU(r2)
- oris r8,r8,10
+ rlwinm r8,r1,0,0,18
+ lwz r8,TI_CPU(r8)
+ oris r8,r8,12
10: lwarx r7,0,r9
cmpi 0,r7,0
bne- 11f
@@ -511,7 +511,8 @@ _GLOBAL(flush_hash_page)
#ifdef CONFIG_SMP
lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l
- lwz r8,CPU(r2)
+ rlwinm r8,r1,0,0,18
+ lwz r8,TI_CPU(r8)
oris r8,r8,9
10: lwarx r7,0,r9
cmpi 0,r7,0
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index 6ee19803fe62..e91a9761b745 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -135,7 +135,6 @@ void show_mem(void)
{
int i,free = 0,total = 0,reserved = 0;
int shared = 0, cached = 0;
- struct task_struct *p;
int highmem = 0;
printk("Mem-info:\n");
@@ -153,7 +152,7 @@ void show_mem(void)
else if (!page_count(mem_map+i))
free++;
else
- shared += atomic_read(&mem_map[i].count) - 1;
+ shared += page_count(mem_map+i) - 1;
}
printk("%d pages of RAM\n",total);
printk("%d pages of HIGHMEM\n", highmem);
@@ -163,49 +162,6 @@ void show_mem(void)
printk("%d pages swap cached\n",cached);
printk("%d pages in page table cache\n",(int)pgtable_cache_size);
show_buffers();
- printk("%-8s %3s %8s %8s %8s %9s %8s", "Process", "Pid",
- "Ctx", "Ctx<<4", "Last Sys", "pc", "task");
-#ifdef CONFIG_SMP
- printk(" %3s", "CPU");
-#endif /* CONFIG_SMP */
- printk("\n");
- for_each_task(p)
- {
- printk("%-8.8s %3d %8ld %8ld %8ld %c%08lx %08lx ",
- p->comm,p->pid,
- (p->mm)?p->mm->context:0,
- (p->mm)?(p->mm->context<<4):0,
- p->thread.last_syscall,
- (p->thread.regs)?user_mode(p->thread.regs) ? 'u' : 'k' : '?',
- (p->thread.regs)?p->thread.regs->nip:0,
- (ulong)p);
- {
- int iscur = 0;
-#ifdef CONFIG_SMP
- printk("%3d ", p->processor);
- if ( (p->processor != NO_PROC_ID) &&
- (p == current_set[p->processor]) )
- {
- iscur = 1;
- printk("current");
- }
-#else
- if ( p == current )
- {
- iscur = 1;
- printk("current");
- }
-
- if ( p == last_task_used_math )
- {
- if ( iscur )
- printk(",");
- printk("last math");
- }
-#endif /* CONFIG_SMP */
- printk("\n");
- }
- }
}
void si_meminfo(struct sysinfo *val)