Diffstat (limited to 'arch/ppc/kernel')
 arch/ppc/kernel/cputable.c   |   6
 arch/ppc/kernel/entry.S      |  38
 arch/ppc/kernel/head.S       |  27
 arch/ppc/kernel/head_4xx.S   | 135
 arch/ppc/kernel/l2cr.S       |   2
 arch/ppc/kernel/misc.S       |   8
 arch/ppc/kernel/mk_defs.c    |   2
 arch/ppc/kernel/open_pic.c   |  62
 arch/ppc/kernel/pci.c        |   9
 arch/ppc/kernel/ppc4xx_pic.c | 145
 arch/ppc/kernel/ppc_ksyms.c  |   4
 arch/ppc/kernel/process.c    |   2
 arch/ppc/kernel/prom.c       |  39
 arch/ppc/kernel/syscalls.c   |   2
 arch/ppc/kernel/traps.c      |   2
 15 files changed, 371 insertions(+), 112 deletions(-)
diff --git a/arch/ppc/kernel/cputable.c b/arch/ppc/kernel/cputable.c
index 2f948b3d6155..729e0bac40a4 100644
--- a/arch/ppc/kernel/cputable.c
+++ b/arch/ppc/kernel/cputable.c
@@ -204,7 +204,7 @@ struct cpu_spec cpu_specs[] = {
{ /* 7450 2.1 */
0xffffffff, 0x80000201, "7450",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
- CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
32, 32,
@@ -213,7 +213,7 @@ struct cpu_spec cpu_specs[] = {
{ /* 7450 2.3 and newer */
0xffff0000, 0x80000000, "7450",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
- CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
32, 32,
@@ -222,7 +222,7 @@ struct cpu_spec cpu_specs[] = {
{ /* 7455 */
0xffff0000, 0x80010000, "7455",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
- CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC,
32, 32,
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 1414c7e02b91..51bad8f074cb 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -164,7 +164,6 @@ ret_from_syscall:
andi. r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
bne- syscall_exit_work
syscall_exit_cont:
- PPC405_ERR77(0,r1)
stwcx. r0,0,r1 /* to clear the reservation */
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
@@ -568,6 +567,7 @@ exc_exit_start:
exc_exit_restart_end:
PPC405_ERR77_SYNC
rfi
+ b . /* prevent prefetch past rfi */
crit_exc_exit:
mtcrf 0xff,r10
/* avoid any possible TLB misses here by turning off MSR.DR, we
@@ -576,18 +576,40 @@ crit_exc_exit:
mtmsr r10
isync
tophys(r1, r1)
- lwz r9,_SRR0(r1)
- lwz r10,_SRR1(r1)
- mtspr SRR0,r9
+ lwz r9,_DEAR(r1)
+ lwz r10,_ESR(r1)
+ mtspr SPRN_DEAR,r9
+ mtspr SPRN_ESR,r10
lwz r11,_NIP(r1)
- mtspr SRR1,r10
lwz r12,_MSR(r1)
mtspr SRR2,r11
mtspr SRR3,r12
- REST_4GPRS(9, r1)
+ lwz r9,GPR9(r1)
+ lwz r12,GPR12(r1)
+ lwz r10,crit_sprg0@l(0)
+ mtspr SPRN_SPRG0,r10
+ lwz r10,crit_sprg1@l(0)
+ mtspr SPRN_SPRG1,r10
+ lwz r10,crit_sprg4@l(0)
+ mtspr SPRN_SPRG4,r10
+ lwz r10,crit_sprg5@l(0)
+ mtspr SPRN_SPRG5,r10
+ lwz r10,crit_sprg6@l(0)
+ mtspr SPRN_SPRG6,r10
+ lwz r10,crit_sprg7@l(0)
+ mtspr SPRN_SPRG7,r10
+ lwz r10,crit_srr0@l(0)
+ mtspr SRR0,r10
+ lwz r10,crit_srr1@l(0)
+ mtspr SRR1,r10
+ lwz r10,crit_pid@l(0)
+ mtspr SPRN_PID,r10
+ lwz r10,crit_r10@l(0)
+ lwz r11,crit_r11@l(0)
lwz r1,GPR1(r1)
PPC405_ERR77_SYNC
rfci
+ b . /* prevent prefetch past rfci */
#endif /* CONFIG_4xx */
recheck:
@@ -650,6 +672,10 @@ nonrecoverable:
mr r12,r11 /* restart at exc_exit_restart */
blr
3: /* OK, we can't recover, kill this process */
+ /* but the 601 doesn't implement the RI bit, so assume it's OK */
+BEGIN_FTR_SECTION
+ blr
+END_FTR_SECTION_IFSET(CPU_FTR_601)
lwz r3,TRAP(r1)
andi. r0,r3,1
beq 4f
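
For reference, the crit_exc_exit path above reloads its state from a fixed low-memory save area (crit_r10 .. crit_srr1) that the 4xx CRITICAL_EXCEPTION_PROLOG fills in head_4xx.S. A minimal C sketch of that layout, purely illustrative and not part of the patch (each field is one 32-bit word, in the order the labels are declared):

    /* Illustrative mirror of the crit_* words defined in head_4xx.S. */
    struct crit_save_area {
            unsigned long r10, r11;         /* scratch GPRs used by the prolog   */
            unsigned long sprg0, sprg1;     /* SPRGs that a normal exception     */
            unsigned long sprg4, sprg5;     /*   entry may clobber while the     */
            unsigned long sprg6, sprg7;     /*   critical handler runs           */
            unsigned long pid;              /* SPRN_PID of the interrupted code  */
            unsigned long srr0, srr1;       /* normal-exception SRR0/SRR1        */
    };

Restoring SPRG0-SPRG7, PID and SRR0/SRR1 before the rfci is what lets a critical interrupt arrive in the middle of a normal exception handler without corrupting the state that handler was still using.
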
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index cdd0807b90ef..175a7983959a 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -832,7 +832,12 @@ fast_exception_return:
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
-3: li r10,0
+/* but the 601 doesn't implement the RI bit, so assume it's OK */
+3:
+BEGIN_FTR_SECTION
+ b 2b
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+ li r10,-1
stw r10,TRAP(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
li r10,MSR_KERNEL
@@ -887,9 +892,9 @@ load_up_altivec:
add r4,r4,r6
addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
SAVE_32VR(0,r10,r4)
- MFVSCR(vr0)
+ mfvscr vr0
li r10,THREAD_VSCR
- STVX(vr0,r10,r4)
+ stvx vr0,r10,r4
lwz r5,PT_REGS(r4)
add r5,r5,r6
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -902,8 +907,8 @@ load_up_altivec:
oris r9,r9,MSR_VEC@h
mfspr r5,SPRG3 /* current task's THREAD (phys) */
li r10,THREAD_VSCR
- LVX(vr0,r10,r5)
- MTVSCR(vr0)
+ lvx vr0,r10,r5
+ mtvscr vr0
REST_32VR(0,r10,r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
@@ -951,9 +956,9 @@ giveup_altivec:
lwz r5,PT_REGS(r3)
cmpi 0,r5,0
SAVE_32VR(0, r4, r3)
- MFVSCR(vr0)
+ mfvscr vr0
li r4,THREAD_VSCR
- STVX(vr0, r4, r3)
+ stvx vr0,r4,r3
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_VEC@h
@@ -1272,7 +1277,11 @@ _GLOBAL(__setup_cpu_generic)
setup_common_caches:
mfspr r11,HID0
andi. r0,r11,HID0_DCE
+#ifdef CONFIG_DCACHE_DISABLE
+ ori r11,r11,HID0_ICE
+#else
ori r11,r11,HID0_ICE|HID0_DCE
+#endif
ori r8,r11,HID0_ICFI
bne 1f /* don't invalidate the D-cache */
ori r8,r8,HID0_DCI /* unless it wasn't enabled */
@@ -1362,7 +1371,7 @@ setup_7450_23_hid0:
isync
blr
-/* 7450
+/* 7455
* Enable Store Gathering (SGE), Branch Folding (FOLD)
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
@@ -1546,7 +1555,7 @@ _GLOBAL(set_context)
#endif
li r4,0
BEGIN_FTR_SECTION
- dssall 0
+ dssall
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3:
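
The 601 special cases in this file and in entry.S both come down to the same test: an exception is only treated as recoverable if MSR[RI] was set in the interrupted context, and the 601 simply has no RI bit. A hedged C-level sketch of what the feature sections implement (cpu_feature_601() and mark_nonrecoverable() are illustrative names, not symbols from this patch):

    /* Pseudo-C rendering of the recoverability check skipped on the 601. */
    if (!cpu_feature_601() && !(regs->msr & MSR_RI))
            mark_nonrecoverable(regs);  /* flag the frame (TRAP = -1) and panic */
    /* On the 601 there is no RI bit, so the exception is assumed recoverable. */
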
diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S
index 69e9398df14b..beae3cee8672 100644
--- a/arch/ppc/kernel/head_4xx.S
+++ b/arch/ppc/kernel/head_4xx.S
@@ -82,6 +82,7 @@ turn_on_mmu:
mtspr SRR0,r0
SYNC
rfi /* enables MMU */
+ b . /* prevent prefetch past rfi */
/*
* This area is used for temporarily saving registers during the
@@ -89,7 +90,28 @@ turn_on_mmu:
*/
. = 0xc0
crit_save:
- .space 8
+_GLOBAL(crit_r10)
+ .space 4
+_GLOBAL(crit_r11)
+ .space 4
+_GLOBAL(crit_sprg0)
+ .space 4
+_GLOBAL(crit_sprg1)
+ .space 4
+_GLOBAL(crit_sprg4)
+ .space 4
+_GLOBAL(crit_sprg5)
+ .space 4
+_GLOBAL(crit_sprg6)
+ .space 4
+_GLOBAL(crit_sprg7)
+ .space 4
+_GLOBAL(crit_pid)
+ .space 4
+_GLOBAL(crit_srr0)
+ .space 4
+_GLOBAL(crit_srr1)
+ .space 4
/*
* Exception vector entry code. This code runs with address translation
@@ -140,6 +162,59 @@ crit_save:
* This is OK since we don't support SMP on these processors.
*/
#define CRITICAL_EXCEPTION_PROLOG \
+ stw r10,crit_r10@l(0); /* save two registers to work with */\
+ stw r11,crit_r11@l(0); \
+ mfspr r10,SPRG0; \
+ stw r10,crit_sprg0@l(0); \
+ mfspr r10,SPRG1; \
+ stw r10,crit_sprg1@l(0); \
+ mfspr r10,SPRG4; \
+ stw r10,crit_sprg4@l(0); \
+ mfspr r10,SPRG5; \
+ stw r10,crit_sprg5@l(0); \
+ mfspr r10,SPRG6; \
+ stw r10,crit_sprg6@l(0); \
+ mfspr r10,SPRG7; \
+ stw r10,crit_sprg7@l(0); \
+ mfspr r10,SPRN_PID; \
+ stw r10,crit_pid@l(0); \
+ mfspr r10,SRR0; \
+ stw r10,crit_srr0@l(0); \
+ mfspr r10,SRR1; \
+ stw r10,crit_srr1@l(0); \
+ mfcr r10; /* save CR in r10 for now */\
+ mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
+ andi. r11,r11,MSR_PR; \
+ lis r11,critical_stack_top@h; \
+ ori r11,r11,critical_stack_top@l; \
+ beq 1f; \
+ /* COMING FROM USER MODE */ \
+ mfspr r11,SPRG3; /* if from user, start at top of */\
+ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
+ addi r11,r11,THREAD_SIZE; \
+1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
+ tophys(r11,r11); \
+ stw r10,_CCR(r11); /* save various registers */\
+ stw r12,GPR12(r11); \
+ stw r9,GPR9(r11); \
+ mflr r10; \
+ stw r10,_LINK(r11); \
+ mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
+ stw r12,_DEAR(r11); /* since they may have had stuff */\
+ mfspr r9,SPRN_ESR; /* in them at the point where the */\
+ stw r9,_ESR(r11); /* exception was taken */\
+ mfspr r12,SRR2; \
+ stw r1,GPR1(r11); \
+ mfspr r9,SRR3; \
+ stw r1,0(r11); \
+ tovirt(r1,r11); \
+ rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
+ stw r0,GPR0(r11); \
+ SAVE_4GPRS(3, r11); \
+ SAVE_2GPRS(7, r11)
+
+#if 0
+#define CRITICAL_EXCEPTION_PROLOG \
stw r10,crit_save@l(0); /* save two registers to work with */\
stw r11,4+crit_save@l(0); \
mfcr r10; /* save CR in r10 for now */\
@@ -173,6 +248,7 @@ crit_save:
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
+#endif
/*
* Exception vectors.
@@ -197,13 +273,14 @@ label:
START_EXCEPTION(n, label); \
CRITICAL_EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(hdlr, n+2, NOCOPY, transfer_to_handler_full, \
+ EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
+ NOCOPY, transfer_to_handler_full, \
ret_from_except_full)
-#define EXC_XFER_TEMPLATE(hdlr, trap, copyee, tfer, ret) \
+#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
li r10,trap; \
stw r10,TRAP(r11); \
- li r10,MSR_KERNEL; \
+ li r10,msr; \
copyee(r10, r9); \
bl tfer; \
.long hdlr; \
@@ -213,19 +290,19 @@ label:
#define NOCOPY(d, s)
#define EXC_XFER_STD(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, NOCOPY, transfer_to_handler_full, \
+ EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, NOCOPY, transfer_to_handler, \
+ EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
ret_from_except)
#define EXC_XFER_EE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, COPY_EE, transfer_to_handler_full, \
+ EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_EE_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, COPY_EE, transfer_to_handler, \
+ EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
ret_from_except)
@@ -347,6 +424,7 @@ label:
mfspr r10, SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
+ b . /* prevent prefetch past rfi */
2:
/* The bailout. Restore registers to pre-exception conditions
@@ -615,6 +693,7 @@ label:
mfspr r10, SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
+ b . /* prevent prefetch past rfi */
2:
/* The bailout. Restore registers to pre-exception conditions
@@ -721,6 +800,7 @@ check_single_step_in_exception:
sync
rfci /* return to the exception handler */
+ b . /* prevent prefetch past rfci */

2:
mtcrf 0xff,r10 /* restore registers */
@@ -746,31 +826,18 @@ check_single_step_in_exception:
* Actually, it will fit now, but oh well.....a common place
* to load the TLB.
*/
+tlb_4xx_index:
+ .long 0
finish_tlb_load:
-
- /* Since it has a unified TLB, and we can take data faults on
- * instruction pages by copying data, we have to check if the
- * EPN is already in the TLB.
- */
- tlbsx. r9, 0, r10
- beq 6f
-
/* load the next available TLB index.
*/
- lis r12, tlb_4xx_index@h
- ori r12, r12, tlb_4xx_index@l
- tophys(r12, r12)
- lwz r9, 0(r12)
+ lwz r9, tlb_4xx_index@l(0)
addi r9, r9, 1
-#ifdef CONFIG_PIN_TLB
cmpwi 0, r9, 61 /* reserve entries 62, 63 for kernel */
ble 7f
li r9, 0
7:
-#else
- andi. r9, r9, (PPC4XX_TLB_SIZE-1)
-#endif
- stw r9, 0(r12)
+ stw r9, tlb_4xx_index@l(0)
6:
tlbwe r11, r9, TLB_DATA /* Load TLB LO */
@@ -804,6 +871,7 @@ finish_tlb_load:
mfspr r10, SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
+ b . /* prevent prefetch past rfi */
/* extern void giveup_fpu(struct task_struct *prev)
*
@@ -857,14 +925,11 @@ start_here:
mtspr SRR0,r4
mtspr SRR1,r3
rfi
+ b . /* prevent prefetch past rfi */
/* Load up the kernel context */
2:
SYNC /* Force all PTE updates to finish */
-#ifndef CONFIG_PIN_TLB
- tlbia /* Clear all TLB entries */
- sync /* wait for tlbia/tlbie to finish */
-#endif
/* set up the PTE pointers for the Abatron bdiGDB.
*/
@@ -883,6 +948,7 @@ start_here:
mtspr SRR0,r3
mtspr SRR1,r4
rfi /* enable MMU and jump to start_kernel */
+ b . /* prevent prefetch past rfi */
/* Set up the initial MMU state so we can do the first level of
* kernel initialization. This maps the first 16 MBytes of memory 1:1
@@ -956,7 +1022,10 @@ _GLOBAL(set_context)
lwz r5, 0xf0(r5)
stw r4, 0x4(r5)
#endif
+ sync
mtspr SPRN_PID,r3
+ isync /* Need an isync to flush shadow */
+ /* TLBs after changing PID */
blr
/* We put a few things here that have to be page-aligned. This stuff
@@ -969,6 +1038,14 @@ _GLOBAL(empty_zero_page)
_GLOBAL(swapper_pg_dir)
.space 4096
+
+/* Stack for handling critical exceptions from kernel mode */
+ .section .bss
+critical_stack_bottom:
+ .space 4096
+critical_stack_top:
+ .previous
+
/* This space gets a copy of optional info passed to us by the bootstrap
* which is used to pass parameters into the kernel like root=/dev/sda1, etc.
*/
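
The reworked finish_tlb_load above keeps a simple software round-robin pointer (tlb_4xx_index) for choosing which hardware TLB entry to overwrite, always skipping the last two entries so pinned kernel mappings survive. A minimal C sketch of that replacement policy, illustrative only (the real code runs in assembly with translation disabled):

    static unsigned int tlb_4xx_index;

    static unsigned int next_tlb_victim(void)
    {
            unsigned int idx = tlb_4xx_index + 1;

            if (idx > 61)           /* entries 62 and 63 are reserved for */
                    idx = 0;        /*   pinned kernel TLB entries        */
            tlb_4xx_index = idx;
            return idx;             /* entry to load with tlbwe           */
    }
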
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
index b5bdc1e18e23..dd50c3d50ea6 100644
--- a/arch/ppc/kernel/l2cr.S
+++ b/arch/ppc/kernel/l2cr.S
@@ -255,7 +255,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
isync
/* Stop DST streams */
- dssall 0
+ dssall
/* Get the current enable bit of the L3CR into r4 */
mfspr r4,SPRN_L3CR
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 8fbabdf5985e..f0fb7c7bb1ea 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -354,7 +354,7 @@ _GLOBAL(_nmask_and_or_msr)
* Flush MMU TLB
*/
_GLOBAL(_tlbia)
-#if defined(CONFIG_40x) && defined(CONFIG_PIN_TLB)
+#if defined(CONFIG_40x)
/* This needs to be coordinated with other pinning functions since
* we don't keep a memory location of number of entries to reduce
* cache pollution during these operations.
@@ -367,7 +367,7 @@ _GLOBAL(_tlbia)
cmpwi 0, r3, 61 /* reserve last two entries */
ble 1b
isync
-#else
+#else /* ! defined(CONFIG_40x) */
#if defined(CONFIG_SMP)
rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
@@ -401,7 +401,7 @@ _GLOBAL(_tlbia)
tlbia
sync
#endif /* CONFIG_SMP */
-#endif /* defined(CONFIG_40x) && defined(CONFIG_PIN_TLB) */
+#endif /* ! defined(CONFIG_40x) */
blr
/*
@@ -412,7 +412,7 @@ _GLOBAL(_tlbie)
tlbsx. r3, 0, r3
bne 10f
sync
- /* There are only 64 TLB entries, so r3 < 64, which means bit 25, is clear.
+ /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
* Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
* the TLB entry. */
tlbwe r3, r3, TLB_TAG
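
The _tlbie comment relies on PowerPC's big-endian bit numbering, where bit 0 is the most significant bit, so "bit 25" of a 32-bit word is 1 << (31 - 25) = 0x40 = 64. A tiny self-contained check of that arithmetic (PPC_BIT32 is a local helper defined here for illustration, not a macro from this patch):

    #include <assert.h>

    #define PPC_BIT32(n)  (1u << (31 - (n)))   /* IBM numbering: bit 0 = MSB */

    int main(void)
    {
            /* 64-entry TLB, so tlbsx. returns an index 0..63; V is bit 25. */
            assert(PPC_BIT32(25) == 64);
            for (unsigned int idx = 0; idx < 64; idx++)
                    assert((idx & PPC_BIT32(25)) == 0);   /* V always clear */
            return 0;
    }

Since the index itself always has the V bit clear, writing it back into TLB_TAG with tlbwe marks the entry invalid without needing an extra mask.
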
diff --git a/arch/ppc/kernel/mk_defs.c b/arch/ppc/kernel/mk_defs.c
index d4cce91f178c..99bfc3b087b8 100644
--- a/arch/ppc/kernel/mk_defs.c
+++ b/arch/ppc/kernel/mk_defs.c
@@ -112,8 +112,6 @@ main(void)
*/
DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
- DEFINE(_SRR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
- DEFINE(_SRR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
diff --git a/arch/ppc/kernel/open_pic.c b/arch/ppc/kernel/open_pic.c
index 97908db5f032..9727f59e8f95 100644
--- a/arch/ppc/kernel/open_pic.c
+++ b/arch/ppc/kernel/open_pic.c
@@ -302,7 +302,7 @@ void openpic_set_sources(int first_irq, int num_irqs, void *first_ISR)
ISR[i] = src;
}
-void __init openpic_init(int main_pic, int offset, int programmer_switch_irq)
+void __init openpic_init(int linux_irq_offset)
{
u_int t, i;
u_int timerfreq;
@@ -351,16 +351,13 @@ void __init openpic_init(int main_pic, int offset, int programmer_switch_irq)
printk("OpenPIC timer frequency is %d.%06d MHz\n",
timerfreq / 1000000, timerfreq % 1000000);
- if (!main_pic)
- return;
-
- open_pic_irq_offset = offset;
+ open_pic_irq_offset = linux_irq_offset;
/* Initialize timer interrupts */
if ( ppc_md.progress ) ppc_md.progress("openpic timer",0x3ba);
for (i = 0; i < OPENPIC_NUM_TIMERS; i++) {
/* Disabled, Priority 0 */
- openpic_inittimer(i, 0, OPENPIC_VEC_TIMER+i+offset);
+ openpic_inittimer(i, 0, OPENPIC_VEC_TIMER+i+linux_irq_offset);
/* No processor */
openpic_maptimer(i, 0);
}
@@ -370,10 +367,12 @@ void __init openpic_init(int main_pic, int offset, int programmer_switch_irq)
if ( ppc_md.progress ) ppc_md.progress("openpic ipi",0x3bb);
for (i = 0; i < OPENPIC_NUM_IPI; i++) {
/* Disabled, Priority 10..13 */
- openpic_initipi(i, 10+i, OPENPIC_VEC_IPI+i+offset);
+ openpic_initipi(i, 10+i, OPENPIC_VEC_IPI+i+linux_irq_offset);
/* IPIs are per-CPU */
- irq_desc[OPENPIC_VEC_IPI+i+offset].status |= IRQ_PER_CPU;
- irq_desc[OPENPIC_VEC_IPI+i+offset].handler = &open_pic_ipi;
+ irq_desc[OPENPIC_VEC_IPI+i+linux_irq_offset].status |=
+ IRQ_PER_CPU;
+ irq_desc[OPENPIC_VEC_IPI+i+linux_irq_offset].handler =
+ &open_pic_ipi;
}
#endif
@@ -384,15 +383,14 @@ void __init openpic_init(int main_pic, int offset, int programmer_switch_irq)
/* Init all external sources, including possibly the cascade. */
for (i = 0; i < NumSources; i++) {
- int pri, sense;
+ int sense;
if (ISR[i] == 0)
continue;
/* the bootloader may have left it enabled (bad !) */
- openpic_disable_irq(i+offset);
+ openpic_disable_irq(i+linux_irq_offset);
- pri = (i == programmer_switch_irq)? 9: 8;
/*
* We find the value from either the InitSenses table
* or assume a negative polarity level interrupt.
@@ -400,26 +398,27 @@ void __init openpic_init(int main_pic, int offset, int programmer_switch_irq)
sense = (i < OpenPIC_NumInitSenses)? OpenPIC_InitSenses[i]: 1;
if ((sense & IRQ_SENSE_MASK) == 1)
- irq_desc[i+offset].status = IRQ_LEVEL;
+ irq_desc[i+linux_irq_offset].status = IRQ_LEVEL;
- /* Enabled, Priority 8 or 9 */
- openpic_initirq(i, pri, i+offset, (sense & IRQ_POLARITY_MASK),
+ /* Enabled, Priority 8 */
+ openpic_initirq(i, 8, i + linux_irq_offset,
+ (sense & IRQ_POLARITY_MASK),
(sense & IRQ_SENSE_MASK));
/* Processor 0 */
openpic_mapirq(i, 1<<0, 0);
}
/* Init descriptors */
- for (i = offset; i < NumSources + offset; i++)
+ for (i = linux_irq_offset; i < NumSources + linux_irq_offset; i++)
irq_desc[i].handler = &open_pic;
/* Initialize the spurious interrupt */
if (ppc_md.progress) ppc_md.progress("openpic spurious",0x3bd);
- openpic_set_spurious(OPENPIC_VEC_SPURIOUS+offset);
+ openpic_set_spurious(OPENPIC_VEC_SPURIOUS+linux_irq_offset);
/* Initialize the cascade */
- if (offset) {
- if (request_irq(offset, no_action, SA_INTERRUPT,
+ if (linux_irq_offset) {
+ if (request_irq(linux_irq_offset, no_action, SA_INTERRUPT,
"82c59 cascade", NULL))
printk("Unable to get OpenPIC IRQ 0 for cascade\n");
}
@@ -653,6 +652,31 @@ static void __init openpic_maptimer(u_int timer, u_int cpumask)
physmask(cpumask));
}
+/*
+ * Initialize the interrupt source which will generate an NMI (and disable it).
+ *
+ * irq: The logical IRQ which generates an NMI.
+ */
+void __init
+openpic_init_nmi_irq(u_int irq)
+{
+ int sense;
+
+ /* If this wasn't given, assume a level, negative polarity interrupt. */
+ sense = (irq < OpenPIC_NumInitSenses) ? OpenPIC_InitSenses[irq] :
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE);
+
+ openpic_safe_writefield(&ISR[irq]->Vector_Priority,
+ OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK |
+ OPENPIC_SENSE_MASK | OPENPIC_POLARITY_MASK,
+ (9 << OPENPIC_PRIORITY_SHIFT) |
+ (irq + open_pic_irq_offset) |
+ ((sense & IRQ_POLARITY_MASK) ?
+ OPENPIC_POLARITY_POSITIVE :
+ OPENPIC_POLARITY_NEGATIVE) |
+ ((sense & IRQ_SENSE_MASK) ? OPENPIC_SENSE_LEVEL
+ : OPENPIC_SENSE_EDGE));
+}
/*
*
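
With this change openpic_init() takes only the Linux interrupt-number offset, and an NMI source is configured separately through the new openpic_init_nmi_irq(). A hypothetical board-setup fragment using the new interface (BOARD_IRQ_OFFSET and BOARD_NMI_IRQ are placeholder names, not symbols from this patch):

    static void myboard_init_IRQ(void)
    {
            /* Offset of the OpenPIC sources within irq_desc[]; non-zero when
             * an 8259 cascade occupies the first Linux IRQ numbers. */
            openpic_init(BOARD_IRQ_OFFSET);

            /* Program the NMI source at priority 9 and leave it disabled,
             * as described in the comment above openpic_init_nmi_irq(). */
            openpic_init_nmi_irq(BOARD_NMI_IRQ);
    }
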
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 1d20f1f4409a..a47335985996 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -923,6 +923,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
struct device_node *dev, int primary)
{
unsigned int *ranges, *prev;
+ unsigned int size;
int rlen = 0;
int memno = 0;
struct resource *res;
@@ -963,12 +964,16 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
while ((rlen -= np * sizeof(unsigned int)) >= 0) {
res = NULL;
+ size = ranges[na+4];
switch (ranges[0] >> 24) {
case 1: /* I/O space */
if (ranges[2] != 0)
break;
hose->io_base_phys = ranges[na+2];
- hose->io_base_virt = ioremap(ranges[na+2], ranges[na+4]);
+ /* limit I/O space to 16MB */
+ if (size > 0x01000000)
+ size = 0x01000000;
+ hose->io_base_virt = ioremap(ranges[na+2], size);
if (primary)
isa_io_base = (unsigned long) hose->io_base_virt;
res = &hose->io_resource;
@@ -997,7 +1002,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
}
if (res != NULL) {
res->name = dev->full_name;
- res->end = res->start + ranges[na+4] - 1;
+ res->end = res->start + size - 1;
res->parent = NULL;
res->sibling = NULL;
res->child = NULL;
diff --git a/arch/ppc/kernel/ppc4xx_pic.c b/arch/ppc/kernel/ppc4xx_pic.c
index ae96d1c722e9..6619898dbe97 100644
--- a/arch/ppc/kernel/ppc4xx_pic.c
+++ b/arch/ppc/kernel/ppc4xx_pic.c
@@ -15,10 +15,14 @@
* there are eight internal interrupts for the on-chip serial port
* (SPU), DMA controller, and JTAG controller.
*
- * The PowerPC 405 cores' Universal Interrupt Controller (UIC) has 32
- * possible interrupts as well. There are seven, configurable external
- * interrupt pins and there are 17 internal interrupts for the on-chip
- * serial port, DMA controller, on-chip Ethernet controller, PCI, etc.
+ * The PowerPC 405/440 cores' Universal Interrupt Controller (UIC) has
+ * 32 possible interrupts as well. Depending on the core and SoC
+ * implementation, a portion of the interrupts are used for on-chip
+ * peripherals and a portion of the interrupts are available to be
+ * configured for external devices generating interrupts.
+ *
+ * The PowerNP and 440GP (and most likely future implementations) have
+ * cascaded UICs.
*
*/
@@ -30,11 +34,9 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/irq.h>
-#include <asm/ibm4xx.h>
#include <asm/ppc4xx_pic.h>
/* Global Variables */
-
struct hw_interrupt_type *ppc4xx_pic;
/* Six of one, half dozen of the other....#ifdefs, separate files,
@@ -128,7 +130,11 @@ ppc403_aic_disable_and_ack(unsigned int irq)
mtdcr(DCRN_EXISR, (1 << (31 - bit)));
}
-#else /* !CONFIG_403 */
+#else
+
+#ifndef UIC1
+#define UIC1 UIC0
+#endif
static void
ppc405_uic_enable(unsigned int irq)
@@ -137,9 +143,18 @@ ppc405_uic_enable(unsigned int irq)
bit = irq & 0x1f;
word = irq >> 5;
-
+#ifdef UIC_DEBUG
+ printk("ppc405_uic_enable - irq %d word %d bit 0x%x\n",irq, word , bit);
+#endif
ppc_cached_irq_mask[word] |= 1 << (31 - bit);
- mtdcr(DCRN_UIC0_ER, ppc_cached_irq_mask[word]);
+ switch (word){
+ case 0:
+ mtdcr(DCRN_UIC_ER(UIC0), ppc_cached_irq_mask[word]);
+ break;
+ case 1:
+ mtdcr(DCRN_UIC_ER(UIC1), ppc_cached_irq_mask[word]);
+ break;
+ }
}
static void
@@ -149,9 +164,18 @@ ppc405_uic_disable(unsigned int irq)
bit = irq & 0x1f;
word = irq >> 5;
-
+#ifdef UIC_DEBUG
+ printk("ppc405_uic_disable - irq %d word %d bit 0x%x\n",irq, word , bit);
+#endif
ppc_cached_irq_mask[word] &= ~(1 << (31 - bit));
- mtdcr(DCRN_UIC0_ER, ppc_cached_irq_mask[word]);
+ switch (word){
+ case 0:
+ mtdcr(DCRN_UIC_ER(UIC0), ppc_cached_irq_mask[word]);
+ break;
+ case 1:
+ mtdcr(DCRN_UIC_ER(UIC1), ppc_cached_irq_mask[word]);
+ break;
+ }
}
static void
@@ -162,9 +186,20 @@ ppc405_uic_disable_and_ack(unsigned int irq)
bit = irq & 0x1f;
word = irq >> 5;
+#ifdef UIC_DEBUG
+printk("ppc405_uic_disable_and_ack - irq %d word %d bit 0x%x\n",irq, word , bit);
+#endif
ppc_cached_irq_mask[word] &= ~(1 << (31 - bit));
- mtdcr(DCRN_UIC0_ER, ppc_cached_irq_mask[word]);
- mtdcr(DCRN_UIC0_SR, (1 << (31 - bit)));
+ switch (word){
+ case 0:
+ mtdcr(DCRN_UIC_ER(UIC0), ppc_cached_irq_mask[word]);
+ mtdcr(DCRN_UIC_SR(UIC0), (1 << (31 - bit)));
+ break;
+ case 1:
+ mtdcr(DCRN_UIC_ER(UIC1), ppc_cached_irq_mask[word]);
+ mtdcr(DCRN_UIC_SR(UIC1), (1 << (31 - bit)));
+ break;
+ }
}
static void
@@ -176,23 +211,49 @@ ppc405_uic_end(unsigned int irq)
bit = irq & 0x1f;
word = irq >> 5;
- tr_bits = mfdcr(DCRN_UIC0_TR);
+#ifdef UIC_DEBUG
+ printk("ppc405_uic_end - irq %d word %d bit 0x%x\n",irq, word , bit);
+#endif
+
+ switch (word){
+ case 0:
+ tr_bits = mfdcr(DCRN_UIC_TR(UIC0));
+ break;
+ case 1:
+ tr_bits = mfdcr(DCRN_UIC_TR(UIC1));
+ break;
+ }
+
if ((tr_bits & (1 << (31 - bit))) == 0) {
/* level trigger */
- mtdcr(DCRN_UIC0_SR, 1 << (31 - bit));
+ switch (word){
+ case 0:
+ mtdcr(DCRN_UIC_SR(UIC0), 1 << (31 - bit));
+ break;
+ case 1:
+ mtdcr(DCRN_UIC_SR(UIC1), 1 << (31 - bit));
+ break;
+ }
}
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
ppc_cached_irq_mask[word] |= 1 << (31 - bit);
- mtdcr(DCRN_UIC0_ER, ppc_cached_irq_mask[word]);
+ switch (word){
+ case 0:
+ mtdcr(DCRN_UIC_ER(UIC0), ppc_cached_irq_mask[word]);
+ break;
+ case 1:
+ mtdcr(DCRN_UIC_ER(UIC1), ppc_cached_irq_mask[word]);
+ break;
+ }
}
}
static struct hw_interrupt_type ppc405_uic = {
-#if defined (CONFIG_405GP)
- "405GP UIC",
+#if (NR_UICS == 1)
+ "IBM UIC",
#else
- "NP405 UIC",
+ "IBM UIC Cascade",
#endif
NULL,
NULL,
@@ -206,16 +267,27 @@ static struct hw_interrupt_type ppc405_uic = {
int
ppc405_pic_get_irq(struct pt_regs *regs)
{
- int irq;
+ int irq, cas_irq;
unsigned long bits;
-
+ cas_irq = 0;
/*
* Only report the status of those interrupts that are actually
* enabled.
*/
- bits = mfdcr(DCRN_UIC0_MSR);
-
+ bits = mfdcr(DCRN_UIC_MSR(UIC0));
+
+#if (NR_UICS > 1)
+ if (bits & UIC_CASCADE_MASK){
+ bits = mfdcr(DCRN_UIC_MSR(UIC1));
+ cas_irq = 32 - ffs(bits);
+ irq = 32 + cas_irq;
+ } else {
+ irq = 32 - ffs(bits);
+ if (irq == 32)
+ irq= -1;
+ }
+#else
/*
* Walk through the interrupts from highest priority to lowest, and
* report the first pending interrupt found.
@@ -223,10 +295,14 @@ ppc405_pic_get_irq(struct pt_regs *regs)
* result from 32.
*/
irq = 32 - ffs(bits);
-
- if (irq == NR_AIC_IRQS)
+#endif
+ if (irq == (NR_UIC_IRQS * NR_UICS))
irq = -1;
+#ifdef UIC_DEBUG
+printk("ppc405_pic_get_irq - irq %d bit 0x%x\n",irq, bits);
+#endif
+
return (irq);
}
#endif
@@ -239,18 +315,31 @@ ppc4xx_pic_init(void)
* explicity requested.
*/
ppc_cached_irq_mask[0] = 0;
+ ppc_cached_irq_mask[1] = 0;
-#ifdef CONFIG_403
+#if defined CONFIG_403
mtdcr(DCRN_EXIER, ppc_cached_irq_mask[0]);
ppc4xx_pic = &ppc403_aic;
ppc_md.get_irq = ppc403_pic_get_irq;
#else
- mtdcr(DCRN_UIC0_ER, ppc_cached_irq_mask[0]);
+#if (NR_UICS > 1)
+ ppc_cached_irq_mask[0] |= 1 << (31 - UIC0_UIC1NC ); /* enable cascading interrupt */
+ mtdcr(DCRN_UIC_ER(UIC0), ppc_cached_irq_mask[0]);
+ mtdcr(DCRN_UIC_ER(UIC1), ppc_cached_irq_mask[1]);
+
+ /* Set all interrupts to non-critical.
+ */
+ mtdcr(DCRN_UIC_CR(UIC0), 0);
+ mtdcr(DCRN_UIC_CR(UIC1), 0);
+
+#else
+ mtdcr(DCRN_UIC_ER(UIC0), ppc_cached_irq_mask[0]);
/* Set all interrupts to non-critical.
*/
- mtdcr(DCRN_UIC0_CR, 0);
+ mtdcr(DCRN_UIC_CR(UIC0), 0);
+#endif
ppc4xx_pic = &ppc405_uic;
ppc_md.get_irq = ppc405_pic_get_irq;
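
The interrupt decode in ppc405_pic_get_irq() leans on the UIC numbering its highest-priority source as bit 0 (the most significant bit), so "32 - ffs(bits)" picks the lowest-numbered pending source. A small illustrative rendering of that decode, including the cascade path added for two UICs (UIC_CASCADE_MASK's value here is a placeholder for whichever UIC0 bit UIC1 feeds):

    #include <strings.h>                       /* ffs(): least-significant set bit */

    #define UIC_CASCADE_MASK 0x00000001u       /* assumed cascade bit, for the sketch */

    static int decode_uic(unsigned int uic0_msr, unsigned int uic1_msr)
    {
            if (uic0_msr & UIC_CASCADE_MASK)           /* UIC1 is signalling   */
                    return 32 + (32 - ffs(uic1_msr));
            if (uic0_msr == 0)
                    return -1;                         /* spurious             */
            return 32 - ffs(uic0_msr);                 /* 0 = highest priority */
    }

For example, an MSR of 0x80000000 (only bit 0 set) decodes to IRQ 0, while 0x00000001 decodes to IRQ 31.
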
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index eef23f3c55dd..6e732fbcd3da 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -77,10 +77,6 @@ extern unsigned char __res[];
extern unsigned long mm_ptov (unsigned long paddr);
-extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
-extern void consistent_free(void *vaddr);
-extern void consistent_sync(void *vaddr, size_t size, int direction);
-
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(do_signal);
EXPORT_SYMBOL(do_syscall_trace);
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 843586742026..a63d75944635 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -197,7 +197,7 @@ dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
return 1;
}
-void switch_to(struct task_struct *prev, struct task_struct *new)
+void __switch_to(struct task_struct *prev, struct task_struct *new)
{
struct thread_struct *new_thread, *old_thread;
unsigned long s;
diff --git a/arch/ppc/kernel/prom.c b/arch/ppc/kernel/prom.c
index c0be2375cfa4..3a3b36040be7 100644
--- a/arch/ppc/kernel/prom.c
+++ b/arch/ppc/kernel/prom.c
@@ -520,6 +520,38 @@ prom_n_size_cells(struct device_node* np)
}
static unsigned long __init
+map_addr(struct device_node *np, unsigned long space, unsigned long addr)
+{
+ int na;
+ unsigned int *ranges;
+ int rlen = 0;
+ unsigned int type;
+
+ type = (space >> 24) & 3;
+ if (type == 0)
+ return addr;
+
+ while ((np = np->parent) != NULL) {
+ if (strcmp(np->type, "pci") != 0)
+ continue;
+ /* PCI bridge: map the address through the ranges property */
+ na = prom_n_addr_cells(np);
+ ranges = (unsigned int *) get_property(np, "ranges", &rlen);
+ while ((rlen -= (na + 5) * sizeof(unsigned int)) >= 0) {
+ if (((ranges[0] >> 24) & 3) == type
+ && ranges[2] <= addr
+ && addr - ranges[2] < ranges[na+4]) {
+ /* ok, this matches, translate it */
+ addr += ranges[na+2] - ranges[2];
+ break;
+ }
+ ranges += na + 5;
+ }
+ }
+ return addr;
+}
+
+static unsigned long __init
interpret_pci_props(struct device_node *np, unsigned long mem_start,
int naddrc, int nsizec)
{
@@ -533,9 +565,9 @@ interpret_pci_props(struct device_node *np, unsigned long mem_start,
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct pci_reg_property)) >= 0) {
- /* XXX assumes PCI addresses mapped 1-1 to physical */
adr[i].space = pci_addrs[i].addr.a_hi;
- adr[i].address = pci_addrs[i].addr.a_lo;
+ adr[i].address = map_addr(np, pci_addrs[i].addr.a_hi,
+ pci_addrs[i].addr.a_lo);
adr[i].size = pci_addrs[i].size_lo;
++i;
}
@@ -772,13 +804,14 @@ prom_get_irq_senses(unsigned char *senses, int off, int max)
for (np = allnodes; np != 0; np = np->allnext) {
for (j = 0; j < np->n_intrs; j++) {
i = np->intrs[j].line;
- if (i >= off && i < max)
+ if (i >= off && i < max) {
if (np->intrs[j].sense == 1)
senses[i-off] = (IRQ_SENSE_LEVEL
| IRQ_POLARITY_NEGATIVE);
else
senses[i-off] = (IRQ_SENSE_EDGE
| IRQ_POLARITY_POSITIVE);
+ }
}
}
}
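
The new map_addr() walks up the device tree and translates a PCI address through each parent bridge's "ranges" property, which the loop reads as na+5 cells per entry: a 3-cell child PCI address (space code in the top byte of the first cell), na cells of parent address, then a 2-cell size. A worked example under that layout, with hypothetical numbers and na = 1 for simplicity:

    /* One "ranges" entry, as map_addr() indexes it (na == 1):
     *   ranges[0]        child phys.hi  (space code in its top byte)
     *   ranges[1..2]     child phys.mid / phys.lo
     *   ranges[3]        parent address           == ranges[na+2]
     *   ranges[4..5]     size.hi / size.lo; the low cell is ranges[na+4]
     *
     * For a memory-space range with
     *   child base   ranges[2]    = 0x80000000
     *   parent base  ranges[na+2] = 0xc0000000
     *   size         ranges[na+4] = 0x10000000
     * the address 0x80001000 matches (0x80001000 - 0x80000000 < size) and is
     * translated to 0x80001000 + (0xc0000000 - 0x80000000) = 0xc0001000.
     */

This translation is what replaces the one-to-one mapping assumption flagged by the removed XXX comment in interpret_pci_props().
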
diff --git a/arch/ppc/kernel/syscalls.c b/arch/ppc/kernel/syscalls.c
index 87540676a5e4..6a439c76e134 100644
--- a/arch/ppc/kernel/syscalls.c
+++ b/arch/ppc/kernel/syscalls.c
@@ -117,7 +117,7 @@ sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
if ((ret = verify_area (VERIFY_READ, ptr, sizeof(tmp)))
|| (ret = copy_from_user(&tmp,
(struct ipc_kludge *) ptr,
- sizeof (tmp)) ? -EFAULT : 0)
+ sizeof (tmp)) ? -EFAULT : 0))
break;
ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
third);
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index d2b0124f3177..c4c412e95222 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -123,6 +123,7 @@ MachineCheckException(struct pt_regs *regs)
unsigned long msr = regs->msr;
if (user_mode(regs)) {
+ regs->msr |= MSR_RI;
_exception(SIGSEGV, regs);
return;
}
@@ -134,6 +135,7 @@ MachineCheckException(struct pt_regs *regs)
#endif
if (debugger_fault_handler) {
debugger_fault_handler(regs);
+ regs->msr |= MSR_RI;
return;
}