author     Kumar Gala <galak@freescale.com>            2005-03-13 00:24:06 -0800
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-03-13 00:24:06 -0800
commit     82c7633ae8d2d99cf5ba235ddc01e89e9e09678f (patch)
tree       03590ddfa0e3f5e613edf0f2b8bd40f38f45fc08
parent     70fc7b77c21bb703af6d7cc8329fa0a1669bdbcd (diff)
[PATCH] ppc32: Remove SPR short-hand defines
Removed the special purpose register (SPR) short-hand defines to help reduce namespace pollution. All SPRs are now referenced as SPRN_<foo>.

Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
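For illustration only (this sketch is not part of the commit): the patch is a mechanical rename at every mfspr()/mtspr() call site. Using the enet.c hunk further down as the model, a caller that used to read the 8xx IMMR register through the short-hand name now uses the prefixed constant. The helper name get_immap() is made up for this example; mfspr(), immap_t and the headers are assumed to be the 2.6-era asm-ppc ones the patched files already include.

/* Minimal call-site sketch, assuming <asm/reg.h> and <asm/8xx_immap.h>. */
#include <asm/reg.h>        /* SPRN_IMMR and the other SPRN_<foo> constants */
#include <asm/8xx_immap.h>  /* immap_t: layout of the 8xx internal register space */

static immap_t *get_immap(void)
{
	/* Before this patch: mfspr(IMMR)      -- relied on the short-hand define */
	/* After this patch:  mfspr(SPRN_IMMR) -- only the namespaced name remains */
	return (immap_t *)(mfspr(SPRN_IMMR) & 0xFFFF0000);
}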
-rw-r--r--  arch/ppc/8xx_io/enet.c  2
-rw-r--r--  arch/ppc/8xx_io/fec.c  2
-rw-r--r--  arch/ppc/boot/common/util.S  72
-rw-r--r--  arch/ppc/boot/simple/clear.S  2
-rw-r--r--  arch/ppc/boot/simple/embed_config.c  2
-rw-r--r--  arch/ppc/boot/simple/head.S  2
-rw-r--r--  arch/ppc/boot/simple/misc-ev64260.S  6
-rw-r--r--  arch/ppc/kernel/cpu_setup_6xx.S  32
-rw-r--r--  arch/ppc/kernel/entry.S  76
-rw-r--r--  arch/ppc/kernel/head.S  224
-rw-r--r--  arch/ppc/kernel/head_44x.S  98
-rw-r--r--  arch/ppc/kernel/head_4xx.S  156
-rw-r--r--  arch/ppc/kernel/head_8xx.S  166
-rw-r--r--  arch/ppc/kernel/head_booke.h  72
-rw-r--r--  arch/ppc/kernel/head_e500.S  108
-rw-r--r--  arch/ppc/kernel/l2cr.S  20
-rw-r--r--  arch/ppc/kernel/misc.S  16
-rw-r--r--  arch/ppc/kernel/setup.c  2
-rw-r--r--  arch/ppc/kernel/smp.c  4
-rw-r--r--  arch/ppc/kernel/traps.c  2
-rw-r--r--  arch/ppc/mm/hashtable.S  2
-rw-r--r--  arch/ppc/mm/ppc_mmu.c  2
-rw-r--r--  arch/ppc/platforms/4xx/ebony.c  2
-rw-r--r--  arch/ppc/platforms/83xx/mpc834x_sys.c  12
-rw-r--r--  arch/ppc/platforms/85xx/mpc8540_ads.c  2
-rw-r--r--  arch/ppc/platforms/85xx/mpc8560_ads.c  2
-rw-r--r--  arch/ppc/platforms/85xx/mpc85xx_ads_common.c  6
-rw-r--r--  arch/ppc/platforms/85xx/mpc85xx_cds_common.c  8
-rw-r--r--  arch/ppc/platforms/85xx/sbc8560.c  2
-rw-r--r--  arch/ppc/platforms/85xx/sbc85xx.c  6
-rw-r--r--  arch/ppc/platforms/85xx/stx_gp3.c  8
-rw-r--r--  arch/ppc/platforms/adir_setup.c  4
-rw-r--r--  arch/ppc/platforms/chestnut.c  4
-rw-r--r--  arch/ppc/platforms/cpci690.c  6
-rw-r--r--  arch/ppc/platforms/ev64260.c  12
-rw-r--r--  arch/ppc/platforms/gemini_prom.S  42
-rw-r--r--  arch/ppc/platforms/gemini_setup.c  10
-rw-r--r--  arch/ppc/platforms/k2.c  18
-rw-r--r--  arch/ppc/platforms/katana.c  6
-rw-r--r--  arch/ppc/platforms/lopec.c  4
-rw-r--r--  arch/ppc/platforms/mcpn765.c  4
-rw-r--r--  arch/ppc/platforms/mvme5100.c  4
-rw-r--r--  arch/ppc/platforms/pcore.c  4
-rw-r--r--  arch/ppc/platforms/pmac_cache.S  44
-rw-r--r--  arch/ppc/platforms/pmac_cpufreq.c  4
-rw-r--r--  arch/ppc/platforms/pmac_feature.c  2
-rw-r--r--  arch/ppc/platforms/pmac_setup.c  2
-rw-r--r--  arch/ppc/platforms/pmac_sleep.S  8
-rw-r--r--  arch/ppc/platforms/pmac_smp.c  2
-rw-r--r--  arch/ppc/platforms/pplus.c  8
-rw-r--r--  arch/ppc/platforms/prep_setup.c  4
-rw-r--r--  arch/ppc/platforms/prpmc750.c  4
-rw-r--r--  arch/ppc/platforms/prpmc800.c  4
-rw-r--r--  arch/ppc/platforms/spruce.c  4
-rw-r--r--  arch/ppc/syslib/btext.c  2
-rw-r--r--  arch/ppc/syslib/ibm440gx_common.c  2
-rw-r--r--  arch/ppc/syslib/mpc52xx_setup.c  4
-rw-r--r--  include/asm-ppc/cache.h  12
-rw-r--r--  include/asm-ppc/mmu.h  26
-rw-r--r--  include/asm-ppc/reg.h  85
-rw-r--r--  include/asm-ppc/reg_booke.h  20
61 files changed, 683 insertions, 788 deletions
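Most of the 85 lines touched in include/asm-ppc/reg.h are the short-hand defines themselves being deleted. The snippet below is a hedged reconstruction of that pattern, not a quote from the header: the SPR numbers shown (HID0 = 0x3F0, PVR = 0x11F) are the architected values, but the exact layout and comments of the file are assumed.

/* Before: reg.h carried both spellings, the short name aliasing the SPRN_ one. */
#define SPRN_HID0	0x3F0	/* Hardware Implementation Register 0 */
#define SPRN_PVR	0x11F	/* Processor Version Register */
#define HID0		SPRN_HID0	/* short-hand alias -- removed by this patch */
#define PVR		SPRN_PVR	/* short-hand alias -- removed by this patch */

/* After: only the namespaced SPRN_<foo> constants remain, so callers must spell them out. */
#define SPRN_HID0	0x3F0
#define SPRN_PVR	0x11F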
diff --git a/arch/ppc/8xx_io/enet.c b/arch/ppc/8xx_io/enet.c
index f720d2a4ca6d..4ea7158e5062 100644
--- a/arch/ppc/8xx_io/enet.c
+++ b/arch/ppc/8xx_io/enet.c
@@ -657,7 +657,7 @@ static int __init scc_enet_init(void)
cp = cpmp; /* Get pointer to Communication Processor */
- immap = (immap_t *)(mfspr(IMMR) & 0xFFFF0000); /* and to internal registers */
+ immap = (immap_t *)(mfspr(SPRN_IMMR) & 0xFFFF0000); /* and to internal registers */
bd = (bd_t *)__res;
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 88eb1aa10dc0..0730392dcc20 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -1735,7 +1735,7 @@ static int __init fec_enet_init(void)
/* Bits moved from Rev. D onward.
*/
- if ((mfspr(IMMR) & 0xffff) < 0x0501)
+ if ((mfspr(SPRN_IMMR) & 0xffff) < 0x0501)
immap->im_ioport.iop_pddir = 0x1c58; /* Pre rev. D */
else
immap->im_ioport.iop_pddir = 0x1fff; /* Rev. D and later */
diff --git a/arch/ppc/boot/common/util.S b/arch/ppc/boot/common/util.S
index d0161ac4cc56..47e641455bc5 100644
--- a/arch/ppc/boot/common/util.S
+++ b/arch/ppc/boot/common/util.S
@@ -47,23 +47,23 @@ disable_6xx_mmu:
/* Clear BATs */
li r8,0
- mtspr DBAT0U,r8
- mtspr DBAT0L,r8
- mtspr DBAT1U,r8
- mtspr DBAT1L,r8
- mtspr DBAT2U,r8
- mtspr DBAT2L,r8
- mtspr DBAT3U,r8
- mtspr DBAT3L,r8
+ mtspr SPRN_DBAT0U,r8
+ mtspr SPRN_DBAT0L,r8
+ mtspr SPRN_DBAT1U,r8
+ mtspr SPRN_DBAT1L,r8
+ mtspr SPRN_DBAT2U,r8
+ mtspr SPRN_DBAT2L,r8
+ mtspr SPRN_DBAT3U,r8
+ mtspr SPRN_DBAT3L,r8
.clearbats_601:
- mtspr IBAT0U,r8
- mtspr IBAT0L,r8
- mtspr IBAT1U,r8
- mtspr IBAT1L,r8
- mtspr IBAT2U,r8
- mtspr IBAT2L,r8
- mtspr IBAT3U,r8
- mtspr IBAT3L,r8
+ mtspr SPRN_IBAT0U,r8
+ mtspr SPRN_IBAT0L,r8
+ mtspr SPRN_IBAT1U,r8
+ mtspr SPRN_IBAT1L,r8
+ mtspr SPRN_IBAT2U,r8
+ mtspr SPRN_IBAT2L,r8
+ mtspr SPRN_IBAT3U,r8
+ mtspr SPRN_IBAT3L,r8
isync
sync
sync
@@ -84,14 +84,14 @@ disable_6xx_l1cache:
/* Enable, invalidate and then disable the L1 icache/dcache. */
li r8,0
ori r8,r8,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
- mfspr r11,HID0
+ mfspr r11,SPRN_HID0
or r11,r11,r8
andc r10,r11,r8
isync
- mtspr HID0,r8
+ mtspr SPRN_HID0,r8
sync
isync
- mtspr HID0,r10
+ mtspr SPRN_HID0,r10
sync
isync
blr
@@ -107,17 +107,17 @@ _setup_L2CR:
/* Invalidate/disable L2 cache */
sync
isync
- mfspr r8,L2CR
+ mfspr r8,SPRN_L2CR
rlwinm r8,r8,0,1,31
oris r8,r8,L2CR_L2I@h
sync
isync
- mtspr L2CR,r8
+ mtspr SPRN_L2CR,r8
sync
isync
/* Wait for the invalidation to complete */
- mfspr r8,PVR
+ mfspr r8,SPRN_PVR
srwi r8,r8,16
cmplwi cr0,r8,0x8000 /* 7450 */
cmplwi cr1,r8,0x8001 /* 7455 */
@@ -126,19 +126,19 @@ _setup_L2CR:
cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
bne 2f
-1: mfspr r8,L2CR /* On 745x, poll L2I bit (bit 10) */
+1: mfspr r8,SPRN_L2CR /* On 745x, poll L2I bit (bit 10) */
rlwinm. r9,r8,0,10,10
bne 1b
b 3f
-2: mfspr r8,L2CR /* On 75x & 74[01]0, poll L2IP bit (bit 31) */
+2: mfspr r8,SPRN_L2CR /* On 75x & 74[01]0, poll L2IP bit (bit 31) */
rlwinm. r9,r8,0,31,31
bne 2b
3: rlwinm r8,r8,0,11,9 /* Turn off L2I bit */
sync
isync
- mtspr L2CR,r8
+ mtspr SPRN_L2CR,r8
sync
isync
blr
@@ -148,24 +148,24 @@ _setup_L3CR:
/* Invalidate/disable L3 cache */
sync
isync
- mfspr r8,L3CR
+ mfspr r8,SPRN_L3CR
rlwinm r8,r8,0,1,31
ori r8,r8,L3CR_L3I@l
sync
isync
- mtspr L3CR,r8
+ mtspr SPRN_L3CR,r8
sync
isync
/* Wait for the invalidation to complete */
-1: mfspr r8,L3CR
+1: mfspr r8,SPRN_L3CR
rlwinm. r9,r8,0,21,21
bne 1b
rlwinm r8,r8,0,22,20 /* Turn off L3I bit */
sync
isync
- mtspr L3CR,r8
+ mtspr SPRN_L3CR,r8
sync
isync
blr
@@ -190,7 +190,7 @@ timebase_period_ns:
*/
.globl udelay
udelay:
- mfspr r4,PVR
+ mfspr r4,SPRN_PVR
srwi r4,r4,16
cmpwi 0,r4,1 /* 601 ? */
bne .udelay_not_601
@@ -240,11 +240,11 @@ _GLOBAL(flush_instruction_cache)
#ifdef CONFIG_8xx
lis r3, IDC_INVALL@h
- mtspr IC_CST, r3
+ mtspr SPRN_IC_CST, r3
lis r3, IDC_ENABLE@h
- mtspr IC_CST, r3
+ mtspr SPRN_IC_CST, r3
lis r3, IDC_DISABLE@h
- mtspr DC_CST, r3
+ mtspr SPRN_DC_CST, r3
#elif CONFIG_4xx
lis r3,start@h # r9 = &_start
lis r4,_etext@ha
@@ -258,14 +258,14 @@ _GLOBAL(flush_instruction_cache)
/* Enable, invalidate and then disable the L1 icache/dcache. */
li r3,0
ori r3,r3,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
- mfspr r4,HID0
+ mfspr r4,SPRN_HID0
or r5,r4,r3
isync
- mtspr HID0,r5
+ mtspr SPRN_HID0,r5
sync
isync
ori r5,r4,HID0_ICE /* Enable cache */
- mtspr HID0,r5
+ mtspr SPRN_HID0,r5
sync
isync
#endif
diff --git a/arch/ppc/boot/simple/clear.S b/arch/ppc/boot/simple/clear.S
index cd15b1644969..95c5647a0f51 100644
--- a/arch/ppc/boot/simple/clear.S
+++ b/arch/ppc/boot/simple/clear.S
@@ -7,7 +7,7 @@
bl _setup_L2CR; \
\
/* If 745x, turn off L3CR as well */ \
- mfspr r8,PVR; \
+ mfspr r8,SPRN_PVR; \
srwi r8,r8,16; \
\
cmpli cr0,r8,0x8000; /* 7450 */ \
diff --git a/arch/ppc/boot/simple/embed_config.c b/arch/ppc/boot/simple/embed_config.c
index a4fbc390084c..c342b47e763e 100644
--- a/arch/ppc/boot/simple/embed_config.c
+++ b/arch/ppc/boot/simple/embed_config.c
@@ -506,7 +506,7 @@ embed_config(bd_t **bdp)
memcpy(bd->bi_enetaddr, cp, 6);
/* can busfreq be calculated? */
- pvr = mfspr(PVR);
+ pvr = mfspr(SPRN_PVR);
if ((pvr & 0xffff0000) == 0x80820000) {
bd->bi_busfreq = 100000000;
clk_8280(bd);
diff --git a/arch/ppc/boot/simple/head.S b/arch/ppc/boot/simple/head.S
index ba4c7b268737..4fb4dcfd5ec8 100644
--- a/arch/ppc/boot/simple/head.S
+++ b/arch/ppc/boot/simple/head.S
@@ -52,7 +52,7 @@ start_:
*/
li r4,0x0000
isync
- mtspr HID0,r4
+ mtspr SPRN_HID0,r4
sync
isync
#endif
diff --git a/arch/ppc/boot/simple/misc-ev64260.S b/arch/ppc/boot/simple/misc-ev64260.S
index 2dc3e6c5032a..9b42be0f2f84 100644
--- a/arch/ppc/boot/simple/misc-ev64260.S
+++ b/arch/ppc/boot/simple/misc-ev64260.S
@@ -21,13 +21,13 @@
.globl mv64x60_board_init
mv64x60_board_init:
/* DINK doesn't enable 745x timebase, so enable here (Adrian Cox) */
- mfspr r25,PVR
+ mfspr r25,SPRN_PVR
srwi r25,r25,16
cmplwi r25,(PVR_7450 >> 16)
bne 1f
- mfspr r25,HID0
+ mfspr r25,SPRN_HID0
oris r25,r25,(HID0_TBEN >> 16)
- mtspr HID0,r25
+ mtspr SPRN_HID0,r25
1:
#if (CONFIG_MV64X60_NEW_BASE != CONFIG_MV64X60_BASE)
li r23,20
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S
index 9a4ee63fd07c..74f781b486a3 100644
--- a/arch/ppc/kernel/cpu_setup_6xx.S
+++ b/arch/ppc/kernel/cpu_setup_6xx.S
@@ -73,16 +73,16 @@ _GLOBAL(__setup_cpu_745x)
/* Enable caches for 603's, 604, 750 & 7400 */
setup_common_caches:
- mfspr r11,HID0
+ mfspr r11,SPRN_HID0
andi. r0,r11,HID0_DCE
ori r11,r11,HID0_ICE|HID0_DCE
ori r8,r11,HID0_ICFI
bne 1f /* don't invalidate the D-cache */
ori r8,r8,HID0_DCI /* unless it wasn't enabled */
1: sync
- mtspr HID0,r8 /* enable and invalidate caches */
+ mtspr SPRN_HID0,r8 /* enable and invalidate caches */
sync
- mtspr HID0,r11 /* enable caches */
+ mtspr SPRN_HID0,r11 /* enable caches */
sync
isync
blr
@@ -91,13 +91,13 @@ setup_common_caches:
* Enable superscalar execution & branch history table
*/
setup_604_hid0:
- mfspr r11,HID0
+ mfspr r11,SPRN_HID0
ori r11,r11,HID0_SIED|HID0_BHTE
ori r8,r11,HID0_BTCD
sync
- mtspr HID0,r8 /* flush branch target address cache */
+ mtspr SPRN_HID0,r8 /* flush branch target address cache */
sync /* on 604e/604r */
- mtspr HID0,r11
+ mtspr SPRN_HID0,r11
sync
isync
blr
@@ -150,7 +150,7 @@ setup_7410_workarounds:
* Clear Instruction cache throttling (ICTC)
*/
setup_750_7400_hid0:
- mfspr r11,HID0
+ mfspr r11,SPRN_HID0
ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
BEGIN_FTR_SECTION
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
@@ -158,9 +158,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
li r3,HID0_SPD
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
- mtspr ICTC,r3 /* Instruction Cache Throttling off */
+ mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
isync
- mtspr HID0,r11
+ mtspr SPRN_HID0,r11
sync
isync
blr
@@ -214,7 +214,7 @@ setup_745x_specifics:
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
1:
- mfspr r11,HID0
+ mfspr r11,SPRN_HID0
/* All of the bits we have to set.....
*/
@@ -232,9 +232,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
- mtspr ICTC,r3 /* Instruction Cache Throttling off */
+ mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
isync
- mtspr HID0,r11
+ mtspr SPRN_HID0,r11
sync
isync
@@ -285,7 +285,7 @@ _GLOBAL(__save_cpu_setup)
stw r3,CS_HID0(r5)
/* Now deal with CPU type dependent registers */
- mfspr r3,PVR
+ mfspr r3,SPRN_PVR
srwi r3,r3,16
cmplwi cr0,r3,0x8000 /* 7450 */
cmplwi cr1,r3,0x000c /* 7400 */
@@ -323,7 +323,7 @@ _GLOBAL(__save_cpu_setup)
mfspr r4,SPRN_HID1
stw r4,CS_HID1(r5)
/* If rev 2.x, backup HID2 */
- mfspr r3,PVR
+ mfspr r3,SPRN_PVR
andi. r3,r3,0xff00
cmpwi cr0,r3,0x0200
bne 1f
@@ -354,7 +354,7 @@ _GLOBAL(__restore_cpu_setup)
isync
/* Now deal with CPU type dependent registers */
- mfspr r3,PVR
+ mfspr r3,SPRN_PVR
srwi r3,r3,16
cmplwi cr0,r3,0x8000 /* 7450 */
cmplwi cr1,r3,0x000c /* 7400 */
@@ -414,7 +414,7 @@ _GLOBAL(__restore_cpu_setup)
* to PLL 0 on all
*/
/* If rev 2.x, restore HID2 with low voltage bit cleared */
- mfspr r3,PVR
+ mfspr r3,SPRN_PVR
andi. r3,r3,0xff00
cmpwi cr0,r3,0x0200
bne 4f
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 08083be4c6ab..370ee2a59c1f 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -47,8 +47,8 @@
#ifdef CONFIG_BOOKE
#define COR r8 /* Critical Offset Register (COR) */
#define BOOKE_LOAD_COR lis COR,crit_save@ha
-#define BOOKE_REST_COR mfspr COR,SPRG2
-#define BOOKE_SAVE_COR mtspr SPRG2,COR
+#define BOOKE_REST_COR mfspr COR,SPRN_SPRG2
+#define BOOKE_SAVE_COR mtspr SPRN_SPRG2,COR
#else
#define COR 0
#define BOOKE_LOAD_COR
@@ -59,13 +59,13 @@
#ifdef CONFIG_BOOKE
.globl mcheck_transfer_to_handler
mcheck_transfer_to_handler:
- mtspr SPRG6W,r8
+ mtspr SPRN_SPRG6W,r8
lis r8,mcheck_save@ha
lwz r0,mcheck_r10@l(r8)
stw r0,GPR10(r11)
lwz r0,mcheck_r11@l(r8)
stw r0,GPR11(r11)
- mfspr r8,SPRG6R
+ mfspr r8,SPRN_SPRG6R
b transfer_to_handler_full
#endif
@@ -101,10 +101,10 @@ transfer_to_handler:
stw r9,_MSR(r11)
andi. r2,r9,MSR_PR
mfctr r12
- mfspr r2,XER
+ mfspr r2,SPRN_XER
stw r12,_CTR(r11)
stw r2,_XER(r11)
- mfspr r12,SPRG3
+ mfspr r12,SPRN_SPRG3
addi r2,r12,-THREAD
tovirt(r2,r2) /* set r2 to current */
beq 2f /* if from user, fix up THREAD.regs */
@@ -152,8 +152,8 @@ transfer_to_handler_cont:
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
FIX_SRR1(r10,r12)
- mtspr SRR0,r11
- mtspr SRR1,r10
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
@@ -177,8 +177,8 @@ stack_ovf:
addi r9,r9,StackOverflow@l
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
FIX_SRR1(r10,r12)
- mtspr SRR0,r9
- mtspr SRR1,r10
+ mtspr SPRN_SRR0,r9
+ mtspr SPRN_SRR1,r10
SYNC
RFI
@@ -260,8 +260,8 @@ syscall_exit_cont:
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
- mtspr SRR0,r7
- mtspr SRR1,r8
+ mtspr SPRN_SRR0,r7
+ mtspr SPRN_SRR1,r8
SYNC
RFI
@@ -538,7 +538,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
tophys(r0,r4)
CLR_TOP32(r0)
- mtspr SPRG3,r0 /* Update current THREAD phys addr */
+ mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
lwz r1,KSP(r4) /* Load new stack pointer */
/* save the old current 'last' for return value */
@@ -642,7 +642,7 @@ restore:
lwz r10,_XER(r1)
lwz r11,_CTR(r1)
- mtspr XER,r10
+ mtspr SPRN_XER,r10
mtctr r11
PPC405_ERR77(0,r1)
@@ -675,8 +675,8 @@ exc_exit_restart:
lwz r9,_MSR(r1)
lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
- mtspr SRR0,r12
- mtspr SRR1,r9
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r9
REST_4GPRS(9, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
@@ -702,8 +702,8 @@ exc_exit_restart:
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
exc_exit_start:
- mtspr SRR0,r11
- mtspr SRR1,r12
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
REST_2GPRS(11, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
@@ -742,7 +742,7 @@ ret_from_crit_exc:
lwz r10,_XER(r1)
lwz r11,_CTR(r1)
- mtspr XER,r10
+ mtspr SPRN_XER,r10
mtctr r11
PPC405_ERR77(0,r1)
@@ -766,8 +766,8 @@ ret_from_crit_exc:
mtspr SPRN_ESR,r10
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
- mtspr CSRR0,r11
- mtspr CSRR1,r12
+ mtspr SPRN_CSRR0,r11
+ mtspr SPRN_CSRR1,r12
lwz r9,GPR9(r1)
lwz r12,GPR12(r1)
BOOKE_SAVE_COR
@@ -787,9 +787,9 @@ ret_from_crit_exc:
lwz r10,crit_sprg7@l(COR)
mtspr SPRN_SPRG7,r10
lwz r10,crit_srr0@l(COR)
- mtspr SRR0,r10
+ mtspr SPRN_SRR0,r10
lwz r10,crit_srr1@l(COR)
- mtspr SRR1,r10
+ mtspr SPRN_SRR1,r10
lwz r10,crit_pid@l(COR)
mtspr SPRN_PID,r10
lwz r10,GPR10(r1)
@@ -820,7 +820,7 @@ ret_from_mcheck_exc:
lwz r10,_XER(r1)
lwz r11,_CTR(r1)
- mtspr XER,r10
+ mtspr SPRN_XER,r10
mtctr r11
stwcx. r0,0,r1 /* to clear the reservation */
@@ -835,11 +835,11 @@ ret_from_mcheck_exc:
mtspr SPRN_ESR,r10
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
- mtspr MCSRR0,r11
- mtspr MCSRR1,r12
+ mtspr SPRN_MCSRR0,r11
+ mtspr SPRN_MCSRR1,r12
lwz r9,GPR9(r1)
lwz r12,GPR12(r1)
- mtspr SPRG6W,r8
+ mtspr SPRN_SPRG6W,r8
lis r8,mcheck_save@ha
lwz r10,mcheck_sprg0@l(r8)
mtspr SPRN_SPRG0,r10
@@ -852,19 +852,19 @@ ret_from_mcheck_exc:
lwz r10,mcheck_sprg7@l(r8)
mtspr SPRN_SPRG7,r10
lwz r10,mcheck_srr0@l(r8)
- mtspr SRR0,r10
+ mtspr SPRN_SRR0,r10
lwz r10,mcheck_srr1@l(r8)
- mtspr SRR1,r10
+ mtspr SPRN_SRR1,r10
lwz r10,mcheck_csrr0@l(r8)
- mtspr CSRR0,r10
+ mtspr SPRN_CSRR0,r10
lwz r10,mcheck_csrr1@l(r8)
- mtspr CSRR1,r10
+ mtspr SPRN_CSRR1,r10
lwz r10,mcheck_pid@l(r8)
mtspr SPRN_PID,r10
lwz r10,GPR10(r1)
lwz r11,GPR11(r1)
lwz r1,GPR1(r1)
- mfspr r8,SPRG6R
+ mfspr r8,SPRN_SPRG6R
RFMCI
#endif /* CONFIG_BOOKE */
@@ -997,9 +997,9 @@ _GLOBAL(enter_rtas)
li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
mtlr r6
CLR_TOP32(r7)
- mtspr SPRG2,r7
- mtspr SRR0,r8
- mtspr SRR1,r9
+ mtspr SPRN_SPRG2,r7
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
RFI
1: tophys(r9,r1)
lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
@@ -1007,9 +1007,9 @@ _GLOBAL(enter_rtas)
FIX_SRR1(r9,r0)
addi r1,r1,INT_FRAME_SIZE
li r0,0
- mtspr SPRG2,r0
- mtspr SRR0,r8
- mtspr SRR1,r9
+ mtspr SPRN_SPRG2,r0
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
RFI /* return to caller */
.globl machine_check_in_rtas
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 3a3e3c7424df..1a89a71e0acc 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -41,12 +41,12 @@
#define LOAD_BAT(n, reg, RA, RB) \
ld RA,(n*32)+0(reg); \
ld RB,(n*32)+8(reg); \
- mtspr IBAT##n##U,RA; \
- mtspr IBAT##n##L,RB; \
+ mtspr SPRN_IBAT##n##U,RA; \
+ mtspr SPRN_IBAT##n##L,RB; \
ld RA,(n*32)+16(reg); \
ld RB,(n*32)+24(reg); \
- mtspr DBAT##n##U,RA; \
- mtspr DBAT##n##L,RB; \
+ mtspr SPRN_DBAT##n##U,RA; \
+ mtspr SPRN_DBAT##n##L,RB; \
#else /* CONFIG_PPC64BRIDGE */
@@ -54,17 +54,17 @@
#define LOAD_BAT(n, reg, RA, RB) \
/* see the comment for clear_bats() -- Cort */ \
li RA,0; \
- mtspr IBAT##n##U,RA; \
- mtspr DBAT##n##U,RA; \
+ mtspr SPRN_IBAT##n##U,RA; \
+ mtspr SPRN_DBAT##n##U,RA; \
lwz RA,(n*16)+0(reg); \
lwz RB,(n*16)+4(reg); \
- mtspr IBAT##n##U,RA; \
- mtspr IBAT##n##L,RB; \
+ mtspr SPRN_IBAT##n##U,RA; \
+ mtspr SPRN_IBAT##n##L,RB; \
beq 1f; \
lwz RA,(n*16)+8(reg); \
lwz RB,(n*16)+12(reg); \
- mtspr DBAT##n##U,RA; \
- mtspr DBAT##n##L,RB; \
+ mtspr SPRN_DBAT##n##U,RA; \
+ mtspr SPRN_DBAT##n##L,RB; \
1:
#endif /* CONFIG_PPC64BRIDGE */
@@ -224,10 +224,10 @@ __after_mmu_off:
turn_on_mmu:
mfmsr r0
ori r0,r0,MSR_DR|MSR_IR
- mtspr SRR1,r0
+ mtspr SPRN_SRR1,r0
lis r0,start_here@h
ori r0,r0,start_here@l
- mtspr SRR0,r0
+ mtspr SPRN_SRR0,r0
SYNC
RFI /* enables MMU */
@@ -260,18 +260,18 @@ __secondary_hold:
* task's thread_struct.
*/
#define EXCEPTION_PROLOG \
- mtspr SPRG0,r10; \
- mtspr SPRG1,r11; \
+ mtspr SPRN_SPRG0,r10; \
+ mtspr SPRN_SPRG1,r11; \
mfcr r10; \
EXCEPTION_PROLOG_1; \
EXCEPTION_PROLOG_2
#define EXCEPTION_PROLOG_1 \
- mfspr r11,SRR1; /* check whether user or kernel */ \
+ mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
andi. r11,r11,MSR_PR; \
tophys(r11,r1); /* use tophys(r1) if kernel */ \
beq 1f; \
- mfspr r11,SPRG3; \
+ mfspr r11,SPRN_SPRG3; \
lwz r11,THREAD_INFO-THREAD(r11); \
addi r11,r11,THREAD_SIZE; \
tophys(r11,r11); \
@@ -283,14 +283,14 @@ __secondary_hold:
stw r10,_CCR(r11); /* save registers */ \
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
- mfspr r10,SPRG0; \
+ mfspr r10,SPRN_SPRG0; \
stw r10,GPR10(r11); \
- mfspr r12,SPRG1; \
+ mfspr r12,SPRN_SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
- mfspr r12,SRR0; \
- mfspr r9,SRR1; \
+ mfspr r12,SPRN_SRR0; \
+ mfspr r9,SPRN_SRR1; \
stw r1,GPR1(r11); \
stw r1,0(r11); \
tovirt(r1,r11); /* set new kernel sp */ \
@@ -373,11 +373,11 @@ i##n: \
* -- paulus.
*/
. = 0x200
- mtspr SPRG0,r10
- mtspr SPRG1,r11
+ mtspr SPRN_SPRG0,r10
+ mtspr SPRN_SPRG1,r11
mfcr r10
#ifdef CONFIG_PPC_CHRP
- mfspr r11,SPRG2
+ mfspr r11,SPRN_SPRG2
cmpwi 0,r11,0
bne 7f
#endif /* CONFIG_PPC_CHRP */
@@ -385,7 +385,7 @@ i##n: \
7: EXCEPTION_PROLOG_2
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
- mfspr r4,SPRG2
+ mfspr r4,SPRN_SPRG2
cmpwi cr1,r4,0
bne cr1,1f
#endif
@@ -403,15 +403,15 @@ DataAccessCont:
DataAccess:
EXCEPTION_PROLOG
#endif /* CONFIG_PPC64BRIDGE */
- mfspr r10,DSISR
+ mfspr r10,SPRN_DSISR
andis. r0,r10,0xa470 /* weird error? */
bne 1f /* if not, try to put a PTE */
- mfspr r4,DAR /* into the hash table */
+ mfspr r4,SPRN_DAR /* into the hash table */
rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
bl hash_page
1: stw r10,_DSISR(r11)
mr r5,r10
- mfspr r4,DAR
+ mfspr r4,SPRN_DAR
EXC_XFER_EE_LITE(0x300, handle_page_fault)
#ifdef CONFIG_PPC64BRIDGE
@@ -451,9 +451,9 @@ InstructionAccess:
. = 0x600
Alignment:
EXCEPTION_PROLOG
- mfspr r4,DAR
+ mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
- mfspr r5,DSISR
+ mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE(0x600, AlignmentException)
@@ -518,16 +518,16 @@ InstructionTLBMiss:
*/
mfctr r0
/* Get PTE (linux-style) and check access */
- mfspr r3,IMISS
+ mfspr r3,SPRN_IMISS
lis r1,KERNELBASE@h /* check if kernel address */
cmplw 0,r3,r1
- mfspr r2,SPRG3
+ mfspr r2,SPRN_SPRG3
li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
lwz r2,PGDIR(r2)
blt+ 112f
lis r2,swapper_pg_dir@ha /* if kernel address, use */
addi r2,r2,swapper_pg_dir@l /* kernel page table */
- mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */
+ mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
112: tophys(r2,r2)
rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
@@ -553,26 +553,26 @@ InstructionTLBMiss:
ori r1,r1,0xe14 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
mtspr SPRN_RPA,r1
- mfspr r3,IMISS
+ mfspr r3,SPRN_IMISS
tlbli r3
- mfspr r3,SRR1 /* Need to restore CR0 */
+ mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
rfi
InstructionAddressInvalid:
- mfspr r3,SRR1
+ mfspr r3,SPRN_SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
addis r1,r1,0x2000
- mtspr DSISR,r1 /* (shouldn't be needed) */
+ mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
mtctr r0 /* Restore CTR */
andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
or r2,r2,r1
- mtspr SRR1,r2
- mfspr r1,IMISS /* Get failing address */
+ mtspr SPRN_SRR1,r2
+ mfspr r1,SPRN_IMISS /* Get failing address */
rlwinm. r2,r2,0,31,31 /* Check for little endian access */
rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
xor r1,r1,r2
- mtspr DAR,r1 /* Set fault address */
+ mtspr SPRN_DAR,r1 /* Set fault address */
mfmsr r0 /* Restore "normal" registers */
xoris r0,r0,MSR_TGPR>>16
mtcrf 0x80,r3 /* Restore CR0 */
@@ -592,16 +592,16 @@ DataLoadTLBMiss:
*/
mfctr r0
/* Get PTE (linux-style) and check access */
- mfspr r3,DMISS
+ mfspr r3,SPRN_DMISS
lis r1,KERNELBASE@h /* check if kernel address */
cmplw 0,r3,r1
- mfspr r2,SPRG3
+ mfspr r2,SPRN_SPRG3
li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
lwz r2,PGDIR(r2)
blt+ 112f
lis r2,swapper_pg_dir@ha /* if kernel address, use */
addi r2,r2,swapper_pg_dir@l /* kernel page table */
- mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */
+ mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
112: tophys(r2,r2)
rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
@@ -627,24 +627,24 @@ DataLoadTLBMiss:
ori r1,r1,0xe14 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
mtspr SPRN_RPA,r1
- mfspr r3,DMISS
+ mfspr r3,SPRN_DMISS
tlbld r3
- mfspr r3,SRR1 /* Need to restore CR0 */
+ mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
rfi
DataAddressInvalid:
- mfspr r3,SRR1
+ mfspr r3,SPRN_SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
addis r1,r1,0x2000
- mtspr DSISR,r1
+ mtspr SPRN_DSISR,r1
mtctr r0 /* Restore CTR */
andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
- mtspr SRR1,r2
- mfspr r1,DMISS /* Get failing address */
+ mtspr SPRN_SRR1,r2
+ mfspr r1,SPRN_DMISS /* Get failing address */
rlwinm. r2,r2,0,31,31 /* Check for little endian access */
beq 20f /* Jump if big endian */
xori r1,r1,3
-20: mtspr DAR,r1 /* Set fault address */
+20: mtspr SPRN_DAR,r1 /* Set fault address */
mfmsr r0 /* Restore "normal" registers */
xoris r0,r0,MSR_TGPR>>16
mtcrf 0x80,r3 /* Restore CR0 */
@@ -664,16 +664,16 @@ DataStoreTLBMiss:
*/
mfctr r0
/* Get PTE (linux-style) and check access */
- mfspr r3,DMISS
+ mfspr r3,SPRN_DMISS
lis r1,KERNELBASE@h /* check if kernel address */
cmplw 0,r3,r1
- mfspr r2,SPRG3
+ mfspr r2,SPRN_SPRG3
li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
lwz r2,PGDIR(r2)
blt+ 112f
lis r2,swapper_pg_dir@ha /* if kernel address, use */
addi r2,r2,swapper_pg_dir@l /* kernel page table */
- mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */
+ mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
112: tophys(r2,r2)
rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
@@ -695,9 +695,9 @@ DataStoreTLBMiss:
li r1,0xe15 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? 2: 0 */
mtspr SPRN_RPA,r1
- mfspr r3,DMISS
+ mfspr r3,SPRN_DMISS
tlbld r3
- mfspr r3,SRR1 /* Need to restore CR0 */
+ mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
rfi
@@ -765,7 +765,7 @@ InstructionAccess:
DataSegment:
EXCEPTION_PROLOG
addi r3,r1,STACK_FRAME_OVERHEAD
- mfspr r4,DAR
+ mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
EXC_XFER_STD(0x380, UnknownException)
@@ -816,7 +816,7 @@ load_up_fpu:
1:
#endif /* CONFIG_SMP */
/* enable use of FP after return */
- mfspr r5,SPRG3 /* current task's THREAD (phys) */
+ mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
lwz r4,THREAD_FPEXC_MODE(r5)
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4
@@ -843,8 +843,8 @@ fast_exception_return:
lwz r10,_LINK(r11)
mtlr r10
REST_GPR(10, r11)
- mtspr SRR1,r9
- mtspr SRR0,r12
+ mtspr SPRN_SRR1,r9
+ mtspr SPRN_SRR0,r12
REST_GPR(9, r11)
REST_GPR(12, r11)
lwz r11,GPR11(r11)
@@ -945,7 +945,7 @@ load_up_altivec:
#endif /* CONFIG_SMP */
/* enable use of AltiVec after return */
oris r9,r9,MSR_VEC@h
- mfspr r5,SPRG3 /* current task's THREAD (phys) */
+ mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
li r4,1
li r10,THREAD_VSCR
stw r4,THREAD_USED_VR(r5)
@@ -1142,8 +1142,8 @@ fix_mem_constants:
lis r8,0
#endif
ori r8,r8,0x2 /* 128KB, supervisor */
- mtspr DBAT3U,r8
- mtspr DBAT3L,r8
+ mtspr SPRN_DBAT3U,r8
+ mtspr SPRN_DBAT3L,r8
lis r12,__ptov_table_begin@h
ori r12,r12,__ptov_table_begin@l
@@ -1179,12 +1179,12 @@ fix_mem_constants:
#ifdef CONFIG_GEMINI
.globl __secondary_start_gemini
__secondary_start_gemini:
- mfspr r4,HID0
+ mfspr r4,SPRN_HID0
ori r4,r4,HID0_ICFI
li r3,0
ori r3,r3,HID0_ICE
andc r4,r4,r3
- mtspr HID0,r4
+ mtspr SPRN_HID0,r4
sync
bl gemini_prom_init
b __secondary_start
@@ -1255,17 +1255,17 @@ __secondary_start:
tophys(r4,r2)
addi r4,r4,THREAD /* phys address of our thread_struct */
CLR_TOP32(r4)
- mtspr SPRG3,r4
+ mtspr SPRN_SPRG3,r4
li r3,0
- mtspr SPRG2,r3 /* 0 => not in RTAS */
+ mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */
/* enable MMU and jump to start_secondary */
li r4,MSR_KERNEL
FIX_SRR1(r4,r5)
lis r3,start_secondary@h
ori r3,r3,start_secondary@l
- mtspr SRR0,r3
- mtspr SRR1,r4
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
SYNC
RFI
#endif /* CONFIG_SMP */
@@ -1301,7 +1301,7 @@ load_up_mmu:
lis r6,_SDR1@ha
tophys(r6,r6)
lwz r6,_SDR1@l(r6)
- mtspr SDR1,r6
+ mtspr SPRN_SDR1,r6
#ifdef CONFIG_PPC64BRIDGE
/* clear the ASR so we only use the pseudo-segment registers. */
li r6,0
@@ -1343,9 +1343,9 @@ start_here:
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
CLR_TOP32(r4)
- mtspr SPRG3,r4
+ mtspr SPRN_SPRG3,r4
li r3,0
- mtspr SPRG2,r3 /* 0 => not in RTAS */
+ mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */
/* stack */
lis r1,init_thread_union@ha
@@ -1387,8 +1387,8 @@ start_here:
tophys(r4,r4)
li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
FIX_SRR1(r3,r5)
- mtspr SRR0,r4
- mtspr SRR1,r3
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r3
SYNC
RFI
/* Load up the kernel context */
@@ -1413,8 +1413,8 @@ start_here:
FIX_SRR1(r4,r5)
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
- mtspr SRR0,r3
- mtspr SRR1,r4
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
SYNC
RFI
@@ -1461,28 +1461,28 @@ _GLOBAL(set_context)
*/
clear_bats:
li r10,0
- mfspr r9,PVR
+ mfspr r9,SPRN_PVR
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
cmpwi r9, 1
beq 1f
- mtspr DBAT0U,r10
- mtspr DBAT0L,r10
- mtspr DBAT1U,r10
- mtspr DBAT1L,r10
- mtspr DBAT2U,r10
- mtspr DBAT2L,r10
- mtspr DBAT3U,r10
- mtspr DBAT3L,r10
+ mtspr SPRN_DBAT0U,r10
+ mtspr SPRN_DBAT0L,r10
+ mtspr SPRN_DBAT1U,r10
+ mtspr SPRN_DBAT1L,r10
+ mtspr SPRN_DBAT2U,r10
+ mtspr SPRN_DBAT2L,r10
+ mtspr SPRN_DBAT3U,r10
+ mtspr SPRN_DBAT3L,r10
1:
- mtspr IBAT0U,r10
- mtspr IBAT0L,r10
- mtspr IBAT1U,r10
- mtspr IBAT1L,r10
- mtspr IBAT2U,r10
- mtspr IBAT2L,r10
- mtspr IBAT3U,r10
- mtspr IBAT3L,r10
+ mtspr SPRN_IBAT0U,r10
+ mtspr SPRN_IBAT0L,r10
+ mtspr SPRN_IBAT1U,r10
+ mtspr SPRN_IBAT1L,r10
+ mtspr SPRN_IBAT2U,r10
+ mtspr SPRN_IBAT2L,r10
+ mtspr SPRN_IBAT3U,r10
+ mtspr SPRN_IBAT3L,r10
BEGIN_FTR_SECTION
/* Here's a tweak: at this point, CPU setup have
* not been called yet, so HIGH_BAT_EN may not be
@@ -1523,8 +1523,8 @@ mmu_off:
andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
beqlr
andc r3,r3,r0
- mtspr SRR0,r4
- mtspr SRR1,r3
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r3
sync
RFI
@@ -1537,7 +1537,7 @@ mmu_off:
initial_bats:
lis r11,KERNELBASE@h
#ifndef CONFIG_PPC64BRIDGE
- mfspr r9,PVR
+ mfspr r9,SPRN_PVR
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
cmpwi 0,r9,1
bne 4f
@@ -1545,10 +1545,10 @@ initial_bats:
li r8,0x7f /* valid, block length = 8MB */
oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
- mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
- mtspr IBAT0L,r8 /* lower BAT register */
- mtspr IBAT1U,r9
- mtspr IBAT1L,r10
+ mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
+ mtspr SPRN_IBAT0L,r8 /* lower BAT register */
+ mtspr SPRN_IBAT1U,r9
+ mtspr SPRN_IBAT1L,r10
isync
blr
#endif /* CONFIG_PPC64BRIDGE */
@@ -1570,10 +1570,10 @@ initial_bats:
clrldi r11,r11,32
clrldi r8,r8,32
#endif /* CONFIG_PPC64BRIDGE */
- mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
- mtspr DBAT0U,r11 /* bit in upper BAT register */
- mtspr IBAT0L,r8
- mtspr IBAT0U,r11
+ mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
+ mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
+ mtspr SPRN_IBAT0L,r8
+ mtspr SPRN_IBAT0U,r11
isync
blr
@@ -1589,15 +1589,15 @@ setup_disp_bat:
addi r8,r8,disp_BAT@l
lwz r11,0(r8)
lwz r8,4(r8)
- mfspr r9,PVR
+ mfspr r9,SPRN_PVR
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
cmpwi 0,r9,1
beq 1f
- mtspr DBAT3L,r8
- mtspr DBAT3U,r11
+ mtspr SPRN_DBAT3L,r8
+ mtspr SPRN_DBAT3U,r11
blr
-1: mtspr IBAT3L,r8
- mtspr IBAT3U,r11
+1: mtspr SPRN_IBAT3L,r8
+ mtspr SPRN_IBAT3U,r11
blr
#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
@@ -1612,7 +1612,7 @@ setup_disp_bat:
initial_mm_power4:
addis r14,r3,_SDR1@ha /* get the value from _SDR1 */
lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */
- mtspr SDR1,r14
+ mtspr SPRN_SDR1,r14
slbia
lis r4,0x2000 /* set pseudo-segment reg 12 */
ori r5,r4,0x0ccc
@@ -1649,18 +1649,18 @@ m8260_gorom:
sync
mtmsr r0
sync
- mfspr r11, HID0
+ mfspr r11, SPRN_HID0
lis r10, 0
ori r10,r10,HID0_ICE|HID0_DCE
andc r11, r11, r10
- mtspr HID0, r11
+ mtspr SPRN_HID0, r11
isync
li r5, MSR_ME|MSR_RI
lis r6,2f@h
addis r6,r6,-KERNELBASE@h
ori r6,r6,2f@l
- mtspr SRR0,r6
- mtspr SRR1,r5
+ mtspr SPRN_SRR0,r6
+ mtspr SPRN_SRR1,r5
isync
sync
rfi
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 1dc13796973b..e61f3be049fe 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -162,10 +162,10 @@ skpinv: addi r4,r4,1 /* Increment */
/* Force context change */
mfmsr r0
- mtspr SRR1, r0
+ mtspr SPRN_SRR1, r0
lis r0,3f@h
ori r0,r0,3f@l
- mtspr SRR0,r0
+ mtspr SPRN_SRR0,r0
sync
rfi
@@ -238,7 +238,7 @@ skpinv: addi r4,r4,1 /* Increment */
/* ptr to current thread */
addi r4,r2,THREAD /* init task's THREAD */
- mtspr SPRG3,r4
+ mtspr SPRN_SPRG3,r4
/* stack */
lis r1,init_thread_union@h
@@ -274,8 +274,8 @@ skpinv: addi r4,r4,1 /* Increment */
ori r4,r4,start_kernel@l
lis r3,MSR_KERNEL@h
ori r3,r3,MSR_KERNEL@l
- mtspr SRR0,r4
- mtspr SRR1,r3
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r3
rfi /* change context and jump to start_kernel */
/*
@@ -308,12 +308,12 @@ interrupt_base:
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
- mtspr SPRG0, r10 /* Save some working registers */
- mtspr SPRG1, r11
- mtspr SPRG4W, r12
- mtspr SPRG5W, r13
+ mtspr SPRN_SPRG0, r10 /* Save some working registers */
+ mtspr SPRN_SPRG1, r11
+ mtspr SPRN_SPRG4W, r12
+ mtspr SPRN_SPRG5W, r13
mfcr r11
- mtspr SPRG7W, r11
+ mtspr SPRN_SPRG7W, r11
/*
* Check if it was a store fault, if not then bail
@@ -342,7 +342,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
- mfspr r11,SPRG3
+ mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
/* Load PID into MMUCR TID */
@@ -390,13 +390,13 @@ interrupt_base:
/* Done...restore registers and get out of here.
*/
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
rfi /* Force context change */
2:
@@ -404,13 +404,13 @@ interrupt_base:
* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
b data_access
/* Instruction Storage Interrupt */
@@ -449,12 +449,12 @@ interrupt_base:
/* Data TLB Error Interrupt */
START_EXCEPTION(DataTLBError)
- mtspr SPRG0, r10 /* Save some working registers */
- mtspr SPRG1, r11
- mtspr SPRG4W, r12
- mtspr SPRG5W, r13
+ mtspr SPRN_SPRG0, r10 /* Save some working registers */
+ mtspr SPRN_SPRG1, r11
+ mtspr SPRN_SPRG4W, r12
+ mtspr SPRN_SPRG5W, r13
mfcr r11
- mtspr SPRG7W, r11
+ mtspr SPRN_SPRG7W, r11
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -472,7 +472,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
- mfspr r11,SPRG3
+ mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
/* Load PID into MMUCR TID */
@@ -503,12 +503,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
b data_access
/* Instruction TLB Error Interrupt */
@@ -518,13 +518,13 @@ interrupt_base:
* to a different point.
*/
START_EXCEPTION(InstructionTLBError)
- mtspr SPRG0, r10 /* Save some working registers */
- mtspr SPRG1, r11
- mtspr SPRG4W, r12
- mtspr SPRG5W, r13
+ mtspr SPRN_SPRG0, r10 /* Save some working registers */
+ mtspr SPRN_SPRG1, r11
+ mtspr SPRN_SPRG4W, r12
+ mtspr SPRN_SPRG5W, r13
mfcr r11
- mtspr SPRG7W, r11
- mfspr r10, SRR0 /* Get faulting address */
+ mtspr SPRN_SPRG7W, r11
+ mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -541,7 +541,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
- mfspr r11,SPRG3
+ mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
/* Load PID into MMUCR TID */
@@ -572,12 +572,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
b InstructionStorage
/* Debug Interrupt */
@@ -661,12 +661,12 @@ finish_tlb_load:
/* Done...restore registers and get out of here.
*/
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
rfi /* Force context change */
/*
diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S
index f2b0f9f1bb70..dd23e90ac816 100644
--- a/arch/ppc/kernel/head_4xx.S
+++ b/arch/ppc/kernel/head_4xx.S
@@ -77,10 +77,10 @@ _GLOBAL(_start)
turn_on_mmu:
lis r0,MSR_KERNEL@h
ori r0,r0,MSR_KERNEL@l
- mtspr SRR1,r0
+ mtspr SPRN_SRR1,r0
lis r0,start_here@h
ori r0,r0,start_here@l
- mtspr SRR0,r0
+ mtspr SPRN_SRR0,r0
SYNC
rfi /* enables MMU */
b . /* prevent prefetch past rfi */
@@ -130,7 +130,7 @@ _GLOBAL(crit_srr1)
mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
beq 1f; \
- mfspr r1,SPRG3; /* if from user, start at top of */\
+ mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
addi r1,r1,THREAD_SIZE; \
1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
@@ -138,16 +138,16 @@ _GLOBAL(crit_srr1)
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
- mfspr r10,SPRG0; \
+ mfspr r10,SPRN_SPRG0; \
stw r10,GPR10(r11); \
- mfspr r12,SPRG1; \
+ mfspr r12,SPRN_SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
- mfspr r10,SPRG2; \
- mfspr r12,SRR0; \
+ mfspr r10,SPRN_SPRG2; \
+ mfspr r12,SPRN_SRR0; \
stw r10,GPR1(r11); \
- mfspr r9,SRR1; \
+ mfspr r9,SPRN_SRR1; \
stw r10,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
@@ -165,23 +165,23 @@ _GLOBAL(crit_srr1)
#define CRITICAL_EXCEPTION_PROLOG \
stw r10,crit_r10@l(0); /* save two registers to work with */\
stw r11,crit_r11@l(0); \
- mfspr r10,SPRG0; \
+ mfspr r10,SPRN_SPRG0; \
stw r10,crit_sprg0@l(0); \
- mfspr r10,SPRG1; \
+ mfspr r10,SPRN_SPRG1; \
stw r10,crit_sprg1@l(0); \
- mfspr r10,SPRG4; \
+ mfspr r10,SPRN_SPRG4; \
stw r10,crit_sprg4@l(0); \
- mfspr r10,SPRG5; \
+ mfspr r10,SPRN_SPRG5; \
stw r10,crit_sprg5@l(0); \
- mfspr r10,SPRG6; \
+ mfspr r10,SPRN_SPRG6; \
stw r10,crit_sprg6@l(0); \
- mfspr r10,SPRG7; \
+ mfspr r10,SPRN_SPRG7; \
stw r10,crit_sprg7@l(0); \
mfspr r10,SPRN_PID; \
stw r10,crit_pid@l(0); \
- mfspr r10,SRR0; \
+ mfspr r10,SPRN_SRR0; \
stw r10,crit_srr0@l(0); \
- mfspr r10,SRR1; \
+ mfspr r10,SPRN_SRR1; \
stw r10,crit_srr1@l(0); \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
@@ -190,7 +190,7 @@ _GLOBAL(crit_srr1)
ori r11,r11,critical_stack_top@l; \
beq 1f; \
/* COMING FROM USER MODE */ \
- mfspr r11,SPRG3; /* if from user, start at top of */\
+ mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
@@ -204,9 +204,9 @@ _GLOBAL(crit_srr1)
stw r12,_DEAR(r11); /* since they may have had stuff */\
mfspr r9,SPRN_ESR; /* in them at the point where the */\
stw r9,_ESR(r11); /* exception was taken */\
- mfspr r12,SRR2; \
+ mfspr r12,SPRN_SRR2; \
stw r1,GPR1(r11); \
- mfspr r9,SRR3; \
+ mfspr r9,SPRN_SRR3; \
stw r1,0(r11); \
tovirt(r1,r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
@@ -299,8 +299,8 @@ label:
* and exit. Otherwise, we call heavywight functions to do the work.
*/
START_EXCEPTION(0x0300, DataStorage)
- mtspr SPRG0, r10 /* Save some working registers */
- mtspr SPRG1, r11
+ mtspr SPRN_SPRG0, r10 /* Save some working registers */
+ mtspr SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
stw r12, 0(r0)
stw r9, 4(r0)
@@ -309,12 +309,12 @@ label:
stw r11, 8(r0)
stw r12, 12(r0)
#else
- mtspr SPRG4, r12
- mtspr SPRG5, r9
+ mtspr SPRN_SPRG4, r12
+ mtspr SPRN_SPRG5, r9
mfcr r11
mfspr r12, SPRN_PID
- mtspr SPRG7, r11
- mtspr SPRG6, r12
+ mtspr SPRN_SPRG7, r11
+ mtspr SPRN_SPRG6, r12
#endif
/* First, check if it was a zone fault (which means a user
@@ -341,7 +341,7 @@ label:
/* Get the PGD for the current thread.
*/
3:
- mfspr r11,SPRG3
+ mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
tophys(r11, r11)
@@ -388,15 +388,15 @@ label:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
- mfspr r12, SPRG6
- mfspr r11, SPRG7
+ mfspr r12, SPRN_SPRG6
+ mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
- mfspr r9, SPRG5
- mfspr r12, SPRG4
+ mfspr r9, SPRN_SPRG5
+ mfspr r12, SPRN_SPRG4
#endif
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
b . /* prevent prefetch past rfi */
@@ -413,15 +413,15 @@ label:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
- mfspr r12, SPRG6
- mfspr r11, SPRG7
+ mfspr r12, SPRN_SPRG6
+ mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
- mfspr r9, SPRG5
- mfspr r12, SPRG4
+ mfspr r9, SPRN_SPRG5
+ mfspr r12, SPRN_SPRG4
#endif
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
b DataAccess
/*
@@ -496,8 +496,8 @@ label:
* load TLB entries from the page table if they exist.
*/
START_EXCEPTION(0x1100, DTLBMiss)
- mtspr SPRG0, r10 /* Save some working registers */
- mtspr SPRG1, r11
+ mtspr SPRN_SPRG0, r10 /* Save some working registers */
+ mtspr SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
stw r12, 0(r0)
stw r9, 4(r0)
@@ -506,12 +506,12 @@ label:
stw r11, 8(r0)
stw r12, 12(r0)
#else
- mtspr SPRG4, r12
- mtspr SPRG5, r9
+ mtspr SPRN_SPRG4, r12
+ mtspr SPRN_SPRG5, r9
mfcr r11
mfspr r12, SPRN_PID
- mtspr SPRG7, r11
- mtspr SPRG6, r12
+ mtspr SPRN_SPRG7, r11
+ mtspr SPRN_SPRG6, r12
#endif
mfspr r10, SPRN_DEAR /* Get faulting address */
@@ -529,7 +529,7 @@ label:
/* Get the PGD for the current thread.
*/
3:
- mfspr r11,SPRG3
+ mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
tophys(r11, r11)
@@ -579,15 +579,15 @@ label:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
- mfspr r12, SPRG6
- mfspr r11, SPRG7
+ mfspr r12, SPRN_SPRG6
+ mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
- mfspr r9, SPRG5
- mfspr r12, SPRG4
+ mfspr r9, SPRN_SPRG5
+ mfspr r12, SPRN_SPRG4
#endif
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
b DataAccess
/* 0x1200 - Instruction TLB Miss Exception
@@ -595,8 +595,8 @@ label:
* registers and bailout to a different point.
*/
START_EXCEPTION(0x1200, ITLBMiss)
- mtspr SPRG0, r10 /* Save some working registers */
- mtspr SPRG1, r11
+ mtspr SPRN_SPRG0, r10 /* Save some working registers */
+ mtspr SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
stw r12, 0(r0)
stw r9, 4(r0)
@@ -605,14 +605,14 @@ label:
stw r11, 8(r0)
stw r12, 12(r0)
#else
- mtspr SPRG4, r12
- mtspr SPRG5, r9
+ mtspr SPRN_SPRG4, r12
+ mtspr SPRN_SPRG5, r9
mfcr r11
mfspr r12, SPRN_PID
- mtspr SPRG7, r11
- mtspr SPRG6, r12
+ mtspr SPRN_SPRG7, r11
+ mtspr SPRN_SPRG6, r12
#endif
- mfspr r10, SRR0 /* Get faulting address */
+ mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -628,7 +628,7 @@ label:
/* Get the PGD for the current thread.
*/
3:
- mfspr r11,SPRG3
+ mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
tophys(r11, r11)
@@ -678,15 +678,15 @@ label:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
- mfspr r12, SPRG6
- mfspr r11, SPRG7
+ mfspr r12, SPRN_SPRG6
+ mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
- mfspr r9, SPRG5
- mfspr r12, SPRG4
+ mfspr r9, SPRN_SPRG5
+ mfspr r12, SPRN_SPRG4
#endif
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
b InstructionAccess
EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE)
@@ -754,8 +754,8 @@ label:
lwz r0,GPR0(r11)
lwz r1,GPR1(r11)
mtcrf 0x80,r10
- mtspr SRR2,r12
- mtspr SRR3,r9
+ mtspr SPRN_SRR2,r12
+ mtspr SPRN_SRR3,r9
lwz r9,GPR9(r11)
lwz r12,GPR12(r11)
lwz r10,crit_r10@l(0)
@@ -831,15 +831,15 @@ finish_tlb_load:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
- mfspr r12, SPRG6
- mfspr r11, SPRG7
+ mfspr r12, SPRN_SPRG6
+ mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
- mfspr r9, SPRG5
- mfspr r12, SPRG4
+ mfspr r9, SPRN_SPRG5
+ mfspr r12, SPRN_SPRG4
#endif
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
b . /* prevent prefetch past rfi */
@@ -863,7 +863,7 @@ start_here:
/* ptr to phys current thread */
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
- mtspr SPRG3,r4
+ mtspr SPRN_SPRG3,r4
/* stack */
lis r1,init_thread_union@ha
@@ -894,8 +894,8 @@ start_here:
tophys(r4,r4)
lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
- mtspr SRR0,r4
- mtspr SRR1,r3
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r3
rfi
b . /* prevent prefetch past rfi */
@@ -920,8 +920,8 @@ start_here:
ori r4,r4,MSR_KERNEL@l
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
- mtspr SRR0,r3
- mtspr SRR1,r4
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
rfi /* enable MMU and jump to start_kernel */
b . /* prevent prefetch past rfi */
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
index db3b530941a8..1917ed5aba7a 100644
--- a/arch/ppc/kernel/head_8xx.S
+++ b/arch/ppc/kernel/head_8xx.S
@@ -101,10 +101,10 @@ __start:
turn_on_mmu:
mfmsr r0
ori r0,r0,MSR_DR|MSR_IR
- mtspr SRR1,r0
+ mtspr SPRN_SRR1,r0
lis r0,start_here@h
ori r0,r0,start_here@l
- mtspr SRR0,r0
+ mtspr SPRN_SRR0,r0
SYNC
rfi /* enables MMU */
@@ -115,18 +115,18 @@ turn_on_mmu:
* task's thread_struct.
*/
#define EXCEPTION_PROLOG \
- mtspr SPRG0,r10; \
- mtspr SPRG1,r11; \
+ mtspr SPRN_SPRG0,r10; \
+ mtspr SPRN_SPRG1,r11; \
mfcr r10; \
EXCEPTION_PROLOG_1; \
EXCEPTION_PROLOG_2
#define EXCEPTION_PROLOG_1 \
- mfspr r11,SRR1; /* check whether user or kernel */ \
+ mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
andi. r11,r11,MSR_PR; \
tophys(r11,r1); /* use tophys(r1) if kernel */ \
beq 1f; \
- mfspr r11,SPRG3; \
+ mfspr r11,SPRN_SPRG3; \
lwz r11,THREAD_INFO-THREAD(r11); \
addi r11,r11,THREAD_SIZE; \
tophys(r11,r11); \
@@ -138,14 +138,14 @@ turn_on_mmu:
stw r10,_CCR(r11); /* save registers */ \
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
- mfspr r10,SPRG0; \
+ mfspr r10,SPRN_SPRG0; \
stw r10,GPR10(r11); \
- mfspr r12,SPRG1; \
+ mfspr r12,SPRN_SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
- mfspr r12,SRR0; \
- mfspr r9,SRR1; \
+ mfspr r12,SPRN_SRR0; \
+ mfspr r9,SPRN_SRR1; \
stw r1,GPR1(r11); \
stw r1,0(r11); \
tovirt(r1,r11); /* set new kernel sp */ \
@@ -209,9 +209,9 @@ i##n: \
. = 0x200
MachineCheck:
EXCEPTION_PROLOG
- mfspr r4,DAR
+ mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
- mfspr r5,DSISR
+ mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_STD(0x200, MachineCheckException)
@@ -223,10 +223,10 @@ MachineCheck:
. = 0x300
DataAccess:
EXCEPTION_PROLOG
- mfspr r10,DSISR
+ mfspr r10,SPRN_DSISR
stw r10,_DSISR(r11)
mr r5,r10
- mfspr r4,DAR
+ mfspr r4,SPRN_DAR
EXC_XFER_EE_LITE(0x300, handle_page_fault)
/* Instruction access exception.
@@ -247,9 +247,9 @@ InstructionAccess:
. = 0x600
Alignment:
EXCEPTION_PROLOG
- mfspr r4,DAR
+ mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
- mfspr r5,DSISR
+ mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE(0x600, AlignmentException)
@@ -301,14 +301,14 @@ InstructionTLBMiss:
stw r3, 8(r0)
#endif
DO_8xx_CPU6(0x3f80, r3)
- mtspr M_TW, r10 /* Save a couple of working registers */
+ mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
stw r10, 0(r0)
stw r11, 4(r0)
- mfspr r10, SRR0 /* Get effective address of fault */
+ mfspr r10, SPRN_SRR0 /* Get effective address of fault */
DO_8xx_CPU6(0x3780, r3)
- mtspr MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
- mfspr r10, M_TWB /* Get level 1 table entry address */
+ mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
+ mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -328,10 +328,10 @@ InstructionTLBMiss:
*/
ori r11,r11,1 /* Set valid bit */
DO_8xx_CPU6(0x2b80, r3)
- mtspr MI_TWC, r11 /* Set segment attributes */
+ mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
DO_8xx_CPU6(0x3b80, r3)
- mtspr MD_TWC, r11 /* Load pte table base address */
- mfspr r11, MD_TWC /* ....and get the pte address */
+ mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
+ mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r11) /* Get the pte */
ori r10, r10, _PAGE_ACCESSED
@@ -346,9 +346,9 @@ InstructionTLBMiss:
2: li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x2d80, r3)
- mtspr MI_RPN, r10 /* Update TLB entry */
+ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
- mfspr r10, M_TW /* Restore registers */
+ mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
@@ -363,11 +363,11 @@ DataStoreTLBMiss:
stw r3, 8(r0)
#endif
DO_8xx_CPU6(0x3f80, r3)
- mtspr M_TW, r10 /* Save a couple of working registers */
+ mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
stw r10, 0(r0)
stw r11, 4(r0)
- mfspr r10, M_TWB /* Get level 1 table entry address */
+ mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -386,8 +386,8 @@ DataStoreTLBMiss:
*/
ori r11, r11, 1 /* Set valid bit in physical L2 page */
DO_8xx_CPU6(0x3b80, r3)
- mtspr MD_TWC, r11 /* Load pte table base address */
- mfspr r10, MD_TWC /* ....and get the pte address */
+ mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
+ mfspr r10, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r10) /* Get the pte */
/* Insert the Guarded flag into the TWC from the Linux PTE.
@@ -398,9 +398,9 @@ DataStoreTLBMiss:
*/
rlwimi r11, r10, 0, 27, 27
DO_8xx_CPU6(0x3b80, r3)
- mtspr MD_TWC, r11
+ mtspr SPRN_MD_TWC, r11
- mfspr r11, MD_TWC /* get the pte address again */
+ mfspr r11, SPRN_MD_TWC /* get the pte address again */
ori r10, r10, _PAGE_ACCESSED
stw r10, 0(r11)
@@ -413,9 +413,9 @@ DataStoreTLBMiss:
2: li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x3d80, r3)
- mtspr MD_RPN, r10 /* Update TLB entry */
+ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
- mfspr r10, M_TW /* Restore registers */
+ mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
@@ -446,14 +446,14 @@ DataTLBError:
stw r3, 8(r0)
#endif
DO_8xx_CPU6(0x3f80, r3)
- mtspr M_TW, r10 /* Save a couple of working registers */
+ mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
stw r10, 0(r0)
stw r11, 4(r0)
/* First, make sure this was a store operation.
*/
- mfspr r10, DSISR
+ mfspr r10, SPRN_DSISR
andis. r11, r10, 0x0200 /* If set, indicates store op */
beq 2f
@@ -473,15 +473,15 @@ DataTLBError:
* are initialized in mapin_ram(). This will avoid the problem,
* assuming we only use the dcbi instruction on kernel addresses.
*/
- mfspr r10, DAR
+ mfspr r10, SPRN_DAR
rlwinm r11, r10, 0, 0, 19
ori r11, r11, MD_EVALID
- mfspr r10, M_CASID
+ mfspr r10, SPRN_M_CASID
rlwimi r11, r10, 0, 28, 31
DO_8xx_CPU6(0x3780, r3)
- mtspr MD_EPN, r11
+ mtspr SPRN_MD_EPN, r11
- mfspr r10, M_TWB /* Get level 1 table entry address */
+ mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -500,8 +500,8 @@ DataTLBError:
*/
ori r11, r11, 1 /* Set valid bit in physical L2 page */
DO_8xx_CPU6(0x3b80, r3)
- mtspr MD_TWC, r11 /* Load pte table base address */
- mfspr r11, MD_TWC /* ....and get the pte address */
+ mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
+ mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r11) /* Get the pte */
andi. r11, r10, _PAGE_RW /* Is it writeable? */
@@ -510,7 +510,7 @@ DataTLBError:
/* Update 'changed', among others.
*/
ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
- mfspr r11, MD_TWC /* Get pte address again */
+ mfspr r11, SPRN_MD_TWC /* Get pte address again */
stw r10, 0(r11) /* and update pte in table */
/* The Linux PTE won't go exactly into the MMU TLB.
@@ -522,9 +522,9 @@ DataTLBError:
li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x3d80, r3)
- mtspr MD_RPN, r10 /* Update TLB entry */
+ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
- mfspr r10, M_TW /* Restore registers */
+ mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
@@ -533,7 +533,7 @@ DataTLBError:
#endif
rfi
2:
- mfspr r10, M_TW /* Restore registers */
+ mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
@@ -576,9 +576,9 @@ start_here:
/* ptr to phys current thread */
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
- mtspr SPRG3,r4
+ mtspr SPRN_SPRG3,r4
li r3,0
- mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
+ mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */
/* stack */
lis r1,init_thread_union@ha
@@ -619,13 +619,13 @@ start_here:
stw r3, 12(r4)
lwz r3, 12(r4)
#endif
- mtspr M_TWB, r6
+ mtspr SPRN_M_TWB, r6
lis r4,2f@h
ori r4,r4,2f@l
tophys(r4,r4)
li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
- mtspr SRR0,r4
- mtspr SRR1,r3
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r3
rfi
/* Load up the kernel context */
2:
@@ -647,8 +647,8 @@ start_here:
li r4,MSR_KERNEL
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
- mtspr SRR0,r3
- mtspr SRR1,r4
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
rfi /* enable MMU and jump to start_kernel */
/* Set up the initial MMU state so we can do the first level of
@@ -667,7 +667,7 @@ initial_mmu:
#else
li r8, 0
#endif
- mtspr MI_CTR, r8 /* Set instruction MMU control */
+ mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
#ifdef CONFIG_PIN_TLB
lis r10, (MD_RSV4I | MD_RESETVAL)@h
@@ -679,7 +679,7 @@ initial_mmu:
#ifndef CONFIG_8xx_COPYBACK
oris r10, r10, MD_WTDEF@h
#endif
- mtspr MD_CTR, r10 /* Set data TLB control */
+ mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
/* Now map the lower 8 Meg into the TLBs. For this quick hack,
* we can load the instruction and data TLB registers with the
@@ -687,61 +687,61 @@ initial_mmu:
*/
lis r8, KERNELBASE@h /* Create vaddr for TLB */
ori r8, r8, MI_EVALID /* Mark it valid */
- mtspr MI_EPN, r8
- mtspr MD_EPN, r8
+ mtspr SPRN_MI_EPN, r8
+ mtspr SPRN_MD_EPN, r8
li r8, MI_PS8MEG /* Set 8M byte page */
ori r8, r8, MI_SVALID /* Make it valid */
- mtspr MI_TWC, r8
- mtspr MD_TWC, r8
+ mtspr SPRN_MI_TWC, r8
+ mtspr SPRN_MD_TWC, r8
li r8, MI_BOOTINIT /* Create RPN for address 0 */
- mtspr MI_RPN, r8 /* Store TLB entry */
- mtspr MD_RPN, r8
+ mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
+ mtspr SPRN_MD_RPN, r8
lis r8, MI_Kp@h /* Set the protection mode */
- mtspr MI_AP, r8
- mtspr MD_AP, r8
+ mtspr SPRN_MI_AP, r8
+ mtspr SPRN_MD_AP, r8
/* Map another 8 MByte at the IMMR to get the processor
* internal registers (among other things).
*/
#ifdef CONFIG_PIN_TLB
addi r10, r10, 0x0100
- mtspr MD_CTR, r10
+ mtspr SPRN_MD_CTR, r10
#endif
mfspr r9, 638 /* Get current IMMR */
andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
mr r8, r9 /* Create vaddr for TLB */
ori r8, r8, MD_EVALID /* Mark it valid */
- mtspr MD_EPN, r8
+ mtspr SPRN_MD_EPN, r8
li r8, MD_PS8MEG /* Set 8M byte page */
ori r8, r8, MD_SVALID /* Make it valid */
- mtspr MD_TWC, r8
+ mtspr SPRN_MD_TWC, r8
mr r8, r9 /* Create paddr for TLB */
ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
- mtspr MD_RPN, r8
+ mtspr SPRN_MD_RPN, r8
#ifdef CONFIG_PIN_TLB
/* Map two more 8M kernel data pages.
*/
addi r10, r10, 0x0100
- mtspr MD_CTR, r10
+ mtspr SPRN_MD_CTR, r10
lis r8, KERNELBASE@h /* Create vaddr for TLB */
addis r8, r8, 0x0080 /* Add 8M */
ori r8, r8, MI_EVALID /* Mark it valid */
- mtspr MD_EPN, r8
+ mtspr SPRN_MD_EPN, r8
li r9, MI_PS8MEG /* Set 8M byte page */
ori r9, r9, MI_SVALID /* Make it valid */
- mtspr MD_TWC, r9
+ mtspr SPRN_MD_TWC, r9
li r11, MI_BOOTINIT /* Create RPN for address 0 */
addis r11, r11, 0x0080 /* Add 8M */
- mtspr MD_RPN, r8
+ mtspr SPRN_MD_RPN, r8
addis r8, r8, 0x0080 /* Add 8M */
- mtspr MD_EPN, r8
- mtspr MD_TWC, r9
+ mtspr SPRN_MD_EPN, r8
+ mtspr SPRN_MD_TWC, r9
addis r11, r11, 0x0080 /* Add 8M */
- mtspr MD_RPN, r8
+ mtspr SPRN_MD_RPN, r8
#endif
/* Since the cache is enabled according to the information we
@@ -749,20 +749,20 @@ initial_mmu:
* We should probably check/set other modes....later.
*/
lis r8, IDC_INVALL@h
- mtspr IC_CST, r8
- mtspr DC_CST, r8
+ mtspr SPRN_IC_CST, r8
+ mtspr SPRN_DC_CST, r8
lis r8, IDC_ENABLE@h
- mtspr IC_CST, r8
+ mtspr SPRN_IC_CST, r8
#ifdef CONFIG_8xx_COPYBACK
- mtspr DC_CST, r8
+ mtspr SPRN_DC_CST, r8
#else
/* For a debug option, I left this here to easily enable
* the write through cache mode
*/
lis r8, DC_SFWT@h
- mtspr DC_CST, r8
+ mtspr SPRN_DC_CST, r8
lis r8, IDC_ENABLE@h
- mtspr DC_CST, r8
+ mtspr SPRN_DC_CST, r8
#endif
blr
@@ -793,15 +793,15 @@ _GLOBAL(set_context)
li r7, 0x3980
stw r7, 12(r6)
lwz r7, 12(r6)
- mtspr M_TWB, r4 /* Update MMU base address */
+ mtspr SPRN_M_TWB, r4 /* Update MMU base address */
li r7, 0x3380
stw r7, 12(r6)
lwz r7, 12(r6)
- mtspr M_CASID, r3 /* Update context */
+ mtspr SPRN_M_CASID, r3 /* Update context */
#else
- mtspr M_CASID,r3 /* Update context */
+ mtspr SPRN_M_CASID,r3 /* Update context */
tophys (r4, r4)
- mtspr M_TWB, r4 /* and pgd */
+ mtspr SPRN_M_TWB, r4 /* and pgd */
#endif
SYNC
blr
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index d63de6fc2b8a..b69813a9f9e9 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -18,7 +18,7 @@
mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
beq 1f; \
- mfspr r1,SPRG3; /* if from user, start at top of */\
+ mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
addi r1,r1,THREAD_SIZE; \
1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
@@ -26,16 +26,16 @@
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
- mfspr r10,SPRG0; \
+ mfspr r10,SPRN_SPRG0; \
stw r10,GPR10(r11); \
- mfspr r12,SPRG1; \
+ mfspr r12,SPRN_SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
- mfspr r10,SPRG4R; \
- mfspr r12,SRR0; \
+ mfspr r10,SPRN_SPRG4R; \
+ mfspr r12,SPRN_SRR0; \
stw r10,GPR1(r11); \
- mfspr r9,SRR1; \
+ mfspr r9,SPRN_SRR1; \
stw r10,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
@@ -55,27 +55,27 @@
* since the MMU is always on and the save area is offset from KERNELBASE.
*/
#define CRITICAL_EXCEPTION_PROLOG \
- mtspr SPRG2,r8; /* SPRG2 only used in criticals */ \
+ mtspr SPRN_SPRG2,r8; /* SPRG2 only used in criticals */ \
lis r8,crit_save@ha; \
stw r10,crit_r10@l(r8); \
stw r11,crit_r11@l(r8); \
- mfspr r10,SPRG0; \
+ mfspr r10,SPRN_SPRG0; \
stw r10,crit_sprg0@l(r8); \
- mfspr r10,SPRG1; \
+ mfspr r10,SPRN_SPRG1; \
stw r10,crit_sprg1@l(r8); \
- mfspr r10,SPRG4R; \
+ mfspr r10,SPRN_SPRG4R; \
stw r10,crit_sprg4@l(r8); \
- mfspr r10,SPRG5R; \
+ mfspr r10,SPRN_SPRG5R; \
stw r10,crit_sprg5@l(r8); \
- mfspr r10,SPRG7R; \
+ mfspr r10,SPRN_SPRG7R; \
stw r10,crit_sprg7@l(r8); \
mfspr r10,SPRN_PID; \
stw r10,crit_pid@l(r8); \
- mfspr r10,SRR0; \
+ mfspr r10,SPRN_SRR0; \
stw r10,crit_srr0@l(r8); \
- mfspr r10,SRR1; \
+ mfspr r10,SPRN_SRR1; \
stw r10,crit_srr1@l(r8); \
- mfspr r8,SPRG2; /* SPRG2 only used in criticals */ \
+ mfspr r8,SPRN_SPRG2; /* SPRG2 only used in criticals */ \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_CSRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
@@ -83,7 +83,7 @@
ori r11,r11,critical_stack_top@l; \
beq 1f; \
/* COMING FROM USER MODE */ \
- mfspr r11,SPRG3; /* if from user, start at top of */\
+ mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
@@ -96,9 +96,9 @@
stw r12,_DEAR(r11); /* since they may have had stuff */\
mfspr r9,SPRN_ESR; /* in them at the point where the */\
stw r9,_ESR(r11); /* exception was taken */\
- mfspr r12,CSRR0; \
+ mfspr r12,SPRN_CSRR0; \
stw r1,GPR1(r11); \
- mfspr r9,CSRR1; \
+ mfspr r9,SPRN_CSRR1; \
stw r1,0(r11); \
tovirt(r1,r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
@@ -116,31 +116,31 @@
* is always on and the save area is offset from KERNELBASE.
*/
#define MCHECK_EXCEPTION_PROLOG \
- mtspr SPRG6W,r8; /* SPRG6 used in machine checks */ \
+ mtspr SPRN_SPRG6W,r8; /* SPRG6 used in machine checks */ \
lis r8,mcheck_save@ha; \
stw r10,mcheck_r10@l(r8); \
stw r11,mcheck_r11@l(r8); \
- mfspr r10,SPRG0; \
+ mfspr r10,SPRN_SPRG0; \
stw r10,mcheck_sprg0@l(r8); \
- mfspr r10,SPRG1; \
+ mfspr r10,SPRN_SPRG1; \
stw r10,mcheck_sprg1@l(r8); \
- mfspr r10,SPRG4R; \
+ mfspr r10,SPRN_SPRG4R; \
stw r10,mcheck_sprg4@l(r8); \
- mfspr r10,SPRG5R; \
+ mfspr r10,SPRN_SPRG5R; \
stw r10,mcheck_sprg5@l(r8); \
- mfspr r10,SPRG7R; \
+ mfspr r10,SPRN_SPRG7R; \
stw r10,mcheck_sprg7@l(r8); \
mfspr r10,SPRN_PID; \
stw r10,mcheck_pid@l(r8); \
- mfspr r10,SRR0; \
+ mfspr r10,SPRN_SRR0; \
stw r10,mcheck_srr0@l(r8); \
- mfspr r10,SRR1; \
+ mfspr r10,SPRN_SRR1; \
stw r10,mcheck_srr1@l(r8); \
- mfspr r10,CSRR0; \
+ mfspr r10,SPRN_CSRR0; \
stw r10,mcheck_csrr0@l(r8); \
- mfspr r10,CSRR1; \
+ mfspr r10,SPRN_CSRR1; \
stw r10,mcheck_csrr1@l(r8); \
- mfspr r8,SPRG6R; /* SPRG6 used in machine checks */ \
+ mfspr r8,SPRN_SPRG6R; /* SPRG6 used in machine checks */ \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_MCSRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
@@ -148,7 +148,7 @@
ori r11,r11,mcheck_stack_top@l; \
beq 1f; \
/* COMING FROM USER MODE */ \
- mfspr r11,SPRG3; /* if from user, start at top of */\
+ mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
@@ -161,9 +161,9 @@
stw r12,_DEAR(r11); /* since they may have had stuff */\
mfspr r9,SPRN_ESR; /* in them at the point where the */\
stw r9,_ESR(r11); /* exception was taken */\
- mfspr r12,MCSRR0; \
+ mfspr r12,SPRN_MCSRR0; \
stw r1,GPR1(r11); \
- mfspr r9,MCSRR1; \
+ mfspr r9,SPRN_MCSRR1; \
stw r1,0(r11); \
tovirt(r1,r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
@@ -285,15 +285,15 @@ label:
lwz r0,GPR0(r11); \
lwz r1,GPR1(r11); \
mtcrf 0x80,r10; \
- mtspr CSRR0,r12; \
- mtspr CSRR1,r9; \
+ mtspr SPRN_CSRR0,r12; \
+ mtspr SPRN_CSRR1,r9; \
lwz r9,GPR9(r11); \
lwz r12,GPR12(r11); \
- mtspr SPRG2,r8; /* SPRG2 only used in criticals */ \
+ mtspr SPRN_SPRG2,r8; /* SPRG2 only used in criticals */ \
lis r8,crit_save@ha; \
lwz r10,crit_r10@l(r8); \
lwz r11,crit_r11@l(r8); \
- mfspr r8,SPRG2; \
+ mfspr r8,SPRN_SPRG2; \
\
rfci; \
b .; \
diff --git a/arch/ppc/kernel/head_e500.S b/arch/ppc/kernel/head_e500.S
index 47a35214f3d1..d83353d5eab5 100644
--- a/arch/ppc/kernel/head_e500.S
+++ b/arch/ppc/kernel/head_e500.S
@@ -188,8 +188,8 @@ skpinv: addi r6,r6,1 /* Increment */
1: mflr r9
rlwimi r7,r9,0,20,31
addi r7,r7,24
- mtspr SRR0,r7
- mtspr SRR1,r6
+ mtspr SPRN_SRR0,r7
+ mtspr SPRN_SRR1,r6
rfi
/* 4. Clear out PIDs & Search info */
@@ -236,8 +236,8 @@ skpinv: addi r6,r6,1 /* Increment */
1: mflr r9
rlwimi r6,r9,0,20,31
addi r6,r6,24
- mtspr SRR0,r6
- mtspr SRR1,r7
+ mtspr SPRN_SRR0,r6
+ mtspr SPRN_SRR1,r7
rfi /* start execution out of TLB1[0] entry */
/* 8. Clear out the temp mapping */
@@ -302,7 +302,7 @@ skpinv: addi r6,r6,1 /* Increment */
/* ptr to current thread */
addi r4,r2,THREAD /* init task's THREAD */
- mtspr SPRG3,r4
+ mtspr SPRN_SPRG3,r4
/* stack */
lis r1,init_thread_union@h
@@ -342,8 +342,8 @@ skpinv: addi r6,r6,1 /* Increment */
ori r4,r4,start_kernel@l
lis r3,MSR_KERNEL@h
ori r3,r3,MSR_KERNEL@l
- mtspr SRR0,r4
- mtspr SRR1,r3
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r3
rfi /* change context and jump to start_kernel */
/*
@@ -372,12 +372,12 @@ interrupt_base:
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
- mtspr SPRG0, r10 /* Save some working registers */
- mtspr SPRG1, r11
- mtspr SPRG4W, r12
- mtspr SPRG5W, r13
+ mtspr SPRN_SPRG0, r10 /* Save some working registers */
+ mtspr SPRN_SPRG1, r11
+ mtspr SPRN_SPRG4W, r12
+ mtspr SPRN_SPRG5W, r13
mfcr r11
- mtspr SPRG7W, r11
+ mtspr SPRN_SPRG7W, r11
/*
* Check if it was a store fault, if not then bail
@@ -401,7 +401,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
- mfspr r11,SPRG3
+ mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
@@ -442,12 +442,12 @@ interrupt_base:
tlbwe
/* Done...restore registers and get out of here. */
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
rfi /* Force context change */
2:
@@ -455,12 +455,12 @@ interrupt_base:
* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
b data_access
/* Instruction Storage Interrupt */
@@ -499,12 +499,12 @@ interrupt_base:
/* Data TLB Error Interrupt */
START_EXCEPTION(DataTLBError)
- mtspr SPRG0, r10 /* Save some working registers */
- mtspr SPRG1, r11
- mtspr SPRG4W, r12
- mtspr SPRG5W, r13
+ mtspr SPRN_SPRG0, r10 /* Save some working registers */
+ mtspr SPRN_SPRG1, r11
+ mtspr SPRN_SPRG4W, r12
+ mtspr SPRN_SPRG5W, r13
mfcr r11
- mtspr SPRG7W, r11
+ mtspr SPRN_SPRG7W, r11
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -525,7 +525,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
- mfspr r11,SPRG3
+ mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
@@ -548,12 +548,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
b data_access
/* Instruction TLB Error Interrupt */
@@ -563,13 +563,13 @@ interrupt_base:
* to a different point.
*/
START_EXCEPTION(InstructionTLBError)
- mtspr SPRG0, r10 /* Save some working registers */
- mtspr SPRG1, r11
- mtspr SPRG4W, r12
- mtspr SPRG5W, r13
+ mtspr SPRN_SPRG0, r10 /* Save some working registers */
+ mtspr SPRN_SPRG1, r11
+ mtspr SPRN_SPRG4W, r12
+ mtspr SPRN_SPRG5W, r13
mfcr r11
- mtspr SPRG7W, r11
- mfspr r10, SRR0 /* Get faulting address */
+ mtspr SPRN_SPRG7W, r11
+ mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -589,7 +589,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
- mfspr r11,SPRG3
+ mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
@@ -613,12 +613,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
b InstructionStorage
#ifdef CONFIG_SPE
@@ -713,12 +713,12 @@ finish_tlb_load:
tlbwe
/* Done...restore registers and get out of here. */
- mfspr r11, SPRG7R
+ mfspr r11, SPRN_SPRG7R
mtcr r11
- mfspr r13, SPRG5R
- mfspr r12, SPRG4R
- mfspr r11, SPRG1
- mfspr r10, SPRG0
+ mfspr r13, SPRN_SPRG5R
+ mfspr r12, SPRN_SPRG4R
+ mfspr r11, SPRN_SPRG1
+ mfspr r10, SPRN_SPRG0
rfi /* Force context change */
#ifdef CONFIG_SPE
@@ -762,7 +762,7 @@ load_up_spe:
#endif /* CONFIG_SMP */
/* enable use of SPE after return */
oris r9,r9,MSR_SPE@h
- mfspr r5,SPRG3 /* current task's THREAD (phys) */
+ mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
li r4,1
li r10,THREAD_ACC
stw r4,THREAD_USED_SPE(r5)
@@ -781,8 +781,8 @@ load_up_spe:
lwz r10,_LINK(r11)
mtlr r10
REST_GPR(10, r11)
- mtspr SRR1,r9
- mtspr SRR0,r12
+ mtspr SPRN_SRR1,r9
+ mtspr SPRN_SRR0,r12
REST_GPR(9, r11)
REST_GPR(12, r11)
lwz r11,GPR11(r11)
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
index 8abf2ee7a0c0..c39441048266 100644
--- a/arch/ppc/kernel/l2cr.S
+++ b/arch/ppc/kernel/l2cr.S
@@ -125,14 +125,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* DPM can possibly interfere with the state machine in the processor
* that invalidates the L2 cache tags.
*/
- mfspr r8,HID0 /* Save HID0 in r8 */
+ mfspr r8,SPRN_HID0 /* Save HID0 in r8 */
rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
sync
- mtspr HID0,r4 /* Disable DPM */
+ mtspr SPRN_HID0,r4 /* Disable DPM */
sync
/* Get the current enable bit of the L2CR into r4 */
- mfspr r4,L2CR
+ mfspr r4,SPRN_L2CR
/* Tweak some bits */
rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */
@@ -186,7 +186,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
.balign L1_CACHE_LINE_SIZE
22:
sync
- mtspr L2CR,r3
+ mtspr SPRN_L2CR,r3
sync
b 23f
20:
@@ -199,27 +199,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* Perform a global invalidation */
oris r3,r3,0x0020
sync
- mtspr L2CR,r3
+ mtspr SPRN_L2CR,r3
sync
isync /* For errata */
BEGIN_FTR_SECTION
/* On the 7450, we wait for the L2I bit to clear......
*/
-10: mfspr r3,L2CR
+10: mfspr r3,SPRN_L2CR
andis. r4,r3,0x0020
bne 10b
b 11f
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
/* Wait for the invalidation to complete */
-3: mfspr r3,L2CR
+3: mfspr r3,SPRN_L2CR
rlwinm. r4,r3,0,31,31
bne 3b
11: rlwinm r3,r3,0,11,9 /* Turn off the L2I bit */
sync
- mtspr L2CR,r3
+ mtspr SPRN_L2CR,r3
sync
/* See if we need to enable the cache */
@@ -228,7 +228,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
/* Enable the cache */
oris r3,r3,0x8000
- mtspr L2CR,r3
+ mtspr SPRN_L2CR,r3
sync
4:
@@ -250,7 +250,7 @@ _GLOBAL(_get_L2CR)
/* Return the L2CR contents */
li r3,0
BEGIN_FTR_SECTION
- mfspr r3,L2CR
+ mfspr r3,SPRN_L2CR
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
blr
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 3105f54c4ed2..73f7c23b0dd4 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -216,10 +216,10 @@ _GLOBAL(low_choose_750fx_pll)
/* If switching to PLL1, disable HID0:BTIC */
cmplwi cr0,r3,0
beq 1f
- mfspr r5,HID0
+ mfspr r5,SPRN_HID0
rlwinm r5,r5,0,27,25
sync
- mtspr HID0,r5
+ mtspr SPRN_HID0,r5
isync
sync
@@ -241,10 +241,10 @@ _GLOBAL(low_choose_750fx_pll)
/* If switching to PLL0, enable HID0:BTIC */
cmplwi cr0,r3,0
bne 1f
- mfspr r5,HID0
+ mfspr r5,SPRN_HID0
ori r5,r5,HID0_BTIC
sync
- mtspr HID0,r5
+ mtspr SPRN_HID0,r5
isync
sync
@@ -579,7 +579,7 @@ _GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
isync
lis r5, IDC_INVALL@h
- mtspr IC_CST, r5
+ mtspr SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
li r3, 512
@@ -597,14 +597,14 @@ _GLOBAL(flush_instruction_cache)
ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
mtspr SPRN_L1CSR1,r3
#else
- mfspr r3,PVR
+ mfspr r3,SPRN_PVR
rlwinm r3,r3,16,16,31
cmpwi 0,r3,1
beqlr /* for 601, do nothing */
/* 603/604 processor - use invalidate-all bit in HID0 */
- mfspr r3,HID0
+ mfspr r3,SPRN_HID0
ori r3,r3,HID0_ICFI
- mtspr HID0,r3
+ mtspr SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
isync
blr
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 062836ed4751..c833304fe3d5 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -179,7 +179,7 @@ int show_cpuinfo(struct seq_file *m, void *v)
pvr = cpu_data[i].pvr;
lpj = cpu_data[i].loops_per_jiffy;
#else
- pvr = mfspr(PVR);
+ pvr = mfspr(SPRN_PVR);
lpj = loops_per_jiffy;
#endif
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 7ed8a3bb81df..f9ffa5aea32e 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -119,7 +119,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
*/
void smp_send_tlb_invalidate(int cpu)
{
- if ( PVR_VER(mfspr(PVR)) == 8 )
+ if ( PVR_VER(mfspr(SPRN_PVR)) == 8 )
smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0);
}
@@ -283,7 +283,7 @@ static void __devinit smp_store_cpu_info(int id)
/* assume bogomips are same for everything */
c->loops_per_jiffy = loops_per_jiffy;
- c->pvr = mfspr(PVR);
+ c->pvr = mfspr(SPRN_PVR);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 8ce9af76ffa6..916a181055c7 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -486,7 +486,7 @@ static int emulate_instruction(struct pt_regs *regs)
*/
if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
rd = (instword >> 21) & 0x1f;
- regs->gpr[rd] = mfspr(PVR);
+ regs->gpr[rd] = mfspr(SPRN_PVR);
return 0;
}
diff --git a/arch/ppc/mm/hashtable.S b/arch/ppc/mm/hashtable.S
index e3c7d94fc844..ab83132a7ed0 100644
--- a/arch/ppc/mm/hashtable.S
+++ b/arch/ppc/mm/hashtable.S
@@ -101,7 +101,7 @@ _GLOBAL(hash_page)
/* Get PTE (linux-style) and check access */
lis r0,KERNELBASE@h /* check if kernel address */
cmplw 0,r4,r0
- mfspr r8,SPRG3 /* current task's THREAD (phys) */
+ mfspr r8,SPRN_SPRG3 /* current task's THREAD (phys) */
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
lwz r5,PGDIR(r8) /* virt page-table root */
blt+ 112f /* assume user more likely */
diff --git a/arch/ppc/mm/ppc_mmu.c b/arch/ppc/mm/ppc_mmu.c
index 3643da9e1df5..9a381ed5eb21 100644
--- a/arch/ppc/mm/ppc_mmu.c
+++ b/arch/ppc/mm/ppc_mmu.c
@@ -142,7 +142,7 @@ void __init setbat(int index, unsigned long virt, unsigned long phys,
flags |= _PAGE_COHERENT;
bl = (size >> 17) - 1;
- if (PVR_VER(mfspr(PVR)) != 1) {
+ if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
/* 603, 604, etc. */
/* Do DBAT first */
wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index 3026edcf064d..f63bca83e757 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -97,7 +97,7 @@ ebony_calibrate_decr(void)
* on Rev. C silicon then errata forces us to
* use the internal clock.
*/
- switch (PVR_REV(mfspr(PVR))) {
+ switch (PVR_REV(mfspr(SPRN_PVR))) {
case PVR_REV(PVR_440GP_RB):
freq = EBONY_440GP_RB_SYSCLK;
break;
diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.c b/arch/ppc/platforms/83xx/mpc834x_sys.c
index 02a426edd9e6..1260fd932f0a 100644
--- a/arch/ppc/platforms/83xx/mpc834x_sys.c
+++ b/arch/ppc/platforms/83xx/mpc834x_sys.c
@@ -140,8 +140,8 @@ mpc834x_sys_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
- pvid = mfspr(PVR);
- svid = mfspr(SVR);
+ pvid = mfspr(SPRN_PVR);
+ svid = mfspr(SPRN_SVR);
seq_printf(m, "chip\t\t: MPC%s\n", cur_ppc_sys_spec->ppc_sys_name);
seq_printf(m, "Vendor\t\t: Freescale Inc.\n");
@@ -154,7 +154,7 @@ mpc834x_sys_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
- phid1 = mfspr(HID1);
+ phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
@@ -193,8 +193,8 @@ mpc834x_sys_set_bat(void)
{
/* we steal the lowest ioremap addr for virt space */
mb();
- mtspr(DBAT1U, VIRT_IMMRBAR | 0x1e);
- mtspr(DBAT1L, immrbar | 0x2a);
+ mtspr(SPRN_DBAT1U, VIRT_IMMRBAR | 0x1e);
+ mtspr(SPRN_DBAT1L, immrbar | 0x2a);
mb();
}
@@ -257,7 +257,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
}
#endif
- identify_ppc_sys_by_id(mfspr(SVR));
+ identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = mpc834x_sys_setup_arch;
diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c
index fca01e6bc4d0..bfa0ecc8afab 100644
--- a/arch/ppc/platforms/85xx/mpc8540_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8540_ads.c
@@ -187,7 +187,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
- identify_ppc_sys_by_id(mfspr(SVR));
+ identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = mpc8540ads_setup_arch;
diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c
index 16c8c7b5d1b9..513c5ecdcd4f 100644
--- a/arch/ppc/platforms/85xx/mpc8560_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8560_ads.c
@@ -197,7 +197,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
- identify_ppc_sys_by_id(mfspr(SVR));
+ identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = mpc8560ads_setup_arch;
diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
index 546d145a1811..7e1e11a1195a 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
@@ -126,8 +126,8 @@ mpc85xx_ads_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
- pvid = mfspr(PVR);
- svid = mfspr(SVR);
+ pvid = mfspr(SPRN_PVR);
+ svid = mfspr(SPRN_SVR);
seq_printf(m, "chip\t\t: MPC%s\n", cur_ppc_sys_spec->ppc_sys_name);
seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
@@ -137,7 +137,7 @@ mpc85xx_ads_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
- phid1 = mfspr(HID1);
+ phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
index f63e679a4a8c..59d80b69f7f8 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
@@ -143,8 +143,8 @@ mpc85xx_cds_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
- pvid = mfspr(PVR);
- svid = mfspr(SVR);
+ pvid = mfspr(SPRN_PVR);
+ svid = mfspr(SPRN_SVR);
seq_printf(m, "chip\t\t: MPC%s\n", cur_ppc_sys_spec->ppc_sys_name);
seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
@@ -154,7 +154,7 @@ mpc85xx_cds_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
- phid1 = mfspr(HID1);
+ phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
@@ -448,7 +448,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
- identify_ppc_sys_by_id(mfspr(SVR));
+ identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = mpc85xx_cds_setup_arch;
diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c
index 6a857b7474d9..9ab05e590c3e 100644
--- a/arch/ppc/platforms/85xx/sbc8560.c
+++ b/arch/ppc/platforms/85xx/sbc8560.c
@@ -198,7 +198,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
- identify_ppc_sys_by_id(mfspr(SVR));
+ identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = sbc8560_setup_arch;
diff --git a/arch/ppc/platforms/85xx/sbc85xx.c b/arch/ppc/platforms/85xx/sbc85xx.c
index b7db50d2d96d..f2f9e73fafed 100644
--- a/arch/ppc/platforms/85xx/sbc85xx.c
+++ b/arch/ppc/platforms/85xx/sbc85xx.c
@@ -126,8 +126,8 @@ sbc8560_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
- pvid = mfspr(PVR);
- svid = mfspr(SVR);
+ pvid = mfspr(SPRN_PVR);
+ svid = mfspr(SPRN_SVR);
seq_printf(m, "chip\t\t: MPC%s\n", cur_ppc_sys_spec->ppc_sys_name);
seq_printf(m, "Vendor\t\t: Wind River\n");
@@ -137,7 +137,7 @@ sbc8560_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
- phid1 = mfspr(HID1);
+ phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c
index 5214a1d345da..3a1986067fdd 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.c
+++ b/arch/ppc/platforms/85xx/stx_gp3.c
@@ -263,8 +263,8 @@ gp3_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
- pvid = mfspr(PVR);
- svid = mfspr(SVR);
+ pvid = mfspr(SPRN_PVR);
+ svid = mfspr(SPRN_SVR);
memsize = total_memory;
@@ -277,7 +277,7 @@ gp3_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
- phid1 = mfspr(HID1);
+ phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
@@ -349,7 +349,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
- identify_ppc_sys_by_id(mfspr(SVR));
+ identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = gp3_setup_arch;
diff --git a/arch/ppc/platforms/adir_setup.c b/arch/ppc/platforms/adir_setup.c
index 0b4c98648b36..6a6754ee0617 100644
--- a/arch/ppc/platforms/adir_setup.c
+++ b/arch/ppc/platforms/adir_setup.c
@@ -60,7 +60,7 @@ adir_get_cpu_speed(void)
unsigned long hid1;
int cpu_speed;
- hid1 = mfspr(HID1) >> 28;
+ hid1 = mfspr(SPRN_HID1) >> 28;
hid1 = cpu_750cx[hid1];
@@ -126,7 +126,7 @@ adir_setup_arch(void)
printk("SBS Adirondack port (C) 2001 SBS Technologies, Inc.\n");
/* Identify the CPU manufacturer */
- cpu = mfspr(PVR);
+ cpu = mfspr(SPRN_PVR);
printk("CPU manufacturer: IBM [rev=%04x]\n", (cpu & 0xffff));
}
diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c
index 096e0fbe2f48..7786818bd9d0 100644
--- a/arch/ppc/platforms/chestnut.c
+++ b/arch/ppc/platforms/chestnut.c
@@ -513,8 +513,8 @@ static __inline__ void
chestnut_set_bat(void)
{
mb();
- mtspr(DBAT3U, 0xf0001ffe);
- mtspr(DBAT3L, 0xf000002a);
+ mtspr(SPRN_DBAT3U, 0xf0001ffe);
+ mtspr(SPRN_DBAT3L, 0xf000002a);
mb();
}
diff --git a/arch/ppc/platforms/cpci690.c b/arch/ppc/platforms/cpci690.c
index 0405efe845e4..f65850e5f6df 100644
--- a/arch/ppc/platforms/cpci690.c
+++ b/arch/ppc/platforms/cpci690.c
@@ -89,7 +89,7 @@ cpci690_get_cpu_speed(void)
{
unsigned long hid1;
- hid1 = mfspr(HID1) >> 28;
+ hid1 = mfspr(SPRN_HID1) >> 28;
return cpci690_get_bus_speed() * cpu_7xx[hid1]/2;
}
@@ -441,8 +441,8 @@ cpci690_set_bat(u32 addr, u32 size)
size = ((size >> 17) - 1) << 2;
mb();
- mtspr(DBAT1U, addr | size | 0x2); /* Vs == 1; Vp == 0 */
- mtspr(DBAT1L, addr | 0x2a); /* WIMG bits == 0101; PP == r/w access */
+ mtspr(SPRN_DBAT1U, addr | size | 0x2); /* Vs == 1; Vp == 0 */
+ mtspr(SPRN_DBAT1L, addr | 0x2a); /* WIMG bits == 0101; PP == r/w access */
mb();
return;
diff --git a/arch/ppc/platforms/ev64260.c b/arch/ppc/platforms/ev64260.c
index 29d494dce214..227a2a236790 100644
--- a/arch/ppc/platforms/ev64260.c
+++ b/arch/ppc/platforms/ev64260.c
@@ -80,14 +80,14 @@ ev64260_get_cpu_speed(void)
{
unsigned long pvr, hid1, pll_ext;
- pvr = PVR_VER(mfspr(PVR));
+ pvr = PVR_VER(mfspr(SPRN_PVR));
if (pvr != PVR_VER(PVR_7450)) {
- hid1 = mfspr(HID1) >> 28;
+ hid1 = mfspr(SPRN_HID1) >> 28;
return ev64260_get_bus_speed() * cpu_7xx[hid1]/2;
}
else {
- hid1 = (mfspr(HID1) & 0x0001e000) >> 13;
+ hid1 = (mfspr(SPRN_HID1) & 0x0001e000) >> 13;
pll_ext = 0; /* No way to read; must get from schematic */
return ev64260_get_bus_speed() * cpu_745x[pll_ext][hid1]/2;
}
@@ -530,7 +530,7 @@ ev64260_show_cpuinfo(struct seq_file *m)
{
uint pvid;
- pvid = mfspr(PVR);
+ pvid = mfspr(SPRN_PVR);
seq_printf(m, "vendor\t\t: " BOARD_VENDOR "\n");
seq_printf(m, "machine\t\t: " BOARD_MACHINE "\n");
seq_printf(m, "cpu MHz\t\t: %d\n", ev64260_get_cpu_speed()/1000/1000);
@@ -563,8 +563,8 @@ static __inline__ void
ev64260_set_bat(void)
{
mb();
- mtspr(DBAT1U, 0xfb0001fe);
- mtspr(DBAT1L, 0xfb00002a);
+ mtspr(SPRN_DBAT1U, 0xfb0001fe);
+ mtspr(SPRN_DBAT1L, 0xfb00002a);
mb();
return;
diff --git a/arch/ppc/platforms/gemini_prom.S b/arch/ppc/platforms/gemini_prom.S
index 84729e2a093c..8c5065d56505 100644
--- a/arch/ppc/platforms/gemini_prom.S
+++ b/arch/ppc/platforms/gemini_prom.S
@@ -40,29 +40,29 @@ _GLOBAL(gemini_prom_init)
/* zero out the bats now that the MMU is off */
prom_no_mmu:
li r3,0
- mtspr IBAT0U,r3
- mtspr IBAT0L,r3
- mtspr IBAT1U,r3
- mtspr IBAT1L,r3
- mtspr IBAT2U,r3
- mtspr IBAT2L,r3
- mtspr IBAT3U,r3
- mtspr IBAT3L,r3
+ mtspr SPRN_IBAT0U,r3
+ mtspr SPRN_IBAT0L,r3
+ mtspr SPRN_IBAT1U,r3
+ mtspr SPRN_IBAT1L,r3
+ mtspr SPRN_IBAT2U,r3
+ mtspr SPRN_IBAT2L,r3
+ mtspr SPRN_IBAT3U,r3
+ mtspr SPRN_IBAT3L,r3
- mtspr DBAT0U,r3
- mtspr DBAT0L,r3
- mtspr DBAT1U,r3
- mtspr DBAT1L,r3
- mtspr DBAT2U,r3
- mtspr DBAT2L,r3
- mtspr DBAT3U,r3
- mtspr DBAT3L,r3
+ mtspr SPRN_DBAT0U,r3
+ mtspr SPRN_DBAT0L,r3
+ mtspr SPRN_DBAT1U,r3
+ mtspr SPRN_DBAT1L,r3
+ mtspr SPRN_DBAT2U,r3
+ mtspr SPRN_DBAT2L,r3
+ mtspr SPRN_DBAT3U,r3
+ mtspr SPRN_DBAT3L,r3
#endif
/* the bootloader (as far as I'm currently aware) doesn't mess with page
tables, but since we're already here, might as well zap these, too */
li r4,0
- mtspr SDR1,r4
+ mtspr SPRN_SDR1,r4
li r4,16
mtctr r4
@@ -75,9 +75,9 @@ prom_no_mmu:
#ifdef CONFIG_SMP
/* The 750 book (and Mot/IBM support) says that this will "assist" snooping
when in SMP. Not sure yet whether this should stay or leave... */
- mfspr r4,HID0
+ mfspr r4,SPRN_HID0
ori r4,r4,HID0_ABE
- mtspr HID0,r4
+ mtspr SPRN_HID0,r4
sync
#endif /* CONFIG_SMP */
blr
@@ -88,6 +88,6 @@ _GLOBAL(_gemini_reboot)
lis r5,GEMINI_BOOT_INIT@h
ori r5,r5,GEMINI_BOOT_INIT@l
li r6,MSR_IP
- mtspr SRR0,r5
- mtspr SRR1,r6
+ mtspr SPRN_SRR0,r5
+ mtspr SPRN_SRR1,r6
rfi
diff --git a/arch/ppc/platforms/gemini_setup.c b/arch/ppc/platforms/gemini_setup.c
index 2576d8578c1b..1a42cb9b1134 100644
--- a/arch/ppc/platforms/gemini_setup.c
+++ b/arch/ppc/platforms/gemini_setup.c
@@ -202,8 +202,8 @@ gemini_get_clock_speed(void)
unsigned long hid1, pvr;
int clock;
- pvr = mfspr(PVR);
- hid1 = (mfspr(HID1) >> 28) & 0xf;
+ pvr = mfspr(SPRN_PVR);
+ hid1 = (mfspr(SPRN_HID1) >> 28) & 0xf;
if (PVR_VER(pvr) == 8 ||
PVR_VER(pvr) == 12)
hid1 = cpu_7xx[hid1];
@@ -238,7 +238,7 @@ void __init gemini_init_l2(void)
reg = readb(GEMINI_L2CFG);
brev = readb(GEMINI_BREV);
fam = readb(GEMINI_FEAT);
- pvr = mfspr(PVR);
+ pvr = mfspr(SPRN_PVR);
switch(PVR_VER(pvr)) {
@@ -537,8 +537,8 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
int i;
/* Restore BATs for now */
- mtspr(DBAT3U, 0xf0001fff);
- mtspr(DBAT3L, 0xf000002a);
+ mtspr(SPRN_DBAT3U, 0xf0001fff);
+ mtspr(SPRN_DBAT3L, 0xf000002a);
parse_bootinfo(find_bootinfo());
diff --git a/arch/ppc/platforms/k2.c b/arch/ppc/platforms/k2.c
index be5c0511ffb9..aacb438708ff 100644
--- a/arch/ppc/platforms/k2.c
+++ b/arch/ppc/platforms/k2.c
@@ -392,9 +392,9 @@ static int k2_get_cpu_speed(void)
unsigned long hid1;
int cpu_speed;
- hid1 = mfspr(HID1) >> 28;
+ hid1 = mfspr(SPRN_HID1) >> 28;
- if ((mfspr(PVR) >> 16) == 8)
+ if ((mfspr(SPRN_PVR) >> 16) == 8)
hid1 = cpu_7xx[hid1];
else
hid1 = cpu_6xx[hid1];
@@ -472,7 +472,7 @@ static void __init k2_setup_arch(void)
"(source@mvista.com)\n");
/* Identify the CPU manufacturer */
- cpu = PVR_REV(mfspr(PVR));
+ cpu = PVR_REV(mfspr(SPRN_PVR));
printk(KERN_INFO "CPU manufacturer: %s [rev=%04x]\n",
(cpu & (1 << 15)) ? "IBM" : "Motorola", cpu);
}
@@ -486,8 +486,8 @@ static void k2_restart(char *cmd)
/* SRR0 has system reset vector, SRR1 has default MSR value */
/* rfi restores MSR from SRR1 and sets the PC to the SRR0 value */
- mtspr(SRR0, 0xfff00100);
- mtspr(SRR1, 0);
+ mtspr(SPRN_SRR0, 0xfff00100);
+ mtspr(SPRN_SRR1, 0);
__asm__ __volatile__("rfi\n\t");
/* not reached */
@@ -513,10 +513,10 @@ static __inline__ void k2_set_bat(void)
mb();
/* setup DBATs */
- mtspr(DBAT2U, 0x80001ffe);
- mtspr(DBAT2L, 0x8000002a);
- mtspr(DBAT3U, 0xf0001ffe);
- mtspr(DBAT3L, 0xf000002a);
+ mtspr(SPRN_DBAT2U, 0x80001ffe);
+ mtspr(SPRN_DBAT2L, 0x8000002a);
+ mtspr(SPRN_DBAT3U, 0xf0001ffe);
+ mtspr(SPRN_DBAT3L, 0xf000002a);
/* wait for updates */
mb();
diff --git a/arch/ppc/platforms/katana.c b/arch/ppc/platforms/katana.c
index ba7387ea091e..7eb7f2aa3988 100644
--- a/arch/ppc/platforms/katana.c
+++ b/arch/ppc/platforms/katana.c
@@ -452,7 +452,7 @@ katana_setup_arch(void)
* DD2.0 has bug that requires the L2 to be in WRT mode
* avoid dirty data in cache
*/
- if (PVR_REV(mfspr(PVR)) == 0x0200) {
+ if (PVR_REV(mfspr(SPRN_PVR)) == 0x0200) {
printk(KERN_INFO "DD2.0 detected. Setting L2 cache"
"to Writethrough mode\n");
_set_L2CR(L2CR_L2E | L2CR_L2PE | L2CR_L2WT);
@@ -733,8 +733,8 @@ static inline void
katana_set_bat(void)
{
mb();
- mtspr(DBAT2U, 0xf0001ffe);
- mtspr(DBAT2L, 0xf000002a);
+ mtspr(SPRN_DBAT2U, 0xf0001ffe);
+ mtspr(SPRN_DBAT2L, 0xf000002a);
mb();
}
diff --git a/arch/ppc/platforms/lopec.c b/arch/ppc/platforms/lopec.c
index f0c203085937..a5569525e0af 100644
--- a/arch/ppc/platforms/lopec.c
+++ b/arch/ppc/platforms/lopec.c
@@ -319,8 +319,8 @@ static __inline__ void
lopec_set_bat(void)
{
mb();
- mtspr(DBAT1U, 0xf8000ffe);
- mtspr(DBAT1L, 0xf800002a);
+ mtspr(SPRN_DBAT1U, 0xf8000ffe);
+ mtspr(SPRN_DBAT1L, 0xf800002a);
mb();
}
diff --git a/arch/ppc/platforms/mcpn765.c b/arch/ppc/platforms/mcpn765.c
index 83dcc8fae831..e88d294ea593 100644
--- a/arch/ppc/platforms/mcpn765.c
+++ b/arch/ppc/platforms/mcpn765.c
@@ -470,8 +470,8 @@ static __inline__ void
mcpn765_set_bat(void)
{
mb();
- mtspr(DBAT1U, 0xfe8000fe);
- mtspr(DBAT1L, 0xfe80002a);
+ mtspr(SPRN_DBAT1U, 0xfe8000fe);
+ mtspr(SPRN_DBAT1L, 0xfe80002a);
mb();
}
diff --git a/arch/ppc/platforms/mvme5100.c b/arch/ppc/platforms/mvme5100.c
index 0ecea917d205..b292b44b760c 100644
--- a/arch/ppc/platforms/mvme5100.c
+++ b/arch/ppc/platforms/mvme5100.c
@@ -246,8 +246,8 @@ static __inline__ void
mvme5100_set_bat(void)
{
mb();
- mtspr(DBAT1U, 0xf0001ffe);
- mtspr(DBAT1L, 0xf000002a);
+ mtspr(SPRN_DBAT1U, 0xf0001ffe);
+ mtspr(SPRN_DBAT1L, 0xf000002a);
mb();
}
diff --git a/arch/ppc/platforms/pcore.c b/arch/ppc/platforms/pcore.c
index af8b51d876d4..d7191630a650 100644
--- a/arch/ppc/platforms/pcore.c
+++ b/arch/ppc/platforms/pcore.c
@@ -282,8 +282,8 @@ static __inline__ void
pcore_set_bat(void)
{
mb();
- mtspr(DBAT3U, 0xf0001ffe);
- mtspr(DBAT3L, 0xfe80002a);
+ mtspr(SPRN_DBAT3U, 0xf0001ffe);
+ mtspr(SPRN_DBAT3L, 0xfe80002a);
mb();
}
diff --git a/arch/ppc/platforms/pmac_cache.S b/arch/ppc/platforms/pmac_cache.S
index fa156633dbc7..c00e0352044d 100644
--- a/arch/ppc/platforms/pmac_cache.S
+++ b/arch/ppc/platforms/pmac_cache.S
@@ -55,7 +55,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* Stop DPM */
- mfspr r8,SPRN_HID0 /* Save HID0 in r8 */
+ mfspr r8,SPRN_HID0 /* Save SPRN_HID0 in r8 */
rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
sync
mtspr SPRN_HID0,r4 /* Disable DPM */
@@ -86,13 +86,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
/* Get the current enable bit of the L2CR into r4 */
- mfspr r5,L2CR
+ mfspr r5,SPRN_L2CR
/* Set to data-only (pre-745x bit) */
oris r3,r5,L2CR_L2DO@h
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
-1: mtspr L2CR,r3
+1: mtspr SPRN_L2CR,r3
3: sync
isync
b 1f
@@ -117,7 +117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
-1: mtspr L2CR,r5
+1: mtspr SPRN_L2CR,r5
3: sync
isync
b 1f
@@ -129,18 +129,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
isync
/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
oris r4,r5,L2CR_L2I@h
- mtspr L2CR,r4
+ mtspr SPRN_L2CR,r4
sync
isync
xoris r4,r4,L2CR_L2I@h
sync
- mtspr L2CR,r4
+ mtspr SPRN_L2CR,r4
sync
/* now disable the L1 data cache */
- mfspr r0,HID0
+ mfspr r0,SPRN_HID0
rlwinm r0,r0,0,~HID0_DCE
- mtspr HID0,r0
+ mtspr SPRN_HID0,r0
sync
isync
@@ -239,14 +239,14 @@ flush_disable_745x:
isync
/* Flush the L2 cache using the hardware assist */
- mfspr r3,L2CR
+ mfspr r3,SPRN_L2CR
cmpwi r3,0 /* check if it is enabled first */
bge 4f
oris r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
b 2f
/* When disabling/locking L2, code must be in L1 */
.balign 32
-1: mtspr L2CR,r0 /* lock the L2 cache */
+1: mtspr SPRN_L2CR,r0 /* lock the L2 cache */
3: sync
isync
b 1f
@@ -258,8 +258,8 @@ flush_disable_745x:
isync
ori r0,r3,L2CR_L2HWF_745x
sync
- mtspr L2CR,r0 /* set the hardware flush bit */
-3: mfspr r0,L2CR /* wait for it to go to 0 */
+ mtspr SPRN_L2CR,r0 /* set the hardware flush bit */
+3: mfspr r0,SPRN_L2CR /* wait for it to go to 0 */
andi. r0,r0,L2CR_L2HWF_745x
bne 3b
sync
@@ -267,7 +267,7 @@ flush_disable_745x:
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
-1: mtspr L2CR,r3 /* disable the L2 cache */
+1: mtspr SPRN_L2CR,r3 /* disable the L2 cache */
3: sync
isync
b 1f
@@ -278,34 +278,34 @@ flush_disable_745x:
1: sync
isync
oris r4,r3,L2CR_L2I@h
- mtspr L2CR,r4
+ mtspr SPRN_L2CR,r4
sync
isync
-1: mfspr r4,L2CR
+1: mfspr r4,SPRN_L2CR
andis. r0,r4,L2CR_L2I@h
bne 1b
sync
BEGIN_FTR_SECTION
/* Flush the L3 cache using the hardware assist */
-4: mfspr r3,L3CR
+4: mfspr r3,SPRN_L3CR
cmpwi r3,0 /* check if it is enabled */
bge 6f
oris r0,r3,L3CR_L3IO@h
ori r0,r0,L3CR_L3DO
sync
- mtspr L3CR,r0 /* lock the L3 cache */
+ mtspr SPRN_L3CR,r0 /* lock the L3 cache */
sync
isync
ori r0,r0,L3CR_L3HWF
sync
- mtspr L3CR,r0 /* set the hardware flush bit */
-5: mfspr r0,L3CR /* wait for it to go to zero */
+ mtspr SPRN_L3CR,r0 /* set the hardware flush bit */
+5: mfspr r0,SPRN_L3CR /* wait for it to go to zero */
andi. r0,r0,L3CR_L3HWF
bne 5b
rlwinm r3,r3,0,~L3CR_L3E
sync
- mtspr L3CR,r3 /* disable the L3 cache */
+ mtspr SPRN_L3CR,r3 /* disable the L3 cache */
sync
ori r4,r3,L3CR_L3I
mtspr SPRN_L3CR,r4
@@ -315,9 +315,9 @@ BEGIN_FTR_SECTION
sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
-6: mfspr r0,HID0 /* now disable the L1 data cache */
+6: mfspr r0,SPRN_HID0 /* now disable the L1 data cache */
rlwinm r0,r0,0,~HID0_DCE
- mtspr HID0,r0
+ mtspr SPRN_HID0,r0
sync
isync
mtmsr r11 /* restore DR and EE */
diff --git a/arch/ppc/platforms/pmac_cpufreq.c b/arch/ppc/platforms/pmac_cpufreq.c
index c644ab8f0818..fc924476a424 100644
--- a/arch/ppc/platforms/pmac_cpufreq.c
+++ b/arch/ppc/platforms/pmac_cpufreq.c
@@ -156,7 +156,7 @@ static int __pmac dfs_set_cpu_speed(int low_speed)
static unsigned int __pmac dfs_get_cpu_speed(unsigned int cpu)
{
- if (mfspr(HID1) & HID1_DFS)
+ if (mfspr(SPRN_HID1) & HID1_DFS)
return low_freq;
else
return hi_freq;
@@ -542,7 +542,7 @@ static int __init pmac_cpufreq_setup(void)
set_speed_proc = pmu_set_cpu_speed;
}
/* Else check for 750FX */
- else if (PVR_VER(mfspr(PVR)) == 0x7000) {
+ else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000) {
if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
goto out;
hi_freq = cur_freq;
diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c
index 8869713396c1..d17546650a0c 100644
--- a/arch/ppc/platforms/pmac_feature.c
+++ b/arch/ppc/platforms/pmac_feature.c
@@ -1796,7 +1796,7 @@ core99_sleep_state(struct device_node* node, long param, long value)
if (value == 1) {
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL,
*reg, 0x05);
- } else if (value == 0 && (mfspr(HID1) & HID1_DFS)) {
+ } else if (value == 0 && (mfspr(SPRN_HID1) & HID1_DFS)) {
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL,
*reg, 0x04);
}
diff --git a/arch/ppc/platforms/pmac_setup.c b/arch/ppc/platforms/pmac_setup.c
index 6d46e8bd45bc..01ab92758929 100644
--- a/arch/ppc/platforms/pmac_setup.c
+++ b/arch/ppc/platforms/pmac_setup.c
@@ -247,7 +247,7 @@ pmac_setup_arch(void)
int *fp;
unsigned long pvr;
- pvr = PVR_VER(mfspr(PVR));
+ pvr = PVR_VER(mfspr(SPRN_PVR));
/* Set loops_per_jiffy to a half-way reasonable value,
for use until calibrate_delay gets called. */
diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S
index cc69636f1769..3139b6766ad3 100644
--- a/arch/ppc/platforms/pmac_sleep.S
+++ b/arch/ppc/platforms/pmac_sleep.S
@@ -182,12 +182,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
/*
* Set the HID0 and MSR for sleep.
*/
- mfspr r2,HID0
+ mfspr r2,SPRN_HID0
rlwinm r2,r2,0,10,7 /* clear doze, nap */
oris r2,r2,HID0_SLEEP@h
sync
isync
- mtspr HID0,r2
+ mtspr SPRN_HID0,r2
sync
/* This loop puts us back to sleep in case we have a spurrious
@@ -216,10 +216,10 @@ _GLOBAL(core99_wake_up)
/* Make sure HID0 no longer contains any sleep bit and that data cache
* is disabled
*/
- mfspr r3,HID0
+ mfspr r3,SPRN_HID0
rlwinm r3,r3,0,11,7 /* clear SLEEP, NAP, DOZE bits */
rlwinm 3,r3,0,18,15 /* clear DCE, ICE */
- mtspr HID0,r3
+ mtspr SPRN_HID0,r3
sync
isync
diff --git a/arch/ppc/platforms/pmac_smp.c b/arch/ppc/platforms/pmac_smp.c
index 0ca3a6b69152..2b88745576a0 100644
--- a/arch/ppc/platforms/pmac_smp.c
+++ b/arch/ppc/platforms/pmac_smp.c
@@ -294,7 +294,7 @@ static int __init smp_psurge_probe(void)
int i, ncpus;
/* We don't do SMP on the PPC601 -- paulus */
- if (PVR_VER(mfspr(PVR)) == 1)
+ if (PVR_VER(mfspr(SPRN_PVR)) == 1)
return 1;
/*
diff --git a/arch/ppc/platforms/pplus.c b/arch/ppc/platforms/pplus.c
index 1688f369516d..65705c911795 100644
--- a/arch/ppc/platforms/pplus.c
+++ b/arch/ppc/platforms/pplus.c
@@ -849,10 +849,10 @@ static __inline__ void pplus_set_bat(void)
mb();
/* setup DBATs */
- mtspr(DBAT2U, 0x80001ffe);
- mtspr(DBAT2L, 0x8000002a);
- mtspr(DBAT3U, 0xf0001ffe);
- mtspr(DBAT3L, 0xf000002a);
+ mtspr(SPRN_DBAT2U, 0x80001ffe);
+ mtspr(SPRN_DBAT2L, 0x8000002a);
+ mtspr(SPRN_DBAT3U, 0xf0001ffe);
+ mtspr(SPRN_DBAT3L, 0xf000002a);
/* wait for updates */
mb();
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index c6bd7b86bd61..67d74b7080fe 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -690,8 +690,8 @@ prep_set_bat(void)
mb();
/* setup DBATs */
- mtspr(DBAT2U, 0x80001ffe);
- mtspr(DBAT2L, 0x8000002a);
+ mtspr(SPRN_DBAT2U, 0x80001ffe);
+ mtspr(SPRN_DBAT2L, 0x8000002a);
/* wait for updates */
mb();
diff --git a/arch/ppc/platforms/prpmc750.c b/arch/ppc/platforms/prpmc750.c
index b89d144c8016..c894e1ab5934 100644
--- a/arch/ppc/platforms/prpmc750.c
+++ b/arch/ppc/platforms/prpmc750.c
@@ -302,8 +302,8 @@ static void __init prpmc750_init_IRQ(void)
static __inline__ void prpmc750_set_bat(void)
{
mb();
- mtspr(DBAT1U, 0xf0001ffe);
- mtspr(DBAT1L, 0xf000002a);
+ mtspr(SPRN_DBAT1U, 0xf0001ffe);
+ mtspr(SPRN_DBAT1L, 0xf000002a);
mb();
}
diff --git a/arch/ppc/platforms/prpmc800.c b/arch/ppc/platforms/prpmc800.c
index 0e99f8bc3f3b..8b09fa69b35b 100644
--- a/arch/ppc/platforms/prpmc800.c
+++ b/arch/ppc/platforms/prpmc800.c
@@ -419,8 +419,8 @@ static void __init prpmc800_init_IRQ(void)
static __inline__ void prpmc800_set_bat(void)
{
mb();
- mtspr(DBAT1U, 0xf0001ffe);
- mtspr(DBAT1L, 0xf000002a);
+ mtspr(SPRN_DBAT1U, 0xf0001ffe);
+ mtspr(SPRN_DBAT1L, 0xf000002a);
mb();
}
diff --git a/arch/ppc/platforms/spruce.c b/arch/ppc/platforms/spruce.c
index 74be324564fa..5ad70d357cb9 100644
--- a/arch/ppc/platforms/spruce.c
+++ b/arch/ppc/platforms/spruce.c
@@ -278,8 +278,8 @@ static __inline__ void
spruce_set_bat(void)
{
mb();
- mtspr(DBAT1U, 0xf8000ffe);
- mtspr(DBAT1L, 0xf800002a);
+ mtspr(SPRN_DBAT1U, 0xf8000ffe);
+ mtspr(SPRN_DBAT1L, 0xf800002a);
mb();
}
diff --git a/arch/ppc/syslib/btext.c b/arch/ppc/syslib/btext.c
index e3f9e8e3b6bb..7734f6836174 100644
--- a/arch/ppc/syslib/btext.c
+++ b/arch/ppc/syslib/btext.c
@@ -137,7 +137,7 @@ btext_prepare_BAT(void)
boot_text_mapped = 0;
return;
}
- if (PVR_VER(mfspr(PVR)) != 1) {
+ if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
/* 603, 604, G3, G4, ... */
lowbits = addr & ~0xFF000000UL;
addr &= 0xFF000000UL;
diff --git a/arch/ppc/syslib/ibm440gx_common.c b/arch/ppc/syslib/ibm440gx_common.c
index 5e2ce6bf5976..4ad85e0e0234 100644
--- a/arch/ppc/syslib/ibm440gx_common.c
+++ b/arch/ppc/syslib/ibm440gx_common.c
@@ -221,7 +221,7 @@ void __init ibm440gx_l2c_setup(struct ibm44x_clocks* p)
/* Disable L2C on rev.A, rev.B and 800MHz version of rev.C,
enable it on all other revisions
*/
- u32 pvr = mfspr(PVR);
+ u32 pvr = mfspr(SPRN_PVR);
if (pvr == PVR_440GX_RA || pvr == PVR_440GX_RB ||
(pvr == PVR_440GX_RC && p->cpu > 667000000))
ibm440gx_l2c_disable();
diff --git a/arch/ppc/syslib/mpc52xx_setup.c b/arch/ppc/syslib/mpc52xx_setup.c
index 6bd014d7c9dc..b89fcf068745 100644
--- a/arch/ppc/syslib/mpc52xx_setup.c
+++ b/arch/ppc/syslib/mpc52xx_setup.c
@@ -80,8 +80,8 @@ mpc52xx_set_bat(void)
* mpc52xx_find_end_of_memory, and UARTs/GPIO access for debug
*/
mb();
- mtspr(DBAT2U, 0xf0001ffe);
- mtspr(DBAT2L, 0xf000002a);
+ mtspr(SPRN_DBAT2U, 0xf0001ffe);
+ mtspr(SPRN_DBAT2L, 0xf000002a);
mb();
}
diff --git a/include/asm-ppc/cache.h b/include/asm-ppc/cache.h
index 1fcf0f3e7b87..38f2f1be4a87 100644
--- a/include/asm-ppc/cache.h
+++ b/include/asm-ppc/cache.h
@@ -50,12 +50,12 @@ extern void flush_dcache_all(void);
/* Cache control on the MPC8xx is provided through some additional
* special purpose registers.
*/
-#define IC_CST 560 /* Instruction cache control/status */
-#define IC_ADR 561 /* Address needed for some commands */
-#define IC_DAT 562 /* Read-only data register */
-#define DC_CST 568 /* Data cache control/status */
-#define DC_ADR 569 /* Address needed for some commands */
-#define DC_DAT 570 /* Read-only data register */
+#define SPRN_IC_CST 560 /* Instruction cache control/status */
+#define SPRN_IC_ADR 561 /* Address needed for some commands */
+#define SPRN_IC_DAT 562 /* Read-only data register */
+#define SPRN_DC_CST 568 /* Data cache control/status */
+#define SPRN_DC_ADR 569 /* Address needed for some commands */
+#define SPRN_DC_DAT 570 /* Read-only data register */
/* Commands. Only the first few are available to the instruction cache.
*/
diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h
index 4d1d4fac3133..4a0c67f672c2 100644
--- a/include/asm-ppc/mmu.h
+++ b/include/asm-ppc/mmu.h
@@ -152,7 +152,7 @@ typedef struct _P601_BAT {
* is written, and the contents of several registers are used to
* create the entry.
*/
-#define MI_CTR 784 /* Instruction TLB control register */
+#define SPRN_MI_CTR 784 /* Instruction TLB control register */
#define MI_GPM 0x80000000 /* Set domain manager mode */
#define MI_PPM 0x40000000 /* Set subpage protection */
#define MI_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
@@ -164,7 +164,7 @@ typedef struct _P601_BAT {
/* These are the Ks and Kp from the PowerPC books. For proper operation,
* Ks = 0, Kp = 1.
*/
-#define MI_AP 786
+#define SPRN_MI_AP 786
#define MI_Ks 0x80000000 /* Should not be set */
#define MI_Kp 0x40000000 /* Should always be set */
@@ -172,7 +172,7 @@ typedef struct _P601_BAT {
* about the last instruction TLB miss. When MI_RPN is written, bits in
* this register are used to create the TLB entry.
*/
-#define MI_EPN 787
+#define SPRN_MI_EPN 787
#define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
#define MI_EVALID 0x00000200 /* Entry is valid */
#define MI_ASIDMASK 0x0000000f /* ASID match value */
@@ -182,7 +182,7 @@ typedef struct _P601_BAT {
* For the instruction TLB, it contains bits that get loaded into the
* TLB entry when the MI_RPN is written.
*/
-#define MI_TWC 789
+#define SPRN_MI_TWC 789
#define MI_APG 0x000001e0 /* Access protection group (0) */
#define MI_GUARDED 0x00000010 /* Guarded storage */
#define MI_PSMASK 0x0000000c /* Mask of page size bits */
@@ -196,7 +196,7 @@ typedef struct _P601_BAT {
* causes a TLB entry to be created for the instruction TLB, using
* additional information from the MI_EPN, and MI_TWC registers.
*/
-#define MI_RPN 790
+#define SPRN_MI_RPN 790
/* Define an RPN value for mapping kernel memory to large virtual
* pages for boot initialization. This has real page number of 0,
@@ -205,7 +205,7 @@ typedef struct _P601_BAT {
*/
#define MI_BOOTINIT 0x000001fd
-#define MD_CTR 792 /* Data TLB control register */
+#define SPRN_MD_CTR 792 /* Data TLB control register */
#define MD_GPM 0x80000000 /* Set domain manager mode */
#define MD_PPM 0x40000000 /* Set subpage protection */
#define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
@@ -216,14 +216,14 @@ typedef struct _P601_BAT {
#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
#define MD_RESETVAL 0x04000000 /* Value of register at reset */
-#define M_CASID 793 /* Address space ID (context) to match */
+#define SPRN_M_CASID 793 /* Address space ID (context) to match */
#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
/* These are the Ks and Kp from the PowerPC books. For proper operation,
* Ks = 0, Kp = 1.
*/
-#define MD_AP 794
+#define SPRN_MD_AP 794
#define MD_Ks 0x80000000 /* Should not be set */
#define MD_Kp 0x40000000 /* Should always be set */
@@ -231,7 +231,7 @@ typedef struct _P601_BAT {
* about the last instruction TLB miss. When MD_RPN is written, bits in
* this register are used to create the TLB entry.
*/
-#define MD_EPN 795
+#define SPRN_MD_EPN 795
#define MD_EPNMASK 0xfffff000 /* Effective page number for entry */
#define MD_EVALID 0x00000200 /* Entry is valid */
#define MD_ASIDMASK 0x0000000f /* ASID match value */
@@ -241,7 +241,7 @@ typedef struct _P601_BAT {
* During a software tablewalk, reading this register provides the address
* of the entry associated with MD_EPN.
*/
-#define M_TWB 796
+#define SPRN_M_TWB 796
#define M_L1TB 0xfffff000 /* Level 1 table base address */
#define M_L1INDX 0x00000ffc /* Level 1 index, when read */
/* Reset value is undefined */
@@ -251,7 +251,7 @@ typedef struct _P601_BAT {
* when the MD_RPN is written. It is also provides the hardware assist
* for finding the PTE address during software tablewalk.
*/
-#define MD_TWC 797
+#define SPRN_MD_TWC 797
#define MD_L2TB 0xfffff000 /* Level 2 table base address */
#define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */
#define MD_APG 0x000001e0 /* Access protection group (0) */
@@ -269,12 +269,12 @@ typedef struct _P601_BAT {
* causes a TLB entry to be created for the data TLB, using
* additional information from the MD_EPN, and MD_TWC registers.
*/
-#define MD_RPN 798
+#define SPRN_MD_RPN 798
/* This is a temporary storage register that could be used to save
* a processor working register during a tablewalk.
*/
-#define M_TW 799
+#define SPRN_M_TW 799
/*
* At present, all PowerPC 400-class processors share a similar TLB
diff --git a/include/asm-ppc/reg.h b/include/asm-ppc/reg.h
index df2b51cc74a0..3372dee36a8c 100644
--- a/include/asm-ppc/reg.h
+++ b/include/asm-ppc/reg.h
@@ -335,91 +335,6 @@
#define MMCR0_PMC2_LOADMISSTIME 0x5
#define MMCR0_PMXE (1 << 26)
-/* Short-hand versions for a number of the above SPRNs */
-#define CTR SPRN_CTR /* Counter Register */
-#define DAR SPRN_DAR /* Data Address Register */
-#define DABR SPRN_DABR /* Data Address Breakpoint Register */
-#define DBAT0L SPRN_DBAT0L /* Data BAT 0 Lower Register */
-#define DBAT0U SPRN_DBAT0U /* Data BAT 0 Upper Register */
-#define DBAT1L SPRN_DBAT1L /* Data BAT 1 Lower Register */
-#define DBAT1U SPRN_DBAT1U /* Data BAT 1 Upper Register */
-#define DBAT2L SPRN_DBAT2L /* Data BAT 2 Lower Register */
-#define DBAT2U SPRN_DBAT2U /* Data BAT 2 Upper Register */
-#define DBAT3L SPRN_DBAT3L /* Data BAT 3 Lower Register */
-#define DBAT3U SPRN_DBAT3U /* Data BAT 3 Upper Register */
-#define DBAT4L SPRN_DBAT4L /* Data BAT 4 Lower Register */
-#define DBAT4U SPRN_DBAT4U /* Data BAT 4 Upper Register */
-#define DBAT5L SPRN_DBAT5L /* Data BAT 5 Lower Register */
-#define DBAT5U SPRN_DBAT5U /* Data BAT 5 Upper Register */
-#define DBAT6L SPRN_DBAT6L /* Data BAT 6 Lower Register */
-#define DBAT6U SPRN_DBAT6U /* Data BAT 6 Upper Register */
-#define DBAT7L SPRN_DBAT7L /* Data BAT 7 Lower Register */
-#define DBAT7U SPRN_DBAT7U /* Data BAT 7 Upper Register */
-//#define DEC SPRN_DEC /* Decrement Register */
-#define DMISS SPRN_DMISS /* Data TLB Miss Register */
-#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
-#define EAR SPRN_EAR /* External Address Register */
-#define HASH1 SPRN_HASH1 /* Primary Hash Address Register */
-#define HASH2 SPRN_HASH2 /* Secondary Hash Address Register */
-#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
-#define HID1 SPRN_HID1 /* Hardware Implementation Register 1 */
-#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
-#define IBAT0L SPRN_IBAT0L /* Instruction BAT 0 Lower Register */
-#define IBAT0U SPRN_IBAT0U /* Instruction BAT 0 Upper Register */
-#define IBAT1L SPRN_IBAT1L /* Instruction BAT 1 Lower Register */
-#define IBAT1U SPRN_IBAT1U /* Instruction BAT 1 Upper Register */
-#define IBAT2L SPRN_IBAT2L /* Instruction BAT 2 Lower Register */
-#define IBAT2U SPRN_IBAT2U /* Instruction BAT 2 Upper Register */
-#define IBAT3L SPRN_IBAT3L /* Instruction BAT 3 Lower Register */
-#define IBAT3U SPRN_IBAT3U /* Instruction BAT 3 Upper Register */
-#define IBAT4L SPRN_IBAT4L /* Instruction BAT 4 Lower Register */
-#define IBAT4U SPRN_IBAT4U /* Instruction BAT 4 Upper Register */
-#define IBAT5L SPRN_IBAT5L /* Instruction BAT 5 Lower Register */
-#define IBAT5U SPRN_IBAT5U /* Instruction BAT 5 Upper Register */
-#define IBAT6L SPRN_IBAT6L /* Instruction BAT 6 Lower Register */
-#define IBAT6U SPRN_IBAT6U /* Instruction BAT 6 Upper Register */
-#define IBAT7L SPRN_IBAT7L /* Instruction BAT 7 Lower Register */
-#define IBAT7U SPRN_IBAT7U /* Instruction BAT 7 Upper Register */
-#define ICMP SPRN_ICMP /* Instruction TLB Compare Register */
-#define IMISS SPRN_IMISS /* Instruction TLB Miss Register */
-#define IMMR SPRN_IMMR /* PPC 860/821 Internal Memory Map Register */
-#define L2CR SPRN_L2CR /* Classic PPC L2 cache control register */
-#define L3CR SPRN_L3CR /* PPC 745x L3 cache control register */
-//#define LR SPRN_LR
-#define PVR SPRN_PVR /* Processor Version */
-//#define RPA SPRN_RPA /* Required Physical Address Register */
-#define SDR1 SPRN_SDR1 /* MMU hash base register */
-#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
-#define SPR1 SPRN_SPRG1
-#define SPR2 SPRN_SPRG2
-#define SPR3 SPRN_SPRG3
-#define SPR4 SPRN_SPRG4
-#define SPR5 SPRN_SPRG5
-#define SPR6 SPRN_SPRG6
-#define SPR7 SPRN_SPRG7
-#define SPRG0 SPRN_SPRG0
-#define SPRG1 SPRN_SPRG1
-#define SPRG2 SPRN_SPRG2
-#define SPRG3 SPRN_SPRG3
-#define SPRG4 SPRN_SPRG4
-#define SPRG5 SPRN_SPRG5
-#define SPRG6 SPRN_SPRG6
-#define SPRG7 SPRN_SPRG7
-#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
-#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
-#define SRR2 SPRN_SRR2 /* Save and Restore Register 2 */
-#define SRR3 SPRN_SRR3 /* Save and Restore Register 3 */
-#define SVR SPRN_SVR /* System Version Register */
-#define ICTC SPRN_ICTC /* Instruction Cache Throttling Control Reg */
-#define THRM1 SPRN_THRM1 /* Thermal Management Register 1 */
-#define THRM2 SPRN_THRM2 /* Thermal Management Register 2 */
-#define THRM3 SPRN_THRM3 /* Thermal Management Register 3 */
-#define XER SPRN_XER
-#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
-#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
-#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
-#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
-
/* Processor Version Register */
/* Processor Version Register (PVR) field extraction */
diff --git a/include/asm-ppc/reg_booke.h b/include/asm-ppc/reg_booke.h
index 5c025f9b28e4..4b03f8e26b72 100644
--- a/include/asm-ppc/reg_booke.h
+++ b/include/asm-ppc/reg_booke.h
@@ -427,26 +427,6 @@ do { \
#define SPEFSCR_FOVFE 0x00000004 /* Embedded FP overflow enable */
#define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */
-/* Short-hand for various SPRs. */
-#ifdef CONFIG_BOOKE
-#define CSRR0 SPRN_CSRR0 /* Critical Save and Restore Register 0 */
-#define CSRR1 SPRN_CSRR1 /* Critical Save and Restore Register 1 */
-#else
-#define CSRR0 SPRN_SRR2 /* Logically and functionally equivalent. */
-#define CSRR1 SPRN_SRR3 /* Logically and functionally equivalent. */
-#endif
-#define MCSRR0 SPRN_MCSRR0 /* Machine Check Save and Restore Register 0 */
-#define MCSRR1 SPRN_MCSRR1 /* Machine Check Save and Restore Register 1 */
-#define DCMP SPRN_DCMP /* Data TLB Compare Register */
-#define SPRG4R SPRN_SPRG4R /* Supervisor Private Registers */
-#define SPRG5R SPRN_SPRG5R
-#define SPRG6R SPRN_SPRG6R
-#define SPRG7R SPRN_SPRG7R
-#define SPRG4W SPRN_SPRG4W
-#define SPRG5W SPRN_SPRG5W
-#define SPRG6W SPRN_SPRG6W
-#define SPRG7W SPRN_SPRG7W
-
/*
* The IBM-403 is an even more odd special case, as it is much
* older than the IBM-405 series. We put these down here incase someone