author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2003-05-01 18:31:10 +1000
committer	Paul Mackerras <paulus@samba.org>	2003-05-01 18:31:10 +1000
commit	f9c39b08d9c089360d19df2bfa7f32e08f358d6a (patch)
tree	e7a34117e69c2b2105e609b9b87e8a24c7affe0e /arch/ppc/kernel
parent	f80da3c43cd40e649b6267d34b8598122df347e9 (diff)
PPC32: flush the cache more thoroughly on sleep.
Diffstat (limited to 'arch/ppc/kernel')
-rw-r--r--	arch/ppc/kernel/l2cr.S | 142
1 file changed, 116 insertions(+), 26 deletions(-)
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
index 6a94c425a537..c1fbb0c5cb0b 100644
--- a/arch/ppc/kernel/l2cr.S
+++ b/arch/ppc/kernel/l2cr.S
@@ -40,9 +40,11 @@
Author: Terry Greeniaus (tgree@phys.ualberta.ca)
Please e-mail updates to this file to me, thanks!
*/
+#include <linux/config.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
+#include <asm/cache.h>
/* Usage:
@@ -101,6 +103,8 @@ BEGIN_FTR_SECTION
blr
END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
+ mflr r9
+
/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
@@ -115,6 +119,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtmsr r4
isync
+ /* Before we perform the global invalidation, we must disable dynamic
+ * power management via HID0[DPM] to work around a processor bug where
+ * DPM can possibly interfere with the state machine in the processor
+ * that invalidates the L2 cache tags.
+ */
+ mfspr r8,HID0 /* Save HID0 in r8 */
+ rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
+ sync
+ mtspr HID0,r4 /* Disable DPM */
+ sync
+
+ /* Flush & disable L1 */
+ mr r5,r3
+ bl __flush_disable_L1
+ mr r3,r5
+
/* Get the current enable bit of the L2CR into r4 */
mfspr r4,L2CR
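
The hunk above moves the HID0[DPM] workaround ahead of the L1 flush. The rlwinm r4,r8,0,12,10 builds a wrap-around mask covering IBM bits 12 through 10, i.e. every bit except bit 11 (DPM), so the write clears only that bit while r8 keeps the original value for the restore near the end of the routine. The mr r5,r3 / mr r3,r5 pair simply preserves r3 across the bl, since __flush_disable_L1 clobbers it. A minimal C sketch of the same bit arithmetic, assuming HID0_DPM = 0x00100000 as in the kernel's asm/reg.h:

#include <stdint.h>
#include <stdio.h>

#define HID0_DPM 0x00100000u	/* dynamic power management, HID0 bit 11 */

int main(void)
{
	uint32_t hid0 = 0x8094c0a4u;		/* arbitrary example HID0 value */
	uint32_t saved = hid0;			/* "mfspr r8,HID0" */
	uint32_t dpm_off = hid0 & ~HID0_DPM;	/* "rlwinm r4,r8,0,12,10" */

	printf("HID0 %08x -> %08x with DPM cleared\n", hid0, dpm_off);
	/* ... L1 flush and L2 global invalidate run with DPM off ... */
	printf("HID0 restored to %08x\n", saved);	/* "mtspr 1008,r8" */
	return 0;
}
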
@@ -136,27 +156,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/**** Might be a good idea to set L2DO here - to prevent instructions
from getting into the cache. But since we invalidate
the next time we enable the cache it doesn't really matter.
- Don't do this unless you accomodate all processor variations.
+ Don't do this unless you accommodate all processor variations.
The bit moved on the 7450.....
****/
/* TODO: use HW flush assist when available */
- lis r4,0x0004
+ lis r4,0x0002
mtctr r4
li r4,0
1:
lwzx r0,r0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
+ isync
/* Now, flush the first 4MB of memory */
- lis r4,0x0004
+ lis r4,0x0002
mtctr r4
li r4,0
sync
1:
- dcbf r0,r4
+ dcbf 0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
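
Both loops above walk memory one 32-byte cache line at a time: the first pass loads a word from every line so the loads displace whatever the cache currently holds, and the second pass runs dcbf over the same lines to push them out to memory. Halving the counter from lis r4,0x0004 (0x40000 lines, 8MB) to lis r4,0x0002 (0x20000 lines, 4MB) makes the walk agree with the "first 4MB of memory" comment. A sketch of the touch-then-flush pattern in C for a PowerPC target (the base address and span are illustrative assumptions; dcbf has no portable C equivalent, hence the inline assembly):

#include <stdint.h>

#define L1_LINE	32			/* 750/7400 cache line size in bytes */
#define SPAN	(4 * 1024 * 1024)	/* 0x20000 lines x 32 bytes = 4MB */

/* Touch-then-flush over [base, base + SPAN); PowerPC-only because of dcbf. */
static void displacement_flush(volatile uint8_t *base)
{
	uintptr_t off;

	/* Pass 1: one load per line displaces the cache's current contents
	 * (the "lwzx r0,r0,r4" loop). */
	for (off = 0; off < SPAN; off += L1_LINE)
		(void)*(volatile uint32_t *)(base + off);

	/* Pass 2: write each of those lines back out (the "dcbf 0,r4" loop). */
	for (off = 0; off < SPAN; off += L1_LINE)
		__asm__ volatile ("dcbf 0,%0" : : "r" (base + off) : "memory");

	__asm__ volatile ("sync" : : : "memory");	/* order the flushes */
}
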
@@ -166,25 +187,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* L1 icache
*/
b 20f
-21:
+ .balign L1_CACHE_LINE_SIZE
+22:
sync
mtspr L2CR,r3
sync
- b 22f
+ b 23f
20:
- b 21b
-22:
- /* Before we perform the global invalidation, we must disable dynamic
- * power management via HID0[DPM] to work around a processor bug where
- * DPM can possibly interfere with the state machine in the processor
- * that invalidates the L2 cache tags.
- */
- mfspr r8,HID0 /* Save HID0 in r8 */
- rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
- sync
- mtspr HID0,r4 /* Disable DPM */
- sync
-
+ b 21f
+21: sync
+ isync
+ b 22b
+
+23:
/* Perform a global invalidation */
oris r3,r3,0x0020
sync
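
The renumbered branch ladder above (20, then 21, then back to 22, landing at 23) exists to pull the mtspr L2CR sequence into the L1 icache before the L2 is torn down, so no instruction fetch has to go through the L2 while its tags are being invalidated. The invalidation itself is a simple handshake: set L2CR[L2I] and wait for completion. A runnable C sketch with a fake register standing in for mfspr/mtspr (bit values from the 750 manuals; as the CPU_FTR_SPEC7450 section below reflects, 745x parts poll L2I itself rather than L2IP):

#include <stdint.h>
#include <stdio.h>

#define L2CR_L2I	0x00200000u	/* global invalidate ("oris r3,r3,0x0020") */
#define L2CR_L2IP	0x00000001u	/* invalidate in progress */

/* Fake SPR so the sketch runs anywhere; real code uses mfspr/mtspr L2CR. */
static uint32_t l2cr_shadow;
static uint32_t read_l2cr(void) { return l2cr_shadow; }
static void write_l2cr(uint32_t v) { l2cr_shadow = v; }

static void l2_global_invalidate(void)
{
	write_l2cr(read_l2cr() | L2CR_L2I);	/* kick off the invalidate */
	while (read_l2cr() & L2CR_L2IP)		/* spin until the L2 is done */
		;
	write_l2cr(read_l2cr() & ~L2CR_L2I);	/* clear the invalidate bit */
}

int main(void)
{
	l2_global_invalidate();
	printf("L2CR after invalidate: %08x\n", read_l2cr());
	return 0;
}
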
@@ -211,11 +226,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
mtspr L2CR,r3
sync
- /* Restore HID0[DPM] to whatever it was before */
- sync
- mtspr 1008,r8
- sync
-
/* See if we need to enable the cache */
cmplwi r5,0
beq 4f
@@ -225,10 +235,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
mtspr L2CR,r3
sync
+4:
+ bl __inval_enable_L1
+
+ /* Restore HID0[DPM] to whatever it was before */
+ sync
+ mtspr 1008,r8
+ sync
+
/* Restore MSR (restores EE and DR bits to original state) */
-4: SYNC
+ SYNC
mtmsr r7
isync
+
+ mtlr r9
blr
_GLOBAL(_get_L2CR)
@@ -286,7 +306,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
li r4,0
1:
lwzx r0,r0,r4
- dcbf r0,r4
+ dcbf 0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
@@ -360,3 +380,73 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
/* --- End of PowerLogix code ---
*/
+
+
+/* __flush_disable_L1 - Flush and disable L1 cache
+ *
+ * clobbers r0, r3, ctr, cr0
+ *
+ */
+ .globl __flush_disable_L1
+ __flush_disable_L1:
+
+	/* Stop pending altivec streams and memory accesses */
+BEGIN_FTR_SECTION
+ DSSALL
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ sync
+
+	/* Load the counter with 0x20000 cache lines (4MB) and
+	 * fill the cache with data
+	 */
+ lis r3,0x0002
+// li r3,0x1000 /* 128kB / 32B */
+ mtctr r3
+ li r3, 0
+1:
+ lwz r0,0(r3)
+ addi r3,r3,0x0020 /* Go to start of next cache line */
+ bdnz 1b
+ isync
+ sync
+
+ /* Now flush those cache lines */
+ lis r3,0x0002
+// li r3,0x1000 /* 128kB / 32B */
+ mtctr r3
+ li r3, 0
+1:
+ dcbf 0,r3
+ addi r3,r3,0x0020 /* Go to start of next cache line */
+ bdnz 1b
+ sync
+
+ /* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
+ mfspr r3,SPRN_HID0
+ rlwinm r3,r3,0,18,15
+ mtspr SPRN_HID0,r3
+ sync
+ isync
+ blr
+
+/* __inval_enable_L1 - Invalidate and enable L1 cache
+ *
+ * Assumes L1 is already disabled and MSR:EE is off
+ *
+ * clobbers r3
+ */
+ .globl __inval_enable_L1
+ __inval_enable_L1:
+	/* Enable and then flash-invalidate the instruction & data caches */
+ mfspr r3,SPRN_HID0
+ ori r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
+ sync
+ isync
+ mtspr SPRN_HID0,r3
+ xori r3,r3, HID0_ICFI|HID0_DCI
+ mtspr SPRN_HID0,r3
+ sync
+
+ blr
+
+
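
The new __inval_enable_L1 relies on the HID0 flash-invalidate bits: the first mtspr sets ICE/DCE together with ICFI/DCI, which enables both L1 caches and starts their one-shot invalidation, and the second mtspr drops only the invalidate bits. The same sequence in plain C, using the HID0 masks as defined in the kernel's asm/reg.h:

#include <stdint.h>
#include <stdio.h>

#define HID0_ICE	0x00008000u	/* instruction cache enable */
#define HID0_DCE	0x00004000u	/* data cache enable */
#define HID0_ICFI	0x00000800u	/* icache flash invalidate */
#define HID0_DCI	0x00000400u	/* dcache flash invalidate */

int main(void)
{
	uint32_t hid0 = 0;	/* caches left disabled by __flush_disable_L1 */

	/* First write: enable both caches and pulse the invalidate bits
	 * ("ori r3,r3,HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI"). */
	hid0 |= HID0_ICE | HID0_ICFI | HID0_DCE | HID0_DCI;

	/* Second write: clear only ICFI/DCI, leaving ICE/DCE set
	 * ("xori r3,r3,HID0_ICFI|HID0_DCI"). */
	hid0 ^= HID0_ICFI | HID0_DCI;

	printf("final HID0 = %08x (ICE|DCE)\n", hid0);
	return 0;
}
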