author     Andrew Morton <akpm@osdl.org>            2004-01-18 18:22:27 -0800
committer  Linus Torvalds <torvalds@home.osdl.org>  2004-01-18 18:22:27 -0800
commit     d4c6e4e119d3cd89b80321c7abb4914d33b594d1
tree       d506891ca348238669b141c29512189ba5349ff3
parent     820880f327cc7b168368800f6d0c4f60a731053a
[PATCH] ppc64: fix POWER3 boot
From: Anton Blanchard <anton@samba.org>

Binutils uses the recent mtcrf optimisation when compiling for a POWER4 target. Unfortunately this causes a program check on POWER3. We required compiling for POWER4 so that the tlbiel instruction would be recognised. For the moment we hardwire the tlbiel instruction; longer term we can use the binutils -many flag.
-rw-r--r--  arch/ppc64/Kconfig                |  8
-rw-r--r--  arch/ppc64/Makefile               |  9
-rw-r--r--  arch/ppc64/kernel/pSeries_htab.c  | 26
-rw-r--r--  include/asm-ppc64/mmu.h           | 44
4 files changed, 56 insertions, 31 deletions
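
The hardwired tlbiel introduced below is only ever reached behind a CPU feature check, which is why it cannot break POWER3 or RS64 at runtime. A stand-alone sketch of that gate, with the kernel's cpu_spec stubbed out as plain data (the struct contents and flag value here are illustrative; only the field and flag names match the pSeries_htab.c hunks below):

/* Stand-alone model of the CPU_FTR_TLBIEL gate used in pSeries_htab.c.
 * The cpu_spec contents and the flag value are stand-ins; in the kernel the
 * real cur_cpu_spec is selected at boot from the processor version register. */
#include <stdio.h>

#define CPU_FTR_TLBIEL 0x0001	/* illustrative value, not the kernel's */

struct cpu_spec {
	unsigned long cpu_features;
};

static struct cpu_spec power3 = { .cpu_features = 0 };
static struct cpu_spec power4 = { .cpu_features = CPU_FTR_TLBIEL };

/* Mirrors the branch in pSeries_hpte_invalidate(): only a POWER4-class CPU
 * doing a local, small-page invalidate may use tlbiel; everything else takes
 * the broadcast tlbie path under the global lock. */
static const char *pick_flush(struct cpu_spec *cur, int large, int local)
{
	if ((cur->cpu_features & CPU_FTR_TLBIEL) && !large && local)
		return "tlbiel (local)";
	return "tlbie (broadcast, under pSeries_tlbie_lock)";
}

int main(void)
{
	printf("POWER3, local small page: %s\n", pick_flush(&power3, 0, 1));
	printf("POWER4, local small page: %s\n", pick_flush(&power4, 0, 1));
	printf("POWER4, large page:       %s\n", pick_flush(&power4, 1, 1));
	return 0;
}
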
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 1ee3c6877aa5..ed83c105e192 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -72,6 +72,14 @@ config PPC64
bool
default y
+config POWER4_ONLY
+ bool "Optimize for POWER4"
+ default n
+ ---help---
+ Cause the compiler to optimize for POWER4 processors. The resulting
+ binary will not work on POWER3 or RS64 processors when compiled with
+ binutils 2.15 or later.
+
config SMP
bool "Symmetric multi-processing support"
---help---
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 86936cd92a95..dd8796644774 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -28,8 +28,13 @@ endif
LDFLAGS := -m elf64ppc
LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
-CFLAGS += -msoft-float -pipe -Wno-uninitialized -mminimal-toc \
- -mcpu=power4
+CFLAGS += -msoft-float -pipe -Wno-uninitialized -mminimal-toc
+
+ifeq ($(CONFIG_POWER4_ONLY),y)
+CFLAGS += -mcpu=power4
+else
+CFLAGS += -mtune=power4
+endif
have_zero_bss := $(shell if $(CC) -fno-zero-initialized-in-bss -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo y; else echo n; fi)
diff --git a/arch/ppc64/kernel/pSeries_htab.c b/arch/ppc64/kernel/pSeries_htab.c
index d76001dc1d2a..9b515914bb52 100644
--- a/arch/ppc64/kernel/pSeries_htab.c
+++ b/arch/ppc64/kernel/pSeries_htab.c
@@ -219,10 +219,10 @@ static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
/* Ensure it is out of the tlb too */
if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
- _tlbiel(va);
+ tlbiel(va);
} else {
spin_lock_irqsave(&pSeries_tlbie_lock, flags);
- _tlbie(va, large);
+ tlbie(va, large);
spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
@@ -256,7 +256,7 @@ static void pSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
/* Ensure it is out of the tlb too */
spin_lock_irqsave(&pSeries_tlbie_lock, flags);
- _tlbie(va, 0);
+ tlbie(va, 0);
spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
@@ -285,10 +285,10 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
/* Invalidate the tlb */
if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
- _tlbiel(va);
+ tlbiel(va);
} else {
spin_lock_irqsave(&pSeries_tlbie_lock, flags);
- _tlbie(va, large);
+ tlbie(va, large);
spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
}
@@ -350,12 +350,8 @@ static void pSeries_flush_hash_range(unsigned long context,
if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
asm volatile("ptesync":::"memory");
- for (i = 0; i < j; i++) {
- asm volatile("\n\
- clrldi %0,%0,16\n\
- tlbiel %0"
- : : "r" (batch->vaddr[i]) : "memory" );
- }
+ for (i = 0; i < j; i++)
+ __tlbiel(batch->vaddr[i]);
asm volatile("ptesync":::"memory");
} else {
@@ -364,12 +360,8 @@ static void pSeries_flush_hash_range(unsigned long context,
asm volatile("ptesync":::"memory");
- for (i = 0; i < j; i++) {
- asm volatile("\n\
- clrldi %0,%0,16\n\
- tlbie %0"
- : : "r" (batch->vaddr[i]) : "memory" );
- }
+ for (i = 0; i < j; i++)
+ __tlbie(batch->vaddr[i], 0);
asm volatile("eieio; tlbsync; ptesync":::"memory");
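
The open-coded clrldi %0,%0,16 removed from the two loops above becomes a plain C mask in the new helpers (va &= ~(0xffffULL << 48) in the mmu.h hunk below). A quick stand-alone check that the mask clears exactly the 16 most-significant bits, the same bits clrldi rX,rX,16 clears:

/* clrldi rX,rX,16 clears bits 0-15 in IBM (big-endian) bit numbering, i.e.
 * the 16 most-significant bits of the 64-bit value.  The C mask in the new
 * __tlbie()/__tlbiel() helpers does the same thing. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long va = 0xc000000012345678ULL;	/* arbitrary kernel VA */
	unsigned long long masked = va & ~(0xffffULL << 48);

	/* equivalent formulation: keep only the low 48 bits */
	assert(masked == (va & 0x0000ffffffffffffULL));
	printf("0x%016llx -> 0x%016llx\n", va, masked);
	return 0;
}
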
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 0a6b77e3d725..e7ac473b5162 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -202,26 +202,46 @@ static inline unsigned long hpt_hash(unsigned long vpn, int large)
return (vsid & 0x7fffffffff) ^ page;
}
-static inline void _tlbie(unsigned long va, int large)
+static inline void __tlbie(unsigned long va, int large)
{
- asm volatile("ptesync": : :"memory");
+ /* clear top 16 bits, non SLS segment */
+ va &= ~(0xffffULL << 48);
- if (large) {
- asm volatile("clrldi %0,%0,16\n\
- tlbie %0,1" : : "r"(va) : "memory");
- } else {
- asm volatile("clrldi %0,%0,16\n\
- tlbie %0,0" : : "r"(va) : "memory");
- }
+ if (large)
+ asm volatile("tlbie %0,1" : : "r"(va) : "memory");
+ else
+ asm volatile("tlbie %0,0" : : "r"(va) : "memory");
+}
+static inline void tlbie(unsigned long va, int large)
+{
+ asm volatile("ptesync": : :"memory");
+ __tlbie(va, large);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
-static inline void _tlbiel(unsigned long va)
+static inline void __tlbiel(unsigned long va)
+{
+ /* clear top 16 bits, non SLS segment */
+ va &= ~(0xffffULL << 48);
+
+ /*
+ * Thanks to Alan Modra we are now able to use machine specific
+ * assembly instructions (like tlbiel) by using the gas -many flag.
+ * However we have to support older toolchains so for the moment
+ * we hardwire it.
+ */
+#if 0
+ asm volatile("tlbiel %0" : : "r"(va) : "memory");
+#else
+ asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
+#endif
+}
+
+static inline void tlbiel(unsigned long va)
{
asm volatile("ptesync": : :"memory");
- asm volatile("clrldi %0,%0,16\n\
- tlbiel %0" : : "r"(va) : "memory");
+ __tlbiel(va);
asm volatile("ptesync": : :"memory");
}
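
For reference, the .long in __tlbiel() above hand-assembles the X-form tlbiel encoding: primary opcode 31, extended opcode 274, with the RB register number in bits 16-20 (a left shift of 11 in the 32-bit word). A stand-alone sketch that reproduces the constant; encode_tlbiel() is an illustrative helper, not a kernel interface:

/* Reconstruct the hand-encoded tlbiel used in __tlbiel() above.
 * X-form: opcode 31 in bits 0-5, RB in bits 16-20, extended opcode 274
 * in bits 21-30. */
#include <stdio.h>
#include <stdint.h>

static uint32_t encode_tlbiel(unsigned int rb)
{
	uint32_t base = (31u << 26) | (274u << 1);	/* == 0x7c000224 */
	return base | ((rb & 0x1f) << 11);		/* RB in bits 16-20 */
}

int main(void)
{
	/* GCC substitutes the allocated register number for %0 in the inline
	 * asm, so ".long 0x7c000224 | (%0 << 11)" with the address in r4
	 * assembles to this word: */
	printf("tlbiel r4 = 0x%08x\n", encode_tlbiel(4));
	printf("base word = 0x%08x\n", encode_tlbiel(0));
	return 0;
}
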