summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndrew Morton <akpm@osdl.org>2004-05-31 18:48:25 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2004-05-31 18:48:25 -0700
commitdbfd829db92e75e8a3ed0664de01efb6f7b800de (patch)
treed47209c541bcb9144ef445f47625018eda0f0bc8
parent1348305a129afe22389811e9ac28c4103cc3e7e7 (diff)
[PATCH] ppc64: iseries bolted SLB fix
From: Anton Blanchard <anton@samba.org> Legacy iseries has problems with the bolted vmalloc patch. This patch disables the optimisation on iseries and creates a slb_add_bolted helper function. Also, while we require all SLB entries to be context switched, we weren't informing the iseries hypervisor. Signed-off-by: Anton Blanchard <anton@samba.org> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--arch/ppc64/kernel/pacaData.c4
-rw-r--r--arch/ppc64/kernel/stab.c49
2 files changed, 25 insertions, 28 deletions
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/ppc64/kernel/pacaData.c
index b12048b4dc53..d283f4897699 100644
--- a/arch/ppc64/kernel/pacaData.c
+++ b/arch/ppc64/kernel/pacaData.c
@@ -42,6 +42,7 @@ struct systemcfg *systemcfg;
.xStab_data = { \
.real = (asrr), /* Real pointer to segment table */ \
.virt = (asrv), /* Virt pointer to segment table */ \
+ .next_round_robin = 1, \
}, \
.lpQueuePtr = (lpq), /* &xItLpQueue, */ \
/* .xRtas = { \
@@ -54,7 +55,8 @@ struct systemcfg *systemcfg;
.xFPRegsInUse = 1, \
.xDynProcStatus = 2, \
.xDecrVal = 0x00ff0000, \
- .xEndOfQuantum = 0xfffffffffffffffful \
+ .xEndOfQuantum = 0xfffffffffffffffful, \
+ .xSLBCount = 64, \
}, \
.xRegSav = { \
.xDesc = 0xd397d9e2, /* "LpRS" */ \
diff --git a/arch/ppc64/kernel/stab.c b/arch/ppc64/kernel/stab.c
index 2a0873928228..0618904494b3 100644
--- a/arch/ppc64/kernel/stab.c
+++ b/arch/ppc64/kernel/stab.c
@@ -24,6 +24,23 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
static void make_slbe(unsigned long esid, unsigned long vsid, int large,
int kernel_segment);
+static inline void slb_add_bolted(void)
+{
+#ifndef CONFIG_PPC_ISERIES
+ unsigned long esid = GET_ESID(VMALLOCBASE);
+ unsigned long vsid = get_kernel_vsid(VMALLOCBASE);
+
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * Bolt in the first vmalloc segment. Since modules end
+ * up there it gets hit very heavily.
+ */
+ get_paca()->xStab_data.next_round_robin = 1;
+ make_slbe(esid, vsid, 0, 1);
+#endif
+}
+
/*
* Build an entry for the base kernel segment and put it into
* the segment table or SLB. All other segment table or SLB
@@ -44,26 +61,16 @@ void stab_initialize(unsigned long stab)
/* Invalidate the entire SLB & all the ERATS */
#ifdef CONFIG_PPC_ISERIES
asm volatile("isync; slbia; isync":::"memory");
- /*
- * The hypervisor loads SLB entry 0, but we need to increment
- * next_round_robin to avoid overwriting it
- */
- get_paca()->xStab_data.next_round_robin = 1;
#else
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
+ get_paca()->xStab_data.next_round_robin = 0;
make_slbe(esid, vsid, seg0_largepages, 1);
asm volatile("isync":::"memory");
#endif
- /*
- * Bolt in the first vmalloc segment. Since modules end
- * up there it gets hit very heavily.
- */
- esid = GET_ESID(VMALLOCBASE);
- vsid = get_kernel_vsid(VMALLOCBASE);
- make_slbe(esid, vsid, 0, 1);
+ slb_add_bolted();
} else {
asm volatile("isync; slbia; isync":::"memory");
make_ste(stab, esid, vsid);
@@ -443,8 +450,9 @@ int slb_allocate(unsigned long ea)
}
esid = GET_ESID(ea);
-
+#ifndef CONFIG_PPC_ISERIES
BUG_ON((esid << SID_SHIFT) == VMALLOCBASE);
+#endif
__slb_allocate(esid, vsid, context);
return 0;
@@ -501,9 +509,6 @@ void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
unsigned long word0;
slb_dword0 data;
} esid_data;
- unsigned long esid, vsid;
-
- WARN_ON(!irqs_disabled());
if (offset <= NR_STAB_CACHE_ENTRIES) {
int i;
@@ -517,17 +522,7 @@ void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
asm volatile("isync" : : : "memory");
} else {
asm volatile("isync; slbia; isync" : : : "memory");
-
- /*
- * Bolt in the first vmalloc segment. Since modules end
- * up there it gets hit very heavily. We must not touch
- * the vmalloc region between the slbia and here, thats
- * why we require interrupts off.
- */
- esid = GET_ESID(VMALLOCBASE);
- vsid = get_kernel_vsid(VMALLOCBASE);
- get_paca()->xStab_data.next_round_robin = 1;
- make_slbe(esid, vsid, 0, 1);
+ slb_add_bolted();
}
/* Workaround POWER5 < DD2.1 issue */