summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  arch/ppc64/kernel/Makefile  5
-rw-r--r--  arch/ppc64/kernel/head.S  2
-rw-r--r--  arch/ppc64/kernel/htab.c  11
-rw-r--r--  include/asm-ppc64/cache.h  3
4 files changed, 15 insertions, 6 deletions
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index 335e49173c75..d3021607968b 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -45,6 +45,11 @@ include $(TOPDIR)/Rules.make
#
head.o: head.S ppc_defs.h
+misc.o: misc.S ppc_defs.h
+entry.o: entry.S ppc_defs.h
+hvCall.o: hvCall.S ppc_defs.h
+pSeries_hvCall.o: pSeries_hvCall.S ppc_defs.h
+sys32.o: sys32.S ppc_defs.h
ppc_defs.h: mk_defs.c ppc_defs.head \
$(TOPDIR)/include/asm/mmu.h \
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 0d2c0b4964f8..16dce2c1085d 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -853,11 +853,13 @@ _GLOBAL(do_stab_bolted)
ld r22,8(r21) /* get SRR1 */
andi. r22,r22,MSR_PR /* check if from user */
bne+ stab_bolted_user_return /* from user, send the error on up */
+#if 0
li r3,0
#ifdef CONFIG_XMON
bl .xmon
#endif
1: b 1b
+#endif
2:
/* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
mfspr r21,DAR
diff --git a/arch/ppc64/kernel/htab.c b/arch/ppc64/kernel/htab.c
index 3f4741cdb6c2..5f820365d476 100644
--- a/arch/ppc64/kernel/htab.c
+++ b/arch/ppc64/kernel/htab.c
@@ -210,7 +210,7 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
* If no pte found or not present, send the problem up to
* do_page_fault
*/
- if (!ptep || !pte_present(*ptep))
+ if (unlikely(!ptep || !pte_present(*ptep)))
return 1;
/*
@@ -218,7 +218,7 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
* prevented then send the problem up to do_page_fault.
*/
access |= _PAGE_PRESENT;
- if (access & ~(pte_val(*ptep)))
+ if (unlikely(access & ~(pte_val(*ptep))))
return 1;
/*
@@ -246,7 +246,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
#define PPC64_HWNOEXEC (1 << 2)
/* We do lazy icache flushing on POWER4 */
- if (__is_processor(PV_POWER4) && pfn_valid(pte_pfn(new_pte))) {
+ if (unlikely(__is_processor(PV_POWER4) &&
+ pfn_valid(pte_pfn(new_pte)))) {
struct page *page = pte_page(new_pte);
/* page is dirty */
@@ -262,7 +263,7 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
}
/* Check if pte already has an hpte (case 2) */
- if (pte_val(old_pte) & _PAGE_HASHPTE) {
+ if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
/* There MIGHT be an HPTE for this pte */
unsigned long hash, slot, secondary;
@@ -282,7 +283,7 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
*ptep = new_pte;
}
- if (!(pte_val(old_pte) & _PAGE_HASHPTE)) {
+ if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
/* XXX fix large pte flag */
unsigned long hash = hpt_hash(vpn, 0);
unsigned long hpte_group;
diff --git a/include/asm-ppc64/cache.h b/include/asm-ppc64/cache.h
index 00928de36087..e3f2b238459f 100644
--- a/include/asm-ppc64/cache.h
+++ b/include/asm-ppc64/cache.h
@@ -8,6 +8,7 @@
#define __ARCH_PPC64_CACHE_H
/* bytes per L1 cache line */
-#define L1_CACHE_BYTES 128
+#define L1_CACHE_SHIFT 7
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif