summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Hugh Dickins <hugh@veritas.com> 2005-03-13 00:21:15 -0800
committer: Linus Torvalds <torvalds@ppc970.osdl.org> 2005-03-13 00:21:15 -0800
commit: 772bb69313ec9939720bbee9681cda32f71a33b9 (patch)
tree: 4a7203bcace741c96c65ca9f14dd0a99966a0b62
parent: f71513bf9a2ea83fcd2e2c985884a6ab806d24fd (diff)
[PATCH] ptwalk: pud and pmd folded
Nick Piggin's patch to fold away most of the pud and pmd levels when not required. Adjusted to define minimal pud_addr_end (in the 4LEVEL_HACK case too) and pmd_addr_end. Responsible for half of the savings. Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/asm-generic/4level-fixup.h    |  4
-rw-r--r--  include/asm-generic/pgtable-nopmd.h   |  5
-rw-r--r--  include/asm-generic/pgtable-nopud.h   |  5
-rw-r--r--  include/asm-generic/pgtable.h         |  4
-rw-r--r--  mm/memory.c                           | 34
5 files changed, 26 insertions, 26 deletions
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 02675742f472..c20ec257ecc0 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -2,6 +2,7 @@
#define _4LEVEL_FIXUP_H
#define __ARCH_HAS_4LEVEL_HACK
+#define __PAGETABLE_PUD_FOLDED
#define PUD_SIZE PGDIR_SIZE
#define PUD_MASK PGDIR_MASK
@@ -31,4 +32,7 @@
#define pud_free(x) do { } while (0)
#define __pud_free_tlb(tlb, x) do { } while (0)
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
#endif
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index b7714d41138c..c8d53ba20e19 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -5,6 +5,8 @@
#include <asm-generic/pgtable-nopud.h>
+#define __PAGETABLE_PMD_FOLDED
+
/*
* Having the pmd type consist of a pud gets the size right, and allows
* us to conceptually access the pud entry that this pmd is folded into
@@ -55,6 +57,9 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
#define pmd_free(x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
+#undef pmd_addr_end
+#define pmd_addr_end(addr, end) (end)
+
#endif /* __ASSEMBLY__ */
#endif /* _PGTABLE_NOPMD_H */
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index ffce31fef970..82e29f0ce467 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -3,6 +3,8 @@
#ifndef __ASSEMBLY__
+#define __PAGETABLE_PUD_FOLDED
+
/*
* Having the pud type consist of a pgd gets the size right, and allows
* us to conceptually access the pgd entry that this pud is folded into
@@ -52,5 +54,8 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
#define pud_free(x) do { } while (0)
#define __pud_free_tlb(tlb, x) do { } while (0)
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
#endif /* __ASSEMBLY__ */
#endif /* _PGTABLE_NOPUD_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 20f401dcffda..60148466d023 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -146,15 +146,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
+#ifndef pud_addr_end
#define pud_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
+#endif
+#ifndef pmd_addr_end
#define pmd_addr_end(addr, end) \
({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
+#endif
#ifndef __ASSEMBLY__
/*
diff --git a/mm/memory.c b/mm/memory.c
index e1a0814602f5..db09f2089ed2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1973,15 +1973,12 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
return VM_FAULT_OOM;
}
-#ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.
*
* We've already handled the fast-path in-line, and we own the
* page table lock.
- *
- * On a two-level or three-level page table, this ends up actually being
- * entirely optimized away.
*/
pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
@@ -2005,15 +2002,14 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
out:
return pud_offset(pgd, address);
}
+#endif /* __PAGETABLE_PUD_FOLDED */
+#ifndef __PAGETABLE_PMD_FOLDED
/*
* Allocate page middle directory.
*
* We've already handled the fast-path in-line, and we own the
* page table lock.
- *
- * On a two-level page table, this ends up actually being entirely
- * optimized away.
*/
pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
@@ -2029,38 +2025,24 @@ pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr
* Because we dropped the lock, we should re-check the
* entry, as somebody else could have populated it..
*/
+#ifndef __ARCH_HAS_4LEVEL_HACK
if (pud_present(*pud)) {
pmd_free(new);
goto out;
}
pud_populate(mm, pud, new);
- out:
- return pmd_offset(pud, address);
-}
#else
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-{
- pmd_t *new;
-
- spin_unlock(&mm->page_table_lock);
- new = pmd_alloc_one(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
- return NULL;
-
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
if (pgd_present(*pud)) {
pmd_free(new);
goto out;
}
pgd_populate(mm, pud, new);
-out:
+#endif /* __ARCH_HAS_4LEVEL_HACK */
+
+ out:
return pmd_offset(pud, address);
}
-#endif
+#endif /* __PAGETABLE_PMD_FOLDED */
int make_pages_present(unsigned long addr, unsigned long end)
{