author	Russell King <rmk@flint.arm.linux.org.uk>	2003-04-27 19:51:24 +0100
committer	Russell King <rmk@flint.arm.linux.org.uk>	2003-04-27 19:51:24 +0100
commit	d7181b4b753c8a5a6893036eeabe431724b21bd0 (patch)
tree	4c7f7470a22cdc91aa42024ae4ae16526ef7e6c6 /include/asm-arm
parent	0faa91fd7b1fb0ecc2251fbdc3c695aaf78b2389 (diff)
[ARM] Inline PMD entry cache handling
The common case is building a kernel for one CPU type, and we are able to allow GCC to optimise away the PMD entry cache handling assembly which will never be used.
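
As a rough illustration of that point, here is a standalone sketch (not kernel code; CONFIG_ONE_CPU, my_cpu_tlb_flags and my_flush_pmd_entry are hypothetical stand-ins for the kernel's own names): when the flag word describing the CPU's cache/write-buffer needs is a compile-time constant, the tests inside an inline helper fold away and GCC emits only the maintenance instructions that CPU actually requires.

#define MY_TLB_DCLEAN	(1 << 30)	/* CPU needs a D-cache clean on PMD update */
#define MY_TLB_WB	(1 << 31)	/* CPU has a write buffer to drain */

#ifdef CONFIG_ONE_CPU				/* one CPU type selected at build time */
#define my_cpu_tlb_flags (MY_TLB_WB | MY_TLB_DCLEAN)	/* compile-time constant */
#else
extern unsigned long my_cpu_tlb_flags;		/* resolved at run time instead */
#endif

static inline void my_flush_pmd_entry(unsigned long *pmd)
{
	const unsigned long flags = my_cpu_tlb_flags;

	/*
	 * With my_cpu_tlb_flags constant, GCC folds these tests and drops
	 * any branch (and the asm it guards) that can never run on this CPU.
	 */
	if (flags & MY_TLB_DCLEAN)
		asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (pmd));	/* clean D-cache line */
	if (flags & MY_TLB_WB)
		asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));	/* drain write buffer */
}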
Diffstat (limited to 'include/asm-arm')
-rw-r--r--	include/asm-arm/cpu-multi32.h	5
-rw-r--r--	include/asm-arm/cpu-single.h	2
-rw-r--r--	include/asm-arm/proc-armv/pgalloc.h	5
-rw-r--r--	include/asm-arm/proc-armv/pgtable.h	20
-rw-r--r--	include/asm-arm/proc-armv/tlbflush.h	40
5 files changed, 54 insertions, 18 deletions
diff --git a/include/asm-arm/cpu-multi32.h b/include/asm-arm/cpu-multi32.h
index 0ed88c812944..59835af6fa00 100644
--- a/include/asm-arm/cpu-multi32.h
+++ b/include/asm-arm/cpu-multi32.h
@@ -94,10 +94,6 @@ extern struct processor {
*/
void (*set_pgd)(unsigned long pgd_phys, struct mm_struct *mm);
/*
- * Set a PMD (handling IMP bit 4)
- */
- void (*flush_pmd)(pmd_t *pmdp);
- /*
* Set a PTE
*/
void (*set_pte)(pte_t *ptep, pte_t pte);
@@ -126,7 +122,6 @@ extern const struct processor sa110_processor_functions;
#define cpu_icache_invalidate_page(vp) processor.icache.invalidate_page(vp)
#define cpu_set_pgd(pgd,mm) processor.pgtable.set_pgd(pgd,mm)
-#define cpu_flush_pmd(pmdp) processor.pgtable.flush_pmd(pmdp)
#define cpu_set_pte(ptep, pte) processor.pgtable.set_pte(ptep, pte)
#define cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm)
diff --git a/include/asm-arm/cpu-single.h b/include/asm-arm/cpu-single.h
index 843e3a03db58..738b61dc2553 100644
--- a/include/asm-arm/cpu-single.h
+++ b/include/asm-arm/cpu-single.h
@@ -36,7 +36,6 @@
#define cpu_icache_invalidate_range __cpu_fn(CPU_NAME,_icache_invalidate_range)
#define cpu_icache_invalidate_page __cpu_fn(CPU_NAME,_icache_invalidate_page)
#define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd)
-#define cpu_flush_pmd __cpu_fn(CPU_NAME,_flush_pmd)
#define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte)
#ifndef __ASSEMBLY__
@@ -65,7 +64,6 @@ extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end);
extern void cpu_icache_invalidate_page(void *virt_page);
extern void cpu_set_pgd(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_flush_pmd(pmd_t *pmdp);
extern void cpu_set_pte(pte_t *ptep, pte_t pte);
extern volatile void cpu_reset(unsigned long addr);
diff --git a/include/asm-arm/proc-armv/pgalloc.h b/include/asm-arm/proc-armv/pgalloc.h
index 3263c346ccba..0e65ab7362e4 100644
--- a/include/asm-arm/proc-armv/pgalloc.h
+++ b/include/asm-arm/proc-armv/pgalloc.h
@@ -6,6 +6,7 @@
* Page table allocation/freeing primitives for 32-bit ARM processors.
*/
#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
#include "pgtable.h"
/*
@@ -92,7 +93,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
- cpu_flush_pmd(pmdp);
+ flush_pmd_entry(pmdp);
}
static inline void
@@ -105,5 +106,5 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
- cpu_flush_pmd(pmdp);
+ flush_pmd_entry(pmdp);
}
diff --git a/include/asm-arm/proc-armv/pgtable.h b/include/asm-arm/proc-armv/pgtable.h
index 53f2b3da4d16..616d80d69b1f 100644
--- a/include/asm-arm/proc-armv/pgtable.h
+++ b/include/asm-arm/proc-armv/pgtable.h
@@ -51,6 +51,7 @@
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
#define PMD_SECT_UNCACHED (0)
+#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
@@ -120,14 +121,19 @@
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
-#define set_pmd(pmdp,pmd) do { *pmdp = pmd; cpu_flush_pmd(pmdp); } while (0)
-static inline void pmd_clear(pmd_t *pmdp)
-{
- pmdp[0] = __pmd(0);
- pmdp[1] = __pmd(0);
- cpu_flush_pmd(pmdp);
-}
+#define set_pmd(pmdp,pmd) \
+ do { \
+ *pmdp = pmd; \
+ flush_pmd_entry(pmdp); \
+ } while (0)
+
+#define pmd_clear(pmdp) \
+ do { \
+ pmdp[0] = __pmd(0); \
+ pmdp[1] = __pmd(0); \
+ clean_pmd_entry(pmdp); \
+ } while (0)
static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
diff --git a/include/asm-arm/proc-armv/tlbflush.h b/include/asm-arm/proc-armv/tlbflush.h
index d063ede9c50f..278c0624c11f 100644
--- a/include/asm-arm/proc-armv/tlbflush.h
+++ b/include/asm-arm/proc-armv/tlbflush.h
@@ -20,6 +20,7 @@
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
+#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
/*
@@ -65,7 +66,7 @@
# define v4_always_flags (-1UL)
#endif
-#define v4wbi_tlb_flags (TLB_WB | \
+#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_I_PAGE | TLB_V4_D_PAGE)
@@ -84,7 +85,7 @@
# define v4wbi_always_flags (-1UL)
#endif
-#define v4wb_tlb_flags (TLB_WB | \
+#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_D_PAGE)
@@ -287,6 +288,41 @@ static inline void flush_tlb_kernel_page(unsigned long kaddr)
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
+/*
+ * flush_pmd_entry
+ *
+ * Flush a PMD entry (word aligned, or double-word aligned) to
+ * RAM if the TLB for the CPU we are running on requires this.
+ * This is typically used when we are creating PMD entries.
+ *
+ * clean_pmd_entry
+ *
+ * Clean (but don't drain the write buffer) if the CPU requires
+ * these operations. This is typically used when we are removing
+ * PMD entries.
+ */
+static inline void flush_pmd_entry(pmd_t *pmd)
+{
+ const unsigned int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_DCLEAN))
+ asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
+ : : "r" (pmd));
+ if (tlb_flag(TLB_WB))
+ asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd"
+ : : "r" (zero));
+}
+
+static inline void clean_pmd_entry(pmd_t *pmd)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_DCLEAN))
+ asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
+ : : "r" (pmd));
+}
+
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
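
For context on how the two new helpers shrink per configuration: tlb_flag() (undefined again just above) presumably tests the requested capability bit against the build's TLB flag masks, along the lines of the sketch below. The macro body here is an assumption based on the names visible in this header, not a quote of it.

#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & (f)))

On a kernel built for a single CPU whose flag set includes TLB_DCLEAN and TLB_WB (only the v4wb and v4wbi sets gain TLB_DCLEAN in this patch), both tests in flush_pmd_entry are compile-time true and the helper inlines to the two mcr instructions with no branches, while clean_pmd_entry reduces to the single D-cache clean. For a CPU whose flags include neither bit, the tests fold to false and no code is emitted at all, which is the assembly "which will never be used" that the commit message refers to. On multi-CPU/multi-TLB builds the __tlb_flag term keeps a run-time test, but the indirect call through the processor struct is gone either way.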