author     Russell King <rmk@flint.arm.linux.org.uk>  2002-02-25 10:24:56 +0000
committer  Russell King <rmk@flint.arm.linux.org.uk>  2002-02-25 10:24:56 +0000
commit     a6560a26b8bed3ff000f77cdf77789657d94cd8b (patch)
tree       471168208d990edc0c97424bb610cb396f742207 /include
parent     eac94688e46e081de1c3b516ee3595d5d8a7c400 (diff)
Clean up ARM TLB handling code. Previously there was a lot of code
replication across each processor type, with each handling alignment of
addresses slightly differently. We unify this mess and allow for greater
flexibility in the per-CPU architecture TLB handlers. We also start to
remove the ARM cache.h -> cpu_*.h -> proc-fns.h mess, making the code
cleaner and easier to follow. Documentation describing the expected
behaviour of each TLB function for the 32-bit ARM processors is also
included.
Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/cpu-multi32.h        19
-rw-r--r--  include/asm-arm/cpu-single.h          7
-rw-r--r--  include/asm-arm/glue.h               78
-rw-r--r--  include/asm-arm/proc-armv/cache.h   138
-rw-r--r--  include/asm-arm/procinfo.h           10
5 files changed, 170 insertions, 82 deletions
diff --git a/include/asm-arm/cpu-multi32.h b/include/asm-arm/cpu-multi32.h
index c7ce093e2085..9db83322ea54 100644
--- a/include/asm-arm/cpu-multi32.h
+++ b/include/asm-arm/cpu-multi32.h
@@ -93,21 +93,6 @@ extern struct processor {
void (*invalidate_page)(void *virt_page);
} icache;
- struct { /* TLB */
- /*
- * flush all TLBs
- */
- void (*invalidate_all)(void);
- /*
- * flush a specific TLB
- */
- void (*invalidate_range)(unsigned long address, unsigned long end);
- /*
- * flush a specific TLB
- */
- void (*invalidate_page)(unsigned long address, int flags);
- } tlb;
-
struct { /* PageTable */
/*
* Set the page table
@@ -152,10 +137,6 @@ extern const struct processor sa110_processor_functions;
#define cpu_icache_invalidate_range(s,e) processor.icache.invalidate_range(s,e)
#define cpu_icache_invalidate_page(vp) processor.icache.invalidate_page(vp)
-#define cpu_tlb_invalidate_all() processor.tlb.invalidate_all()
-#define cpu_tlb_invalidate_range(s,e) processor.tlb.invalidate_range(s,e)
-#define cpu_tlb_invalidate_page(vp,f) processor.tlb.invalidate_page(vp,f)
-
#define cpu_set_pgd(pgd) processor.pgtable.set_pgd(pgd)
#define cpu_set_pmd(pmdp, pmd) processor.pgtable.set_pmd(pmdp, pmd)
#define cpu_set_pte(ptep, pte) processor.pgtable.set_pte(ptep, pte)
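The remaining cpu-multi32.h operations still dispatch through the boot-time
"processor" function table; the TLB members removed here move to the new
cpu_tlb table introduced in proc-armv/cache.h below. A minimal user-space
sketch of that function-table pattern (all demo_* and sa110_* names are
illustrative, not the kernel's):

#include <stdio.h>

struct demo_pgtable_fns {
	void (*set_pgd)(unsigned long pgd_phys);
};

/* Stand-in for one CPU's implementation. */
static void demo_sa110_set_pgd(unsigned long pgd_phys)
{
	printf("SA-110: switch to page tables at %#lx\n", pgd_phys);
}

/* Filled in at boot for the detected CPU. */
static struct demo_pgtable_fns demo_processor = {
	.set_pgd = demo_sa110_set_pgd,
};

#define cpu_set_pgd(pgd) demo_processor.set_pgd(pgd)

int main(void)
{
	cpu_set_pgd(0x4000UL);	/* one indirect call per operation */
	return 0;
}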
diff --git a/include/asm-arm/cpu-single.h b/include/asm-arm/cpu-single.h
index 9742554641b7..6f794bb570fe 100644
--- a/include/asm-arm/cpu-single.h
+++ b/include/asm-arm/cpu-single.h
@@ -37,9 +37,6 @@
#define cpu_dcache_clean_entry __cpu_fn(CPU_NAME,_dcache_clean_entry)
#define cpu_icache_invalidate_range __cpu_fn(CPU_NAME,_icache_invalidate_range)
#define cpu_icache_invalidate_page __cpu_fn(CPU_NAME,_icache_invalidate_page)
-#define cpu_tlb_invalidate_all __cpu_fn(CPU_NAME,_tlb_invalidate_all)
-#define cpu_tlb_invalidate_range __cpu_fn(CPU_NAME,_tlb_invalidate_range)
-#define cpu_tlb_invalidate_page __cpu_fn(CPU_NAME,_tlb_invalidate_page)
#define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd)
#define cpu_set_pmd __cpu_fn(CPU_NAME,_set_pmd)
#define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte)
@@ -73,10 +70,6 @@ extern void cpu_dcache_clean_entry(unsigned long address);
extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end);
extern void cpu_icache_invalidate_page(void *virt_page);
-extern void cpu_tlb_invalidate_all(void);
-extern void cpu_tlb_invalidate_range(unsigned long address, unsigned long end);
-extern void cpu_tlb_invalidate_page(unsigned long address, int flags);
-
extern void cpu_set_pgd(unsigned long pgd_phys);
extern void cpu_set_pmd(pmd_t *pmdp, pmd_t pmd);
extern void cpu_set_pte(pte_t *ptep, pte_t pte);
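cpu-single.h instead binds each operation to one CPU's implementation at
compile time via token pasting. The definition of __cpu_fn is not part of
this diff; the two-level expansion below is an assumed sketch of the
standard cpp idiom it relies on:

#include <stdio.h>

/* Two levels so CPU_NAME is macro-expanded before ## is applied. */
#define ____cpu_fn(name,x)	name##x
#define __cpu_fn(name,x)	____cpu_fn(name,x)

#define CPU_NAME sa110
#define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd)

/* Stand-in for the real per-CPU implementation. */
static void sa110_set_pgd(unsigned long pgd_phys)
{
	printf("direct call: sa110_set_pgd(%#lx)\n", pgd_phys);
}

int main(void)
{
	cpu_set_pgd(0x4000UL);	/* expands to sa110_set_pgd(0x4000UL) */
	return 0;
}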
diff --git a/include/asm-arm/glue.h b/include/asm-arm/glue.h
new file mode 100644
index 000000000000..a4f098d8aabb
--- /dev/null
+++ b/include/asm-arm/glue.h
@@ -0,0 +1,78 @@
+/*
+ * linux/include/asm-arm/glue.h
+ *
+ * Copyright (C) 1997-1999 Russell King
+ * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file provides the glue to stick the processor-specific bits
+ * into the kernel in an efficient manner. The idea is to use branches
+ * when we're only targeting one class of TLB, or indirect calls
+ * when we're targeting multiple classes of TLBs.
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#ifdef __STDC__
+#define ____glue(name,fn) name##fn
+#else
+#define ____glue(name,fn) name/**/fn
+#endif
+#define __glue(name,fn) ____glue(name,fn)
+
+/*
+ * Select MMU TLB handling.
+ */
+
+/*
+ * ARMv3 MMU
+ */
+#undef _TLB
+#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v3
+# endif
+#endif
+
+/*
+ * ARMv4 MMU without write buffer
+ */
+#if defined(CONFIG_CPU_ARM720T)
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v4
+# endif
+#endif
+
+/*
+ * ARMv4 MMU with write buffer, with invalidate I TLB entry instruction
+ */
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+ defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_ARM1020) || \
+ defined(CONFIG_CPU_XSCALE)
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v4wbi
+# endif
+#endif
+
+/*
+ * ARMv4 MMU with write buffer, without invalidate I TLB entry instruction
+ */
+#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
+# ifdef _TLB
+# define MULTI_TLB 1
+# else
+# define _TLB v4wb
+# endif
+#endif
+
+#endif
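The net effect: a kernel configured for exactly one TLB class leaves _TLB
defined (v3, v4, v4wb or v4wbi) so calls paste into direct symbols, while
selecting a second class defines MULTI_TLB and forces the function-pointer
path. A stand-alone demonstration of the two-level paste, pretending only
the SA-110/SA-1100 class is configured:

#include <stdio.h>

#define ____glue(name,fn)	name##fn
#define __glue(name,fn)		____glue(name,fn)

#define _TLB v4wb	/* as if only CONFIG_CPU_SA110/SA1100 were set */

#define str(x)	#x
#define xstr(x)	str(x)	/* stringize after macro expansion */

int main(void)
{
	/* Prints "v4wb_flush_kern_tlb_all": a single direct symbol. */
	printf("%s\n", xstr(__glue(_TLB,_flush_kern_tlb_all)));
	return 0;
}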
diff --git a/include/asm-arm/proc-armv/cache.h b/include/asm-arm/proc-armv/cache.h
index 2f5f206fc43c..c73f03773594 100644
--- a/include/asm-arm/proc-armv/cache.h
+++ b/include/asm-arm/proc-armv/cache.h
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <asm/mman.h>
+#include <asm/glue.h>
/*
* This flag is used to indicate that the page pointed to by a pte
@@ -200,69 +201,93 @@ static inline void flush_icache_page(struct vm_area_struct *vma, struct page *pa
} while (0)
/*
- * Old ARM MEMC stuff. This supports the reversed mapping handling that
- * we have on the older 26-bit machines. We don't have a MEMC chip, so...
- */
-#define memc_update_all() do { } while (0)
-#define memc_update_mm(mm) do { } while (0)
-#define memc_update_addr(mm,pte,log) do { } while (0)
-#define memc_clear(mm,physaddr) do { } while (0)
-
-/*
- * TLB flushing.
+ * TLB Management
+ * ==============
+ *
+ * The arch/arm/mm/tlb-*.S files implement these methods.
+ *
+ * The TLB specific code is expected to perform whatever tests it
+ * needs to determine if it should invalidate the TLB for each
+ * call. Start addresses are inclusive and end addresses are
+ * exclusive; it is safe to round these addresses down.
+ *
+ * flush_tlb_all()
+ *
+ * Invalidate the entire TLB.
*
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes TLB for specified page
- * - flush_tlb_range(vma, start, end) flushes TLB for specified range of pages
+ * flush_tlb_mm(mm)
*
- * We drain the write buffer in here to ensure that the page tables in ram
- * are really up to date. It is more efficient to do this here...
+ * Invalidate all TLB entries in a particular address
+ * space.
+ * - mm - mm_struct describing address space
+ *
+ * flush_tlb_range(mm,start,end)
+ *
+ * Invalidate a range of TLB entries in the specified
+ * address space.
+ * - mm - mm_struct describing address space
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ *
+ * flush_tlb_page(vaddr,vma)
+ *
+ * Invalidate the specified page in the specified address range.
+ * - vaddr - virtual address (may not be aligned)
+ * - vma - vma_struct describing address range
+ *
+ * flush_kern_tlb_page(kaddr)
+ *
+ * Invalidate the TLB entry for the specified page. The address
+ * will be in the kernel's virtual memory space. Current uses
+ * only require the D-TLB to be invalidated.
+ * - kaddr - Kernel virtual memory address
*/
-/*
- * Notes:
- * current->active_mm is the currently active memory description.
- * current->mm == NULL iff we are lazy.
- */
-#define flush_tlb_all() \
- do { \
- cpu_tlb_invalidate_all(); \
- } while (0)
+struct cpu_tlb_fns {
+ void (*flush_kern_all)(void);
+ void (*flush_user_mm)(struct mm_struct *);
+ void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
+ void (*flush_user_page)(unsigned long, struct vm_area_struct *);
+ void (*flush_kern_page)(unsigned long);
+};
/*
- * Flush all user virtual address space translations described by `_mm'.
- *
- * Currently, this is always called for current->mm, which should be
- * the same as current->active_mm. This is currently not be called for
- * the lazy TLB case.
+ * Convert calls to our calling convention.
*/
-#define flush_tlb_mm(_mm) \
- do { \
- if ((_mm) == current->active_mm) \
- cpu_tlb_invalidate_all(); \
- } while (0)
+#define flush_tlb_all() __cpu_flush_kern_tlb_all()
+#define flush_tlb_mm(mm) __cpu_flush_user_tlb_mm(mm)
+#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
+#define flush_tlb_page(vma,vaddr) __cpu_flush_user_tlb_page(vaddr,vma)
+#define flush_kern_tlb_page(kaddr) __cpu_flush_kern_tlb_page(kaddr)
/*
- * Flush the specified range of user virtual address space translations.
- *
- * _mm may not be current->active_mm, but may not be NULL.
+ * Now select the calling method
*/
-#define flush_tlb_range(_vma,_start,_end) \
- do { \
- if ((_mm)->vm_mm == current->active_mm) \
- cpu_tlb_invalidate_range((_start), (_end)); \
- } while (0)
+#ifdef MULTI_TLB
-/*
- * Flush the specified user virtual address space translation.
- */
-#define flush_tlb_page(_vma,_page) \
- do { \
- if ((_vma)->vm_mm == current->active_mm) \
- cpu_tlb_invalidate_page((_page), \
- ((_vma)->vm_flags & VM_EXEC)); \
- } while (0)
+extern struct cpu_tlb_fns cpu_tlb;
+
+#define __cpu_flush_kern_tlb_all cpu_tlb.flush_kern_all
+#define __cpu_flush_user_tlb_mm cpu_tlb.flush_user_mm
+#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range
+#define __cpu_flush_user_tlb_page cpu_tlb.flush_user_page
+#define __cpu_flush_kern_tlb_page cpu_tlb.flush_kern_page
+
+#else
+
+#define __cpu_flush_kern_tlb_all __glue(_TLB,_flush_kern_tlb_all)
+#define __cpu_flush_user_tlb_mm __glue(_TLB,_flush_user_tlb_mm)
+#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range)
+#define __cpu_flush_user_tlb_page __glue(_TLB,_flush_user_tlb_page)
+#define __cpu_flush_kern_tlb_page __glue(_TLB,_flush_kern_tlb_page)
+
+extern void __cpu_flush_kern_tlb_all(void);
+extern void __cpu_flush_user_tlb_mm(struct mm_struct *);
+extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_user_tlb_page(unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_kern_tlb_page(unsigned long);
+
+#endif
/*
* if PG_dcache_dirty is set for the page, we need to ensure that any
@@ -270,3 +295,12 @@ static inline void flush_icache_page(struct vm_area_struct *vma, struct page *pa
* back to the page.
*/
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+
+/*
+ * Old ARM MEMC stuff. This supports the reversed mapping handling that
+ * we have on the older 26-bit machines. We don't have a MEMC chip, so...
+ */
+#define memc_update_all() do { } while (0)
+#define memc_update_mm(mm) do { } while (0)
+#define memc_update_addr(mm,pte,log) do { } while (0)
+#define memc_clear(mm,physaddr) do { } while (0)
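A caller's-eye view of the new interface, sketched in user space. Note the
argument reordering done by the conversion macros above:
flush_tlb_range(vma,start,end) becomes a flush_user_range(start,end,vma)
call. The MULTI_TLB path is shown; the types and the stand-in
implementation are illustrative, not the kernel's:

#include <stdio.h>

struct vm_area_struct { int dummy; };	/* placeholder type */

struct demo_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long,
				 struct vm_area_struct *);
};

static void demo_flush_user_range(unsigned long start, unsigned long end,
				  struct vm_area_struct *vma)
{
	(void)vma;
	printf("invalidate user TLB entries in [%#lx, %#lx)\n", start, end);
}

/* In the kernel this table is filled in at boot for the detected CPU. */
static struct demo_tlb_fns cpu_tlb = {
	.flush_user_range = demo_flush_user_range,
};

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)

int main(void)
{
	struct vm_area_struct vma;

	flush_tlb_range(&vma, 0x8000UL, 0xa000UL);
	return 0;
}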
diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h
index 4411a5d1f5a3..dda78d4e7156 100644
--- a/include/asm-arm/procinfo.h
+++ b/include/asm-arm/procinfo.h
@@ -14,6 +14,9 @@
#include <asm/proc-fns.h>
+struct cpu_tlb_fns;
+struct processor;
+
struct proc_info_item {
const char *manufacturer;
const char *cpu_name;
@@ -37,15 +40,14 @@ struct proc_info_list {
const char *elf_name;
unsigned int elf_hwcap;
struct proc_info_item *info;
-#ifdef MULTI_CPU
struct processor *proc;
-#else
- void *unused;
-#endif
+ struct cpu_tlb_fns *tlb;
};
#endif /* __ASSEMBLY__ */
+#define PROC_INFO_SZ 40
+
#define HWCAP_SWP 1
#define HWCAP_HALF 2
#define HWCAP_THUMB 4
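PROC_INFO_SZ pins the size of each proc_info_list record, presumably so
that early assembly code can walk the table with a fixed stride (an
assumption; the consumer is not shown in this diff). A sketch of how such
a constant can be guarded at compile time, using an illustrative layout:

#define DEMO_PROC_INFO_SZ 40

struct demo_proc_info_list {
	unsigned int cpu_val;
	unsigned int cpu_mask;
	/* ... remaining fields padded to total DEMO_PROC_INFO_SZ ... */
	unsigned char pad[DEMO_PROC_INFO_SZ - 2 * sizeof(unsigned int)];
};

/* Compilation fails if the C layout drifts from the assembly's stride. */
typedef char demo_proc_info_size_check
	[sizeof(struct demo_proc_info_list) == DEMO_PROC_INFO_SZ ? 1 : -1];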