author    Richard Henderson <rth@are.twiddle.net>  2002-03-24 09:52:10 -0800
committer Richard Henderson <rth@are.twiddle.net>  2002-03-24 09:52:10 -0800
commit    7ee912babeec5f7190820dac442db634ae2136de (patch)
tree      30a3867ef6490db8952d6fe10cfd10007623937f /include
parent    085c9a18acd4003fa77cf852aa770e52b2ce6a71 (diff)
Break an include loop by moving cache flushing routines from
asm/pgtable.h and/or asm/pgalloc.h to asm/cacheflush.h, and tlb flushing routines to asm/tlbflush.h.
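
For illustration only (not part of this commit), code that formerly picked
these routines up indirectly via asm/pgtable.h or asm/pgalloc.h would now
include the dedicated headers:

	#include <asm/cacheflush.h>	/* flush_cache_*(), flush_icache_*() */
	#include <asm/tlbflush.h>	/* flush_tlb(), flush_tlb_mm(), ... */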
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/cacheflush.h  |  64
-rw-r--r--  include/asm-alpha/pgalloc.h     | 224
-rw-r--r--  include/asm-alpha/tlbflush.h    | 155
-rw-r--r--  include/asm-i386/cacheflush.h   |  18
-rw-r--r--  include/asm-i386/pgalloc.h      | 186
-rw-r--r--  include/asm-i386/pgtable.h      |  65
-rw-r--r--  include/asm-i386/tlbflush.h     | 141
-rw-r--r--  include/linux/highmem.h         |   1
8 files changed, 385 insertions, 469 deletions
diff --git a/include/asm-alpha/cacheflush.h b/include/asm-alpha/cacheflush.h
new file mode 100644
index 000000000000..f04d7579fde7
--- /dev/null
+++ b/include/asm-alpha/cacheflush.h
@@ -0,0 +1,64 @@
+#ifndef _ALPHA_CACHEFLUSH_H
+#define _ALPHA_CACHEFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+/* Caches aren't brain-dead on the Alpha. */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
+
+/* Note that the following two definitions are _highly_ dependent
+ on the contexts in which they are used in the kernel. I personally
+ think it is criminal how loosely defined these macros are. */
+
+/* We need to flush the kernel's icache after loading modules. The
+ only other use of this macro is in load_aout_interp which is not
+ used on Alpha.
+
+ Note that this definition should *not* be used for userspace
+ icache flushing. While functional, it is _way_ overkill. The
+ icache is tagged with ASNs and it suffices to allocate a new ASN
+ for the process. */
+#ifndef CONFIG_SMP
+#define flush_icache_range(start, end) imb()
+#else
+#define flush_icache_range(start, end) smp_imb()
+extern void smp_imb(void);
+#endif
+
+/* We need to flush the userspace icache after setting breakpoints in
+ ptrace.
+
+ Instead of indiscriminately using imb, take advantage of the fact
+ that icache entries are tagged with the ASN and load a new mm context. */
+/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
+
+#ifndef CONFIG_SMP
+extern void __load_new_mm_context(struct mm_struct *);
+static inline void
+flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+{
+ if (vma->vm_flags & VM_EXEC) {
+ struct mm_struct *mm = vma->vm_mm;
+ if (current->active_mm == mm)
+ __load_new_mm_context(mm);
+ else
+ mm->context[smp_processor_id()] = 0;
+ }
+}
+#else
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+ struct page *page, unsigned long addr, int len);
+#endif
+
+/* This is used only in do_no_page and do_swap_page. */
+#define flush_icache_page(vma, page) \
+ flush_icache_user_range((vma), (page), 0, 0)
+
+#endif /* _ALPHA_CACHEFLUSH_H */
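
A hedged usage sketch of the ptrace path described in the comments above
(hypothetical code, not part of the commit):

	/* Hypothetical sketch: after poking a breakpoint instruction into
	   another task's text page, flush the userspace icache for that
	   range so the stale instruction cannot be executed. */
	static void poke_breakpoint(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    unsigned int insn)
	{
		/* ... copy 'insn' into the page at 'addr' ... */
		flush_icache_user_range(vma, page, addr, sizeof(insn));
	}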
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
index 0fbeaa56610d..445dc28b827a 100644
--- a/include/asm-alpha/pgalloc.h
+++ b/include/asm-alpha/pgalloc.h
@@ -3,228 +3,6 @@
#include <linux/config.h>
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __MMU_EXTERN_INLINE
-#endif
-
-extern void __load_new_mm_context(struct mm_struct *);
-
-
-/* Caches aren't brain-dead on the Alpha. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
-#define flush_dcache_page(page) do { } while (0)
-
-/* Note that the following two definitions are _highly_ dependent
- on the contexts in which they are used in the kernel. I personally
- think it is criminal how loosely defined these macros are. */
-
-/* We need to flush the kernel's icache after loading modules. The
- only other use of this macro is in load_aout_interp which is not
- used on Alpha.
-
- Note that this definition should *not* be used for userspace
- icache flushing. While functional, it is _way_ overkill. The
- icache is tagged with ASNs and it suffices to allocate a new ASN
- for the process. */
-#ifndef CONFIG_SMP
-#define flush_icache_range(start, end) imb()
-#else
-#define flush_icache_range(start, end) smp_imb()
-extern void smp_imb(void);
-#endif
-
-
-/*
- * Use a few helper functions to hide the ugly broken ASN
- * numbers on early Alphas (ev4 and ev45)
- */
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current(struct mm_struct *mm)
-{
- __load_new_mm_context(mm);
- tbiap();
-}
-
-__EXTERN_INLINE void
-ev5_flush_tlb_current(struct mm_struct *mm)
-{
- __load_new_mm_context(mm);
-}
-
-static inline void
-flush_tlb_other(struct mm_struct *mm)
-{
- long * mmc = &mm->context[smp_processor_id()];
- /*
- * Check it's not zero first to avoid cacheline ping pong when
- * possible.
- */
- if (*mmc)
- *mmc = 0;
-}
-
-/* We need to flush the userspace icache after setting breakpoints in
- ptrace.
-
- Instead of indiscriminately using imb, take advantage of the fact
- that icache entries are tagged with the ASN and load a new mm context. */
-/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
-
-#ifndef CONFIG_SMP
-static inline void
-flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
- unsigned long addr, int len)
-{
- if (vma->vm_flags & VM_EXEC) {
- struct mm_struct *mm = vma->vm_mm;
- if (current->active_mm == mm)
- __load_new_mm_context(mm);
- else
- mm->context[smp_processor_id()] = 0;
- }
-}
-#else
-extern void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long addr, int len);
-#endif
-
-/* this is used only in do_no_page and do_swap_page */
-#define flush_icache_page(vma, page) flush_icache_user_range((vma), (page), 0, 0)
-
-/*
- * Flush just one page in the current TLB set.
- * We need to be very careful about the icache here, there
- * is no way to invalidate a specific icache page..
- */
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current_page(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- int tbi_flag = 2;
- if (vma->vm_flags & VM_EXEC) {
- __load_new_mm_context(mm);
- tbi_flag = 3;
- }
- tbi(tbi_flag, addr);
-}
-
-__EXTERN_INLINE void
-ev5_flush_tlb_current_page(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_flags & VM_EXEC)
- __load_new_mm_context(mm);
- else
- tbi(2, addr);
-}
-
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define flush_tlb_current alpha_mv.mv_flush_tlb_current
-# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define flush_tlb_current ev4_flush_tlb_current
-# define flush_tlb_current_page ev4_flush_tlb_current_page
-# else
-# define flush_tlb_current ev5_flush_tlb_current
-# define flush_tlb_current_page ev5_flush_tlb_current_page
-# endif
-#endif
-
-#ifdef __MMU_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __MMU_EXTERN_INLINE
-#endif
-
-/*
- * Flush current user mapping.
- */
-static inline void flush_tlb(void)
-{
- flush_tlb_current(current->active_mm);
-}
-
-/*
- * Flush a specified range of user mapping page tables
- * from TLB.
- * Although Alpha uses VPTE caches, this can be a nop, as Alpha does
- * not have finegrained tlb flushing, so it will flush VPTE stuff
- * during next flush_tlb_range.
- */
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
-#ifndef CONFIG_SMP
-/*
- * Flush everything (kernel mapping may also have
- * changed due to vmalloc/vfree)
- */
-static inline void flush_tlb_all(void)
-{
- tbia();
-}
-
-/*
- * Flush a specified user mapping
- */
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm == current->active_mm)
- flush_tlb_current(mm);
- else
- flush_tlb_other(mm);
-}
-
-/*
- * Page-granular tlb flush.
- *
- * do a tbisd (type = 2) normally, and a tbis (type = 3)
- * if it is an executable mapping. We want to avoid the
- * itlb flush, because that potentially also does a
- * icache flush.
- */
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- struct mm_struct * mm = vma->vm_mm;
-
- if (mm == current->active_mm)
- flush_tlb_current_page(mm, vma, addr);
- else
- flush_tlb_other(mm);
-}
-
-/*
- * Flush a specified range of user mapping: on the
- * Alpha we flush the whole user tlb.
- */
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- flush_tlb_mm(vma->vm_mm);
-}
-
-#else /* CONFIG_SMP */
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_range(struct vm_area_struct *, unsigned long, unsigned long);
-
-#endif /* CONFIG_SMP */
-
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
@@ -292,4 +70,6 @@ pte_free(struct page *page)
__free_page(page);
}
+#define check_pgt_cache() do { } while (0)
+
#endif /* _ALPHA_PGALLOC_H */
diff --git a/include/asm-alpha/tlbflush.h b/include/asm-alpha/tlbflush.h
new file mode 100644
index 000000000000..a8b6748c25ce
--- /dev/null
+++ b/include/asm-alpha/tlbflush.h
@@ -0,0 +1,155 @@
+#ifndef _ALPHA_TLBFLUSH_H
+#define _ALPHA_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __MMU_EXTERN_INLINE
+#endif
+
+extern void __load_new_mm_context(struct mm_struct *);
+
+
+/* Use a few helper functions to hide the ugly broken ASN
+ numbers on early Alphas (ev4 and ev45). */
+
+__EXTERN_INLINE void
+ev4_flush_tlb_current(struct mm_struct *mm)
+{
+ __load_new_mm_context(mm);
+ tbiap();
+}
+
+__EXTERN_INLINE void
+ev5_flush_tlb_current(struct mm_struct *mm)
+{
+ __load_new_mm_context(mm);
+}
+
+/* Flush just one page in the current TLB set.  We need to be very
+   careful about the icache here; there is no way to invalidate a
+   specific icache page. */
+
+__EXTERN_INLINE void
+ev4_flush_tlb_current_page(struct mm_struct * mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ int tbi_flag = 2;
+ if (vma->vm_flags & VM_EXEC) {
+ __load_new_mm_context(mm);
+ tbi_flag = 3;
+ }
+ tbi(tbi_flag, addr);
+}
+
+__EXTERN_INLINE void
+ev5_flush_tlb_current_page(struct mm_struct * mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_flags & VM_EXEC)
+ __load_new_mm_context(mm);
+ else
+ tbi(2, addr);
+}
+
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define flush_tlb_current alpha_mv.mv_flush_tlb_current
+# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
+#else
+# ifdef CONFIG_ALPHA_EV4
+# define flush_tlb_current ev4_flush_tlb_current
+# define flush_tlb_current_page ev4_flush_tlb_current_page
+# else
+# define flush_tlb_current ev5_flush_tlb_current
+# define flush_tlb_current_page ev5_flush_tlb_current_page
+# endif
+#endif
+
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
+#endif
+
+/* Flush current user mapping. */
+static inline void
+flush_tlb(void)
+{
+ flush_tlb_current(current->active_mm);
+}
+
+/* Flush someone else's user mapping. */
+static inline void
+flush_tlb_other(struct mm_struct *mm)
+{
+ long *mmc = &mm->context[smp_processor_id()];
+ /* Check it's not zero first to avoid cacheline ping pong
+ when possible. */
+ if (*mmc) *mmc = 0;
+}
+
+/* Flush a specified range of user mapping page tables from TLB.
+   Although Alpha uses VPTE caches, this can be a nop, as Alpha does
+   not have fine-grained tlb flushing, so it will flush VPTE stuff
+   during the next flush_tlb_range. */
+
+static inline void
+flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+}
+
+#ifndef CONFIG_SMP
+/* Flush everything (kernel mapping may also have changed
+ due to vmalloc/vfree). */
+static inline void flush_tlb_all(void)
+{
+ tbia();
+}
+
+/* Flush a specified user mapping. */
+static inline void
+flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ flush_tlb_current(mm);
+ else
+ flush_tlb_other(mm);
+}
+
+/* Page-granular tlb flush. */
+static inline void
+flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (mm == current->active_mm)
+ flush_tlb_current_page(mm, vma, addr);
+ else
+ flush_tlb_other(mm);
+}
+
+/* Flush a specified range of user mapping. On the Alpha we flush
+ the whole user tlb. */
+static inline void
+flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#else /* CONFIG_SMP */
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
+ unsigned long);
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ALPHA_TLBFLUSH_H */
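
A hedged sketch of a typical caller (hypothetical, not part of the commit);
on UP, flush_tlb_page() reaches flush_tlb_current_page(), which the
machine-vector macros above resolve to the ev4 or ev5 variant:

	/* Hypothetical sketch: after changing one user PTE, flush just
	   that translation rather than the whole TLB. */
	static void update_one_pte(struct vm_area_struct *vma,
				   unsigned long addr)
	{
		/* ... write the new PTE into the page table ... */
		flush_tlb_page(vma, addr);
	}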
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h
new file mode 100644
index 000000000000..58d027dfc5ff
--- /dev/null
+++ b/include/asm-i386/cacheflush.h
@@ -0,0 +1,18 @@
+#ifndef _I386_CACHEFLUSH_H
+#define _I386_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+/* Caches aren't brain-dead on the intel. */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma,pg) do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+
+#endif /* _I386_CACHEFLUSH_H */
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index 0af21645a1f1..b078cdd4adaa 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -5,7 +5,6 @@
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
-#include <linux/highmem.h>
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
@@ -20,109 +19,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
* Allocate and free page tables.
*/
-#if defined (CONFIG_X86_PAE)
-/*
- * We can't include <linux/slab.h> here, thus these uglinesses.
- */
-struct kmem_cache_s;
-
-extern struct kmem_cache_s *pae_pgd_cachep;
-extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
-extern void kmem_cache_free(struct kmem_cache_s *, void *);
-
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- int i;
- pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
-
- if (pgd) {
- for (i = 0; i < USER_PTRS_PER_PGD; i++) {
- unsigned long pmd = __get_free_page(GFP_KERNEL);
- if (!pmd)
- goto out_oom;
- clear_page(pmd);
- set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
- }
- memcpy(pgd + USER_PTRS_PER_PGD,
- swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
- return pgd;
-out_oom:
- for (i--; i >= 0; i--)
- free_page((unsigned long)__va(pgd_val(pgd[i])-1));
- kmem_cache_free(pae_pgd_cachep, pgd);
- return NULL;
-}
-
-#else
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(pgd_t *pgd);
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-
- if (pgd) {
- memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy(pgd + USER_PTRS_PER_PGD,
- swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
- return pgd;
-}
-
-#endif /* CONFIG_X86_PAE */
-
-static inline void pgd_free(pgd_t *pgd)
-{
-#if defined(CONFIG_X86_PAE)
- int i;
-
- for (i = 0; i < USER_PTRS_PER_PGD; i++)
- free_page((unsigned long)__va(pgd_val(pgd[i])-1));
- kmem_cache_free(pae_pgd_cachep, pgd);
-#else
- free_page((unsigned long)pgd);
-#endif
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
- int count = 0;
- pte_t *pte;
-
- do {
- pte = (pte_t *) __get_free_page(GFP_KERNEL);
- if (pte)
- clear_page(pte);
- else {
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ);
- }
- } while (!pte && (count++ < 10));
- return pte;
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- int count = 0;
- struct page *pte;
-
- do {
-#if CONFIG_HIGHPTE
- pte = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
-#else
- pte = alloc_pages(GFP_KERNEL, 0);
-#endif
- if (pte)
- clear_highpage(pte);
- else {
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ);
- }
- } while (!pte && (count++ < 10));
- return pte;
-}
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
static inline void pte_free_kernel(pte_t *pte)
{
@@ -144,85 +45,6 @@ static inline void pte_free(struct page *pte)
#define pmd_free(x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- *
- * ..but the i386 has somewhat limited tlb flushing capabilities,
- * and page-granular flushes are available only on i486 and up.
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm == current->active_mm)
- __flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb();
-}
-
-#else
-
-#include <asm/smp.h>
-
-#define local_flush_tlb() \
- __flush_tlb()
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-
-#define flush_tlb() flush_tlb_current_task()
-
-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-{
- flush_tlb_mm(vma->vm_mm);
-}
-
-#define TLBSTATE_OK 1
-#define TLBSTATE_LAZY 2
-
-struct tlb_state
-{
- struct mm_struct *active_mm;
- int state;
- char __cacheline_padding[24];
-};
-extern struct tlb_state cpu_tlbstate[NR_CPUS];
-
-
-#endif
-
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- /* i386 does not keep any page table caches in TLB */
-}
-
#define check_pgt_cache() do { } while (0)
#endif /* _I386_PGALLOC_H */
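
For illustration (hypothetical, not part of the commit): the allocators are
now only declared here and defined out of line in arch code, so a caller
looks the same as before:

	/* Hypothetical caller sketch using the declarations above. */
	static pgd_t *make_page_dir(struct mm_struct *mm)
	{
		pgd_t *pgd = pgd_alloc(mm);	/* may return NULL on OOM */
		if (!pgd)
			return NULL;
		/* ... hand the page directory to the mm ... */
		return pgd;
	}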
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 1bf46187be83..354e45ca86af 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -24,71 +24,6 @@
extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);
-/* Caches aren't brain-dead on the intel. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
-#define flush_dcache_page(page) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-
-#define __flush_tlb() \
- do { \
- unsigned int tmpreg; \
- \
- __asm__ __volatile__( \
- "movl %%cr3, %0; # flush TLB \n" \
- "movl %0, %%cr3; \n" \
- : "=r" (tmpreg) \
- :: "memory"); \
- } while (0)
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __flush_tlb_global() \
- do { \
- unsigned int tmpreg; \
- \
- __asm__ __volatile__( \
- "movl %1, %%cr4; # turn off PGE \n" \
- "movl %%cr3, %0; # flush TLB \n" \
- "movl %0, %%cr3; \n" \
- "movl %2, %%cr4; # turn PGE back on \n" \
- : "=&r" (tmpreg) \
- : "r" (mmu_cr4_features & ~X86_CR4_PGE), \
- "r" (mmu_cr4_features) \
- : "memory"); \
- } while (0)
-
-extern unsigned long pgkern_mask;
-
-/*
- * Do not check the PGE bit unnecesserily if this is a PPro+ kernel.
- */
-#ifdef CONFIG_X86_PGE
-# define __flush_tlb_all() __flush_tlb_global()
-#else
-# define __flush_tlb_all() \
- do { \
- if (cpu_has_pge) \
- __flush_tlb_global(); \
- else \
- __flush_tlb(); \
- } while (0)
-#endif
-
-#ifndef CONFIG_X86_INVLPG
-#define __flush_tlb_one(addr) __flush_tlb()
-#else
-#define __flush_tlb_one(addr) \
-__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
-#endif
-
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
new file mode 100644
index 000000000000..f05abd2f437a
--- /dev/null
+++ b/include/asm-i386/tlbflush.h
@@ -0,0 +1,141 @@
+#ifndef _I386_TLBFLUSH_H
+#define _I386_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/processor.h>
+
+#define __flush_tlb() \
+ do { \
+ unsigned int tmpreg; \
+ \
+ __asm__ __volatile__( \
+ "movl %%cr3, %0; # flush TLB \n" \
+ "movl %0, %%cr3; \n" \
+ : "=r" (tmpreg) \
+ :: "memory"); \
+ } while (0)
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+#define __flush_tlb_global() \
+ do { \
+ unsigned int tmpreg; \
+ \
+ __asm__ __volatile__( \
+ "movl %1, %%cr4; # turn off PGE \n" \
+ "movl %%cr3, %0; # flush TLB \n" \
+ "movl %0, %%cr3; \n" \
+ "movl %2, %%cr4; # turn PGE back on \n" \
+ : "=&r" (tmpreg) \
+ : "r" (mmu_cr4_features & ~X86_CR4_PGE), \
+ "r" (mmu_cr4_features) \
+ : "memory"); \
+ } while (0)
+
+extern unsigned long pgkern_mask;
+
+/*
+ * Do not check the PGE bit unnecessarily if this is a PPro+ kernel.
+ */
+#ifdef CONFIG_X86_PGE
+# define __flush_tlb_all() __flush_tlb_global()
+#else
+# define __flush_tlb_all() \
+ do { \
+ if (cpu_has_pge) \
+ __flush_tlb_global(); \
+ else \
+ __flush_tlb(); \
+ } while (0)
+#endif
+
+#ifndef CONFIG_X86_INVLPG
+#define __flush_tlb_one(addr) __flush_tlb()
+#else
+#define __flush_tlb_one(addr) \
+__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+#endif
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ..but the i386 has somewhat limited tlb flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb_one(addr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb();
+}
+
+#else
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+ __flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#define TLBSTATE_OK 1
+#define TLBSTATE_LAZY 2
+
+struct tlb_state
+{
+ struct mm_struct *active_mm;
+ int state;
+ char __cacheline_padding[24];
+};
+extern struct tlb_state cpu_tlbstate[NR_CPUS];
+
+
+#endif
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* i386 does not keep any page table caches in TLB */
+}
+
+#endif /* _I386_TLBFLUSH_H */
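
A hedged summary of how these primitives map onto the hardware, with a
hypothetical caller (not part of the commit):

	/* __flush_tlb()        reloads CR3, dropping all non-global entries;
	   __flush_tlb_global() briefly clears CR4.PGE so global entries
	                        are dropped as well;
	   __flush_tlb_one(a)   uses 'invlpg' for one page on i486 and up.
	   Generic mm code calls only the flush_tlb_*() wrappers: */
	static void after_pte_change(struct vm_area_struct *vma,
				     unsigned long addr)
	{
		flush_tlb_page(vma, addr);
	}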
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index bffa75c6ee5f..abe6fde56849 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -4,6 +4,7 @@
#include <linux/config.h>
#include <linux/bio.h>
#include <linux/fs.h>
+#include <asm/cacheflush.h>
#ifdef CONFIG_HIGHMEM