| author | Paul Mackerras <paulus@nanango.paulus.ozlabs.org> | 2002-04-11 16:49:37 +1000 |
|---|---|---|
| committer | Paul Mackerras <paulus@nanango.paulus.ozlabs.org> | 2002-04-11 16:49:37 +1000 |
| commit | c7ad40cc2b77557310ceb5a2a7bc000713a80950 (patch) | |
| tree | 793f565815ea67a9c7287b2b1da6a3146ab283ac /include | |
| parent | 22e962f9b7a7abbc2d17ceaf3917bb8e67b68a8f (diff) | |
Add flush_tlb_kernel_range for PPC and clean up the PPC tlb
flushing code a little.
Diffstat (limited to 'include')
| -rw-r--r-- | include/asm-ppc/tlbflush.h | 36 |
1 file changed, 19 insertions, 17 deletions
```diff
diff --git a/include/asm-ppc/tlbflush.h b/include/asm-ppc/tlbflush.h
index be5fe5b98d2c..8d589d1e3a05 100644
--- a/include/asm-ppc/tlbflush.h
+++ b/include/asm-ppc/tlbflush.h
@@ -23,41 +23,48 @@ extern void _tlbia(void);
 #if defined(CONFIG_4xx)
 #define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
 
-static inline void local_flush_tlb_all(void)
+static inline void flush_tlb_all(void)
 	{ __tlbia(); }
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void flush_tlb_mm(struct mm_struct *mm)
 	{ __tlbia(); }
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+static inline void flush_tlb_page(struct vm_area_struct *vma,
 				unsigned long vmaddr)
 	{ _tlbie(vmaddr); }
-static inline void local_flush_tlb_range(struct mm_struct *mm,
+static inline void flush_tlb_range(struct mm_struct *mm,
 				unsigned long start, unsigned long end)
 	{ __tlbia(); }
+static inline void flush_tlb_kernel_range(unsigned long start,
+				unsigned long end)
+	{ __tlbia(); }
 #define update_mmu_cache(vma, addr, pte)	do { } while (0)
 
 #elif defined(CONFIG_8xx)
 #define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
 
-static inline void local_flush_tlb_all(void)
+static inline void flush_tlb_all(void)
 	{ __tlbia(); }
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void flush_tlb_mm(struct mm_struct *mm)
 	{ __tlbia(); }
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+static inline void flush_tlb_page(struct vm_area_struct *vma,
 				unsigned long vmaddr)
 	{ _tlbie(vmaddr); }
-static inline void local_flush_tlb_range(struct mm_struct *mm,
+static inline void flush_tlb_range(struct mm_struct *mm,
 				unsigned long start, unsigned long end)
 	{ __tlbia(); }
+static inline void flush_tlb_kernel_range(unsigned long start,
+				unsigned long end)
+	{ __tlbia(); }
 #define update_mmu_cache(vma, addr, pte)	do { } while (0)
 
 #else	/* 6xx, 7xx, 7xxx cpus */
 struct mm_struct;
 struct vm_area_struct;
-extern void local_flush_tlb_all(void);
-extern void local_flush_tlb_mm(struct mm_struct *mm);
-extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 /*
  * This gets called at the end of handling a page fault, when
@@ -69,11 +76,6 @@ extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long star
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 #endif
 
-#define flush_tlb_all local_flush_tlb_all
-#define flush_tlb_mm local_flush_tlb_mm
-#define flush_tlb_page local_flush_tlb_page
-#define flush_tlb_range local_flush_tlb_range
-
 /*
  * This is called in munmap when we have freed up some page-table
  * pages. We don't need to do anything here, there's nothing special
```
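For context on the new interface: flush_tlb_kernel_range() is intended to be called after kernel page-table entries (e.g. for a vmalloc-style mapping) have been torn down, so that any stale TLB entries covering that virtual range are discarded. Below is a minimal caller sketch; it is not part of this commit, and unmap_kernel_pages() is a hypothetical stand-in for whatever step clears the PTEs — only flush_tlb_kernel_range() comes from the patch above.

```c
#include <asm/tlbflush.h>	/* provides flush_tlb_kernel_range() on PPC after this patch */

/* Hypothetical helper: clears the kernel PTEs for [start, end) (illustrative only). */
extern void unmap_kernel_pages(unsigned long start, unsigned long end);

/*
 * Sketch of a typical caller: once the kernel mappings are removed,
 * flush the TLB for that range so no stale translation survives.
 */
static void release_kernel_mapping(unsigned long start, unsigned long end)
{
	unmap_kernel_pages(start, end);
	flush_tlb_kernel_range(start, end);
}
```

On the 4xx and 8xx variants above this simply degenerates to a full tlbia, while the 6xx/7xx/7xxx implementation can flush only the affected range.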
