| author | Russell King <rmk@flint.arm.linux.org.uk> | 2004-10-29 18:34:00 +0100 |
|---|---|---|
| committer | Russell King <rmk@flint.arm.linux.org.uk> | 2004-10-29 18:34:00 +0100 |
| commit | e2c6ee0ae62f5a61bca0ddfddb173e2e8238ffc7 (patch) | |
| tree | 8bd47c78cab92b132a7c3e1a57bf54cb74873e55 /include/asm-arm/cacheflush.h | |
| parent | a6a4b52e27f3efecdb87024f53108fdac4493d2a (diff) | |
[ARM] Use cpu_vm_mask to determine whether to flush TLB/caches.
Since bit 0 of mm->cpu_vm_mask is only set while the MM is mapped onto the CPU,
we can test that bit rather than comparing the MM pointer against
current->active_mm. This simplifies the inline cache flushing and TLB code.
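As an aside, the test this commit switches to can be read as a small helper. The sketch below is illustrative only, not code from the commit: the helper name is hypothetical, and it assumes the 2.6-era cpu_isset()/smp_processor_id() helpers and the mm->cpu_vm_mask field that appear in the diff further down.

```c
#include <linux/sched.h>	/* struct mm_struct */
#include <linux/smp.h>		/* smp_processor_id() */
#include <linux/cpumask.h>	/* cpu_isset() */

/*
 * Sketch only: non-zero when this mm is currently mapped on the
 * executing CPU.  On a uniprocessor ARM kernel of this era,
 * smp_processor_id() is 0, so the test reduces to "is bit 0 set in
 * mm->cpu_vm_mask", which is the condition the commit message cites.
 */
static inline int mm_mapped_on_this_cpu(struct mm_struct *mm)
{
	return cpu_isset(smp_processor_id(), mm->cpu_vm_mask);
}
```

The old check (current->active_mm == mm) only answers the question "is this mm mapped here?" indirectly, via the task that happens to be running; testing the CPU's bit in cpu_vm_mask asks it directly, which is what allows the inline flush helpers to be simplified.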
Diffstat (limited to 'include/asm-arm/cacheflush.h')
| -rw-r--r-- | include/asm-arm/cacheflush.h | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 250d09ca5619..c70f6de1d2ba 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -256,14 +256,14 @@ extern void dmac_flush_range(unsigned long, unsigned long);
 
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-	if (current->active_mm == mm)
+	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
 		__cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (current->active_mm == vma->vm_mm)
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 					vma->vm_flags);
 }
@@ -271,7 +271,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
 {
-	if (current->active_mm == vma->vm_mm) {
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
 		unsigned long addr = user_addr & PAGE_MASK;
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}
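For context, the new test only works because the ARM context-switch code keeps cpu_vm_mask up to date. The following is a rough, hypothetical sketch of that bookkeeping, not part of this commit (whose diffstat covers only cacheflush.h); the function name is invented for illustration, cpu_set()/cpu_clear() are the old cpumask helpers, and the real logic in the ARM switch_mm() path differs in its details.

```c
#include <linux/sched.h>	/* struct mm_struct */
#include <linux/smp.h>		/* smp_processor_id() */
#include <linux/cpumask.h>	/* cpu_set(), cpu_clear() */

/*
 * Hypothetical illustration: when a CPU starts running an mm, its bit
 * is set in mm->cpu_vm_mask; when it switches away, the bit is cleared.
 * The real bookkeeping lives in the ARM switch_mm() path, and details
 * such as when the old bit is cleared differ from this sketch.
 */
static inline void sketch_switch_mm(struct mm_struct *prev,
				    struct mm_struct *next)
{
	unsigned int cpu = smp_processor_id();

	if (prev != next) {
		cpu_set(cpu, next->cpu_vm_mask);	/* next is now mapped here */
		/* ... page tables switched here ... */
		cpu_clear(cpu, prev->cpu_vm_mask);	/* prev no longer mapped here */
	}
}
```

With that invariant maintained, flush_cache_mm() and friends can skip the flush entirely whenever the mm in question is not mapped on the calling CPU.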
