| author    | Russell King <rmk@flint.arm.linux.org.uk> | 2002-04-17 21:47:54 +0100 |
|-----------|-------------------------------------------|---------------------------|
| committer | Russell King <rmk@flint.arm.linux.org.uk> | 2002-04-17 21:47:54 +0100 |
| commit    | 0822977d13a85a0e72b02a5d6bf1fc2b2df31b60  |                           |
| tree      | cbe226d658bec13f8b8a46cbd840c15165c89078  |                           |
| parent    | f582a9c3eafd3cfcfb8a49e4dc72be1e277baf9e  |                           |
2.5.8 ARM updates:
- preempt updates
- build fixes
- new tlb flush macro
- add asm/cacheflush.h and asm/tlbflush.h
| mode       | path                                 | lines |
|------------|--------------------------------------|-------|
| -rw-r--r-- | arch/arm/kernel/dma.c                | 1     |
| -rw-r--r-- | arch/arm/kernel/entry-armv.S         | 15    |
| -rw-r--r-- | arch/arm/kernel/entry-common.S       | 3     |
| -rw-r--r-- | arch/arm/kernel/irq.c                | 1     |
| -rw-r--r-- | arch/arm/kernel/process.c            | 2     |
| -rw-r--r-- | arch/arm/kernel/semaphore.c          | 1     |
| -rw-r--r-- | arch/arm/kernel/signal.c             | 4     |
| -rw-r--r-- | arch/arm/kernel/time.c               | 1     |
| -rw-r--r-- | arch/arm/mm/minicache.c              | 2     |
| -rw-r--r-- | arch/arm/mm/proc-xscale.S            | 2     |
| -rw-r--r-- | arch/arm/mm/tlb-v3.S                 | 2     |
| -rw-r--r-- | arch/arm/mm/tlb-v4.S                 | 24    |
| -rw-r--r-- | arch/arm/mm/tlb-v4wb.S               | 59    |
| -rw-r--r-- | arch/arm/nwfpe/fpmodule.c            | 1     |
| -rw-r--r-- | include/asm-arm/cacheflush.h         | 15    |
| -rw-r--r-- | include/asm-arm/io.h                 | 2     |
| -rw-r--r-- | include/asm-arm/pgalloc.h            | 5     |
| -rw-r--r-- | include/asm-arm/proc-armo/cache.h    | 63    |
| -rw-r--r-- | include/asm-arm/proc-armo/tlbflush.h | 63    |
| -rw-r--r-- | include/asm-arm/proc-armv/cache.h    | 105   |
| -rw-r--r-- | include/asm-arm/proc-armv/tlbflush.h | 125   |
| -rw-r--r-- | include/asm-arm/thread_info.h        | 2     |
| -rw-r--r-- | include/asm-arm/tlbflush.h           | 15    |
23 files changed, 323 insertions, 190 deletions
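The preempt update below reworks svc_preempt in entry-armv.S: instead of marking the task TASK_RUNNING and calling schedule() directly, it stamps PREEMPT_ACTIVE into the thread_info preempt count and loops until the incoming task no longer has _TIF_NEED_RESCHED set. A rough, compilable C rendering of the control flow the assembly implements follows — every type and helper here is a simplified stand-in, not the kernel's real definition:

#include <stdio.h>

#define PREEMPT_ACTIVE    0x04000000  /* value from the thread_info.h hunk below */
#define _TIF_NEED_RESCHED (1 << 1)    /* stand-in bit; real value not shown in this patch */

struct thread_info_sketch {
	unsigned long flags;   /* TI_FLAGS */
	int preempt_count;     /* TI_PREEMPT */
};

static void irqs_on(void)  { }  /* stands in for: set_cpsr_c r2, #MODE_SVC */
static void irqs_off(void) { }  /* stands in for: set_cpsr_c r0, #PSR_I_BIT | MODE_SVC */

static void schedule_sketch(struct thread_info_sketch *ti)
{
	/* pretend the scheduler ran and cleared the resched request */
	ti->flags &= ~_TIF_NEED_RESCHED;
}

static void svc_preempt_sketch(struct thread_info_sketch *ti, int saved_count)
{
	if (saved_count != 0)	/* teq r9, #0: preemption currently disabled */
		return;
	/* (the assembly also bails out when local_irq_count + local_bh_count != 0) */

	ti->preempt_count = PREEMPT_ACTIVE;	/* tell schedule() not to dequeue the task */
	do {
		irqs_on();
		schedule_sketch(ti);
		irqs_off();
	} while (ti->flags & _TIF_NEED_RESCHED);	/* re-check the new task's flags */
}

int main(void)
{
	struct thread_info_sketch ti = { _TIF_NEED_RESCHED, 0 };
	svc_preempt_sketch(&ti, 0);
	printf("preempt_count now %#x\n", ti.preempt_count);
	return 0;
}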
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index c6ea827fb70e..c5a4c6a09dab 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -17,6 +17,7 @@
 #include <linux/mman.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
+#include <linux/errno.h>
 #include <asm/dma.h>
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0b799eb31163..ba2ed5015126 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -756,7 +756,6 @@ __irq_svc:
 	sub	sp, sp, #S_FRAME_SIZE
 #ifdef CONFIG_PREEMPT
 	ldr	r0, [r8, #TI_FLAGS]		@ get flags
 	tst	r0, #_TIF_NEED_RESCHED
-	ldrne	r6, .LCirq_stat
 	blne	svc_preempt
 preempt_return:
 	ldr	r0, [r8, #TI_PREEMPT]		@ read preempt value
@@ -770,20 +769,20 @@ preempt_return:
 #ifdef CONFIG_PREEMPT
 svc_preempt:
 	teq	r9, #0				@ was preempt count = 0
+	ldreq	r6, .LCirq_stat
 	movne	pc, lr				@ no
 	ldr	r0, [r6, #4]			@ local_irq_count
 	ldr	r1, [r6, #8]			@ local_bh_count
 	adds	r0, r0, r1
 	movne	pc, lr
-	ldr	r1, [r8, #TI_TASK]
-	set_cpsr_c	r2, #MODE_SVC		@ enable IRQs
-	str	r0, [r1, #0]			@ current->state = TASK_RUNNING
-1:	bl	SYMBOL_NAME(schedule)
+	mov	r7, #PREEMPT_ACTIVE
+	str	r7, [r8, #TI_PREEMPT]		@ set PREEMPT_ACTIVE
+1:	set_cpsr_c	r2, #MODE_SVC		@ enable IRQs
+	bl	SYMBOL_NAME(schedule)
 	set_cpsr_c	r0, #PSR_I_BIT | MODE_SVC	@ disable IRQs
-	ldr	r0, [r8, #TI_FLAGS]
+	ldr	r0, [r8, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
-	beq	preempt_return
-	set_cpsr_c	r0, #MODE_SVC		@ enable IRQs
+	beq	preempt_return			@ go again
 	b	1b
 #endif
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8d1566adbcbf..546d7554b342 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -76,6 +76,9 @@ __do_notify_resume:
  * This is how we return from a fork.
  */
 ENTRY(ret_from_fork)
+#ifdef CONFIG_PREEMPT
+	bl	schedule_tail
+#endif
 	get_thread_info tsk
 	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	mov	why, #1
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index e98c895953ad..345b69e84e92 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -29,6 +29,7 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
+#include <linux/errno.h>
 #include <asm/irq.h>
 #include <asm/system.h>
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1118e5301200..d073d21127fc 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -73,7 +73,7 @@ void (*pm_power_off)(void);
  * This is our default idle handler.  We need to disable
  * interrupts here to ensure we don't miss a wakeup call.
  */
-static void default_idle(void)
+void default_idle(void)
 {
 	__cli();
 	if (!need_resched() && !hlt_counter)
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
index 19aa6e9222af..2ac3faa7b364 100644
--- a/arch/arm/kernel/semaphore.c
+++ b/arch/arm/kernel/semaphore.c
@@ -13,6 +13,7 @@
  */
 #include <linux/config.h>
 #include <linux/sched.h>
+#include <linux/errno.h>
 #include <asm/semaphore.h>
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 08e9740fd56c..b90df71485d0 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -628,14 +628,12 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 			case SIGSTOP: {
 				struct signal_struct *sig;
+				current->state = TASK_STOPPED;
 				current->exit_code = signr;
 				sig = current->parent->sig;
-				preempt_disable();
-				current->state = TASK_STOPPED;
 				if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
 					notify_parent(current, SIGCHLD);
 				schedule();
-				preempt_enable();
 				continue;
 			}
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 39b50b5a7018..7c7e03c5b6e9 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -25,6 +25,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/timex.h>
+#include <linux/errno.h>
 #include <asm/hardware.h>
 #include <asm/io.h>
diff --git a/arch/arm/mm/minicache.c b/arch/arm/mm/minicache.c
index 7f20b9d0ea85..9529c4b27ad6 100644
--- a/arch/arm/mm/minicache.c
+++ b/arch/arm/mm/minicache.c
@@ -44,7 +44,7 @@ static pte_t *minicache_pte;
 unsigned long map_page_minicache(unsigned long virt)
 {
 	set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
-	flush_kern_tlb_page(minicache_address);
+	flush_tlb_kernel_page(minicache_address);
 	return minicache_address;
 }
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index d3b439b6816a..67cb6ae4df40 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -723,7 +723,6 @@ ENTRY(xscale_processor_functions)
 	.word	cpu_xscale_set_pgd
 	.word	cpu_xscale_set_pmd
 	.word	cpu_xscale_set_pte
-	.size	xscale_processor_functions, . - xscale_processor_functions
 	.type	cpu_80200_info, #object
@@ -779,6 +778,5 @@ __pxa250_proc_info:
 	.long	xscale_processor_functions
 	.long	v4wbi_tlb_fns
 	.long	v5te_mc_user_fns
-	.size	__cotulla_proc_info, . - __cotulla_proc_info
 	.size	__pxa250_proc_info, . - __pxa250_proc_info
diff --git a/arch/arm/mm/tlb-v3.S b/arch/arm/mm/tlb-v3.S
index 0d48947b8354..270108bb40c0 100644
--- a/arch/arm/mm/tlb-v3.S
+++ b/arch/arm/mm/tlb-v3.S
@@ -53,6 +53,7 @@ ENTRY(v3_flush_user_tlb_range)
 	act_mm	r3				@ get current->active_mm
 	teq	r2, r3				@ == mm ?
 	movne	pc, lr				@ no, we dont do anything
+ENTRY(v3_flush_kern_tlb_range)
 	bic	r0, r0, #0x0ff
 	bic	r0, r0, #0xf00
 1:	mcr	p15, 0, r0, c6, c0, 0		@ invalidate TLB entry
@@ -87,5 +88,6 @@ ENTRY(v3_tlb_fns)
 	.long	v3_flush_user_tlb_mm
 	.long	v3_flush_user_tlb_range
 	.long	v3_flush_user_tlb_page
+	.long	v3_flush_kern_tlb_range
 	.long	v3_flush_kern_tlb_page
 	.size	v3_tlb_fns, . - v3_tlb_fns
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S
index 294059991d3e..d697d1f09b3b 100644
--- a/arch/arm/mm/tlb-v4.S
+++ b/arch/arm/mm/tlb-v4.S
@@ -42,7 +42,7 @@ ENTRY(v4_flush_kern_tlb_all)
 /*
  * v4_flush_user_tlb_range(start, end, mm)
  *
- * Invalidate a range of TLB entries in the specified address space.
+ * Invalidate a range of TLB entries in the specified user address space.
  *
  * - start - range start address
  * - end   - range end address
@@ -86,6 +86,27 @@ ENTRY(v4_flush_user_tlb_page)
 	mov	pc, lr
 
 /*
+ * v4_flush_kern_tlb_range(start, end)
+ *
+ * Invalidate a range of TLB entries in the specified kernel
+ * address range.
+ *
+ * - start - virtual address (may not be aligned)
+ * - end   - virtual address (may not be aligned)
+ */
+	.align	5
+ENTRY(v4_flush_kern_tlb_range)
+	bic	r0, r0, #0x0ff
+	bic	r0, r0, #0xf00
+1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
+	add	r0, r0, #PAGE_SZ
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r3, c8, c5, 0		@ invalidate I TLB
+	mov	pc, lr
+
+
+/*
  * v4_flush_kern_tlb_page(kaddr)
  *
  * Invalidate the TLB entry for the specified page.  The address
@@ -106,5 +127,6 @@ ENTRY(v4_tlb_fns)
 	.long	v4_flush_user_tlb_mm
 	.long	v4_flush_user_tlb_range
 	.long	v4_flush_user_tlb_page
+	.long	v4_flush_kern_tlb_range
 	.long	v4_flush_kern_tlb_page
 	.size	v4_tlb_fns, . - v4_tlb_fns
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S
index 3cc408421c40..3cdca44fcfb2 100644
--- a/arch/arm/mm/tlb-v4wb.S
+++ b/arch/arm/mm/tlb-v4wb.S
@@ -88,7 +88,41 @@ ENTRY(v4wb_flush_user_tlb_page)
 	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
 	tst	r2, #VM_EXEC
 	mcrne	p15, 0, r3, c8, c5, 0		@ invalidate I TLB
+	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
+	mov	pc, lr
+
+/*
+ * v4wb_flush_kern_tlb_range(start, end)
+ *
+ * Invalidate a range of TLB entries in the specified kernel
+ * address range.
+ *
+ * - start - virtual address (may not be aligned)
+ * - end   - virtual address (may not be aligned)
+ */
+ENTRY(v4wb_flush_kern_tlb_range)
+	mov	r3, #0
+	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
+	bic	r0, r0, #0x0ff
+	bic	r0, r0, #0xf00
+	mcr	p15, 0, r3, c8, c5, 0		@ invalidate I TLB
+1:	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
+	add	r0, r0, #PAGE_SZ
+	cmp	r0, r1
+	blo	1b
+	mov	pc, lr
+
+/*
+ * v4wb_flush_kern_tlb_page(kaddr)
+ *
+ * Invalidate the TLB entry for the specified page.  The address
+ * will be in the kernels virtual memory space.  Current uses
+ * only require the D-TLB to be invalidated.
+ *
+ * - kaddr - Kernel virtual memory address
+ */
 ENTRY(v4wb_flush_kern_tlb_page)
+	mcr	p15, 0, r3, c8, c5, 0		@ invalidate I TLB
 	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
 	mov	pc, lr
@@ -107,14 +141,17 @@ ENTRY(v4wb_flush_kern_tlb_page)
  */
 	.align	5
 ENTRY(v4wbi_flush_user_tlb_range)
+	vma_vm_mm ip, r2
 	act_mm	r3				@ get current->active_mm
-	teq	r2, r3				@ == mm ?
+	eors	r3, ip, r3			@ == mm ?
 	movne	pc, lr				@ no, we dont do anything
 	mov	r3, #0
 	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
+	vma_vm_flags r2, r2
 	bic	r0, r0, #0x0ff
 	bic	r0, r0, #0xf00
-1:	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
+1:	tst	r2, #VM_EXEC
+	mcrne	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
 	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
@@ -140,7 +177,23 @@ ENTRY(v4wbi_flush_user_tlb_page)
 	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
 	tst	r2, #VM_EXEC
 	mcrne	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
+	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
+	mov	pc, lr
+
+ENTRY(v4wbi_flush_kern_tlb_range)
+	mov	r3, #0
+	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
+	bic	r0, r0, #0x0ff
+	bic	r0, r0, #0xf00
+1:	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
+	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
+	add	r0, r0, #PAGE_SZ
+	cmp	r0, r1
+	blo	1b
+	mov	pc, lr
+
 ENTRY(v4wbi_flush_kern_tlb_page)
+	mcr	p15, 0, r0, c8, c5, 1		@ invalidate I TLB entry
 	mcr	p15, 0, r0, c8, c6, 1		@ invalidate D TLB entry
 	mov	pc, lr
@@ -152,6 +205,7 @@ ENTRY(v4wb_tlb_fns)
 	.long	v4wb_flush_user_tlb_mm
 	.long	v4wb_flush_user_tlb_range
 	.long	v4wb_flush_user_tlb_page
+	.long	v4wb_flush_kern_tlb_range
 	.long	v4wb_flush_kern_tlb_page
 	.size	v4wb_tlb_fns, . - v4wb_tlb_fns
@@ -161,5 +215,6 @@ ENTRY(v4wbi_tlb_fns)
 	.long	v4wbi_flush_user_tlb_mm
 	.long	v4wbi_flush_user_tlb_range
 	.long	v4wbi_flush_user_tlb_page
+	.long	v4wbi_flush_kern_tlb_range
 	.long	v4wbi_flush_kern_tlb_page
 	.size	v4wbi_tlb_fns, . - v4wbi_tlb_fns
diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c
index d367cfe2b283..528fa710aa34 100644
--- a/arch/arm/nwfpe/fpmodule.c
+++ b/arch/arm/nwfpe/fpmodule.c
@@ -28,6 +28,7 @@
 #include <linux/config.h>	/* XXX */
+#include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
new file mode 100644
index 000000000000..7e1e15320598
--- /dev/null
+++ b/include/asm-arm/cacheflush.h
@@ -0,0 +1,15 @@
+/*
+ *  linux/include/asm-arm/cacheflush.h
+ *
+ *  Copyright (C) 2000-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_CACHEFLUSH_H
+#define _ASMARM_CACHEFLUSH_H
+
+#include <asm/proc/cache.h>
+
+#endif
diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index d527c3c568c5..d12af77640aa 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -272,7 +272,7 @@ extern void consistent_sync(void *vaddr, size_t size, int rw);
 /*
  * Change "struct page" to physical address.
  */
-#ifdef CONFIG_DISCONTIG
+#ifdef CONFIG_DISCONTIGMEM
 #define page_to_phys(page)					\
 	((((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
 	 + page_zone(page)->zone_start_paddr)
diff --git a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h
index 18c0c5354f9c..f0e2c9f5393d 100644
--- a/include/asm-arm/pgalloc.h
+++ b/include/asm-arm/pgalloc.h
@@ -10,11 +10,10 @@
 #ifndef _ASMARM_PGALLOC_H
 #define _ASMARM_PGALLOC_H
 
-#include <linux/config.h>
-
 #include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
 
-#include <asm/proc/cache.h>
 #include <asm/proc/pgalloc.h>
 
 /*
diff --git a/include/asm-arm/proc-armo/cache.h b/include/asm-arm/proc-armo/cache.h
index 3d87ca3842b6..1ed553bc0aa8 100644
--- a/include/asm-arm/proc-armo/cache.h
+++ b/include/asm-arm/proc-armo/cache.h
@@ -27,66 +27,3 @@
 /* DAG: ARM3 will flush cache on MEMC updates anyway? so don't bother */
 #define clean_cache_area(_start,_size) do { } while (0)
-
-/*
- * TLB flushing:
- *
- *  - flush_tlb_all() flushes all processes TLBs
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- */
-#define flush_tlb_all()			memc_update_all()
-#define flush_tlb_mm(mm)		memc_update_mm(mm)
-#define flush_tlb_range(vma,start,end)	\
-	do { memc_update_mm(vma->vm_mm); (void)(start); (void)(end); } while (0)
-#define flush_tlb_page(vma, vmaddr)	do { } while (0)
-
-/*
- * The following handle the weird MEMC chip
- */
-static inline void memc_update_all(void)
-{
-	struct task_struct *p;
-
-	cpu_memc_update_all(init_mm.pgd);
-	for_each_task(p) {
-		if (!p->mm)
-			continue;
-		cpu_memc_update_all(p->mm->pgd);
-	}
-	processor._set_pgd(current->active_mm->pgd);
-}
-
-static inline void memc_update_mm(struct mm_struct *mm)
-{
-	cpu_memc_update_all(mm->pgd);
-
-	if (mm == current->active_mm)
-		processor._set_pgd(mm->pgd);
-}
-
-static inline void
-memc_clear(struct mm_struct *mm, struct page *page)
-{
-	cpu_memc_update_entry(mm->pgd, (unsigned long) page_address(page), 0);
-
-	if (mm == current->active_mm)
-		processor._set_pgd(mm->pgd);
-}
-
-static inline void
-memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr)
-{
-	cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr);
-
-	if (mm == current->active_mm)
-		processor._set_pgd(mm->pgd);
-}
-
-static inline void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	memc_update_addr(mm, pte, addr);
-}
diff --git a/include/asm-arm/proc-armo/tlbflush.h b/include/asm-arm/proc-armo/tlbflush.h
new file mode 100644
index 000000000000..f10e5b66b596
--- /dev/null
+++ b/include/asm-arm/proc-armo/tlbflush.h
@@ -0,0 +1,63 @@
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb_all() flushes all processes TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ */
+#define flush_tlb_all()			memc_update_all()
+#define flush_tlb_mm(mm)		memc_update_mm(mm)
+#define flush_tlb_range(vma,start,end)	\
+	do { memc_update_mm(vma->vm_mm); (void)(start); (void)(end); } while (0)
+#define flush_tlb_page(vma, vmaddr)	do { } while (0)
+
+/*
+ * The following handle the weird MEMC chip
+ */
+static inline void memc_update_all(void)
+{
+	struct task_struct *p;
+
+	cpu_memc_update_all(init_mm.pgd);
+	for_each_task(p) {
+		if (!p->mm)
+			continue;
+		cpu_memc_update_all(p->mm->pgd);
+	}
+	processor._set_pgd(current->active_mm->pgd);
+}
+
+static inline void memc_update_mm(struct mm_struct *mm)
+{
+	cpu_memc_update_all(mm->pgd);
+
+	if (mm == current->active_mm)
+		processor._set_pgd(mm->pgd);
+}
+
+static inline void
+memc_clear(struct mm_struct *mm, struct page *page)
+{
+	cpu_memc_update_entry(mm->pgd, (unsigned long) page_address(page), 0);
+
+	if (mm == current->active_mm)
+		processor._set_pgd(mm->pgd);
+}
+
+static inline void
+memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr)
+{
+	cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr);
+
+	if (mm == current->active_mm)
+		processor._set_pgd(mm->pgd);
+}
+
+static inline void
+update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	memc_update_addr(mm, pte, addr);
+}
diff --git a/include/asm-arm/proc-armv/cache.h b/include/asm-arm/proc-armv/cache.h
index 623e262d4166..fe678c2b0d7e 100644
--- a/include/asm-arm/proc-armv/cache.h
+++ b/include/asm-arm/proc-armv/cache.h
@@ -132,108 +132,3 @@ static inline void flush_dcache_page(struct page *page)
 	do {							\
 		cpu_icache_invalidate_range((_s), (_e));	\
 	} while (0)
-
-/*
- * TLB Management
- * ==============
- *
- * The arch/arm/mm/tlb-*.S files implement these methods.
- *
- * The TLB specific code is expected to perform whatever tests it
- * needs to determine if it should invalidate the TLB for each
- * call.  Start addresses are inclusive and end addresses are
- * exclusive; it is safe to round these addresses down.
- *
- * flush_tlb_all()
- *
- *	Invalidate the entire TLB.
- *
- * flush_tlb_mm(mm)
- *
- *	Invalidate all TLB entries in a particular address
- *	space.
- *	- mm	- mm_struct describing address space
- *
- * flush_tlb_range(mm,start,end)
- *
- *	Invalidate a range of TLB entries in the specified
- *	address space.
- *	- mm	- mm_struct describing address space
- *	- start	- start address (may not be aligned)
- *	- end	- end address (exclusive, may not be aligned)
- *
- * flush_tlb_page(vaddr,vma)
- *
- *	Invalidate the specified page in the specified address range.
- *	- vaddr	- virtual address (may not be aligned)
- *	- vma	- vma_struct describing address range
- *
- * flush_kern_tlb_page(kaddr)
- *
- *	Invalidate the TLB entry for the specified page.  The address
- *	will be in the kernels virtual memory space.  Current uses
- *	only require the D-TLB to be invalidated.
- *	- kaddr	- Kernel virtual memory address
- */
-
-struct cpu_tlb_fns {
-	void (*flush_kern_all)(void);
-	void (*flush_user_mm)(struct mm_struct *);
-	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
-	void (*flush_user_page)(unsigned long, struct vm_area_struct *);
-	void (*flush_kern_page)(unsigned long);
-};
-
-/*
- * Convert calls to our calling convention.
- */
-#define flush_tlb_all()		__cpu_flush_kern_tlb_all()
-#define flush_tlb_mm(mm)	__cpu_flush_user_tlb_mm(mm)
-#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
-#define flush_tlb_page(vma,vaddr)	__cpu_flush_user_tlb_page(vaddr,vma)
-#define flush_kern_tlb_page(kaddr)	__cpu_flush_kern_tlb_page(kaddr)
-
-/*
- * Now select the calling method
- */
-#ifdef MULTI_TLB
-
-extern struct cpu_tlb_fns cpu_tlb;
-
-#define __cpu_flush_kern_tlb_all	cpu_tlb.flush_kern_all
-#define __cpu_flush_user_tlb_mm	cpu_tlb.flush_user_mm
-#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
-#define __cpu_flush_user_tlb_page	cpu_tlb.flush_user_page
-#define __cpu_flush_kern_tlb_page	cpu_tlb.flush_kern_page
-
-#else
-
-#define __cpu_flush_kern_tlb_all	__glue(_TLB,_flush_kern_tlb_all)
-#define __cpu_flush_user_tlb_mm	__glue(_TLB,_flush_user_tlb_mm)
-#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
-#define __cpu_flush_user_tlb_page	__glue(_TLB,_flush_user_tlb_page)
-#define __cpu_flush_kern_tlb_page	__glue(_TLB,_flush_kern_tlb_page)
-
-extern void __cpu_flush_kern_tlb_all(void);
-extern void __cpu_flush_user_tlb_mm(struct mm_struct *);
-extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
-extern void __cpu_flush_user_tlb_page(unsigned long, struct vm_area_struct *);
-extern void __cpu_flush_kern_tlb_page(unsigned long);
-
-#endif
-
-/*
- * if PG_dcache_dirty is set for the page, we need to ensure that any
- * cache entries for the kernels virtual memory range are written
- * back to the page.
- */
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
-
-/*
- * Old ARM MEMC stuff.  This supports the reversed mapping handling that
- * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
- */
-#define memc_update_all()		do { } while (0)
-#define memc_update_mm(mm)		do { } while (0)
-#define memc_update_addr(mm,pte,log)	do { } while (0)
-#define memc_clear(mm,physaddr)		do { } while (0)
diff --git a/include/asm-arm/proc-armv/tlbflush.h b/include/asm-arm/proc-armv/tlbflush.h
new file mode 100644
index 000000000000..d465e954ae13
--- /dev/null
+++ b/include/asm-arm/proc-armv/tlbflush.h
@@ -0,0 +1,125 @@
+/*
+ *  linux/include/asm-arm/proc-armv/tlbflush.h
+ *
+ *  Copyright (C) 1999-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * TLB Management
+ * ==============
+ *
+ * The arch/arm/mm/tlb-*.S files implement these methods.
+ *
+ * The TLB specific code is expected to perform whatever tests it
+ * needs to determine if it should invalidate the TLB for each
+ * call.  Start addresses are inclusive and end addresses are
+ * exclusive; it is safe to round these addresses down.
+ *
+ * flush_tlb_all()
+ *
+ *	Invalidate the entire TLB.
+ *
+ * flush_tlb_mm(mm)
+ *
+ *	Invalidate all TLB entries in a particular address
+ *	space.
+ *	- mm	- mm_struct describing address space
+ *
+ * flush_tlb_range(mm,start,end)
+ *
+ *	Invalidate a range of TLB entries in the specified
+ *	address space.
+ *	- mm	- mm_struct describing address space
+ *	- start	- start address (may not be aligned)
+ *	- end	- end address (exclusive, may not be aligned)
+ *
+ * flush_tlb_page(vaddr,vma)
+ *
+ *	Invalidate the specified page in the specified address range.
+ *	- vaddr	- virtual address (may not be aligned)
+ *	- vma	- vma_struct describing address range
+ *
+ * flush_kern_tlb_page(kaddr)
+ *
+ *	Invalidate the TLB entry for the specified page.  The address
+ *	will be in the kernels virtual memory space.  Current uses
+ *	only require the D-TLB to be invalidated.
+ *	- kaddr	- Kernel virtual memory address
+ */
+
+struct cpu_tlb_fns {
+	void (*flush_kern_all)(void);
+	void (*flush_user_mm)(struct mm_struct *);
+	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
+	void (*flush_user_page)(unsigned long, struct vm_area_struct *);
+	void (*flush_kern_range)(unsigned long, unsigned long);
+	void (*flush_kern_page)(unsigned long);
+};
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define flush_tlb_all()		__cpu_flush_kern_tlb_all()
+#define flush_tlb_mm(mm)	__cpu_flush_user_tlb_mm(mm)
+#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
+#define flush_tlb_page(vma,vaddr)	__cpu_flush_user_tlb_page(vaddr,vma)
+#define flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
+#define flush_tlb_kernel_page(kaddr)	__cpu_flush_kern_tlb_page(kaddr)
+
+/*
+ * Now select the calling method
+ */
+#ifdef MULTI_TLB
+
+extern struct cpu_tlb_fns cpu_tlb;
+
+#define __cpu_flush_kern_tlb_all	cpu_tlb.flush_kern_all
+#define __cpu_flush_user_tlb_mm	cpu_tlb.flush_user_mm
+#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
+#define __cpu_flush_user_tlb_page	cpu_tlb.flush_user_page
+#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range
+#define __cpu_flush_kern_tlb_page	cpu_tlb.flush_kern_page
+
+#else
+
+#define __cpu_flush_kern_tlb_all	__glue(_TLB,_flush_kern_tlb_all)
+#define __cpu_flush_user_tlb_mm	__glue(_TLB,_flush_user_tlb_mm)
+#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
+#define __cpu_flush_user_tlb_page	__glue(_TLB,_flush_user_tlb_page)
+#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)
+#define __cpu_flush_kern_tlb_page	__glue(_TLB,_flush_kern_tlb_page)
+
+extern void __cpu_flush_kern_tlb_all(void);
+extern void __cpu_flush_user_tlb_mm(struct mm_struct *);
+extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_user_tlb_page(unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
+extern void __cpu_flush_kern_tlb_page(unsigned long);
+
+#endif
+
+/*
+ * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * cache entries for the kernels virtual memory range are written
+ * back to the page.
+ */
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+
+/*
+ * ARM processors do not cache TLB tables in RAM.
+ */
+#define flush_tlb_pgtables(mm,start,end)	do { } while (0)
+
+/*
+ * Old ARM MEMC stuff.  This supports the reversed mapping handling that
+ * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
+ */
+#define memc_update_all()		do { } while (0)
+#define memc_update_mm(mm)		do { } while (0)
+#define memc_update_addr(mm,pte,log)	do { } while (0)
+#define memc_clear(mm,physaddr)		do { } while (0)
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index bfef8bfb0e15..ede2ac4359b1 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -102,6 +102,8 @@ static inline unsigned long __thread_saved_fp(struct thread_info *thread)
 #endif
 
+#define PREEMPT_ACTIVE	0x04000000
+
 /*
  * thread information flags:
  *  TIF_SYSCALL_TRACE - syscall trace active
diff --git a/include/asm-arm/tlbflush.h b/include/asm-arm/tlbflush.h
new file mode 100644
index 000000000000..9011f00fca32
--- /dev/null
+++ b/include/asm-arm/tlbflush.h
@@ -0,0 +1,15 @@
+/*
+ *  linux/include/asm-arm/tlbflush.h
+ *
+ *  Copyright (C) 2000-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_TLBFLUSH_H
+#define _ASMARM_TLBFLUSH_H
+
+#include <asm-arm/proc/tlbflush.h>
+
+#endif
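Taken together, the new headers route every TLB flush through a per-core function-pointer table when MULTI_TLB is defined, or through a glue-pasted symbol otherwise, and add the kernel-range/kernel-page entry points that minicache.c now calls. A compilable C sketch of the MULTI_TLB dispatch under hypothetical stand-in names — the struct layout follows the proc-armv/tlbflush.h hunk above, and the printf bodies stand in for the tlb-v4.S assembly:

#include <stdio.h>

/* Mirrors struct cpu_tlb_fns from the new proc-armv/tlbflush.h,
 * reduced to the kernel-side entries for this sketch. */
struct cpu_tlb_fns_sketch {
	void (*flush_kern_all)(void);
	void (*flush_kern_range)(unsigned long start, unsigned long end);
	void (*flush_kern_page)(unsigned long kaddr);
};

/* Stand-ins for the v4 assembly entry points in tlb-v4.S. */
static void v4_flush_kern_tlb_page_sketch(unsigned long kaddr)
{
	printf("invalidate D TLB entry for page %#lx\n", kaddr & ~0xfffUL);
}

static void v4_flush_kern_tlb_range_sketch(unsigned long start, unsigned long end)
{
	for (start &= ~0xfffUL; start < end; start += 0x1000)	/* PAGE_SZ steps */
		v4_flush_kern_tlb_page_sketch(start);
}

static void v4_flush_kern_tlb_all_sketch(void)
{
	printf("invalidate entire TLB\n");
}

/* With MULTI_TLB, cpu_tlb is populated from the per-core tables
 * (the .long lists registered in tlb-v3.S/tlb-v4.S/tlb-v4wb.S above). */
static struct cpu_tlb_fns_sketch cpu_tlb = {
	.flush_kern_all   = v4_flush_kern_tlb_all_sketch,
	.flush_kern_range = v4_flush_kern_tlb_range_sketch,
	.flush_kern_page  = v4_flush_kern_tlb_page_sketch,
};

/* The header's macros reduce to this indirection: */
#define flush_tlb_kernel_page(kaddr)	cpu_tlb.flush_kern_page(kaddr)
#define flush_tlb_kernel_range(s, e)	cpu_tlb.flush_kern_range(s, e)

int main(void)
{
	/* the shape of minicache.c's call site after this patch: */
	flush_tlb_kernel_page(0xffff1000UL);
	flush_tlb_kernel_range(0xffff0000UL, 0xffff3000UL);
	return 0;
}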
