diff options
| author | Russell King <rmk@flint.arm.linux.org.uk> | 2004-09-07 16:45:36 +0100 |
|---|---|---|
| committer | Russell King <rmk@flint.arm.linux.org.uk> | 2004-09-07 16:45:36 +0100 |
| commit | 71432e79b76bc0d8b6a5a4a515b2cf19b5405e4a (patch) | |
| tree | 3db9013b6e180a362d4b37216c81b994aa4fc075 | |
| parent | 86afe9ae4d1a89b7e4b46b86867b3d75e2e320a9 (diff) | |
[ARM] Add flush_cache_user_page() for sys_cacheflush()
Add flush_cache_user_page() to handle sys_cacheflush. Userspace
wants to use this call to ensure I/D cache coherency for runtime-
built code (eg, used by Java etc). This does not fit well with
the purpose of flush_cache_range(), so provide
flush_cache_user_range() instead.
| -rw-r--r-- | arch/arm/kernel/traps.c | 4 | ||||
| -rw-r--r-- | arch/arm/mm/cache-v3.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/cache-v4.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/cache-v4wb.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/cache-v4wt.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/cache-v6.S | 17 | ||||
| -rw-r--r-- | arch/arm/mm/proc-arm1020.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/proc-arm1020e.S | 13 | ||||
| -rw-r--r-- | arch/arm/mm/proc-arm1022.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/proc-arm1026.S | 13 | ||||
| -rw-r--r-- | arch/arm/mm/proc-arm920.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/proc-arm922.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/proc-arm925.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/proc-arm926.S | 14 | ||||
| -rw-r--r-- | arch/arm/mm/proc-xscale.S | 17 | ||||
| -rw-r--r-- | include/asm-arm/cacheflush.h | 12 |
16 files changed, 214 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index e5c7f285bfaf..b6bb8f6ab46d 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -381,7 +381,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags) { struct vm_area_struct *vma; - if (end < start) + if (end < start || flags) return; vma = find_vma(current->active_mm, start); @@ -391,7 +391,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags) if (end > vma->vm_end) end = vma->vm_end; - flush_cache_range(vma, start, end); + flush_cache_user_range(vma, start, end); } } diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 6659439f63fc..e1994788cf0e 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -57,6 +57,19 @@ ENTRY(v3_flush_user_cache_range) * - end - virtual end address */ ENTRY(v3_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(v3_coherent_user_range) mov pc, lr /* @@ -116,6 +129,7 @@ ENTRY(v3_cache_fns) .long v3_flush_user_cache_all .long v3_flush_user_cache_range .long v3_coherent_kern_range + .long v3_coherent_user_range .long v3_flush_kern_dcache_page .long v3_dma_inv_range .long v3_dma_clean_range diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index bbc822f16a7f..b8ad5d58ebe2 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -59,6 +59,19 @@ ENTRY(v4_flush_user_cache_range) * - end - virtual end address */ ENTRY(v4_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. 
+ * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(v4_coherent_user_range) mov pc, lr /* @@ -118,6 +131,7 @@ ENTRY(v4_cache_fns) .long v4_flush_user_cache_all .long v4_flush_user_cache_range .long v4_coherent_kern_range + .long v4_coherent_user_range .long v4_flush_kern_dcache_page .long v4_dma_inv_range .long v4_dma_clean_range diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index 4f7c918e6ac9..5c4055b62d97 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -121,6 +121,19 @@ ENTRY(v4wb_flush_kern_dcache_page) * - end - virtual end address */ ENTRY(v4wb_coherent_kern_range) + /* fall through */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(v4wb_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry @@ -195,6 +208,7 @@ ENTRY(v4wb_cache_fns) .long v4wb_flush_user_cache_all .long v4wb_flush_user_cache_range .long v4wb_coherent_kern_range + .long v4wb_coherent_user_range .long v4wb_flush_kern_dcache_page .long v4wb_dma_inv_range .long v4wb_dma_clean_range diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index 61c9fc60f2a4..9bcabd86c6f3 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -97,6 +97,19 @@ ENTRY(v4wt_flush_user_cache_range) * - end - virtual end address */ ENTRY(v4wt_coherent_kern_range) + /* FALLTRHOUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. 
+ * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(v4wt_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry add r0, r0, #CACHE_DLINESIZE @@ -167,6 +180,7 @@ ENTRY(v4wt_cache_fns) .long v4wt_flush_user_cache_all .long v4wt_flush_user_cache_range .long v4wt_coherent_kern_range + .long v4wt_coherent_user_range .long v4wt_flush_kern_dcache_page .long v4wt_dma_inv_range .long v4wt_dma_clean_range diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 336aa0e4057b..85c10a71e7c6 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -75,6 +75,22 @@ ENTRY(v6_flush_user_cache_range) * - the Icache does not read data from the write buffer */ ENTRY(v6_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * v6_coherent_user_range(start,end) + * + * Ensure that the I and D caches are coherent within specified + * region. This is typically used when code has been written to + * a memory region, and will be executed. + * + * - start - virtual start address of region + * - end - virtual end address of region + * + * It is assumed that: + * - the Icache does not read data from the write buffer + */ +ENTRY(v6_coherent_user_range) bic r0, r0, #CACHE_LINE_SIZE - 1 1: #ifdef HARVARD_CACHE @@ -203,6 +219,7 @@ ENTRY(v6_cache_fns) .long v6_flush_user_cache_all .long v6_flush_user_cache_range .long v6_coherent_kern_range + .long v6_coherent_user_range .long v6_flush_kern_dcache_page .long v6_dma_inv_range .long v6_dma_clean_range diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 7fe21a95d0bd..1267ab586711 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -196,6 +196,19 @@ ENTRY(arm1020_flush_user_cache_range) * - end - virtual end address */ ENTRY(arm1020_coherent_kern_range) + /* FALLTRHOUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. 
If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1020_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 mcr p15, 0, ip, c7, c10, 4 @@ -317,6 +330,7 @@ ENTRY(arm1020_cache_fns) .long arm1020_flush_user_cache_all .long arm1020_flush_user_cache_range .long arm1020_coherent_kern_range + .long arm1020_coherent_user_range .long arm1020_flush_kern_dcache_page .long arm1020_dma_inv_range .long arm1020_dma_clean_range diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 389e2b351800..947790dd3043 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -193,6 +193,18 @@ ENTRY(arm1020e_flush_user_cache_range) * - end - virtual end address */ ENTRY(arm1020e_coherent_kern_range) + /* FALLTHROUGH */ +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. 
+ * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1020e_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 1: @@ -304,6 +316,7 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_flush_user_cache_all .long arm1020e_flush_user_cache_range .long arm1020e_coherent_kern_range + .long arm1020e_coherent_user_range .long arm1020e_flush_kern_dcache_page .long arm1020e_dma_inv_range .long arm1020e_dma_clean_range diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 6d01ea8a3ace..7c84263844f8 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -180,6 +180,19 @@ ENTRY(arm1022_flush_user_cache_range) * - end - virtual end address */ ENTRY(arm1022_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1022_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 1: @@ -291,6 +304,7 @@ ENTRY(arm1022_cache_fns) .long arm1022_flush_user_cache_all .long arm1022_flush_user_cache_range .long arm1022_coherent_kern_range + .long arm1022_coherent_user_range .long arm1022_flush_kern_dcache_page .long arm1022_dma_inv_range .long arm1022_dma_clean_range diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 226f27ad4add..38a06cb1e9fc 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -175,6 +175,18 @@ ENTRY(arm1026_flush_user_cache_range) * - end - virtual end address */ ENTRY(arm1026_coherent_kern_range) + /* FALLTHROUGH */ +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. 
+ * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1026_coherent_user_range) mov ip, #0 bic r0, r0, #CACHE_DLINESIZE - 1 1: @@ -286,6 +298,7 @@ ENTRY(arm1026_cache_fns) .long arm1026_flush_user_cache_all .long arm1026_flush_user_cache_range .long arm1026_coherent_kern_range + .long arm1026_coherent_user_range .long arm1026_flush_kern_dcache_page .long arm1026_dma_inv_range .long arm1026_dma_clean_range diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 3922a8213d20..8c9204a7c3a3 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -182,6 +182,19 @@ ENTRY(arm920_flush_user_cache_range) * - end - virtual end address */ ENTRY(arm920_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm920_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry @@ -277,6 +290,7 @@ ENTRY(arm920_cache_fns) .long arm920_flush_user_cache_all .long arm920_flush_user_cache_range .long arm920_coherent_kern_range + .long arm920_coherent_user_range .long arm920_flush_kern_dcache_page .long arm920_dma_inv_range .long arm920_dma_clean_range diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 86065117c875..13e65cb8a575 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -184,6 +184,19 @@ ENTRY(arm922_flush_user_cache_range) * - end - virtual end address */ ENTRY(arm922_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. 
If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm922_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry @@ -279,6 +292,7 @@ ENTRY(arm922_cache_fns) .long arm922_flush_user_cache_all .long arm922_flush_user_cache_range .long arm922_coherent_kern_range + .long arm922_coherent_user_range .long arm922_flush_kern_dcache_page .long arm922_dma_inv_range .long arm922_dma_clean_range diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 917eb5e16235..e8b3ff1fbc92 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -225,6 +225,19 @@ ENTRY(arm925_flush_user_cache_range) * - end - virtual end address */ ENTRY(arm925_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. If you have non-snooping + * Harvard caches, you need to implement this function. 
+ * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm925_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry @@ -329,6 +342,7 @@ ENTRY(arm925_cache_fns) .long arm925_flush_user_cache_all .long arm925_flush_user_cache_range .long arm925_coherent_kern_range + .long arm925_coherent_user_range .long arm925_flush_kern_dcache_page .long arm925_dma_inv_range .long arm925_dma_clean_range diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 1ad2afecef04..5631c8bbdd6d 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -185,6 +185,19 @@ ENTRY(arm926_flush_user_cache_range) * - end - virtual end address */ ENTRY(arm926_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm926_coherent_user_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry @@ -289,6 +302,7 @@ ENTRY(arm926_cache_fns) .long arm926_flush_user_cache_all .long arm926_flush_user_cache_range .long arm926_coherent_kern_range + .long arm926_coherent_user_range .long arm926_flush_kern_dcache_page .long arm926_dma_inv_range .long arm926_dma_clean_range diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 15314c0f56eb..2de4e3505d23 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -241,6 +241,22 @@ ENTRY(xscale_flush_user_cache_range) * it also trashes the mini I-cache used by JTAG debuggers. 
*/ ENTRY(xscale_coherent_kern_range) + /* FALLTHROUGH */ + +/* + * coherent_user_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + * + * Note: single I-cache line invalidation isn't used here since + * it also trashes the mini I-cache used by JTAG debuggers. + */ +ENTRY(xscale_coherent_user_range) bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHELINESIZE @@ -341,6 +357,7 @@ ENTRY(xscale_cache_fns) .long xscale_flush_user_cache_all .long xscale_flush_user_cache_range .long xscale_coherent_kern_range + .long xscale_coherent_user_range .long xscale_flush_kern_dcache_page .long xscale_dma_inv_range .long xscale_dma_clean_range diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h index 84d875206dad..9cfb98c191c1 100644 --- a/include/asm-arm/cacheflush.h +++ b/include/asm-arm/cacheflush.h @@ -157,6 +157,7 @@ struct cpu_cache_fns { void (*flush_user_range)(unsigned long, unsigned long, unsigned int); void (*coherent_kern_range)(unsigned long, unsigned long); + void (*coherent_user_range)(unsigned long, unsigned long); void (*flush_kern_dcache_page)(void *); void (*dma_inv_range)(unsigned long, unsigned long); @@ -175,6 +176,7 @@ extern struct cpu_cache_fns cpu_cache; #define __cpuc_flush_user_all cpu_cache.flush_user_all #define __cpuc_flush_user_range cpu_cache.flush_user_range #define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range +#define __cpuc_coherent_user_range cpu_cache.coherent_user_range #define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page /* @@ -193,12 +195,14 @@ extern struct cpu_cache_fns cpu_cache; #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) #define 
__cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) +#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) #define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page) extern void __cpuc_flush_kern_all(void); extern void __cpuc_flush_user_all(void); extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); +extern void __cpuc_coherent_user_range(unsigned long, unsigned long); extern void __cpuc_flush_dcache_page(void *); /* @@ -268,6 +272,14 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) } /* + * flush_cache_user_range is used when we want to ensure that the + * Harvard caches are synchronised for the user space address range. + * This is used for the ARM private sys_cacheflush system call. + */ +#define flush_cache_user_range(vma,start,end) \ + __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) + +/* * Perform necessary cache operations to ensure that data previously * stored within this range of addresses can be executed by the CPU. */ |
