diff options
| author | Russell King <rmk@flint.arm.linux.org.uk> | 2003-04-28 02:47:44 +0100 |
|---|---|---|
| committer | Russell King <rmk@flint.arm.linux.org.uk> | 2003-04-28 02:47:44 +0100 |
| commit | d37d9068570fe5177134e0ee75273ec01d9be5b3 (patch) | |
| tree | 6f968affce6d7e7127ff491d2557e07bf3293e8f /include/asm-arm | |
| parent | 10eacf1775e168e57c610cfcc7358fa357fbb0d5 (diff) | |
[ARM] Part 2 in the cache API changes.
This is the new API; we now have methods for handling DMA which are
separate from those handling the TLB consistency issues, which are
in turn separate from the methods handling the cache coherency
issues.
Implementations are, however, free to alias these methods internally.
Diffstat (limited to 'include/asm-arm')
| -rw-r--r-- | include/asm-arm/cpu-multi32.h | 96 | ||||
| -rw-r--r-- | include/asm-arm/cpu-single.h | 23 | ||||
| -rw-r--r-- | include/asm-arm/proc-armv/cache.h | 201 | ||||
| -rw-r--r-- | include/asm-arm/procinfo.h | 4 |
4 files changed, 207 insertions(+), 117 deletions(-)
diff --git a/include/asm-arm/cpu-multi32.h b/include/asm-arm/cpu-multi32.h index 59835af6fa00..bb8b5bb19041 100644 --- a/include/asm-arm/cpu-multi32.h +++ b/include/asm-arm/cpu-multi32.h @@ -46,85 +46,31 @@ extern struct processor { /* * Processor architecture specific */ - struct { /* CACHE */ - /* - * flush all caches - */ - void (*clean_invalidate_all)(void); - /* - * flush a specific page or pages - */ - void (*clean_invalidate_range)(unsigned long address, unsigned long end, int flags); - } cache; - - struct { /* D-cache */ - /* - * invalidate the specified data range - */ - void (*invalidate_range)(unsigned long start, unsigned long end); - /* - * clean specified data range - */ - void (*clean_range)(unsigned long start, unsigned long end); - /* - * obsolete flush cache entry - */ - void (*clean_page)(void *virt_page); - /* - * clean a virtual address range from the - * D-cache without flushing the cache. - */ - void (*clean_entry)(unsigned long start); - } dcache; - - struct { /* I-cache */ - /* - * invalidate the I-cache for the specified range - */ - void (*invalidate_range)(unsigned long start, unsigned long end); - /* - * invalidate the I-cache for the specified virtual page - */ - void (*invalidate_page)(void *virt_page); - } icache; + /* + * clean a virtual address range from the + * D-cache without flushing the cache. 
+ */ + void (*dcache_clean_area)(void *addr, int size); - struct { /* PageTable */ - /* - * Set the page table - */ - void (*set_pgd)(unsigned long pgd_phys, struct mm_struct *mm); - /* - * Set a PTE - */ - void (*set_pte)(pte_t *ptep, pte_t pte); - } pgtable; + /* + * Set the page table + */ + void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); + /* + * Set a PTE + */ + void (*set_pte)(pte_t *ptep, pte_t pte); } processor; -extern const struct processor arm6_processor_functions; -extern const struct processor arm7_processor_functions; -extern const struct processor sa110_processor_functions; - -#define cpu_check_bugs() processor._check_bugs() -#define cpu_proc_init() processor._proc_init() -#define cpu_proc_fin() processor._proc_fin() -#define cpu_reset(addr) processor.reset(addr) -#define cpu_do_idle() processor._do_idle() - -#define cpu_cache_clean_invalidate_all() processor.cache.clean_invalidate_all() -#define cpu_cache_clean_invalidate_range(s,e,f) processor.cache.clean_invalidate_range(s,e,f) - -#define cpu_dcache_clean_page(vp) processor.dcache.clean_page(vp) -#define cpu_dcache_clean_entry(addr) processor.dcache.clean_entry(addr) -#define cpu_dcache_clean_range(s,e) processor.dcache.clean_range(s,e) -#define cpu_dcache_invalidate_range(s,e) processor.dcache.invalidate_range(s,e) - -#define cpu_icache_invalidate_range(s,e) processor.icache.invalidate_range(s,e) -#define cpu_icache_invalidate_page(vp) processor.icache.invalidate_page(vp) - -#define cpu_set_pgd(pgd,mm) processor.pgtable.set_pgd(pgd,mm) -#define cpu_set_pte(ptep, pte) processor.pgtable.set_pte(ptep, pte) +#define cpu_check_bugs() processor._check_bugs() +#define cpu_proc_init() processor._proc_init() +#define cpu_proc_fin() processor._proc_fin() +#define cpu_reset(addr) processor.reset(addr) +#define cpu_do_idle() processor._do_idle() +#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) +#define cpu_set_pte(ptep, pte) processor.set_pte(ptep, pte) -#define 
cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm) +#define cpu_switch_mm(pgd,mm) processor.switch_mm(__virt_to_phys((unsigned long)(pgd)),mm) #define cpu_get_pgd() \ ({ \ diff --git a/include/asm-arm/cpu-single.h b/include/asm-arm/cpu-single.h index 738b61dc2553..30394b11876e 100644 --- a/include/asm-arm/cpu-single.h +++ b/include/asm-arm/cpu-single.h @@ -27,14 +27,7 @@ #define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin) #define cpu_reset __cpu_fn(CPU_NAME,_reset) #define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle) -#define cpu_cache_clean_invalidate_all __cpu_fn(CPU_NAME,_cache_clean_invalidate_all) -#define cpu_cache_clean_invalidate_range __cpu_fn(CPU_NAME,_cache_clean_invalidate_range) -#define cpu_dcache_invalidate_range __cpu_fn(CPU_NAME,_dcache_invalidate_range) -#define cpu_dcache_clean_range __cpu_fn(CPU_NAME,_dcache_clean_range) -#define cpu_dcache_clean_page __cpu_fn(CPU_NAME,_dcache_clean_page) -#define cpu_dcache_clean_entry __cpu_fn(CPU_NAME,_dcache_clean_entry) -#define cpu_icache_invalidate_range __cpu_fn(CPU_NAME,_icache_invalidate_range) -#define cpu_icache_invalidate_page __cpu_fn(CPU_NAME,_icache_invalidate_page) +#define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area) #define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd) #define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte) @@ -46,23 +39,11 @@ struct mm_struct; /* declare all the functions as extern */ -extern void cpu_data_abort(unsigned long pc); extern void cpu_check_bugs(void); extern void cpu_proc_init(void); extern void cpu_proc_fin(void); extern int cpu_do_idle(void); - -extern void cpu_cache_clean_invalidate_all(void); -extern void cpu_cache_clean_invalidate_range(unsigned long address, unsigned long end, int flags); - -extern void cpu_dcache_invalidate_range(unsigned long start, unsigned long end); -extern void cpu_dcache_clean_range(unsigned long start, unsigned long end); -extern void cpu_dcache_clean_page(void *virt_page); -extern void 
cpu_dcache_clean_entry(unsigned long address); - -extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end); -extern void cpu_icache_invalidate_page(void *virt_page); - +extern void cpu_dcache_clean_area(void *, int); extern void cpu_set_pgd(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_set_pte(pte_t *ptep, pte_t pte); diff --git a/include/asm-arm/proc-armv/cache.h b/include/asm-arm/proc-armv/cache.h index ba2bc2972b6d..250a69e335ff 100644 --- a/include/asm-arm/proc-armv/cache.h +++ b/include/asm-arm/proc-armv/cache.h @@ -11,22 +11,156 @@ #include <asm/glue.h> /* + * Cache Model + * =========== + */ +#undef _CACHE +#undef MULTI_CACHE + +#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v3 +# endif +#endif + +#if defined(CONFIG_CPU_ARM720T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4 +# endif +#endif + +#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ + defined(CONFIG_CPU_ARM1020) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_ARM926T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm926 +# endif +#endif + +#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4wb +# endif +#endif + +#if defined(CONFIG_CPU_XSCALE) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE xscale +# endif +#endif + +#if !defined(_CACHE) && !defined(MULTI_CACHE) +#error Unknown cache maintainence model +#endif + +/* * This flag is used to indicate that the page pointed to by a pte * is dirty and requires cleaning before returning it to the user. */ #define PG_dcache_dirty PG_arch_1 /* - * Cache handling for 32-bit ARM processors. + * MM Cache Management + * =================== + * + * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files + * implement these methods. 
+ * + * Start addresses are inclusive and end addresses are exclusive; + * start addresses should be rounded down, end addresses up. + * + * See linux/Documentation/cachetlb.txt for more information. + * Please note that the implementation of these, and the required + * effects are cache-type (VIVT/VIPT/PIPT) specific. + * + * flush_cache_kern_all() + * + * Unconditionally clean and invalidate the entire cache. + * + * flush_cache_user_mm(mm) + * + * Clean and invalidate all user space cache entries + * before a change of page tables. + * + * flush_cache_user_range(start, end, flags) + * + * Clean and invalidate a range of cache entries in the + * specified address space before a change of page tables. + * - start - user start address (inclusive, page aligned) + * - end - user end address (exclusive, page aligned) + * - flags - vma->vm_flags field + * + * coherent_kern_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. If you have non-snooping + * Harvard caches, you need to implement this function. + * - start - virtual start address + * - end - virtual end address + * + * DMA Cache Coherency + * =================== + * + * dma_inv_range(start, end) * - * Note that on ARM, we have a more accurate specification than that - * Linux's "flush". We therefore do not use "flush" here, but instead - * use: + * Invalidate (discard) the specified virtual address range. + * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. + * - start - virtual start address + * - end - virtual end address * - * clean: the act of pushing dirty cache entries out to memory. - * invalidate: the act of discarding data held within the cache, - * whether it is dirty or not. + * dma_clean_range(start, end) + * + * Clean (write back) the specified virtual address range. 
+ * - start - virtual start address + * - end - virtual end address + * + * dma_flush_range(start, end) + * + * Clean and invalidate the specified virtual address range. + * - start - virtual start address + * - end - virtual end address + */ + +struct cpu_cache_fns { + void (*flush_kern_all)(void); + void (*flush_user_all)(void); + void (*flush_user_range)(unsigned long, unsigned long, unsigned int); + + void (*coherent_kern_range)(unsigned long, unsigned long); + void (*flush_kern_dcache_page)(void *); + + void (*dma_inv_range)(unsigned long, unsigned long); + void (*dma_clean_range)(unsigned long, unsigned long); + void (*dma_flush_range)(unsigned long, unsigned long); +}; + +/* + * Select the calling method */ +#ifdef MULTI_CACHE + +extern struct cpu_cache_fns cpu_cache; + +#define __cpuc_flush_kern_all cpu_cache.flush_kern_all +#define __cpuc_flush_user_all cpu_cache.flush_user_all +#define __cpuc_flush_user_range cpu_cache.flush_user_range +#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range +#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page /* * These are private to the dma-mapping API. Do not use directly. @@ -34,27 +168,57 @@ * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. 
*/ -#define dmac_inv_range cpu_dcache_invalidate_range -#define dmac_clean_range cpu_dcache_clean_range -#define dmac_flush_range(_s,_e) cpu_cache_clean_invalidate_range((_s),(_e),0) +#define dmac_inv_range cpu_cache.dma_inv_range +#define dmac_clean_range cpu_cache.dma_clean_range +#define dmac_flush_range cpu_cache.dma_flush_range + +#else + +#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) +#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) +#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) +#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) +#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page) + +extern void __cpuc_flush_kern_all(void); +extern void __cpuc_flush_user_all(void); +extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); +extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); +extern void __cpuc_flush_dcache_page(void *); + +/* + * These are private to the dma-mapping API. Do not use directly. + * Their sole purpose is to ensure that data held in the cache + * is visible to DMA, or data written by DMA to system memory is + * visible to the CPU. + */ +#define dmac_inv_range __glue(_CACHE,_dma_inv_range) +#define dmac_clean_range __glue(_CACHE,_dma_clean_range) +#define dmac_flush_range __glue(_CACHE,_dma_flush_range) + +extern void dmac_inv_range(unsigned long, unsigned long); +extern void dmac_clean_range(unsigned long, unsigned long); +extern void dmac_flush_range(unsigned long, unsigned long); + +#endif /* * Convert calls to our calling convention. 
*/ -#define flush_cache_all() cpu_cache_clean_invalidate_all() +#define flush_cache_all() __cpuc_flush_kern_all() static inline void flush_cache_mm(struct mm_struct *mm) { if (current->active_mm == mm) - cpu_cache_clean_invalidate_all(); + __cpuc_flush_user_all(); } static inline void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (current->active_mm == vma->vm_mm) - cpu_cache_clean_invalidate_range(start & PAGE_MASK, - PAGE_ALIGN(end), vma->vm_flags); + __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), + vma->vm_flags); } static inline void @@ -62,8 +226,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) { if (current->active_mm == vma->vm_mm) { unsigned long addr = user_addr & PAGE_MASK; - cpu_cache_clean_invalidate_range(addr, addr + PAGE_SIZE, - vma->vm_flags & VM_EXEC); + __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); } } @@ -71,15 +234,13 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) * Perform necessary cache operations to ensure that data previously * stored within this range of addresses can be executed by the CPU. */ -#define flush_icache_range(s,e) cpu_icache_invalidate_range(s,e) +#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e) /* * Perform necessary cache operations to ensure that the TLB will * see data written in the specified area. 
*/ -#define clean_dcache_area(start,size) \ - cpu_cache_clean_invalidate_range((unsigned long)start, \ - ((unsigned long)start) + size, 0); +#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size) /* * flush_dcache_page is used when the kernel has written to the page diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h index a762a23a7560..a3116e9f3758 100644 --- a/include/asm-arm/procinfo.h +++ b/include/asm-arm/procinfo.h @@ -14,6 +14,7 @@ struct cpu_tlb_fns; struct cpu_user_fns; +struct cpu_cache_fns; struct processor; /* @@ -37,13 +38,14 @@ struct proc_info_list { struct processor *proc; struct cpu_tlb_fns *tlb; struct cpu_user_fns *user; + struct cpu_cache_fns *cache; }; extern unsigned int elf_hwcap; #endif /* __ASSEMBLY__ */ -#define PROC_INFO_SZ 44 +#define PROC_INFO_SZ 48 #define HWCAP_SWP 1 #define HWCAP_HALF 2 |
