| author | Patrick Mochel <mochel@osdl.org> | 2003-09-03 18:07:18 -0700 |
|---|---|---|
| committer | Patrick Mochel <mochel@osdl.org> | 2003-09-03 18:07:18 -0700 |
| commit | 29b12b99c0fd84b52974bbea7161fba8223fefa8 | |
| tree | 316e822599cce593f7f493cfde4eee922efb6a99 /include | |
| parent | 5c0cb1ca95bd45b65e04ec46fae13b1a36fa52be | |
| parent | 6cea4e9ecabb8f3f07cdb60b17054549e591bc30 | |
Merge osdl.org:/home/mochel/src/kernel/linux-2.5-virgin
into osdl.org:/home/mochel/src/kernel/linux-2.5-core
Diffstat (limited to 'include')
85 files changed, 2538 insertions, 2981 deletions
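The first hunk below, in asm-alpha/semaphore.h, adds a `might_sleep()` debug check ahead of the atomic decrement in the semaphore down helpers, splitting the declaration so the check runs unconditionally — a sleep-while-atomic bug is then reported even when the semaphore is uncontended and the slow path is never taken. A minimal userspace sketch of that pattern; `might_sleep()`, `down_failed()`, and the semaphore type here are stand-ins, not the kernel's definitions:

```c
#include <stdatomic.h>
#include <stdio.h>

struct semaphore {
    atomic_long count;
};

/* Stand-in for the kernel's might_sleep(): in the kernel this is a
 * debugging assertion that warns when a function that may block is
 * called from atomic context. */
static void might_sleep(void) { }

/* Stand-in for __down_failed(): the kernel blocks the caller here. */
static void down_failed(struct semaphore *sem) { (void)sem; }

static void down(struct semaphore *sem)
{
    long count;

    might_sleep();  /* check before touching the count, as in the hunk */
    count = atomic_fetch_sub(&sem->count, 1) - 1;  /* atomic_dec_return() */
    if (count < 0)
        down_failed(sem);
}

int main(void)
{
    struct semaphore sem;

    atomic_init(&sem.count, 1);
    down(&sem);  /* uncontended, yet might_sleep() still ran */
    printf("count now %ld\n", (long)atomic_load(&sem.count));
    return 0;
}
```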
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h index ab26cfbaddbf..a363f018a35f 100644 --- a/include/asm-alpha/semaphore.h +++ b/include/asm-alpha/semaphore.h @@ -88,14 +88,18 @@ extern void __up_wakeup(struct semaphore *); static inline void __down(struct semaphore *sem) { - long count = atomic_dec_return(&sem->count); + long count; + might_sleep(); + count = atomic_dec_return(&sem->count); if (unlikely(count < 0)) __down_failed(sem); } static inline int __down_interruptible(struct semaphore *sem) { - long count = atomic_dec_return(&sem->count); + long count; + might_sleep(); + count = atomic_dec_return(&sem->count); if (unlikely(count < 0)) return __down_failed_interruptible(sem); return 0; diff --git a/include/asm-arm/assembler.h b/include/asm-arm/assembler.h index 677eedee6694..84ab93eb4643 100644 --- a/include/asm-arm/assembler.h +++ b/include/asm-arm/assembler.h @@ -1,18 +1,23 @@ /* - * linux/asm/assembler.h + * linux/include/asm-arm/assembler.h * - * This file contains arm architecture specific defines - * for the different processors. + * Copyright (C) 1996-2000 Russell King * - * Do not include any C declarations in this file - it is included by - * assembler source. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains arm architecture specific defines + * for the different processors. + * + * Do not include any C declarations in this file - it is included by + * assembler source. */ #ifndef __ASSEMBLY__ #error "Only include this from assembly code" #endif -#include <asm/proc/ptrace.h> -#include <asm/proc/assembler.h> +#include <asm/ptrace.h> /* * Endian independent macros for shifting bytes within registers. @@ -36,3 +41,63 @@ #define PLD(code...) #endif +#define MODE_USR USR_MODE +#define MODE_FIQ FIQ_MODE +#define MODE_IRQ IRQ_MODE +#define MODE_SVC SVC_MODE + +#define DEFAULT_FIQ MODE_FIQ + +/* + * LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc}) + */ +#ifdef __STDC__ +#define LOADREGS(cond, base, reglist...)\ + ldm##cond base,reglist +#else +#define LOADREGS(cond, base, reglist...)\ + ldm/**/cond base,reglist +#endif + +/* + * Build a return instruction for this processor type. + */ +#define RETINSTR(instr, regs...)\ + instr regs + +/* + * Save the current IRQ state and disable IRQs. Note that this macro + * assumes FIQs are enabled, and that the processor is in SVC mode. + */ + .macro save_and_disable_irqs, oldcpsr, temp + mrs \oldcpsr, cpsr + mov \temp, #PSR_I_BIT | MODE_SVC + msr cpsr_c, \temp + .endm + +/* + * Restore interrupt state previously stored in a register. We don't + * guarantee that this will preserve the flags. + */ + .macro restore_irqs, oldcpsr + msr cpsr_c, \oldcpsr + .endm + +/* + * These two are used to save LR/restore PC over a user-based access. + * The old 26-bit architecture requires that we do. On 32-bit + * architecture, we can safely ignore this requirement. + */ + .macro save_lr + .endm + + .macro restore_pc + mov pc, lr + .endm + +#define USER(x...) 
\ +9999: x; \ + .section __ex_table,"a"; \ + .align 3; \ + .long 9999b,9001f; \ + .previous diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h index ba9e4b72d821..e8e2cd1d69f1 100644 --- a/include/asm-arm/atomic.h +++ b/include/asm-arm/atomic.h @@ -27,7 +27,7 @@ typedef struct { volatile int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } #ifdef __KERNEL__ -#include <asm/proc/system.h> +#include <asm/system.h> #define atomic_read(v) ((v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h index f5f2b2a3e199..30a47cba4d4b 100644 --- a/include/asm-arm/cacheflush.h +++ b/include/asm-arm/cacheflush.h @@ -1,7 +1,7 @@ /* * linux/include/asm-arm/cacheflush.h * - * Copyright (C) 2000-2002 Russell King + * Copyright (C) 1999-2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -12,6 +12,275 @@ #include <linux/sched.h> #include <linux/mm.h> -#include <asm/proc/cache.h> + +#include <asm/mman.h> +#include <asm/glue.h> + +/* + * Cache Model + * =========== + */ +#undef _CACHE +#undef MULTI_CACHE + +#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v3 +# endif +#endif + +#if defined(CONFIG_CPU_ARM720T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4 +# endif +#endif + +#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ + defined(CONFIG_CPU_ARM1020) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_ARM926T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm926 +# endif +#endif + +#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4wb +# endif +#endif + +#if defined(CONFIG_CPU_XSCALE) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE xscale +# endif +#endif + +#if !defined(_CACHE) && !defined(MULTI_CACHE) +#error Unknown cache maintainence model +#endif + +/* + * This flag is used to indicate that the page pointed to by a pte + * is dirty and requires cleaning before returning it to the user. + */ +#define PG_dcache_dirty PG_arch_1 + +/* + * MM Cache Management + * =================== + * + * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files + * implement these methods. + * + * Start addresses are inclusive and end addresses are exclusive; + * start addresses should be rounded down, end addresses up. + * + * See linux/Documentation/cachetlb.txt for more information. + * Please note that the implementation of these, and the required + * effects are cache-type (VIVT/VIPT/PIPT) specific. + * + * flush_cache_kern_all() + * + * Unconditionally clean and invalidate the entire cache. + * + * flush_cache_user_mm(mm) + * + * Clean and invalidate all user space cache entries + * before a change of page tables. + * + * flush_cache_user_range(start, end, flags) + * + * Clean and invalidate a range of cache entries in the + * specified address space before a change of page tables. + * - start - user start address (inclusive, page aligned) + * - end - user end address (exclusive, page aligned) + * - flags - vma->vm_flags field + * + * coherent_kern_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start, end. If you have non-snooping + * Harvard caches, you need to implement this function. 
+ * - start - virtual start address + * - end - virtual end address + * + * DMA Cache Coherency + * =================== + * + * dma_inv_range(start, end) + * + * Invalidate (discard) the specified virtual address range. + * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. + * - start - virtual start address + * - end - virtual end address + * + * dma_clean_range(start, end) + * + * Clean (write back) the specified virtual address range. + * - start - virtual start address + * - end - virtual end address + * + * dma_flush_range(start, end) + * + * Clean and invalidate the specified virtual address range. + * - start - virtual start address + * - end - virtual end address + */ + +struct cpu_cache_fns { + void (*flush_kern_all)(void); + void (*flush_user_all)(void); + void (*flush_user_range)(unsigned long, unsigned long, unsigned int); + + void (*coherent_kern_range)(unsigned long, unsigned long); + void (*flush_kern_dcache_page)(void *); + + void (*dma_inv_range)(unsigned long, unsigned long); + void (*dma_clean_range)(unsigned long, unsigned long); + void (*dma_flush_range)(unsigned long, unsigned long); +}; + +/* + * Select the calling method + */ +#ifdef MULTI_CACHE + +extern struct cpu_cache_fns cpu_cache; + +#define __cpuc_flush_kern_all cpu_cache.flush_kern_all +#define __cpuc_flush_user_all cpu_cache.flush_user_all +#define __cpuc_flush_user_range cpu_cache.flush_user_range +#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range +#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page + +/* + * These are private to the dma-mapping API. Do not use directly. + * Their sole purpose is to ensure that data held in the cache + * is visible to DMA, or data written by DMA to system memory is + * visible to the CPU. + */ +#define dmac_inv_range cpu_cache.dma_inv_range +#define dmac_clean_range cpu_cache.dma_clean_range +#define dmac_flush_range cpu_cache.dma_flush_range + +#else + +#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) +#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) +#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) +#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) +#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page) + +extern void __cpuc_flush_kern_all(void); +extern void __cpuc_flush_user_all(void); +extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); +extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); +extern void __cpuc_flush_dcache_page(void *); + +/* + * These are private to the dma-mapping API. Do not use directly. + * Their sole purpose is to ensure that data held in the cache + * is visible to DMA, or data written by DMA to system memory is + * visible to the CPU. + */ +#define dmac_inv_range __glue(_CACHE,_dma_inv_range) +#define dmac_clean_range __glue(_CACHE,_dma_clean_range) +#define dmac_flush_range __glue(_CACHE,_dma_flush_range) + +extern void dmac_inv_range(unsigned long, unsigned long); +extern void dmac_clean_range(unsigned long, unsigned long); +extern void dmac_flush_range(unsigned long, unsigned long); + +#endif + +/* + * Convert calls to our calling convention. 
+ */ +#define flush_cache_all() __cpuc_flush_kern_all() + +static inline void flush_cache_mm(struct mm_struct *mm) +{ + if (current->active_mm == mm) + __cpuc_flush_user_all(); +} + +static inline void +flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + if (current->active_mm == vma->vm_mm) + __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), + vma->vm_flags); +} + +static inline void +flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) +{ + if (current->active_mm == vma->vm_mm) { + unsigned long addr = user_addr & PAGE_MASK; + __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); + } +} + +/* + * Perform necessary cache operations to ensure that data previously + * stored within this range of addresses can be executed by the CPU. + */ +#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e) + +/* + * Perform necessary cache operations to ensure that the TLB will + * see data written in the specified area. + */ +#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size) + +/* + * flush_dcache_page is used when the kernel has written to the page + * cache page at virtual address page->virtual. + * + * If this page isn't mapped (ie, page->mapping = NULL), or it has + * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared) + * then we _must_ always clean + invalidate the dcache entries associated + * with the kernel mapping. + * + * Otherwise we can defer the operation, and clean the cache when we are + * about to change to user space. This is the same method as used on SPARC64. + * See update_mmu_cache for the user space part. + */ +#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \ + !list_empty(&(map)->i_mmap_shared)) + +extern void __flush_dcache_page(struct page *); + +static inline void flush_dcache_page(struct page *page) +{ + if (page->mapping && !mapping_mapped(page->mapping)) + set_bit(PG_dcache_dirty, &page->flags); + else + __flush_dcache_page(page); +} + +#define flush_icache_user_range(vma,page,addr,len) \ + flush_dcache_page(page) + +/* + * We don't appear to need to do anything here. In fact, if we did, we'd + * duplicate cache flushing elsewhere performed by flush_dcache_page(). + */ +#define flush_icache_page(vma,page) do { } while (0) #endif diff --git a/include/asm-arm/cpu-multi26.h b/include/asm-arm/cpu-multi26.h deleted file mode 100644 index 415ce60471dd..000000000000 --- a/include/asm-arm/cpu-multi26.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * linux/include/asm-arm/cpu-multi26.h - * - * Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASSEMBLY__ - -#include <asm/page.h> - -/* forward-declare task_struct */ -struct task_struct; - -/* - * Don't change this structure - ASM code - * relies on it. 
- */ -extern struct processor { - /* Set up any processor specifics */ - void (*_proc_init)(void); - /* Disable any processor specifics */ - void (*_proc_fin)(void); - /* set the MEMC hardware mappings */ - void (*_switch_mm)(pgd_t *pgd); - /* XCHG */ - unsigned long (*_xchg_1)(unsigned long x, volatile void *ptr); - unsigned long (*_xchg_4)(unsigned long x, volatile void *ptr); -} processor; - -extern const struct processor arm2_processor_functions; -extern const struct processor arm250_processor_functions; -extern const struct processor arm3_processor_functions; - -#define cpu_proc_init() processor._proc_init() -#define cpu_proc_fin() processor._proc_fin() -#define cpu_do_idle() do { } while (0) -#define cpu_switch_mm(pgd,mm) processor._switch_mm(pgd) -#define cpu_xchg_1(x,ptr) processor._xchg_1(x,ptr) -#define cpu_xchg_4(x,ptr) processor._xchg_4(x,ptr) - -extern void cpu_memc_update_all(pgd_t *pgd); -extern void cpu_memc_update_entry(pgd_t *pgd, unsigned long phys_pte, unsigned long log_addr); - -#endif diff --git a/include/asm-arm/proc-armv/domain.h b/include/asm-arm/domain.h index 67f889162c31..87f9734a8a4a 100644 --- a/include/asm-arm/proc-armv/domain.h +++ b/include/asm-arm/domain.h @@ -1,5 +1,5 @@ /* - * linux/include/asm-arm/proc-armv/domain.h + * linux/include/asm-arm/domain.h * * Copyright (C) 1999 Russell King. * diff --git a/include/asm-arm/elf.h b/include/asm-arm/elf.h index 536b2d6f2854..b67e06c0f588 100644 --- a/include/asm-arm/elf.h +++ b/include/asm-arm/elf.h @@ -7,7 +7,6 @@ #include <asm/ptrace.h> #include <asm/user.h> -#include <asm/proc/elf.h> #include <asm/procinfo.h> typedef unsigned long elf_greg_t; @@ -42,6 +41,7 @@ typedef struct user_fp elf_fpregset_t; #define ELF_ARCH EM_ARM #define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical use of this is to invoke "./ld.so someprog" to test out a new version of @@ -76,4 +76,29 @@ typedef struct user_fp elf_fpregset_t; extern char elf_platform[]; #define ELF_PLATFORM (elf_platform) +#ifdef __KERNEL__ + +/* + * 32-bit code is always OK. Some cpus can do 26-bit, some can't. + */ +#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x)) + +#define ELF_THUMB_OK(x) \ + (( (elf_hwcap & HWCAP_THUMB) && ((x)->e_entry & 1) == 1) || \ + ((x)->e_entry & 3) == 0) + +#define ELF_26BIT_OK(x) \ + (( (elf_hwcap & HWCAP_26BIT) && (x)->e_flags & EF_ARM_APCS26) || \ + ((x)->e_flags & EF_ARM_APCS26) == 0) + +/* Old NetWinder binaries were compiled in such a way that the iBCS + heuristic always trips on them. Until these binaries become uncommon + enough not to care, don't trust the `ibcs' flag here. In any case + there is no other ELF system currently supported by iBCS. + @@ Could print a warning message to encourage users to upgrade. 
*/ +#define SET_PERSONALITY(ex,ibcs2) \ + set_personality(((ex).e_flags&EF_ARM_APCS26 ?PER_LINUX :PER_LINUX_32BIT)) + +#endif + #endif diff --git a/include/asm-arm/proc-armv/locks.h b/include/asm-arm/locks.h index 13248f903677..435ed505feda 100644 --- a/include/asm-arm/proc-armv/locks.h +++ b/include/asm-arm/locks.h @@ -1,5 +1,5 @@ /* - * linux/include/asm-arm/proc-armv/locks.h + * linux/include/asm-arm/locks.h * * Copyright (C) 2000 Russell King * diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h index 1b61a0e02c20..588c47d26338 100644 --- a/include/asm-arm/memory.h +++ b/include/asm-arm/memory.h @@ -15,6 +15,8 @@ #include <linux/config.h> #include <asm/arch/memory.h> +#ifndef __ASSEMBLY__ + /* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. @@ -120,3 +122,5 @@ static inline void *phys_to_virt(unsigned long x) #define page_to_bus(page) (virt_to_bus(page_address(page))) #endif + +#endif diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h index ca379e1f9184..94aeae2a4fb1 100644 --- a/include/asm-arm/page.h +++ b/include/asm-arm/page.h @@ -1,9 +1,27 @@ +/* + * linux/include/asm-arm/page.h + * + * Copyright (C) 1995-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ #ifndef _ASMARM_PAGE_H #define _ASMARM_PAGE_H #include <linux/config.h> +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + #ifdef __KERNEL__ + +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) + #ifndef __ASSEMBLY__ #include <asm/glue.h> @@ -119,10 +137,12 @@ extern void copy_page(void *to, void *from); */ typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd[2]; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; #define pte_val(x) ((x).pte) #define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd[0]) #define pgprot_val(x) ((x).pgprot) #define __pte(x) ((pte_t) { (x) } ) @@ -135,10 +155,12 @@ typedef struct { unsigned long pgprot; } pgprot_t; */ typedef unsigned long pte_t; typedef unsigned long pmd_t; +typedef unsigned long pgd_t[2]; typedef unsigned long pgprot_t; #define pte_val(x) (x) #define pmd_val(x) (x) +#define pgd_val(x) ((x)[0]) #define pgprot_val(x) (x) #define __pte(x) (x) @@ -146,19 +168,6 @@ typedef unsigned long pgprot_t; #define __pgprot(x) (x) #endif /* STRICT_MM_TYPECHECKS */ -#endif /* !__ASSEMBLY__ */ -#endif /* __KERNEL__ */ - -#include <asm/proc/page.h> - -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) - -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ /* Pure 2^n version of get_order */ static inline int get_order(unsigned long size) diff --git a/include/asm-arm/param.h b/include/asm-arm/param.h index 2bf8b1d793f6..c1ccdcbe2bd1 100644 --- a/include/asm-arm/param.h +++ b/include/asm-arm/param.h @@ -11,7 +11,6 @@ #define __ASM_PARAM_H #include <asm/arch/param.h> /* for HZ */ -#include <asm/proc/page.h> /* for EXEC_PAGE_SIZE */ #ifndef __KERNEL_HZ #define __KERNEL_HZ 100 @@ -25,6 +24,8 @@ # define HZ 100 #endif +#define EXEC_PAGESIZE 4096 + #ifndef NGROUPS #define NGROUPS 32 #endif diff --git 
a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h index 81a7eccf2991..23eb321f4af2 100644 --- a/include/asm-arm/pgalloc.h +++ b/include/asm-arm/pgalloc.h @@ -11,7 +11,8 @@ #define _ASMARM_PGALLOC_H #include <asm/processor.h> -#include <asm/proc/pgalloc.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> /* * Since we have only two-level page tables, these are trivial @@ -28,4 +29,104 @@ extern void free_pgd_slow(pgd_t *pgd); #define check_pgt_cache() do { } while (0) +/* + * Allocate one PTE table. + * + * This actually allocates two hardware PTE tables, but we wrap this up + * into one table thus: + * + * +------------+ + * | h/w pt 0 | + * +------------+ + * | h/w pt 1 | + * +------------+ + * | Linux pt 0 | + * +------------+ + * | Linux pt 1 | + * +------------+ + */ +static inline pte_t * +pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) +{ + pte_t *pte; + + pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); + if (pte) { + clear_page(pte); + clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE); + pte += PTRS_PER_PTE; + } + + return pte; +} + +static inline struct page * +pte_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + struct page *pte; + + pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); + if (pte) { + void *page = page_address(pte); + clear_page(page); + clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE); + } + + return pte; +} + +/* + * Free one PTE table. + */ +static inline void pte_free_kernel(pte_t *pte) +{ + if (pte) { + pte -= PTRS_PER_PTE; + free_page((unsigned long)pte); + } +} + +static inline void pte_free(struct page *pte) +{ + __free_page(pte); +} + +/* + * Populate the pmdp entry with a pointer to the pte. This pmd is part + * of the mm address space. + * + * Ensure that we always set both PMD entries. + */ +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) +{ + unsigned long pte_ptr = (unsigned long)ptep; + unsigned long pmdval; + + BUG_ON(mm != &init_mm); + + /* + * The pmd must be loaded with the physical + * address of the PTE table + */ + pte_ptr -= PTRS_PER_PTE * sizeof(void *); + pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE; + pmdp[0] = __pmd(pmdval); + pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); + flush_pmd_entry(pmdp); +} + +static inline void +pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) +{ + unsigned long pmdval; + + BUG_ON(mm == &init_mm); + + pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE; + pmdp[0] = __pmd(pmdval); + pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); + flush_pmd_entry(pmdp); +} + #endif diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h index e2ed74082ae1..473b238e6fbc 100644 --- a/include/asm-arm/pgtable.h +++ b/include/asm-arm/pgtable.h @@ -1,7 +1,7 @@ /* * linux/include/asm-arm/pgtable.h * - * Copyright (C) 2000-2002 Russell King + * Copyright (C) 1995-2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -16,15 +16,24 @@ #include <asm/arch/vmalloc.h> /* + * We pull a couple of tricks here: + * 1. We wrap the PMD into the PGD. + * 2. We lie about the size of the PTE and PGD. + * Even though we have 256 PTE entries and 4096 PGD entries, we tell + * Linux that we actually have 512 PTE entries and 2048 PGD entries. + * Each "Linux" PGD entry is made up of two hardware PGD entries, and + * each PTE table is actually two hardware PTE tables. 
+ */ +#define PTRS_PER_PTE 512 +#define PTRS_PER_PMD 1 +#define PTRS_PER_PGD 2048 + +/* * PMD_SHIFT determines the size of the area a second-level page table can map * PGDIR_SHIFT determines what a third-level page table entry can map */ #define PMD_SHIFT 20 -#ifdef CONFIG_CPU_32 #define PGDIR_SHIFT 21 -#else -#define PGDIR_SHIFT 20 -#endif #define LIBRARY_TEXT_START 0x0c000000 @@ -47,6 +56,117 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR) /* + * Hardware page table definitions. + * + * + Level 1 descriptor (PMD) + * - common + */ +#define PMD_TYPE_MASK (3 << 0) +#define PMD_TYPE_FAULT (0 << 0) +#define PMD_TYPE_TABLE (1 << 0) +#define PMD_TYPE_SECT (2 << 0) +#define PMD_BIT4 (1 << 4) +#define PMD_DOMAIN(x) ((x) << 5) +#define PMD_PROTECTION (1 << 9) /* v5 */ +/* + * - section + */ +#define PMD_SECT_BUFFERABLE (1 << 2) +#define PMD_SECT_CACHEABLE (1 << 3) +#define PMD_SECT_AP_WRITE (1 << 10) +#define PMD_SECT_AP_READ (1 << 11) +#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */ +#define PMD_SECT_APX (1 << 15) /* v6 */ +#define PMD_SECT_S (1 << 16) /* v6 */ +#define PMD_SECT_nG (1 << 17) /* v6 */ + +#define PMD_SECT_UNCACHED (0) +#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) +#define PMD_SECT_WT (PMD_SECT_CACHEABLE) +#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) +#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) +#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) + +/* + * - coarse table (not used) + */ + +/* + * + Level 2 descriptor (PTE) + * - common + */ +#define PTE_TYPE_MASK (3 << 0) +#define PTE_TYPE_FAULT (0 << 0) +#define PTE_TYPE_LARGE (1 << 0) +#define PTE_TYPE_SMALL (2 << 0) +#define PTE_TYPE_EXT (3 << 0) /* v5 */ +#define PTE_BUFFERABLE (1 << 2) +#define PTE_CACHEABLE (1 << 3) + +/* + * - extended small page/tiny page + */ +#define PTE_EXT_AP_UNO_SRO (0 << 4) +#define PTE_EXT_AP_UNO_SRW (1 << 4) +#define PTE_EXT_AP_URO_SRW (2 << 4) +#define PTE_EXT_AP_URW_SRW (3 << 4) +#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */ + +/* + * - small page + */ +#define PTE_SMALL_AP_UNO_SRO (0x00 << 4) +#define PTE_SMALL_AP_UNO_SRW (0x55 << 4) +#define PTE_SMALL_AP_URO_SRW (0xaa << 4) +#define PTE_SMALL_AP_URW_SRW (0xff << 4) +#define PTE_AP_READ PTE_SMALL_AP_URO_SRW +#define PTE_AP_WRITE PTE_SMALL_AP_UNO_SRW + +/* + * "Linux" PTE definitions. + * + * We keep two sets of PTEs - the hardware and the linux version. + * This allows greater flexibility in the way we map the Linux bits + * onto the hardware tables, and allows us to have YOUNG and DIRTY + * bits. + * + * The PTE table pointer refers to the hardware entries; the "Linux" + * entries are stored 1024 bytes below. + */ +#define L_PTE_PRESENT (1 << 0) +#define L_PTE_FILE (1 << 1) /* only when !PRESENT */ +#define L_PTE_YOUNG (1 << 1) +#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */ +#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */ +#define L_PTE_USER (1 << 4) +#define L_PTE_WRITE (1 << 5) +#define L_PTE_EXEC (1 << 6) +#define L_PTE_DIRTY (1 << 7) + +#ifndef __ASSEMBLY__ + +#include <asm/domain.h> + +#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER)) +#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) + +/* + * The following macros handle the cache and bufferable bits... 
+ */ +#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG +#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE | L_PTE_BUFFERABLE + +#define PAGE_NONE __pgprot(_L_PTE_DEFAULT) +#define PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ) +#define PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE) +#define PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ) +#define PAGE_KERNEL __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC) + +#define _PAGE_CHG_MASK (PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG) + +#endif /* __ASSEMBLY__ */ + +/* * The table below defines the page protection levels that we insert into our * Linux page table version. These get translated into the best that the * architecture can perform. Note that on most ARM hardware: @@ -86,9 +206,82 @@ extern struct page *empty_zero_page; #define pte_none(pte) (!pte_val(pte)) #define pte_clear(ptep) set_pte((ptep), __pte(0)) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) +#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) +#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) +#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) + +#define set_pte(ptep, pte) cpu_set_pte(ptep,pte) + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ +#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) +#define pte_read(pte) (pte_val(pte) & L_PTE_USER) +#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) +#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC) +#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) + +/* + * The following only works if pte_present() is not true. + */ +#define pte_file(pte) (pte_val(pte) & L_PTE_FILE) +#define pte_to_pgoff(x) (pte_val(x) >> 2) +#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE) + +#define PTE_FILE_MAX_BITS 30 + +#define PTE_BIT_FUNC(fn,op) \ +static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } + +/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/ +/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/ +PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE); +PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE); +PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC); +PTE_BIT_FUNC(mkexec, |= L_PTE_EXEC); +PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); +PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); +PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); +PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); + +/* + * Mark the prot value as uncacheable and unbufferable. + */ +#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE)) +#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_present(pmd) (pmd_val(pmd)) +#define pmd_bad(pmd) (pmd_val(pmd) & 2) + +#define set_pmd(pmdp,pmd) \ + do { \ + *pmdp = pmd; \ + flush_pmd_entry(pmdp); \ + } while (0) + +#define pmd_clear(pmdp) \ + do { \ + pmdp[0] = __pmd(0); \ + pmdp[1] = __pmd(0); \ + clean_pmd_entry(pmdp); \ + } while (0) + +static inline pte_t *pmd_page_kernel(pmd_t pmd) +{ + unsigned long ptr; + + ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); + ptr += PTRS_PER_PTE * sizeof(void *); + + return __va(ptr); +} + +#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) /* * Permanent address of a page. We never have highmem, so this is trivial. 
@@ -129,8 +322,6 @@ extern struct page *empty_zero_page; /* Find an entry in the third-level page table.. */ #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -#include <asm/proc/pgtable.h> - static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); @@ -164,6 +355,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; typedef pte_t *pte_addr_t; +#define pgtable_cache_init() do { } while (0) + #endif /* !__ASSEMBLY__ */ #endif /* _ASMARM_PGTABLE_H */ diff --git a/include/asm-arm/proc-armo/assembler.h b/include/asm-arm/proc-armo/assembler.h deleted file mode 100644 index 7f6bd57cba74..000000000000 --- a/include/asm-arm/proc-armo/assembler.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * linux/asm-arm/proc-armo/assembler.h - * - * Copyright (C) 1996 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This file contains arm architecture specific defines - * for the different processors - */ -#define MODE_USR USR26_MODE -#define MODE_FIQ FIQ26_MODE -#define MODE_IRQ IRQ26_MODE -#define MODE_SVC SVC26_MODE - -#define DEFAULT_FIQ MODE_FIQ - -#ifdef __STDC__ -#define LOADREGS(cond, base, reglist...)\ - ldm##cond base,reglist^ - -#define RETINSTR(instr, regs...)\ - instr##s regs -#else -#define LOADREGS(cond, base, reglist...)\ - ldm/**/cond base,reglist^ - -#define RETINSTR(instr, regs...)\ - instr/**/s regs -#endif - -#define MODENOP\ - mov r0, r0 - -#define MODE(savereg,tmpreg,mode) \ - mov savereg, pc; \ - bic tmpreg, savereg, $0x0c000003; \ - orr tmpreg, tmpreg, $mode; \ - teqp tmpreg, $0 - -#define RESTOREMODE(savereg) \ - teqp savereg, $0 - -#define SAVEIRQS(tmpreg) - -#define RESTOREIRQS(tmpreg) - -#define DISABLEIRQS(tmpreg)\ - teqp pc, $0x08000003 - -#define ENABLEIRQS(tmpreg)\ - teqp pc, $0x00000003 - -#define USERMODE(tmpreg)\ - teqp pc, $0x00000000;\ - mov r0, r0 - -#define SVCMODE(tmpreg)\ - teqp pc, $0x00000003;\ - mov r0, r0 - - -/* - * Save the current IRQ state and disable IRQs - * Note that this macro assumes FIQs are enabled, and - * that the processor is in SVC mode. - */ - .macro save_and_disable_irqs, oldcpsr, temp - mov \oldcpsr, pc - orr \temp, \oldcpsr, #0x08000000 - teqp \temp, #0 - .endm - -/* - * Restore interrupt state previously stored in - * a register - * ** Actually do nothing on Arc - hope that the caller uses a MOVS PC soon - * after! - */ - .macro restore_irqs, oldcpsr - @ This be restore_irqs - .endm - -/* - * These two are used to save LR/restore PC over a user-based access. - * The old 26-bit architecture requires that we do. On 32-bit - * architecture, we can safely ignore this requirement. - */ - .macro save_lr - str lr, [sp, #-4]! - .endm - - .macro restore_pc - ldmfd sp!, {pc}^ - .endm - -#define USER(x...) \ -9999: x; \ - .section __ex_table,"a"; \ - .align 3; \ - .long 9999b,9001f; \ - .previous - - diff --git a/include/asm-arm/proc-armo/cache.h b/include/asm-arm/proc-armo/cache.h deleted file mode 100644 index 47c8125fa719..000000000000 --- a/include/asm-arm/proc-armo/cache.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/cache.h - * - * Copyright (C) 1999-2001 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * Cache handling for 26-bit ARM processors. - */ -#define flush_cache_all() do { } while (0) -#define flush_cache_mm(mm) do { } while (0) -#define flush_cache_range(vma,start,end) do { } while (0) -#define flush_cache_page(vma,vmaddr) do { } while (0) - -#define invalidate_dcache_range(start,end) do { } while (0) -#define clean_dcache_range(start,end) do { } while (0) -#define flush_dcache_range(start,end) do { } while (0) -#define flush_dcache_page(page) do { } while (0) -#define clean_dcache_entry(_s) do { } while (0) -#define clean_cache_entry(_start) do { } while (0) - -#define flush_icache_range(start,end) do { } while (0) -#define flush_icache_page(vma,page) do { } while (0) - -/* DAG: ARM3 will flush cache on MEMC updates anyway? so don't bother */ -#define clean_cache_area(_start,_size) do { } while (0) diff --git a/include/asm-arm/proc-armo/elf.h b/include/asm-arm/proc-armo/elf.h deleted file mode 100644 index b5901f767c6d..000000000000 --- a/include/asm-arm/proc-armo/elf.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * ELF definitions for 26-bit CPUs - */ - -#define ELF_EXEC_PAGESIZE 32768 - -#ifdef __KERNEL__ - -/* We can only execute 26-bit code. */ -#define ELF_PROC_OK(x) \ - ((x)->e_flags & EF_ARM_APCS26) - -#define SET_PERSONALITY(ex,ibcs2) set_personality(PER_LINUX) - -#endif diff --git a/include/asm-arm/proc-armo/locks.h b/include/asm-arm/proc-armo/locks.h deleted file mode 100644 index 81b3bda2ed00..000000000000 --- a/include/asm-arm/proc-armo/locks.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/locks.h - * - * Copyright (C) 2000 Russell King - * Fixes for 26 bit machines, (C) 2000 Dave Gilbert - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Interrupt safe locking assembler. - */ -#ifndef __ASM_PROC_LOCKS_H -#define __ASM_PROC_LOCKS_H - -/* Decrements by 1, fails if value < 0 */ -#define __down_op(ptr,fail) \ - ({ \ - __asm__ __volatile__ ( \ - "@ atomic down operation\n" \ -" mov ip, pc\n" \ -" orr lr, ip, #0x08000000\n" \ -" teqp lr, #0\n" \ -" ldr lr, [%0]\n" \ -" and ip, ip, #0x0c000003\n" \ -" subs lr, lr, #1\n" \ -" str lr, [%0]\n" \ -" orrmi ip, ip, #0x80000000 @ set N\n" \ -" teqp ip, #0\n" \ -" movmi ip, %0\n" \ -" blmi " #fail \ - : \ - : "r" (ptr) \ - : "ip", "lr", "cc"); \ - }) - -#define __down_op_ret(ptr,fail) \ - ({ \ - unsigned int result; \ - __asm__ __volatile__ ( \ -" @ down_op_ret\n" \ -" mov ip, pc\n" \ -" orr lr, ip, #0x08000000\n" \ -" teqp lr, #0\n" \ -" ldr lr, [%1]\n" \ -" and ip, ip, #0x0c000003\n" \ -" subs lr, lr, #1\n" \ -" str lr, [%1]\n" \ -" orrmi ip, ip, #0x80000000 @ set N\n" \ -" teqp ip, #0\n" \ -" movmi ip, %1\n" \ -" movpl ip, #0\n" \ -" blmi " #fail "\n" \ -" mov %0, ip" \ - : "=&r" (result) \ - : "r" (ptr) \ - : "ip", "lr", "cc"); \ - result; \ - }) - -#define __up_op(ptr,wake) \ - ({ \ - __asm__ __volatile__ ( \ - "@ up_op\n" \ -" mov ip, pc\n" \ -" orr lr, ip, #0x08000000\n" \ -" teqp lr, #0\n" \ -" ldr lr, [%0]\n" \ -" and ip, ip, #0x0c000003\n" \ -" adds lr, lr, #1\n" \ -" str lr, [%0]\n" \ -" orrle ip, ip, #0x80000000 @ set N - should this be mi ??? DAG ! \n" \ -" teqp ip, #0\n" \ -" movmi ip, %0\n" \ -" blmi " #wake \ - : \ - : "r" (ptr) \ - : "ip", "lr", "cc"); \ - }) - -/* - * The value 0x01000000 supports up to 128 processors and - * lots of processes. 
BIAS must be chosen such that sub'ing - * BIAS once per CPU will result in the long remaining - * negative. - */ -#define RW_LOCK_BIAS 0x01000000 -#define RW_LOCK_BIAS_STR "0x01000000" - -/* Decrements by RW_LOCK_BIAS rather than 1, fails if value != 0 */ -#define __down_op_write(ptr,fail) \ - ({ \ - __asm__ __volatile__( \ - "@ down_op_write\n" \ -" mov ip, pc\n" \ -" orr lr, ip, #0x08000000\n" \ -" teqp lr, #0\n" \ -" and ip, ip, #0x0c000003\n" \ -\ -" ldr lr, [%0]\n" \ -" subs lr, lr, %1\n" \ -" str lr, [%0]\n" \ -\ -" orreq ip, ip, #0x40000000 @ set Z \n"\ -" teqp ip, #0\n" \ -" movne ip, %0\n" \ -" blne " #fail \ - : \ - : "r" (ptr), "I" (RW_LOCK_BIAS) \ - : "ip", "lr", "cc"); \ - }) - -/* Increments by RW_LOCK_BIAS, wakes if value >= 0 */ -#define __up_op_write(ptr,wake) \ - ({ \ - __asm__ __volatile__( \ - "@ up_op_read\n" \ -" mov ip, pc\n" \ -" orr lr, ip, #0x08000000\n" \ -" teqp lr, #0\n" \ -\ -" ldr lr, [%0]\n" \ -" and ip, ip, #0x0c000003\n" \ -" adds lr, lr, %1\n" \ -" str lr, [%0]\n" \ -\ -" orrcs ip, ip, #0x20000000 @ set C\n" \ -" teqp ip, #0\n" \ -" movcs ip, %0\n" \ -" blcs " #wake \ - : \ - : "r" (ptr), "I" (RW_LOCK_BIAS) \ - : "ip", "lr", "cc"); \ - }) - -#define __down_op_read(ptr,fail) \ - __down_op(ptr, fail) - -#define __up_op_read(ptr,wake) \ - ({ \ - __asm__ __volatile__( \ - "@ up_op_read\n" \ -" mov ip, pc\n" \ -" orr lr, ip, #0x08000000\n" \ -" teqp lr, #0\n" \ -\ -" ldr lr, [%0]\n" \ -" and ip, ip, #0x0c000003\n" \ -" adds lr, lr, %1\n" \ -" str lr, [%0]\n" \ -\ -" orreq ip, ip, #0x40000000 @ Set Z \n" \ -" teqp ip, #0\n" \ -" moveq ip, %0\n" \ -" bleq " #wake \ - : \ - : "r" (ptr), "I" (1) \ - : "ip", "lr", "cc"); \ - }) - -#endif diff --git a/include/asm-arm/proc-armo/page.h b/include/asm-arm/proc-armo/page.h deleted file mode 100644 index d32a7af23b7a..000000000000 --- a/include/asm-arm/proc-armo/page.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/page.h - * - * Copyright (C) 1995-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_PROC_PAGE_H -#define __ASM_PROC_PAGE_H - -#include <linux/config.h> - -/* PAGE_SHIFT determines the page size. This is configurable. */ -#if defined(CONFIG_PAGESIZE_16) -#define PAGE_SHIFT 14 /* 16K */ -#else /* default */ -#define PAGE_SHIFT 15 /* 32K */ -#endif - -#define EXEC_PAGESIZE 32768 - -#ifndef __ASSEMBLY__ -#ifdef STRICT_MM_TYPECHECKS - -typedef struct { unsigned long pgd; } pgd_t; - -#define pgd_val(x) ((x).pgd) - -#else - -typedef unsigned long pgd_t; - -#define pgd_val(x) (x) - -#endif -#endif /* __ASSEMBLY__ */ - -#endif /* __ASM_PROC_PAGE_H */ diff --git a/include/asm-arm/proc-armo/pgalloc.h b/include/asm-arm/proc-armo/pgalloc.h deleted file mode 100644 index 7b9bf375abc7..000000000000 --- a/include/asm-arm/proc-armo/pgalloc.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/pgalloc.h - * - * Copyright (C) 2001-2002 Russell King - * - * Page table allocation/freeing primitives for 26-bit ARM processors. - */ - -#include <linux/slab.h> - -extern kmem_cache_t *pte_cache; - -static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) -{ - return kmem_cache_alloc(pte_cache, GFP_KERNEL); -} - -static inline void pte_free_kernel(pte_t *pte) -{ - if (pte) - kmem_cache_free(pte_cache, pte); -} - -/* - * Populate the pmdp entry with a pointer to the pte. 
This pmd is part - * of the mm address space. - * - * If 'mm' is the init tasks mm, then we are doing a vmalloc, and we - * need to set stuff up correctly for it. - */ -static inline void -pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) -{ - set_pmd(pmdp, __mk_pmd(ptep, _PAGE_TABLE)); -} - -/* - * We use the old 2.5.5-rmk1 hack for this. - * This is not truly correct, but should be functional. - */ -#define pte_alloc_one(mm,addr) ((struct page *)pte_alloc_one_kernel(mm,addr)) -#define pte_free(pte) pte_free_kernel((pte_t *)pte) -#define pmd_populate(mm,pmdp,ptep) pmd_populate_kernel(mm,pmdp,(pte_t *)ptep) diff --git a/include/asm-arm/proc-armo/pgtable.h b/include/asm-arm/proc-armo/pgtable.h deleted file mode 100644 index 152504bc9888..000000000000 --- a/include/asm-arm/proc-armo/pgtable.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/pgtable.h - * - * Copyright (C) 1995-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * 18-Oct-1997 RMK Now two-level (32x32) - */ -#ifndef __ASM_PROC_PGTABLE_H -#define __ASM_PROC_PGTABLE_H - -/* - * entries per page directory level: they are two-level, so - * we don't really have any PMD directory. - */ -#define PTRS_PER_PTE 32 -#define PTRS_PER_PMD 1 -#define PTRS_PER_PGD 32 - -/* - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_START 0x01a00000 -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) -#define VMALLOC_END 0x01c00000 - -#define _PAGE_TABLE (0x01) - -#define pmd_bad(pmd) ((pmd_val(pmd) & 0xfc000002)) -#define set_pmd(pmdp,pmd) ((*(pmdp)) = (pmd)) -#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0)) - -static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot) -{ - unsigned long pte_ptr = (unsigned long)ptep; - pmd_t pmd; - - pmd_val(pmd) = __virt_to_phys(pte_ptr) | prot; - - return pmd; -} - -static inline unsigned long pmd_page(pmd_t pmd) -{ - return __phys_to_virt(pmd_val(pmd) & ~_PAGE_TABLE); -} - -#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) -#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) -#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) - -#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) - -#define _PAGE_PRESENT 0x01 -#define _PAGE_READONLY 0x02 -#define _PAGE_NOT_USER 0x04 -#define _PAGE_OLD 0x08 -#define _PAGE_CLEAN 0x10 - -/* -- present -- -- !dirty -- --- !write --- ---- !user --- */ -#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY | _PAGE_NOT_USER) -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CLEAN ) -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY ) -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY ) -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_NOT_USER) - -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_OLD | _PAGE_CLEAN) - - -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. 
- */ -#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) -#define pte_read(pte) (!(pte_val(pte) & _PAGE_NOT_USER)) -#define pte_write(pte) (!(pte_val(pte) & _PAGE_READONLY)) -#define pte_exec(pte) (!(pte_val(pte) & _PAGE_NOT_USER)) -#define pte_dirty(pte) (!(pte_val(pte) & _PAGE_CLEAN)) -#define pte_young(pte) (!(pte_val(pte) & _PAGE_OLD)) - -static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_READONLY; return pte; } -static inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_NOT_USER; return pte; } -static inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_NOT_USER; return pte; } -static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) |= _PAGE_CLEAN; return pte; } -static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) |= _PAGE_OLD; return pte; } - -static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_READONLY; return pte; } -static inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= ~_PAGE_NOT_USER; return pte; } -static inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_NOT_USER; return pte; } -static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) &= ~_PAGE_CLEAN; return pte; } -static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) &= ~_PAGE_OLD; return pte; } - -/* - * We don't store cache state bits in the page table here. - */ -#define pgprot_noncached(prot) (prot) - -extern void pgtable_cache_init(void); - -#endif /* __ASM_PROC_PGTABLE_H */ diff --git a/include/asm-arm/proc-armo/processor.h b/include/asm-arm/proc-armo/processor.h deleted file mode 100644 index aba9dbaa61b7..000000000000 --- a/include/asm-arm/proc-armo/processor.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/processor.h - * - * Copyright (C) 1996 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * Changelog: - * 27-06-1996 RMK Created - * 10-10-1996 RMK Brought up to date with SA110 - * 26-09-1996 RMK Added 'EXTRA_THREAD_STRUCT*' - * 28-09-1996 RMK Moved start_thread into the processor dependencies - * 11-01-1998 RMK Added new uaccess_t - * 09-09-1998 PJB Delete redundant `wp_works_ok' - * 30-05-1999 PJB Save sl across context switches - */ -#ifndef __ASM_PROC_PROCESSOR_H -#define __ASM_PROC_PROCESSOR_H - -#include <linux/string.h> - -#define KERNEL_STACK_SIZE 4096 - -typedef struct { - void (*put_byte)(void); /* Special calling convention */ - void (*get_byte)(void); /* Special calling convention */ - void (*put_half)(void); /* Special calling convention */ - void (*get_half)(void); /* Special calling convention */ - void (*put_word)(void); /* Special calling convention */ - void (*get_word)(void); /* Special calling convention */ - unsigned long (*copy_from_user)(void *to, const void *from, unsigned long sz); - unsigned long (*copy_to_user)(void *to, const void *from, unsigned long sz); - unsigned long (*clear_user)(void *addr, unsigned long sz); - unsigned long (*strncpy_from_user)(char *to, const char *from, unsigned long sz); - unsigned long (*strnlen_user)(const char *s, long n); -} uaccess_t; - -extern uaccess_t uaccess_user, uaccess_kernel; - -#define EXTRA_THREAD_STRUCT \ - uaccess_t *uaccess; /* User access functions*/ - -#define EXTRA_THREAD_STRUCT_INIT \ - .uaccess = &uaccess_kernel, - -#define start_thread(regs,pc,sp) \ -({ \ - unsigned long *stack = (unsigned long *)sp; \ - set_fs(USER_DS); \ - memzero(regs->uregs, sizeof (regs->uregs)); \ - regs->ARM_pc = pc; /* pc */ \ - regs->ARM_sp = sp; /* sp */ \ - regs->ARM_r2 = stack[2]; /* r2 (envp) */ \ - regs->ARM_r1 = stack[1]; /* r1 (argv) */ \ - regs->ARM_r0 = stack[0]; /* r0 (argc) */ \ -}) - -#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1020]) -#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1018]) - -#endif diff --git a/include/asm-arm/proc-armo/ptrace.h b/include/asm-arm/proc-armo/ptrace.h deleted file mode 100644 index 79d44f4f71d8..000000000000 --- a/include/asm-arm/proc-armo/ptrace.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/ptrace.h - * - * Copyright (C) 1996-2001 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_PROC_PTRACE_H -#define __ASM_PROC_PTRACE_H - -#define USR26_MODE 0x00000000 -#define FIQ26_MODE 0x00000001 -#define IRQ26_MODE 0x00000002 -#define SVC26_MODE 0x00000003 -#define USR_MODE USR26_MODE -#define FIQ_MODE FIQ26_MODE -#define IRQ_MODE IRQ26_MODE -#define SVC_MODE SVC26_MODE -#define MODE_MASK 0x00000003 -#define PSR_F_BIT 0x04000000 -#define PSR_I_BIT 0x08000000 -#define PSR_V_BIT 0x10000000 -#define PSR_C_BIT 0x20000000 -#define PSR_Z_BIT 0x40000000 -#define PSR_N_BIT 0x80000000 -#define PCMASK 0xfc000003 - -#ifndef __ASSEMBLY__ - -/* this struct defines the way the registers are stored on the - stack during a system call. 
*/ - -struct pt_regs { - long uregs[17]; -}; - -#define ARM_pc uregs[15] -#define ARM_lr uregs[14] -#define ARM_sp uregs[13] -#define ARM_ip uregs[12] -#define ARM_fp uregs[11] -#define ARM_r10 uregs[10] -#define ARM_r9 uregs[9] -#define ARM_r8 uregs[8] -#define ARM_r7 uregs[7] -#define ARM_r6 uregs[6] -#define ARM_r5 uregs[5] -#define ARM_r4 uregs[4] -#define ARM_r3 uregs[3] -#define ARM_r2 uregs[2] -#define ARM_r1 uregs[1] -#define ARM_r0 uregs[0] -#define ARM_ORIG_r0 uregs[16] - -#ifdef __KERNEL__ - -#define processor_mode(regs) \ - ((regs)->ARM_pc & MODE_MASK) - -#define user_mode(regs) \ - (processor_mode(regs) == USR26_MODE) - -#define thumb_mode(regs) (0) - -#define interrupts_enabled(regs) \ - (!((regs)->ARM_pc & PSR_I_BIT)) - -#define fast_interrupts_enabled(regs) \ - (!((regs)->ARM_pc & PSR_F_BIT)) - -#define condition_codes(regs) \ - ((regs)->ARM_pc & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT)) - -/* Are the current registers suitable for user mode? - * (used to maintain security in signal handlers) - */ -static inline int valid_user_regs(struct pt_regs *regs) -{ - if (user_mode(regs) && - (regs->ARM_pc & (PSR_F_BIT | PSR_I_BIT)) == 0) - return 1; - - /* - * force it to be something sensible - */ - regs->ARM_pc &= ~(MODE_MASK | PSR_F_BIT | PSR_I_BIT); - - return 0; -} - -#endif /* __KERNEL__ */ - -#endif /* __ASSEMBLY__ */ - -#endif - diff --git a/include/asm-arm/proc-armo/shmparam.h b/include/asm-arm/proc-armo/shmparam.h deleted file mode 100644 index 6b106f1419e8..000000000000 --- a/include/asm-arm/proc-armo/shmparam.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/shmparam.h - * - * Copyright (C) 1996 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * definitions for the shared process memory on the ARM3 - */ -#ifndef __ASM_PROC_SHMPARAM_H -#define __ASM_PROC_SHMPARAM_H - -#ifndef SHMMAX -#define SHMMAX 0x003fa000 -#endif - -#endif diff --git a/include/asm-arm/proc-armo/system.h b/include/asm-arm/proc-armo/system.h deleted file mode 100644 index 9e46e58f56e4..000000000000 --- a/include/asm-arm/proc-armo/system.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/system.h - * - * Copyright (C) 1995, 1996 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __ASM_PROC_SYSTEM_H -#define __ASM_PROC_SYSTEM_H - -#define vectors_base() (0) - -static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -{ - extern void __bad_xchg(volatile void *, int); - - switch (size) { - case 1: return cpu_xchg_1(x, ptr); - case 4: return cpu_xchg_4(x, ptr); - default: __bad_xchg(ptr, size); - } - return 0; -} - -/* - * We need to turn the caches off before calling the reset vector - RiscOS - * messes up if we don't - */ -#define proc_hard_reset() cpu_proc_fin() - -/* - * A couple of speedups for the ARM - */ - -/* - * Save the current interrupt enable state & disable IRQs - */ -#define local_save_flags_cli(x) \ - do { \ - unsigned long temp; \ - __asm__ __volatile__( \ -" mov %0, pc @ save_flags_cli\n" \ -" orr %1, %0, #0x08000000\n" \ -" and %0, %0, #0x0c000000\n" \ -" teqp %1, #0\n" \ - : "=r" (x), "=r" (temp) \ - : \ - : "memory"); \ - } while (0) - -/* - * Enable IRQs - */ -#define local_irq_enable() \ - do { \ - unsigned long temp; \ - __asm__ __volatile__( \ -" mov %0, pc @ sti\n" \ -" bic %0, %0, #0x08000000\n" \ -" teqp %0, #0\n" \ - : "=r" (temp) \ - : \ - : "memory"); \ - } while(0) - -/* - * Disable IRQs - */ -#define local_irq_disable() \ - do { \ - unsigned long temp; \ - __asm__ __volatile__( \ -" mov %0, pc @ cli\n" \ -" orr %0, %0, #0x08000000\n" \ -" teqp %0, #0\n" \ - : "=r" (temp) \ - : \ - : "memory"); \ - } while(0) - -#define __clf() do { \ - unsigned long temp; \ - __asm__ __volatile__( \ -" mov %0, pc @ clf\n" \ -" orr %0, %0, #0x04000000\n" \ -" teqp %0, #0\n" \ - : "=r" (temp)); \ - } while(0) - -#define __stf() do { \ - unsigned long temp; \ - __asm__ __volatile__( \ -" mov %0, pc @ stf\n" \ -" bic %0, %0, #0x04000000\n" \ -" teqp %0, #0\n" \ - : "=r" (temp)); \ - } while(0) - -/* - * save current IRQ & FIQ state - */ -#define local_save_flags(x) \ - do { \ - __asm__ __volatile__( \ -" mov %0, pc @ save_flags\n" \ -" and %0, %0, #0x0c000000\n" \ - : "=r" (x)); \ - } while (0) - -/* - * restore saved IRQ & FIQ state - */ -#define local_irq_restore(x) \ - do { \ - unsigned long temp; \ - __asm__ __volatile__( \ -" mov %0, pc @ restore_flags\n" \ -" bic %0, %0, #0x0c000000\n" \ -" orr %0, %0, %1\n" \ -" teqp %0, #0\n" \ - : "=&r" (temp) \ - : "r" (x) \ - : "memory"); \ - } while (0) - -#endif diff --git a/include/asm-arm/proc-armo/tlbflush.h b/include/asm-arm/proc-armo/tlbflush.h deleted file mode 100644 index f10e5b66b596..000000000000 --- a/include/asm-arm/proc-armo/tlbflush.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * TLB flushing: - * - * - flush_tlb_all() flushes all processes TLBs - * - flush_tlb_mm(mm) flushes the specified mm context TLB's - * - flush_tlb_page(vma, vmaddr) flushes one page - * - flush_tlb_range(vma, start, end) flushes a range of pages - */ -#define flush_tlb_all() memc_update_all() -#define flush_tlb_mm(mm) memc_update_mm(mm) -#define flush_tlb_range(vma,start,end) \ - do { memc_update_mm(vma->vm_mm); (void)(start); (void)(end); } while (0) -#define flush_tlb_page(vma, vmaddr) do { } while (0) - -/* - * The following handle the weird MEMC chip - */ -static inline void memc_update_all(void) -{ - struct task_struct *p; - - cpu_memc_update_all(init_mm.pgd); - for_each_task(p) { - if (!p->mm) - continue; - cpu_memc_update_all(p->mm->pgd); - } - processor._set_pgd(current->active_mm->pgd); -} - -static inline void memc_update_mm(struct mm_struct *mm) -{ - cpu_memc_update_all(mm->pgd); - - if (mm == current->active_mm) - processor._set_pgd(mm->pgd); -} - -static inline void 
-memc_clear(struct mm_struct *mm, struct page *page) -{ - cpu_memc_update_entry(mm->pgd, (unsigned long) page_address(page), 0); - - if (mm == current->active_mm) - processor._set_pgd(mm->pgd); -} - -static inline void -memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr) -{ - cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr); - - if (mm == current->active_mm) - processor._set_pgd(mm->pgd); -} - -static inline void -update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) -{ - struct mm_struct *mm = vma->vm_mm; - memc_update_addr(mm, pte, addr); -} - diff --git a/include/asm-arm/proc-armo/uaccess.h b/include/asm-arm/proc-armo/uaccess.h deleted file mode 100644 index 77fcf9e482a5..000000000000 --- a/include/asm-arm/proc-armo/uaccess.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - * linux/include/asm-arm/proc-armo/segment.h - * - * Copyright (C) 1996 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* - * The fs functions are implemented on the ARM2 and ARM3 architectures - * manually. - * Use *_user functions to access user memory with faulting behaving - * as though the user is accessing the memory. - * Use set_fs(get_ds()) and then the *_user functions to allow them to - * access kernel memory. - */ - -/* - * These are the values used to represent the user `fs' and the kernel `ds' - */ -#define KERNEL_DS 0x03000000 -#define USER_DS 0x02000000 - -extern uaccess_t uaccess_user, uaccess_kernel; - -static inline void set_fs (mm_segment_t fs) -{ - current->addr_limit = fs; - current->thread.uaccess = fs == USER_DS ? &uaccess_user : &uaccess_kernel; -} - -#define __range_ok(addr,size) ({ \ - unsigned long flag, sum; \ - __asm__ __volatile__("subs %1, %0, %3; cmpcs %1, %2; movcs %0, #0" \ - : "=&r" (flag), "=&r" (sum) \ - : "r" (addr), "Ir" (size), "0" (current->addr_limit) \ - : "cc"); \ - flag; }) - -#define __addr_ok(addr) ({ \ - unsigned long flag; \ - __asm__ __volatile__("cmp %2, %0; movlo %0, #0" \ - : "=&r" (flag) \ - : "0" (current->addr_limit), "r" (addr) \ - : "cc"); \ - (flag == 0); }) - -#define __put_user_asm_byte(x,addr,err) \ - __asm__ __volatile__( \ - " mov r0, %1\n" \ - " mov r1, %2\n" \ - " mov r2, %0\n" \ - " mov lr, pc\n" \ - " mov pc, %3\n" \ - " mov %0, r2\n" \ - : "=r" (err) \ - : "r" (x), "r" (addr), "r" (current->thread.uaccess->put_byte), \ - "0" (err) \ - : "r0", "r1", "r2", "lr") - -#define __put_user_asm_half(x,addr,err) \ - __asm__ __volatile__( \ - " mov r0, %1\n" \ - " mov r1, %2\n" \ - " mov r2, %0\n" \ - " mov lr, pc\n" \ - " mov pc, %3\n" \ - " mov %0, r2\n" \ - : "=r" (err) \ - : "r" (x), "r" (addr), "r" (current->thread.uaccess->put_half), \ - "0" (err) \ - : "r0", "r1", "r2", "lr") - -#define __put_user_asm_word(x,addr,err) \ - __asm__ __volatile__( \ - " mov r0, %1\n" \ - " mov r1, %2\n" \ - " mov r2, %0\n" \ - " mov lr, pc\n" \ - " mov pc, %3\n" \ - " mov %0, r2\n" \ - : "=r" (err) \ - : "r" (x), "r" (addr), "r" (current->thread.uaccess->put_word), \ - "0" (err) \ - : "r0", "r1", "r2", "lr") - -#define __get_user_asm_byte(x,addr,err) \ - __asm__ __volatile__( \ - " mov r0, %2\n" \ - " mov r1, %0\n" \ - " mov lr, pc\n" \ - " mov pc, %3\n" \ - " mov %0, r1\n" \ - " mov %1, r0\n" \ - : "=r" (err), "=r" (x) \ - : "r" (addr), "r" (current->thread.uaccess->get_byte), "0" (err) \ - : "r0", "r1", "r2", "lr") - -#define __get_user_asm_half(x,addr,err) \ - __asm__ 
__volatile__( \ - " mov r0, %2\n" \ - " mov r1, %0\n" \ - " mov lr, pc\n" \ - " mov pc, %3\n" \ - " mov %0, r1\n" \ - " mov %1, r0\n" \ - : "=r" (err), "=r" (x) \ - : "r" (addr), "r" (current->thread.uaccess->get_half), "0" (err) \ - : "r0", "r1", "r2", "lr") - -#define __get_user_asm_word(x,addr,err) \ - __asm__ __volatile__( \ - " mov r0, %2\n" \ - " mov r1, %0\n" \ - " mov lr, pc\n" \ - " mov pc, %3\n" \ - " mov %0, r1\n" \ - " mov %1, r0\n" \ - : "=r" (err), "=r" (x) \ - : "r" (addr), "r" (current->thread.uaccess->get_word), "0" (err) \ - : "r0", "r1", "r2", "lr") - -#define __do_copy_from_user(to,from,n) \ - (n) = current->thread.uaccess->copy_from_user((to),(from),(n)) - -#define __do_copy_to_user(to,from,n) \ - (n) = current->thread.uaccess->copy_to_user((to),(from),(n)) - -#define __do_clear_user(addr,sz) \ - (sz) = current->thread.uaccess->clear_user((addr),(sz)) - -#define __do_strncpy_from_user(dst,src,count,res) \ - (res) = current->thread.uaccess->strncpy_from_user(dst,src,count) - -#define __do_strnlen_user(s,n,res) \ - (res) = current->thread.uaccess->strnlen_user(s,n) diff --git a/include/asm-arm/proc-armv/assembler.h b/include/asm-arm/proc-armv/assembler.h deleted file mode 100644 index 64916c820b33..000000000000 --- a/include/asm-arm/proc-armv/assembler.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * linux/asm-arm/proc-armv/assembler.h - * - * Copyright (C) 1996-2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This file contains ARM processor specifics for - * the ARM6 and better processors. - */ -#define MODE_USR USR_MODE -#define MODE_FIQ FIQ_MODE -#define MODE_IRQ IRQ_MODE -#define MODE_SVC SVC_MODE - -#define DEFAULT_FIQ MODE_FIQ - -/* - * LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc}) - */ -#ifdef __STDC__ -#define LOADREGS(cond, base, reglist...)\ - ldm##cond base,reglist -#else -#define LOADREGS(cond, base, reglist...)\ - ldm/**/cond base,reglist -#endif - -/* - * Build a return instruction for this processor type. - */ -#define RETINSTR(instr, regs...)\ - instr regs - -/* - * Save the current IRQ state and disable IRQs. Note that this macro - * assumes FIQs are enabled, and that the processor is in SVC mode. - */ - .macro save_and_disable_irqs, oldcpsr, temp - mrs \oldcpsr, cpsr - mov \temp, #PSR_I_BIT | MODE_SVC - msr cpsr_c, \temp - .endm - -/* - * Restore interrupt state previously stored in a register. We don't - * guarantee that this will preserve the flags. - */ - .macro restore_irqs, oldcpsr - msr cpsr_c, \oldcpsr - .endm - -/* - * These two are used to save LR/restore PC over a user-based access. - * The old 26-bit architecture requires that we do. On 32-bit - * architecture, we can safely ignore this requirement. - */ - .macro save_lr - .endm - - .macro restore_pc - mov pc, lr - .endm - -#define USER(x...) 
\ -9999: x; \ - .section __ex_table,"a"; \ - .align 3; \ - .long 9999b,9001f; \ - .previous - - diff --git a/include/asm-arm/proc-armv/cache.h b/include/asm-arm/proc-armv/cache.h deleted file mode 100644 index 250a69e335ff..000000000000 --- a/include/asm-arm/proc-armv/cache.h +++ /dev/null @@ -1,278 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/cache.h - * - * Copyright (C) 1999-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <asm/mman.h> -#include <asm/glue.h> - -/* - * Cache Model - * =========== - */ -#undef _CACHE -#undef MULTI_CACHE - -#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v3 -# endif -#endif - -#if defined(CONFIG_CPU_ARM720T) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v4 -# endif -#endif - -#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ - defined(CONFIG_CPU_ARM1020) -# define MULTI_CACHE 1 -#endif - -#if defined(CONFIG_CPU_ARM926T) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE arm926 -# endif -#endif - -#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v4wb -# endif -#endif - -#if defined(CONFIG_CPU_XSCALE) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE xscale -# endif -#endif - -#if !defined(_CACHE) && !defined(MULTI_CACHE) -#error Unknown cache maintainence model -#endif - -/* - * This flag is used to indicate that the page pointed to by a pte - * is dirty and requires cleaning before returning it to the user. - */ -#define PG_dcache_dirty PG_arch_1 - -/* - * MM Cache Management - * =================== - * - * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files - * implement these methods. - * - * Start addresses are inclusive and end addresses are exclusive; - * start addresses should be rounded down, end addresses up. - * - * See linux/Documentation/cachetlb.txt for more information. - * Please note that the implementation of these, and the required - * effects are cache-type (VIVT/VIPT/PIPT) specific. - * - * flush_cache_kern_all() - * - * Unconditionally clean and invalidate the entire cache. - * - * flush_cache_user_mm(mm) - * - * Clean and invalidate all user space cache entries - * before a change of page tables. - * - * flush_cache_user_range(start, end, flags) - * - * Clean and invalidate a range of cache entries in the - * specified address space before a change of page tables. - * - start - user start address (inclusive, page aligned) - * - end - user end address (exclusive, page aligned) - * - flags - vma->vm_flags field - * - * coherent_kern_range(start, end) - * - * Ensure coherency between the Icache and the Dcache in the - * region described by start, end. If you have non-snooping - * Harvard caches, you need to implement this function. - * - start - virtual start address - * - end - virtual end address - * - * DMA Cache Coherency - * =================== - * - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - start - virtual start address - * - end - virtual end address - * - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. 
- * - start - virtual start address - * - end - virtual end address - * - * dma_flush_range(start, end) - * - * Clean and invalidate the specified virtual address range. - * - start - virtual start address - * - end - virtual end address - */ - -struct cpu_cache_fns { - void (*flush_kern_all)(void); - void (*flush_user_all)(void); - void (*flush_user_range)(unsigned long, unsigned long, unsigned int); - - void (*coherent_kern_range)(unsigned long, unsigned long); - void (*flush_kern_dcache_page)(void *); - - void (*dma_inv_range)(unsigned long, unsigned long); - void (*dma_clean_range)(unsigned long, unsigned long); - void (*dma_flush_range)(unsigned long, unsigned long); -}; - -/* - * Select the calling method - */ -#ifdef MULTI_CACHE - -extern struct cpu_cache_fns cpu_cache; - -#define __cpuc_flush_kern_all cpu_cache.flush_kern_all -#define __cpuc_flush_user_all cpu_cache.flush_user_all -#define __cpuc_flush_user_range cpu_cache.flush_user_range -#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range -#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page - -/* - * These are private to the dma-mapping API. Do not use directly. - * Their sole purpose is to ensure that data held in the cache - * is visible to DMA, or data written by DMA to system memory is - * visible to the CPU. - */ -#define dmac_inv_range cpu_cache.dma_inv_range -#define dmac_clean_range cpu_cache.dma_clean_range -#define dmac_flush_range cpu_cache.dma_flush_range - -#else - -#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) -#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) -#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) -#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) -#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page) - -extern void __cpuc_flush_kern_all(void); -extern void __cpuc_flush_user_all(void); -extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); -extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); -extern void __cpuc_flush_dcache_page(void *); - -/* - * These are private to the dma-mapping API. Do not use directly. - * Their sole purpose is to ensure that data held in the cache - * is visible to DMA, or data written by DMA to system memory is - * visible to the CPU. - */ -#define dmac_inv_range __glue(_CACHE,_dma_inv_range) -#define dmac_clean_range __glue(_CACHE,_dma_clean_range) -#define dmac_flush_range __glue(_CACHE,_dma_flush_range) - -extern void dmac_inv_range(unsigned long, unsigned long); -extern void dmac_clean_range(unsigned long, unsigned long); -extern void dmac_flush_range(unsigned long, unsigned long); - -#endif - -/* - * Convert calls to our calling convention. 
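The `__glue(_CACHE, ...)` construct above is a token-pasting dispatcher: when the kernel is configured for exactly one cache type, every `__cpuc_*` name expands at compile time to a direct call into that one implementation, while `MULTI_CACHE` builds fall back to the `cpu_cache` function-pointer table. A minimal user-space sketch of the single-implementation case — the `v4wb` name and the flush function are stand-ins, not kernel code:

```c
#include <stdio.h>

/* Token-pasting helpers, mirroring asm/glue.h */
#define ____glue(name, fn)  name##fn
#define __glue(name, fn)    ____glue(name, fn)

/* Pretend this kernel was configured for a single cache type, "v4wb". */
#define _CACHE v4wb

static void v4wb_flush_kern_cache_all(void)
{
    puts("v4wb: flush entire cache");
}

/* Expands at compile time to v4wb_flush_kern_cache_all -- a direct call
 * with no pointer indirection, which is the point of the glue layer. */
#define __cpuc_flush_kern_all __glue(_CACHE, _flush_kern_cache_all)

int main(void)
{
    __cpuc_flush_kern_all();
    return 0;
}
```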
- */ -#define flush_cache_all() __cpuc_flush_kern_all() - -static inline void flush_cache_mm(struct mm_struct *mm) -{ - if (current->active_mm == mm) - __cpuc_flush_user_all(); -} - -static inline void -flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) -{ - if (current->active_mm == vma->vm_mm) - __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), - vma->vm_flags); -} - -static inline void -flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) -{ - if (current->active_mm == vma->vm_mm) { - unsigned long addr = user_addr & PAGE_MASK; - __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); - } -} - -/* - * Perform necessary cache operations to ensure that data previously - * stored within this range of addresses can be executed by the CPU. - */ -#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e) - -/* - * Perform necessary cache operations to ensure that the TLB will - * see data written in the specified area. - */ -#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size) - -/* - * flush_dcache_page is used when the kernel has written to the page - * cache page at virtual address page->virtual. - * - * If this page isn't mapped (ie, page->mapping = NULL), or it has - * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared) - * then we _must_ always clean + invalidate the dcache entries associated - * with the kernel mapping. - * - * Otherwise we can defer the operation, and clean the cache when we are - * about to change to user space. This is the same method as used on SPARC64. - * See update_mmu_cache for the user space part. - */ -#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \ - !list_empty(&(map)->i_mmap_shared)) - -extern void __flush_dcache_page(struct page *); - -static inline void flush_dcache_page(struct page *page) -{ - if (page->mapping && !mapping_mapped(page->mapping)) - set_bit(PG_dcache_dirty, &page->flags); - else - __flush_dcache_page(page); -} - -#define flush_icache_user_range(vma,page,addr,len) \ - flush_dcache_page(page) - -/* - * We don't appear to need to do anything here. In fact, if we did, we'd - * duplicate cache flushing elsewhere performed by flush_dcache_page(). - */ -#define flush_icache_page(vma,page) do { } while (0) diff --git a/include/asm-arm/proc-armv/elf.h b/include/asm-arm/proc-armv/elf.h deleted file mode 100644 index 1b8a0ac3b2b5..000000000000 --- a/include/asm-arm/proc-armv/elf.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * ELF definitions for 32-bit CPUs - */ - -#define ELF_EXEC_PAGESIZE 4096 - -#ifdef __KERNEL__ - -/* - * 32-bit code is always OK. Some cpus can do 26-bit, some can't. - */ -#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x)) - -#define ELF_THUMB_OK(x) \ - (( (elf_hwcap & HWCAP_THUMB) && ((x)->e_entry & 1) == 1) || \ - ((x)->e_entry & 3) == 0) - -#define ELF_26BIT_OK(x) \ - (( (elf_hwcap & HWCAP_26BIT) && (x)->e_flags & EF_ARM_APCS26) || \ - ((x)->e_flags & EF_ARM_APCS26) == 0) - -/* Old NetWinder binaries were compiled in such a way that the iBCS - heuristic always trips on them. Until these binaries become uncommon - enough not to care, don't trust the `ibcs' flag here. In any case - there is no other ELF system currently supported by iBCS. - @@ Could print a warning message to encourage users to upgrade. 
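The `ELF_THUMB_OK`/`ELF_26BIT_OK` tests above gate `execve()` on the binary's entry point and flags: a Thumb entry has bit 0 set (allowed only when the CPU advertises `HWCAP_THUMB`), and an ARM entry must be word-aligned. A small sketch of just the Thumb check, assuming illustrative `HWCAP` bit positions:

```c
#include <stdio.h>

#define HWCAP_THUMB (1 << 2)   /* bit positions are illustrative */

static unsigned long elf_hwcap = HWCAP_THUMB;

/* Thumb entry points have bit 0 set; ARM entry points must be
 * 4-byte aligned (low two bits clear). */
static int elf_thumb_ok(unsigned long e_entry)
{
    return ((elf_hwcap & HWCAP_THUMB) && (e_entry & 1) == 1) ||
           (e_entry & 3) == 0;
}

int main(void)
{
    printf("0x8001 ok? %d\n", elf_thumb_ok(0x8001)); /* Thumb: yes */
    printf("0x8002 ok? %d\n", elf_thumb_ok(0x8002)); /* misaligned: no */
    return 0;
}
```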
*/ -#define SET_PERSONALITY(ex,ibcs2) \ - set_personality(((ex).e_flags&EF_ARM_APCS26 ?PER_LINUX :PER_LINUX_32BIT)) - -#endif diff --git a/include/asm-arm/proc-armv/page.h b/include/asm-arm/proc-armv/page.h deleted file mode 100644 index 726b16df7195..000000000000 --- a/include/asm-arm/proc-armv/page.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/page.h - * - * Copyright (C) 1995-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_PROC_PAGE_H -#define __ASM_PROC_PAGE_H - -/* PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 - -#define EXEC_PAGESIZE 4096 - -#ifndef __ASSEMBLY__ -#ifdef STRICT_MM_TYPECHECKS - -typedef struct { - unsigned long pgd0; - unsigned long pgd1; -} pgd_t; - -#define pgd_val(x) ((x).pgd0) - -#else - -typedef unsigned long pgd_t[2]; - -#define pgd_val(x) ((x)[0]) - -#endif -#endif /* __ASSEMBLY__ */ - -#endif /* __ASM_PROC_PAGE_H */ diff --git a/include/asm-arm/proc-armv/pgalloc.h b/include/asm-arm/proc-armv/pgalloc.h deleted file mode 100644 index 0e65ab7362e4..000000000000 --- a/include/asm-arm/proc-armv/pgalloc.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/pgalloc.h - * - * Copyright (C) 2001-2002 Russell King - * - * Page table allocation/freeing primitives for 32-bit ARM processors. - */ -#include <asm/cacheflush.h> -#include <asm/tlbflush.h> -#include "pgtable.h" - -/* - * Allocate one PTE table. - * - * This actually allocates two hardware PTE tables, but we wrap this up - * into one table thus: - * - * +------------+ - * | h/w pt 0 | - * +------------+ - * | h/w pt 1 | - * +------------+ - * | Linux pt 0 | - * +------------+ - * | Linux pt 1 | - * +------------+ - */ -static inline pte_t * -pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) -{ - pte_t *pte; - - pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); - if (pte) { - clear_page(pte); - clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE); - pte += PTRS_PER_PTE; - } - - return pte; -} - -static inline struct page * -pte_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - struct page *pte; - - pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); - if (pte) { - void *page = page_address(pte); - clear_page(page); - clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE); - } - - return pte; -} - -/* - * Free one PTE table. - */ -static inline void pte_free_kernel(pte_t *pte) -{ - if (pte) { - pte -= PTRS_PER_PTE; - free_page((unsigned long)pte); - } -} - -static inline void pte_free(struct page *pte) -{ - __free_page(pte); -} - -/* - * Populate the pmdp entry with a pointer to the pte. This pmd is part - * of the mm address space. - * - * Ensure that we always set both PMD entries. 
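The layout drawn in the `pte_alloc_one_kernel()` comment above — two 1KB hardware PTE tables followed by their two Linux shadow tables in one 4KB page — drives the arithmetic in `pmd_populate_kernel()` just below: step back 2KB from the Linux PTE pointer to the hardware tables, then emit two PMD entries 1KB apart. A user-space sketch of that address math, with a fake `__pa()` and the `_PAGE_*_TABLE` protection bits omitted:

```c
#include <stdio.h>
#include <stdint.h>

#define PTE_SIZE        4u      /* sizeof(pte_t) on 32-bit ARM */
#define PTRS_PER_PTE    512u
#define PTE_HWTABLE_OFF (PTRS_PER_PTE * PTE_SIZE)   /* 2048 bytes */

/* Stand-in for __pa(): the kernel subtracts PAGE_OFFSET. */
#define PAGE_OFFSET 0xc0000000u
#define fake_pa(x)  ((x) - PAGE_OFFSET)

/* Step back from the Linux PTE array to the hardware tables at the
 * start of the page, then emit one PMD entry per 256-entry table. */
static void pmd_pair(uint32_t linux_pte, uint32_t *pmd0, uint32_t *pmd1)
{
    uint32_t hw = linux_pte - PTE_HWTABLE_OFF;
    *pmd0 = fake_pa(hw);
    *pmd1 = fake_pa(hw) + 256 * PTE_SIZE;
}

int main(void)
{
    uint32_t p0, p1;
    pmd_pair(0xc1234800u, &p0, &p1);
    printf("pmd[0]=%#x pmd[1]=%#x\n", p0, p1); /* 0x1234000, 0x1234400 */
    return 0;
}
```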
- */ -static inline void -pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) -{ - unsigned long pte_ptr = (unsigned long)ptep; - unsigned long pmdval; - - BUG_ON(mm != &init_mm); - - /* - * The pmd must be loaded with the physical - * address of the PTE table - */ - pte_ptr -= PTRS_PER_PTE * sizeof(void *); - pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE; - pmdp[0] = __pmd(pmdval); - pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); - flush_pmd_entry(pmdp); -} - -static inline void -pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) -{ - unsigned long pmdval; - - BUG_ON(mm == &init_mm); - - pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE; - pmdp[0] = __pmd(pmdval); - pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); - flush_pmd_entry(pmdp); -} diff --git a/include/asm-arm/proc-armv/pgtable.h b/include/asm-arm/proc-armv/pgtable.h deleted file mode 100644 index 4b1cb65443ae..000000000000 --- a/include/asm-arm/proc-armv/pgtable.h +++ /dev/null @@ -1,217 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/pgtable.h - * - * Copyright (C) 1995-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * 12-Jan-1997 RMK Altered flushing routines to use function pointers - * now possible to combine ARM6, ARM7 and StrongARM versions. - * 17-Apr-1999 RMK Now pass an area size to clean_cache_area and - * flush_icache_area. - */ -#ifndef __ASM_PROC_PGTABLE_H -#define __ASM_PROC_PGTABLE_H - -/* - * We pull a couple of tricks here: - * 1. We wrap the PMD into the PGD. - * 2. We lie about the size of the PTE and PGD. - * Even though we have 256 PTE entries and 4096 PGD entries, we tell - * Linux that we actually have 512 PTE entries and 2048 PGD entries. - * Each "Linux" PGD entry is made up of two hardware PGD entries, and - * each PTE table is actually two hardware PTE tables. - */ -#define PTRS_PER_PTE 512 -#define PTRS_PER_PMD 1 -#define PTRS_PER_PGD 2048 - -/* - * Hardware page table definitions. 
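The "lie about the sizes" trick above (512 PTEs, 2048 PGD entries instead of the hardware's 256 and 4096) makes each Linux PGD entry span 2MB — two 1MB hardware sections — so the indexing shift becomes 21. A sketch of the resulting index calculation for an arbitrary virtual address, under those constants:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT    12
#define PTRS_PER_PTE  512    /* Linux view: two h/w tables glued */
#define PGDIR_SHIFT   21     /* 2048 entries x 2MB = full 4GB    */

int main(void)
{
    uint32_t addr = 0x1234abcd;
    uint32_t pgd_index = addr >> PGDIR_SHIFT;
    uint32_t pte_index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

    /* 2048 PGD entries x 512 PTEs x 4KB pages = 4GB address space */
    printf("pgd %u, pte %u\n", pgd_index, pte_index);
    return 0;
}
```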
- * - * + Level 1 descriptor (PMD) - * - common - */ -#define PMD_TYPE_MASK (3 << 0) -#define PMD_TYPE_FAULT (0 << 0) -#define PMD_TYPE_TABLE (1 << 0) -#define PMD_TYPE_SECT (2 << 0) -#define PMD_BIT4 (1 << 4) -#define PMD_DOMAIN(x) ((x) << 5) -#define PMD_PROTECTION (1 << 9) /* v5 */ -/* - * - section - */ -#define PMD_SECT_BUFFERABLE (1 << 2) -#define PMD_SECT_CACHEABLE (1 << 3) -#define PMD_SECT_AP_WRITE (1 << 10) -#define PMD_SECT_AP_READ (1 << 11) -#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */ -#define PMD_SECT_APX (1 << 15) /* v6 */ -#define PMD_SECT_S (1 << 16) /* v6 */ -#define PMD_SECT_nG (1 << 17) /* v6 */ - -#define PMD_SECT_UNCACHED (0) -#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) -#define PMD_SECT_WT (PMD_SECT_CACHEABLE) -#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) -#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) -#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) - -/* - * - coarse table (not used) - */ - -/* - * + Level 2 descriptor (PTE) - * - common - */ -#define PTE_TYPE_MASK (3 << 0) -#define PTE_TYPE_FAULT (0 << 0) -#define PTE_TYPE_LARGE (1 << 0) -#define PTE_TYPE_SMALL (2 << 0) -#define PTE_TYPE_EXT (3 << 0) /* v5 */ -#define PTE_BUFFERABLE (1 << 2) -#define PTE_CACHEABLE (1 << 3) - -/* - * - extended small page/tiny page - */ -#define PTE_EXT_AP_UNO_SRO (0 << 4) -#define PTE_EXT_AP_UNO_SRW (1 << 4) -#define PTE_EXT_AP_URO_SRW (2 << 4) -#define PTE_EXT_AP_URW_SRW (3 << 4) -#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */ - -/* - * - small page - */ -#define PTE_SMALL_AP_UNO_SRO (0x00 << 4) -#define PTE_SMALL_AP_UNO_SRW (0x55 << 4) -#define PTE_SMALL_AP_URO_SRW (0xaa << 4) -#define PTE_SMALL_AP_URW_SRW (0xff << 4) -#define PTE_AP_READ PTE_SMALL_AP_URO_SRW -#define PTE_AP_WRITE PTE_SMALL_AP_UNO_SRW - -/* - * "Linux" PTE definitions. - * - * We keep two sets of PTEs - the hardware and the linux version. - * This allows greater flexibility in the way we map the Linux bits - * onto the hardware tables, and allows us to have YOUNG and DIRTY - * bits. - * - * The PTE table pointer refers to the hardware entries; the "Linux" - * entries are stored 1024 bytes below. 
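The hardware/Linux split described above is resolved by `pmd_page_kernel()` (appearing below): mask the PMD value down to the base of the hardware tables, then step over both of them to reach the Linux shadow entries. A sketch of that recovery, with the physical-to-virtual conversion elided:

```c
#include <stdio.h>
#include <stdint.h>

#define PTE_HWTABLE_SIZE 2048u   /* two 1KB hardware tables */

/* Mirror of pmd_page_kernel()'s pointer math: mask off the low bits
 * of the hardware table address, then skip both hardware tables to
 * reach the Linux entries stored behind them. */
static uint32_t linux_pte_base(uint32_t pmd_val)
{
    uint32_t ptr = pmd_val & ~(PTE_HWTABLE_SIZE - 1);
    return ptr + PTE_HWTABLE_SIZE;
}

int main(void)
{
    printf("%#x\n", linux_pte_base(0x20004011)); /* -> 0x20004800 */
    return 0;
}
```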
- */ -#define L_PTE_PRESENT (1 << 0) -#define L_PTE_FILE (1 << 1) /* only when !PRESENT */ -#define L_PTE_YOUNG (1 << 1) -#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */ -#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */ -#define L_PTE_USER (1 << 4) -#define L_PTE_WRITE (1 << 5) -#define L_PTE_EXEC (1 << 6) -#define L_PTE_DIRTY (1 << 7) - -#ifndef __ASSEMBLY__ - -#include <asm/proc/domain.h> - -#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER)) -#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) - -#define pmd_bad(pmd) (pmd_val(pmd) & 2) - -#define set_pmd(pmdp,pmd) \ - do { \ - *pmdp = pmd; \ - flush_pmd_entry(pmdp); \ - } while (0) - -#define pmd_clear(pmdp) \ - do { \ - pmdp[0] = __pmd(0); \ - pmdp[1] = __pmd(0); \ - clean_pmd_entry(pmdp); \ - } while (0) - -static inline pte_t *pmd_page_kernel(pmd_t pmd) -{ - unsigned long ptr; - - ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); - ptr += PTRS_PER_PTE * sizeof(void *); - - return __va(ptr); -} - -#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) - -#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) -#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) -#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr)) -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) - -#define set_pte(ptep, pte) cpu_set_pte(ptep,pte) - -/* - * The following macros handle the cache and bufferable bits... - */ -#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG -#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE | L_PTE_BUFFERABLE - -#define PAGE_NONE __pgprot(_L_PTE_DEFAULT) -#define PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ) -#define PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE) -#define PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ) -#define PAGE_KERNEL __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC) - -#define _PAGE_CHG_MASK (PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG) - - -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. - */ -#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) -#define pte_read(pte) (pte_val(pte) & L_PTE_USER) -#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) -#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC) -#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) -#define pte_file(pte) (pte_val(pte) & L_PTE_FILE) - -#define PTE_BIT_FUNC(fn,op) \ -static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } - -/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/ -/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/ -PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE); -PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE); -PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC); -PTE_BIT_FUNC(mkexec, |= L_PTE_EXEC); -PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); -PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); -PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); -PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); - -/* - * Mark the prot value as uncacheable and unbufferable. 
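The `PTE_BIT_FUNC` generator above stamps out one tiny pure function per software PTE bit, which is why `pte_wrprotect()`, `pte_mkdirty()` and friends all look identical. A self-contained sketch of the same generator trick, using a simplified `pte_t`:

```c
#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t pte; } pte_t;
#define pte_val(p) ((p).pte)

#define L_PTE_WRITE (1 << 5)
#define L_PTE_DIRTY (1 << 7)

/* Each invocation defines one small function that applies the given
 * compound-assignment operator to the software PTE bits. */
#define PTE_BIT_FUNC(fn, op) \
    static pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE)
PTE_BIT_FUNC(mkdirty,   |=  L_PTE_DIRTY)

int main(void)
{
    pte_t p = { L_PTE_WRITE };
    p = pte_mkdirty(pte_wrprotect(p));
    printf("pte = %#x\n", pte_val(p)); /* write clear, dirty set */
    return 0;
}
```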
- */ -#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE)) -#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE) - -#define pgtable_cache_init() do { } while (0) - -#define pte_to_pgoff(x) (pte_val(x) >> 2) -#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE) - -#define PTE_FILE_MAX_BITS 30 - -#endif /* __ASSEMBLY__ */ - -#endif /* __ASM_PROC_PGTABLE_H */ diff --git a/include/asm-arm/proc-armv/processor.h b/include/asm-arm/proc-armv/processor.h deleted file mode 100644 index 373ca267700c..000000000000 --- a/include/asm-arm/proc-armv/processor.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/processor.h - * - * Copyright (C) 1996-1999 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Changelog: - * 20-09-1996 RMK Created - * 26-09-1996 RMK Added 'EXTRA_THREAD_STRUCT*' - * 28-09-1996 RMK Moved start_thread into the processor dependencies - * 09-09-1998 PJB Delete redundant `wp_works_ok' - * 30-05-1999 PJB Save sl across context switches - * 31-07-1999 RMK Added 'domain' stuff - */ -#ifndef __ASM_PROC_PROCESSOR_H -#define __ASM_PROC_PROCESSOR_H - -#include <asm/proc/domain.h> - -#define KERNEL_STACK_SIZE PAGE_SIZE - -#define INIT_EXTRA_THREAD_INFO \ - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT) - -#define start_thread(regs,pc,sp) \ -({ \ - unsigned long *stack = (unsigned long *)sp; \ - set_fs(USER_DS); \ - memzero(regs->uregs, sizeof(regs->uregs)); \ - if (current->personality & ADDR_LIMIT_32BIT) \ - regs->ARM_cpsr = USR_MODE; \ - else \ - regs->ARM_cpsr = USR26_MODE; \ - if (elf_hwcap & HWCAP_THUMB && pc & 1) \ - regs->ARM_cpsr |= PSR_T_BIT; \ - regs->ARM_pc = pc & ~1; /* pc */ \ - regs->ARM_sp = sp; /* sp */ \ - regs->ARM_r2 = stack[2]; /* r2 (envp) */ \ - regs->ARM_r1 = stack[1]; /* r1 (argv) */ \ - regs->ARM_r0 = stack[0]; /* r0 (argc) */ \ -}) - -#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019]) -#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1017]) - -#endif diff --git a/include/asm-arm/proc-armv/ptrace.h b/include/asm-arm/proc-armv/ptrace.h deleted file mode 100644 index d70d3724612a..000000000000 --- a/include/asm-arm/proc-armv/ptrace.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/ptrace.h - * - * Copyright (C) 1996-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
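The `start_thread()` macro above relies on the ELF loader's stack convention: at the new user stack pointer sit argc, argv and envp, and they are copied straight into r0–r2 so the program's `_start` receives them as arguments. A user-space sketch of just that register seeding (register struct and function name are hypothetical):

```c
#include <stdio.h>

struct fake_regs { unsigned long r0, r1, r2, pc, sp; };

/* [sp+0]=argc, [sp+4]=argv, [sp+8]=envp per the ELF stack layout;
 * bit 0 of pc selects Thumb and is never part of the address. */
static void sketch_start_thread(struct fake_regs *regs,
                                unsigned long pc, unsigned long *stack)
{
    regs->pc = pc & ~1UL;
    regs->sp = (unsigned long)stack;
    regs->r0 = stack[0];    /* argc */
    regs->r1 = stack[1];    /* argv */
    regs->r2 = stack[2];    /* envp */
}

int main(void)
{
    unsigned long stack[3] = { 2, 0x1000, 0x2000 };
    struct fake_regs r;
    sketch_start_thread(&r, 0x8001, stack);
    printf("pc=%#lx argc=%lu\n", r.pc, r.r0);
    return 0;
}
```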
- */ -#ifndef __ASM_PROC_PTRACE_H -#define __ASM_PROC_PTRACE_H - -#include <linux/config.h> - -/* - * PSR bits - */ -#define USR26_MODE 0x00000000 -#define FIQ26_MODE 0x00000001 -#define IRQ26_MODE 0x00000002 -#define SVC26_MODE 0x00000003 -#define USR_MODE 0x00000010 -#define FIQ_MODE 0x00000011 -#define IRQ_MODE 0x00000012 -#define SVC_MODE 0x00000013 -#define ABT_MODE 0x00000017 -#define UND_MODE 0x0000001b -#define SYSTEM_MODE 0x0000001f -#define MODE32_BIT 0x00000010 -#define MODE_MASK 0x0000001f -#define PSR_T_BIT 0x00000020 -#define PSR_F_BIT 0x00000040 -#define PSR_I_BIT 0x00000080 -#define PSR_J_BIT 0x01000000 -#define PSR_Q_BIT 0x08000000 -#define PSR_V_BIT 0x10000000 -#define PSR_C_BIT 0x20000000 -#define PSR_Z_BIT 0x40000000 -#define PSR_N_BIT 0x80000000 -#define PCMASK 0 - -/* - * Groups of PSR bits - */ -#define PSR_f 0xff000000 /* Flags */ -#define PSR_s 0x00ff0000 /* Status */ -#define PSR_x 0x0000ff00 /* Extension */ -#define PSR_c 0x000000ff /* Control */ - -/* - * CR1 bits - */ -#define CR1_M 0x00000001 /* MMU */ -#define CR1_A 0x00000002 /* Alignment fault */ -#define CR1_C 0x00000004 /* Dcache */ -#define CR1_W 0x00000008 /* Write buffer */ -#define CR1_P 0x00000010 /* Prog32 */ -#define CR1_D 0x00000020 /* Data32 */ -#define CR1_L 0x00000040 /* Late abort */ -#define CR1_B 0x00000080 /* Big endian */ -#define CR1_S 0x00000100 /* System protection */ -#define CR1_R 0x00000200 /* ROM protection */ -#define CR1_F 0x00000400 -#define CR1_Z 0x00000800 /* BTB enable */ -#define CR1_I 0x00001000 /* Icache */ -#define CR1_V 0x00002000 /* Vector relocation */ -#define CR1_RR 0x00004000 /* Round Robin */ - -#ifndef __ASSEMBLY__ - -/* this struct defines the way the registers are stored on the - stack during a system call. */ - -struct pt_regs { - long uregs[18]; -}; - -#define ARM_cpsr uregs[16] -#define ARM_pc uregs[15] -#define ARM_lr uregs[14] -#define ARM_sp uregs[13] -#define ARM_ip uregs[12] -#define ARM_fp uregs[11] -#define ARM_r10 uregs[10] -#define ARM_r9 uregs[9] -#define ARM_r8 uregs[8] -#define ARM_r7 uregs[7] -#define ARM_r6 uregs[6] -#define ARM_r5 uregs[5] -#define ARM_r4 uregs[4] -#define ARM_r3 uregs[3] -#define ARM_r2 uregs[2] -#define ARM_r1 uregs[1] -#define ARM_r0 uregs[0] -#define ARM_ORIG_r0 uregs[17] - -#ifdef __KERNEL__ - -#define user_mode(regs) \ - (((regs)->ARM_cpsr & 0xf) == 0) - -#ifdef CONFIG_ARM_THUMB -#define thumb_mode(regs) \ - (((regs)->ARM_cpsr & PSR_T_BIT)) -#else -#define thumb_mode(regs) (0) -#endif - -#define processor_mode(regs) \ - ((regs)->ARM_cpsr & MODE_MASK) - -#define interrupts_enabled(regs) \ - (!((regs)->ARM_cpsr & PSR_I_BIT)) - -#define fast_interrupts_enabled(regs) \ - (!((regs)->ARM_cpsr & PSR_F_BIT)) - -#define condition_codes(regs) \ - ((regs)->ARM_cpsr & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT)) - -/* Are the current registers suitable for user mode? - * (used to maintain security in signal handlers) - */ -static inline int valid_user_regs(struct pt_regs *regs) -{ - if (user_mode(regs) && - (regs->ARM_cpsr & (PSR_F_BIT|PSR_I_BIT)) == 0) - return 1; - - /* - * Force CPSR to something logical... 
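The PSR constants above let `processor_mode()`, `interrupts_enabled()` and `valid_user_regs()` classify a trapped context from its saved CPSR (note that `user_mode()` masks only the low four bits so both 26-bit `USR26_MODE` and 32-bit `USR_MODE` qualify). A minimal sketch of decoding a CPSR value with those constants:

```c
#include <stdio.h>

#define USR_MODE  0x10
#define SVC_MODE  0x13
#define MODE_MASK 0x1f
#define PSR_I_BIT 0x80

static const char *mode_name(unsigned long cpsr)
{
    switch (cpsr & MODE_MASK) {
    case USR_MODE: return "usr";
    case SVC_MODE: return "svc";
    default:       return "other";
    }
}

int main(void)
{
    unsigned long cpsr = SVC_MODE | PSR_I_BIT;
    printf("%s, irqs %s\n", mode_name(cpsr),
           (cpsr & PSR_I_BIT) ? "off" : "on");
    return 0;
}
```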
- */ - regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT; - - return 0; -} - -#endif /* __KERNEL__ */ - -#endif /* __ASSEMBLY__ */ - -#endif - diff --git a/include/asm-arm/proc-armv/shmparam.h b/include/asm-arm/proc-armv/shmparam.h deleted file mode 100644 index 5b692cc5b3e8..000000000000 --- a/include/asm-arm/proc-armv/shmparam.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/shmparam.h - * - * Copyright (C) 1996 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * definitions for the shared process memory on ARM v3 or v4 - * processors - */ -#ifndef __ASM_PROC_SHMPARAM_H -#define __ASM_PROC_SHMPARAM_H - -#ifndef SHMMAX -#define SHMMAX 0x01000000 -#endif - -#endif diff --git a/include/asm-arm/proc-armv/system.h b/include/asm-arm/proc-armv/system.h deleted file mode 100644 index 2179e2a175fe..000000000000 --- a/include/asm-arm/proc-armv/system.h +++ /dev/null @@ -1,215 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/system.h - * - * Copyright (C) 1996 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_PROC_SYSTEM_H -#define __ASM_PROC_SYSTEM_H - -#include <linux/config.h> - -#define set_cr(x) \ - __asm__ __volatile__( \ - "mcr p15, 0, %0, c1, c0, 0 @ set CR" \ - : : "r" (x) : "cc") - -#define get_cr() \ - ({ \ - unsigned int __val; \ - __asm__ __volatile__( \ - "mrc p15, 0, %0, c1, c0, 0 @ get CR" \ - : "=r" (__val) : : "cc"); \ - __val; \ - }) - -#define CR_M (1 << 0) /* MMU enable */ -#define CR_A (1 << 1) /* Alignment abort enable */ -#define CR_C (1 << 2) /* Dcache enable */ -#define CR_W (1 << 3) /* Write buffer enable */ -#define CR_P (1 << 4) /* 32-bit exception handler */ -#define CR_D (1 << 5) /* 32-bit data address range */ -#define CR_L (1 << 6) /* Implementation defined */ -#define CR_B (1 << 7) /* Big endian */ -#define CR_S (1 << 8) /* System MMU protection */ -#define CR_R (1 << 9) /* ROM MMU protection */ -#define CR_F (1 << 10) /* Implementation defined */ -#define CR_Z (1 << 11) /* Implementation defined */ -#define CR_I (1 << 12) /* Icache enable */ -#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ -#define CR_RR (1 << 14) /* Round Robin cache replacement */ -#define CR_L4 (1 << 15) /* LDR pc can set T bit */ -#define CR_DT (1 << 16) -#define CR_IT (1 << 18) -#define CR_ST (1 << 19) -#define CR_FI (1 << 21) -#define CR_U (1 << 22) /* Unaligned access operation */ -#define CR_XP (1 << 23) /* Extended page tables */ -#define CR_VE (1 << 24) /* Vectored interrupts */ - -extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ -extern unsigned long cr_alignment; /* defined in entry-armv.S */ - -#if __LINUX_ARM_ARCH__ >= 4 -#define vectors_base() ((cr_alignment & CR_V) ? 
0xffff0000 : 0) -#else -#define vectors_base() (0) -#endif - -/* - * Save the current interrupt enable state & disable IRQs - */ -#define local_irq_save(x) \ - ({ \ - unsigned long temp; \ - (void) (&temp == &x); \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_save\n" \ -" orr %1, %0, #128\n" \ -" msr cpsr_c, %1" \ - : "=r" (x), "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -/* - * Enable IRQs - */ -#define local_irq_enable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_enable\n" \ -" bic %0, %0, #128\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -/* - * Disable IRQs - */ -#define local_irq_disable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_disable\n" \ -" orr %0, %0, #128\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -/* - * Enable FIQs - */ -#define __stf() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ stf\n" \ -" bic %0, %0, #64\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -/* - * Disable FIQs - */ -#define __clf() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ clf\n" \ -" orr %0, %0, #64\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -/* - * Save the current interrupt enable state. - */ -#define local_save_flags(x) \ - ({ \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_save_flags" \ - : "=r" (x) : : "memory", "cc"); \ - }) - -/* - * restore saved IRQ & FIQ state - */ -#define local_irq_restore(x) \ - __asm__ __volatile__( \ - "msr cpsr_c, %0 @ local_irq_restore\n" \ - : \ - : "r" (x) \ - : "memory", "cc") - -#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) -/* - * On the StrongARM, "swp" is terminally broken since it bypasses the - * cache totally. This means that the cache becomes inconsistent, and, - * since we use normal loads/stores as well, this is really bad. - * Typically, this causes oopsen in filp_close, but could have other, - * more disasterous effects. There are two work-arounds: - * 1. Disable interrupts and emulate the atomic swap - * 2. Clean the cache, perform atomic swap, flush the cache - * - * We choose (1) since its the "easiest" to achieve here and is not - * dependent on the processor type. 
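Work-around (1) from the StrongARM comment above — emulate the atomic swap with interrupts masked rather than trust the cache-bypassing `swp` — is exactly what the `swp_is_buggy` branches of `__xchg()` below do, and it is safe because these CPUs are uniprocessor. A user-space sketch of the pattern, with stand-ins for the IRQ save/restore (in the kernel those toggle the CPSR I bit via `mrs`/`msr`):

```c
#include <stdio.h>

/* Stand-ins for local_irq_save()/local_irq_restore(). */
static unsigned long fake_irq_save(void)      { return 0; }
static void fake_irq_restore(unsigned long f) { (void)f; }

/* Plain load/store made atomic (on UP only) by masking interrupts
 * around it, instead of relying on the broken swp instruction. */
static unsigned long xchg_emulated(volatile unsigned long *ptr,
                                   unsigned long x)
{
    unsigned long flags = fake_irq_save();
    unsigned long ret = *ptr;
    *ptr = x;
    fake_irq_restore(flags);
    return ret;
}

int main(void)
{
    volatile unsigned long v = 1;
    printf("old=%lu new=%lu\n", xchg_emulated(&v, 2), (unsigned long)v);
    return 0;
}
```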
- */ -#define swp_is_buggy -#endif - -static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -{ - extern void __bad_xchg(volatile void *, int); - unsigned long ret; -#ifdef swp_is_buggy - unsigned long flags; -#endif - - switch (size) { -#ifdef swp_is_buggy - case 1: - local_irq_save(flags); - ret = *(volatile unsigned char *)ptr; - *(volatile unsigned char *)ptr = x; - local_irq_restore(flags); - break; - - case 4: - local_irq_save(flags); - ret = *(volatile unsigned long *)ptr; - *(volatile unsigned long *)ptr = x; - local_irq_restore(flags); - break; -#else - case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - case 4: __asm__ __volatile__ ("swp %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; -#endif - default: __bad_xchg(ptr, size), ret = 0; - } - - return ret; -} - -#endif diff --git a/include/asm-arm/proc-armv/tlbflush.h b/include/asm-arm/proc-armv/tlbflush.h deleted file mode 100644 index f51019309d59..000000000000 --- a/include/asm-arm/proc-armv/tlbflush.h +++ /dev/null @@ -1,410 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/tlbflush.h - * - * Copyright (C) 1999-2003 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/config.h> -#include <asm/glue.h> - -#define TLB_V3_PAGE (1 << 0) -#define TLB_V4_U_PAGE (1 << 1) -#define TLB_V4_D_PAGE (1 << 2) -#define TLB_V4_I_PAGE (1 << 3) -#define TLB_V6_U_PAGE (1 << 4) -#define TLB_V6_D_PAGE (1 << 5) -#define TLB_V6_I_PAGE (1 << 6) - -#define TLB_V3_FULL (1 << 8) -#define TLB_V4_U_FULL (1 << 9) -#define TLB_V4_D_FULL (1 << 10) -#define TLB_V4_I_FULL (1 << 11) -#define TLB_V6_U_FULL (1 << 12) -#define TLB_V6_D_FULL (1 << 13) -#define TLB_V6_I_FULL (1 << 14) - -#define TLB_V6_U_ASID (1 << 16) -#define TLB_V6_D_ASID (1 << 17) -#define TLB_V6_I_ASID (1 << 18) - -#define TLB_DCLEAN (1 << 30) -#define TLB_WB (1 << 31) - -/* - * MMU TLB Model - * ============= - * - * We have the following to choose from: - * v3 - ARMv3 - * v4 - ARMv4 without write buffer - * v4wb - ARMv4 with write buffer without I TLB flush entry instruction - * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction - * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction - */ -#undef _TLB -#undef MULTI_TLB - -#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE) - -#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) -# define v3_possible_flags v3_tlb_flags -# define v3_always_flags v3_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v3 -# endif -#else -# define v3_possible_flags 0 -# define v3_always_flags (-1UL) -#endif - -#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE) - -#if defined(CONFIG_CPU_ARM720T) -# define v4_possible_flags v4_tlb_flags -# define v4_always_flags v4_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v4 -# endif -#else -# define v4_possible_flags 0 -# define v4_always_flags (-1UL) -#endif - -#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \ - TLB_V4_I_FULL | TLB_V4_D_FULL | \ - TLB_V4_I_PAGE | TLB_V4_D_PAGE) - -#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ - defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_ARM1020) || \ - defined(CONFIG_CPU_XSCALE) -# define v4wbi_possible_flags v4wbi_tlb_flags -# define v4wbi_always_flags v4wbi_tlb_flags -# ifdef _TLB -# 
define MULTI_TLB 1 -# else -# define _TLB v4wbi -# endif -#else -# define v4wbi_possible_flags 0 -# define v4wbi_always_flags (-1UL) -#endif - -#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \ - TLB_V4_I_FULL | TLB_V4_D_FULL | \ - TLB_V4_D_PAGE) - -#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100) -# define v4wb_possible_flags v4wb_tlb_flags -# define v4wb_always_flags v4wb_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v4wb -# endif -#else -# define v4wb_possible_flags 0 -# define v4wb_always_flags (-1UL) -#endif - -#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \ - TLB_V6_I_FULL | TLB_V6_D_FULL | \ - TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ - TLB_V6_I_ASID | TLB_V6_D_ASID) - -#if defined(CONFIG_CPU_V6) -# define v6wbi_possible_flags v6wbi_tlb_flags -# define v6wbi_always_flags v6wbi_tlb_flags -# ifdef _TLB -# define MULTI_TLB 1 -# else -# define _TLB v6wbi -# endif -#else -# define v6wbi_possible_flags 0 -# define v6wbi_always_flags (-1UL) -#endif - -#ifndef _TLB -#error Unknown TLB model -#endif - -#ifndef __ASSEMBLY__ - -struct cpu_tlb_fns { - void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *); - void (*flush_kern_range)(unsigned long, unsigned long); - unsigned long tlb_flags; -}; - -/* - * Select the calling method - */ -#ifdef MULTI_TLB - -#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range -#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range - -#else - -#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range) -#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range) - -extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); -extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); - -#endif - -extern struct cpu_tlb_fns cpu_tlb; - -#define __cpu_tlb_flags cpu_tlb.tlb_flags - -/* - * TLB Management - * ============== - * - * The arch/arm/mm/tlb-*.S files implement these methods. - * - * The TLB specific code is expected to perform whatever tests it - * needs to determine if it should invalidate the TLB for each - * call. Start addresses are inclusive and end addresses are - * exclusive; it is safe to round these addresses down. - * - * flush_tlb_all() - * - * Invalidate the entire TLB. - * - * flush_tlb_mm(mm) - * - * Invalidate all TLB entries in a particular address - * space. - * - mm - mm_struct describing address space - * - * flush_tlb_range(mm,start,end) - * - * Invalidate a range of TLB entries in the specified - * address space. - * - mm - mm_struct describing address space - * - start - start address (may not be aligned) - * - end - end address (exclusive, may not be aligned) - * - * flush_tlb_page(vaddr,vma) - * - * Invalidate the specified page in the specified address range. - * - vaddr - virtual address (may not be aligned) - * - vma - vma_struct describing address range - * - * flush_kern_tlb_page(kaddr) - * - * Invalidate the TLB entry for the specified page. The address - * will be in the kernels virtual memory space. Current uses - * only require the D-TLB to be invalidated. 
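The `*_possible_flags`/`*_always_flags` pairs above feed the `tlb_flag()` optimisation described further down: on a single-TLB configuration the two masks coincide, `tlb_flag()` folds to a compile-time constant, and the dead flush branches disappear; only multi-TLB builds consult the runtime flags word. A sketch of that folding with two illustrative flag bits:

```c
#include <stdio.h>

#define TLB_WB        (1u << 31)
#define TLB_V4_U_FULL (1u << 9)

/* Single-CPU build: "always" and "possible" are the same set, so the
 * macro below reduces to a constant for any fixed argument. */
#define always_tlb_flags   (TLB_V4_U_FULL)
#define possible_tlb_flags (TLB_V4_U_FULL)

static unsigned int runtime_tlb_flags = TLB_V4_U_FULL;

#define tlb_flag(f) \
    ((always_tlb_flags & (f)) || \
     (runtime_tlb_flags & possible_tlb_flags & (f)))

int main(void)
{
    /* TLB_WB is outside the possible set: statically false. */
    printf("WB? %d, V4 full? %d\n",
           tlb_flag(TLB_WB), tlb_flag(TLB_V4_U_FULL));
    return 0;
}
```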
- * - kaddr - Kernel virtual memory address - */ - -/* - * We optimise the code below by: - * - building a set of TLB flags that might be set in __cpu_tlb_flags - * - building a set of TLB flags that will always be set in __cpu_tlb_flags - * - if we're going to need __cpu_tlb_flags, access it once and only once - * - * This allows us to build optimal assembly for the single-CPU type case, - * and as close to optimal given the compiler constrants for multi-CPU - * case. We could do better for the multi-CPU case if the compiler - * implemented the "%?" method, but this has been discontinued due to too - * many people getting it wrong. - */ -#define possible_tlb_flags (v3_possible_flags | \ - v4_possible_flags | \ - v4wbi_possible_flags | \ - v4wb_possible_flags | \ - v6wbi_possible_flags) - -#define always_tlb_flags (v3_always_flags & \ - v4_always_flags & \ - v4wbi_always_flags & \ - v4wb_always_flags & \ - v6wbi_always_flags) - -#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) - -static inline void flush_tlb_all(void) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (tlb_flag(TLB_WB)) - asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); - - if (tlb_flag(TLB_V3_FULL)) - asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero)); - if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL)) - asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero)); - if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL)) - asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero)); - if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL)) - asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); -} - -static inline void flush_tlb_mm(struct mm_struct *mm) -{ - const int zero = 0; - const int asid = ASID(mm); - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (tlb_flag(TLB_WB)) - asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); - - if (mm == current->active_mm) { - if (tlb_flag(TLB_V3_FULL)) - asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero)); - if (tlb_flag(TLB_V4_U_FULL)) - asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero)); - if (tlb_flag(TLB_V4_D_FULL)) - asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero)); - if (tlb_flag(TLB_V4_I_FULL)) - asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); - } - - if (tlb_flag(TLB_V6_U_ASID)) - asm("mcr%? p15, 0, %0, c8, c7, 2" : : "r" (asid)); - if (tlb_flag(TLB_V6_D_ASID)) - asm("mcr%? p15, 0, %0, c8, c6, 2" : : "r" (asid)); - if (tlb_flag(TLB_V6_I_ASID)) - asm("mcr%? p15, 0, %0, c8, c5, 2" : : "r" (asid)); -} - -static inline void -flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); - - if (tlb_flag(TLB_WB)) - asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); - - if (vma->vm_mm == current->active_mm) { - if (tlb_flag(TLB_V3_PAGE)) - asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr)); - if (tlb_flag(TLB_V4_U_PAGE)) - asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr)); - if (tlb_flag(TLB_V4_D_PAGE)) - asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr)); - if (tlb_flag(TLB_V4_I_PAGE)) - asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr)); - if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) - asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); - } - - if (tlb_flag(TLB_V6_U_PAGE)) - asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr)); - if (tlb_flag(TLB_V6_D_PAGE)) - asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr)); - if (tlb_flag(TLB_V6_I_PAGE)) - asm("mcr%? 
p15, 0, %0, c8, c5, 1" : : "r" (uaddr)); -} - -static inline void flush_tlb_kernel_page(unsigned long kaddr) -{ - const int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - kaddr &= PAGE_MASK; - - if (tlb_flag(TLB_WB)) - asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero)); - - if (tlb_flag(TLB_V3_PAGE)) - asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (kaddr)); - if (tlb_flag(TLB_V4_U_PAGE)) - asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr)); - if (tlb_flag(TLB_V4_D_PAGE)) - asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr)); - if (tlb_flag(TLB_V4_I_PAGE)) - asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr)); - if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) - asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); - - if (tlb_flag(TLB_V6_U_PAGE)) - asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr)); - if (tlb_flag(TLB_V6_D_PAGE)) - asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr)); - if (tlb_flag(TLB_V6_I_PAGE)) - asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr)); -} - -/* - * flush_pmd_entry - * - * Flush a PMD entry (word aligned, or double-word aligned) to - * RAM if the TLB for the CPU we are running on requires this. - * This is typically used when we are creating PMD entries. - * - * clean_pmd_entry - * - * Clean (but don't drain the write buffer) if the CPU requires - * these operations. This is typically used when we are removing - * PMD entries. - */ -static inline void flush_pmd_entry(pmd_t *pmd) -{ - const unsigned int zero = 0; - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (tlb_flag(TLB_DCLEAN)) - asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd" - : : "r" (pmd)); - if (tlb_flag(TLB_WB)) - asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd" - : : "r" (zero)); -} - -static inline void clean_pmd_entry(pmd_t *pmd) -{ - const unsigned int __tlb_flag = __cpu_tlb_flags; - - if (tlb_flag(TLB_DCLEAN)) - asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd" - : : "r" (pmd)); -} - -#undef tlb_flag -#undef always_tlb_flags -#undef possible_tlb_flags - -/* - * Convert calls to our calling convention. - */ -#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) -#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e) - -/* - * if PG_dcache_dirty is set for the page, we need to ensure that any - * cache entries for the kernels virtual memory range are written - * back to the page. - */ -extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); - -/* - * ARM processors do not cache TLB tables in RAM. - */ -#define flush_tlb_pgtables(mm,start,end) do { } while (0) - -/* - * Old ARM MEMC stuff. This supports the reversed mapping handling that - * we have on the older 26-bit machines. We don't have a MEMC chip, so... - */ -#define memc_update_all() do { } while (0) -#define memc_update_mm(mm) do { } while (0) -#define memc_update_addr(mm,pte,log) do { } while (0) -#define memc_clear(mm,physaddr) do { } while (0) - -#endif diff --git a/include/asm-arm/proc-armv/uaccess.h b/include/asm-arm/proc-armv/uaccess.h deleted file mode 100644 index b871c78160b1..000000000000 --- a/include/asm-arm/proc-armv/uaccess.h +++ /dev/null @@ -1,189 +0,0 @@ -/* - * linux/include/asm-arm/proc-armv/uaccess.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
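The "33-bit arithmetic" that `__range_ok()` below performs with its `adds`/`sbcccs` pair exists to catch `addr + size` wrapping past 2^32 as well as simple limit violations. A C rendering of the same check using a 64-bit intermediate (the kernel macro yields 0 on success; this sketch returns 1 for readability, and the `TASK_SIZE` value is illustrative):

```c
#include <stdio.h>
#include <stdint.h>

/* addr + size computed in 33 bits cannot wrap, so one comparison
 * against addr_limit covers both overflow and out-of-range. */
static int range_ok(uint32_t addr, uint32_t size, uint32_t addr_limit)
{
    uint64_t end = (uint64_t)addr + size;
    return end <= addr_limit;
}

int main(void)
{
    uint32_t limit = 0xbf000000u;   /* illustrative TASK_SIZE */
    printf("%d %d\n",
           range_ok(0x1000, 16, limit),
           range_ok(0xffffff00u, 0x200, limit)); /* wraps: rejected */
    return 0;
}
```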
- */ -#include <asm/arch/memory.h> -#include <asm/proc/domain.h> - -/* - * Note that this is actually 0x1,0000,0000 - */ -#define KERNEL_DS 0x00000000 -#define USER_DS TASK_SIZE - -static inline void set_fs (mm_segment_t fs) -{ - current_thread_info()->addr_limit = fs; - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); -} - -/* We use 33-bit arithmetic here... */ -#define __range_ok(addr,size) ({ \ - unsigned long flag, sum; \ - __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ - : "=&r" (flag), "=&r" (sum) \ - : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \ - : "cc"); \ - flag; }) - -#define __addr_ok(addr) ({ \ - unsigned long flag; \ - __asm__("cmp %2, %0; movlo %0, #0" \ - : "=&r" (flag) \ - : "0" (current_thread_info()->addr_limit), "r" (addr) \ - : "cc"); \ - (flag == 0); }) - -#define __put_user_asm_byte(x,__pu_addr,err) \ - __asm__ __volatile__( \ - "1: strbt %1,[%2],#0\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .previous" \ - : "+r" (err) \ - : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ - : "cc") - -#ifndef __ARMEB__ -#define __put_user_asm_half(x,__pu_addr,err) \ -({ \ - unsigned long __temp = (unsigned long)(x); \ - __put_user_asm_byte(__temp, __pu_addr, err); \ - __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ -}) -#else -#define __put_user_asm_half(x,__pu_addr,err) \ -({ \ - unsigned long __temp = (unsigned long)(x); \ - __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ - __put_user_asm_byte(__temp, __pu_addr + 1, err); \ -}) -#endif - -#define __put_user_asm_word(x,__pu_addr,err) \ - __asm__ __volatile__( \ - "1: strt %1,[%2],#0\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .previous" \ - : "+r" (err) \ - : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ - : "cc") - -#ifndef __ARMEB__ -#define __reg_oper0 "%R2" -#define __reg_oper1 "%Q2" -#else -#define __reg_oper0 "%Q2" -#define __reg_oper1 "%R2" -#endif - -#define __put_user_asm_dword(x,__pu_addr,err) \ - __asm__ __volatile__( \ - "1: strt " __reg_oper1 ", [%1], #4\n" \ - "2: strt " __reg_oper0 ", [%1], #0\n" \ - "3:\n" \ - " .section .fixup,\"ax\"\n" \ - " .align 2\n" \ - "4: mov %0, %3\n" \ - " b 3b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 4b\n" \ - " .long 2b, 4b\n" \ - " .previous" \ - : "+r" (err), "+r" (__pu_addr) \ - : "r" (x), "i" (-EFAULT) \ - : "cc") - -#define __get_user_asm_byte(x,addr,err) \ - __asm__ __volatile__( \ - "1: ldrbt %1,[%2],#0\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " mov %1, #0\n" \ - " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .previous" \ - : "+r" (err), "=&r" (x) \ - : "r" (addr), "i" (-EFAULT) \ - : "cc") - -#ifndef __ARMEB__ -#define __get_user_asm_half(x,__gu_addr,err) \ -({ \ - unsigned long __b1, __b2; \ - __get_user_asm_byte(__b1, __gu_addr, err); \ - __get_user_asm_byte(__b2, __gu_addr + 1, err); \ - (x) = __b1 | (__b2 << 8); \ -}) -#else -#define __get_user_asm_half(x,__gu_addr,err) \ -({ \ - unsigned long __b1, __b2; \ - __get_user_asm_byte(__b1, __gu_addr, err); \ - __get_user_asm_byte(__b2, __gu_addr + 1, err); \ - (x) = (__b1 << 8) | __b2; \ -}) -#endif - 
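Each `1:`-labelled `ldrt`/`strt` above pairs with a `.fixup` stub through an `__ex_table` entry: when the user access faults, the fault handler searches the table for the faulting PC and resumes at the fixup, which sets `-EFAULT` instead of oopsing. A conceptual model of that table and lookup (the addresses are made up):

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct exception_entry {
    uintptr_t insn;   /* address of the ldrt/strt that may fault */
    uintptr_t fixup;  /* where to resume; the stub sets -EFAULT  */
};

static const struct exception_entry table[] = {
    { 0x1000, 0x2000 },
    { 0x1010, 0x2008 },
};

static uintptr_t search_fixup(uintptr_t faulting_pc)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].insn == faulting_pc)
            return table[i].fixup;
    return 0;   /* no entry: a genuine kernel fault, oops */
}

int main(void)
{
    printf("fixup for 0x1010: %#lx\n",
           (unsigned long)search_fixup(0x1010));
    return 0;
}
```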
-#define __get_user_asm_word(x,addr,err) \ - __asm__ __volatile__( \ - "1: ldrt %1,[%2],#0\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " mov %1, #0\n" \ - " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .previous" \ - : "+r" (err), "=&r" (x) \ - : "r" (addr), "i" (-EFAULT) \ - : "cc") - -extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n); -#define __do_copy_from_user(to,from,n) \ - (n) = __arch_copy_from_user(to,from,n) - -extern unsigned long __arch_copy_to_user(void *to, const void *from, unsigned long n); -#define __do_copy_to_user(to,from,n) \ - (n) = __arch_copy_to_user(to,from,n) - -extern unsigned long __arch_clear_user(void *addr, unsigned long n); -#define __do_clear_user(addr,sz) \ - (sz) = __arch_clear_user(addr,sz) - -extern unsigned long __arch_strncpy_from_user(char *to, const char *from, unsigned long count); -#define __do_strncpy_from_user(dst,src,count,res) \ - (res) = __arch_strncpy_from_user(dst,src,count) - -extern unsigned long __arch_strnlen_user(const char *s, long n); -#define __do_strnlen_user(s,n,res) \ - (res) = __arch_strnlen_user(s,n) diff --git a/include/asm-arm/proc-fns.h b/include/asm-arm/proc-fns.h index 5b6040fc3f74..9fcc8a89080c 100644 --- a/include/asm-arm/proc-fns.h +++ b/include/asm-arm/proc-fns.h @@ -21,11 +21,6 @@ #undef MULTI_CPU #undef CPU_NAME -#ifdef CONFIG_CPU_26 -# define CPU_INCLUDE_NAME "asm/cpu-multi26.h" -# define MULTI_CPU -#endif - /* * CPU_NAME - the prefix for CPU related functions */ diff --git a/include/asm-arm/processor.h b/include/asm-arm/processor.h index 181223a7329b..31c888ba601c 100644 --- a/include/asm-arm/processor.h +++ b/include/asm-arm/processor.h @@ -1,7 +1,7 @@ /* * linux/include/asm-arm/processor.h * - * Copyright (C) 1995 Russell King + * Copyright (C) 1995-1999 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -27,9 +27,10 @@ #include <asm/ptrace.h> #include <asm/procinfo.h> #include <asm/arch/memory.h> -#include <asm/proc/processor.h> #include <asm/types.h> +#define KERNEL_STACK_SIZE PAGE_SIZE + union debug_insn { u32 arm; u16 thumb; @@ -56,6 +57,24 @@ struct thread_struct { #define INIT_THREAD { } +#define start_thread(regs,pc,sp) \ +({ \ + unsigned long *stack = (unsigned long *)sp; \ + set_fs(USER_DS); \ + memzero(regs->uregs, sizeof(regs->uregs)); \ + if (current->personality & ADDR_LIMIT_32BIT) \ + regs->ARM_cpsr = USR_MODE; \ + else \ + regs->ARM_cpsr = USR26_MODE; \ + if (elf_hwcap & HWCAP_THUMB && pc & 1) \ + regs->ARM_cpsr |= PSR_T_BIT; \ + regs->ARM_pc = pc & ~1; /* pc */ \ + regs->ARM_sp = sp; /* sp */ \ + regs->ARM_r2 = stack[2]; /* r2 (envp) */ \ + regs->ARM_r1 = stack[1]; /* r1 (argv) */ \ + regs->ARM_r0 = stack[0]; /* r0 (argc) */ \ +}) + /* Forward declaration, a strange C thing */ struct task_struct; @@ -74,6 +93,9 @@ unsigned long get_wchan(struct task_struct *p); */ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); +#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019]) +#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1017]) + /* * Prefetching support - only ARMv5. 
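The magic indices in the relocated `KSTK_EIP`/`KSTK_ESP` above fall out of the thread layout: an 8KB thread area with `struct pt_regs` (18 words) stored 8 bytes below the top, indexed as words from `thread_info + 4096`. A sketch of the arithmetic, assuming that layout:

```c
#include <stdio.h>

int main(void)
{
    unsigned long top    = 8192;               /* two 4KB pages   */
    unsigned long regs   = top - 8 - 18 * 4;   /* pt_regs base    */
    unsigned long pc_off = regs + 15 * 4;      /* uregs[15] == pc */
    unsigned long sp_off = regs + 13 * 4;      /* uregs[13] == sp */

    /* Indices are word offsets from thread_info + 4096. */
    printf("pc index %lu, sp index %lu\n",
           (pc_off - 4096) / 4,                /* 1019 */
           (sp_off - 4096) / 4);               /* 1017 */
    return 0;
}
```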
*/ diff --git a/include/asm-arm/ptrace.h b/include/asm-arm/ptrace.h index eaf50e3bd63e..162c456638db 100644 --- a/include/asm-arm/ptrace.h +++ b/include/asm-arm/ptrace.h @@ -1,6 +1,17 @@ +/* + * linux/include/asm-arm/ptrace.h + * + * Copyright (C) 1996-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ #ifndef __ASM_ARM_PTRACE_H #define __ASM_ARM_PTRACE_H +#include <linux/config.h> + #define PTRACE_GETREGS 12 #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 @@ -8,9 +19,112 @@ #define PTRACE_OLDSETOPTIONS 21 -#include <asm/proc/ptrace.h> +/* + * PSR bits + */ +#define USR26_MODE 0x00000000 +#define FIQ26_MODE 0x00000001 +#define IRQ26_MODE 0x00000002 +#define SVC26_MODE 0x00000003 +#define USR_MODE 0x00000010 +#define FIQ_MODE 0x00000011 +#define IRQ_MODE 0x00000012 +#define SVC_MODE 0x00000013 +#define ABT_MODE 0x00000017 +#define UND_MODE 0x0000001b +#define SYSTEM_MODE 0x0000001f +#define MODE32_BIT 0x00000010 +#define MODE_MASK 0x0000001f +#define PSR_T_BIT 0x00000020 +#define PSR_F_BIT 0x00000040 +#define PSR_I_BIT 0x00000080 +#define PSR_J_BIT 0x01000000 +#define PSR_Q_BIT 0x08000000 +#define PSR_V_BIT 0x10000000 +#define PSR_C_BIT 0x20000000 +#define PSR_Z_BIT 0x40000000 +#define PSR_N_BIT 0x80000000 +#define PCMASK 0 + +/* + * Groups of PSR bits + */ +#define PSR_f 0xff000000 /* Flags */ +#define PSR_s 0x00ff0000 /* Status */ +#define PSR_x 0x0000ff00 /* Extension */ +#define PSR_c 0x000000ff /* Control */ #ifndef __ASSEMBLY__ + +/* this struct defines the way the registers are stored on the + stack during a system call. */ + +struct pt_regs { + long uregs[18]; +}; + +#define ARM_cpsr uregs[16] +#define ARM_pc uregs[15] +#define ARM_lr uregs[14] +#define ARM_sp uregs[13] +#define ARM_ip uregs[12] +#define ARM_fp uregs[11] +#define ARM_r10 uregs[10] +#define ARM_r9 uregs[9] +#define ARM_r8 uregs[8] +#define ARM_r7 uregs[7] +#define ARM_r6 uregs[6] +#define ARM_r5 uregs[5] +#define ARM_r4 uregs[4] +#define ARM_r3 uregs[3] +#define ARM_r2 uregs[2] +#define ARM_r1 uregs[1] +#define ARM_r0 uregs[0] +#define ARM_ORIG_r0 uregs[17] + +#ifdef __KERNEL__ + +#define user_mode(regs) \ + (((regs)->ARM_cpsr & 0xf) == 0) + +#ifdef CONFIG_ARM_THUMB +#define thumb_mode(regs) \ + (((regs)->ARM_cpsr & PSR_T_BIT)) +#else +#define thumb_mode(regs) (0) +#endif + +#define processor_mode(regs) \ + ((regs)->ARM_cpsr & MODE_MASK) + +#define interrupts_enabled(regs) \ + (!((regs)->ARM_cpsr & PSR_I_BIT)) + +#define fast_interrupts_enabled(regs) \ + (!((regs)->ARM_cpsr & PSR_F_BIT)) + +#define condition_codes(regs) \ + ((regs)->ARM_cpsr & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT)) + +/* Are the current registers suitable for user mode? + * (used to maintain security in signal handlers) + */ +static inline int valid_user_regs(struct pt_regs *regs) +{ + if (user_mode(regs) && + (regs->ARM_cpsr & (PSR_F_BIT|PSR_I_BIT)) == 0) + return 1; + + /* + * Force CPSR to something logical... 
+ */ + regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT; + + return 0; +} + +#endif /* __KERNEL__ */ + #define pc_pointer(v) \ ((v) & ~PCMASK) diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h index 34412c203747..76284ff21f49 100644 --- a/include/asm-arm/semaphore.h +++ b/include/asm-arm/semaphore.h @@ -10,7 +10,7 @@ #include <linux/rwsem.h> #include <asm/atomic.h> -#include <asm/proc/locks.h> +#include <asm/locks.h> struct semaphore { atomic_t count; @@ -88,7 +88,7 @@ static inline void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); __down_op(sem, __down_failed); } @@ -101,7 +101,7 @@ static inline int down_interruptible (struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); return __down_op_ret(sem, __down_interruptible_failed); } diff --git a/include/asm-arm/shmparam.h b/include/asm-arm/shmparam.h index 4359852cc1ac..46d9944a7a14 100644 --- a/include/asm-arm/shmparam.h +++ b/include/asm-arm/shmparam.h @@ -1,8 +1,6 @@ #ifndef _ASMARM_SHMPARAM_H #define _ASMARM_SHMPARAM_H -#include <asm/proc/shmparam.h> - /* * This should be the size of the virtually indexed cache/ways, * or page size, whichever is greater since the cache aliases diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h index d000f45ed4e7..bb533f25fc7d 100644 --- a/include/asm-arm/system.h +++ b/include/asm-arm/system.h @@ -4,6 +4,45 @@ #ifdef __KERNEL__ #include <linux/config.h> + +#define CPU_ARCH_UNKNOWN 0 +#define CPU_ARCH_ARMv3 1 +#define CPU_ARCH_ARMv4 2 +#define CPU_ARCH_ARMv4T 3 +#define CPU_ARCH_ARMv5 4 +#define CPU_ARCH_ARMv5T 5 +#define CPU_ARCH_ARMv5TE 6 +#define CPU_ARCH_ARMv6 7 + +/* + * CR1 bits (CP#15 CR1) + */ +#define CR_M (1 << 0) /* MMU enable */ +#define CR_A (1 << 1) /* Alignment abort enable */ +#define CR_C (1 << 2) /* Dcache enable */ +#define CR_W (1 << 3) /* Write buffer enable */ +#define CR_P (1 << 4) /* 32-bit exception handler */ +#define CR_D (1 << 5) /* 32-bit data address range */ +#define CR_L (1 << 6) /* Implementation defined */ +#define CR_B (1 << 7) /* Big endian */ +#define CR_S (1 << 8) /* System MMU protection */ +#define CR_R (1 << 9) /* ROM MMU protection */ +#define CR_F (1 << 10) /* Implementation defined */ +#define CR_Z (1 << 11) /* Implementation defined */ +#define CR_I (1 << 12) /* Icache enable */ +#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ +#define CR_RR (1 << 14) /* Round Robin cache replacement */ +#define CR_L4 (1 << 15) /* LDR pc can set T bit */ +#define CR_DT (1 << 16) +#define CR_IT (1 << 18) +#define CR_ST (1 << 19) +#define CR_FI (1 << 21) +#define CR_U (1 << 22) /* Unaligned access operation */ +#define CR_XP (1 << 23) /* Extended page tables */ +#define CR_VE (1 << 24) /* Vectored interrupts */ + +#ifndef __ASSEMBLY__ + #include <linux/kernel.h> struct thread_info; @@ -34,21 +73,30 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, extern asmlinkage void __backtrace(void); -#define CPU_ARCH_UNKNOWN 0 -#define CPU_ARCH_ARMv3 1 -#define CPU_ARCH_ARMv4 2 -#define CPU_ARCH_ARMv4T 3 -#define CPU_ARCH_ARMv5 4 -#define CPU_ARCH_ARMv5T 5 -#define CPU_ARCH_ARMv5TE 6 -#define CPU_ARCH_ARMv6 7 - extern int cpu_architecture(void); -/* - * Include processor dependent parts - */ -#include <asm/proc/system.h> +#define set_cr(x) \ + __asm__ __volatile__( \ + "mcr p15, 0, %0, c1, c0, 0 @ set CR" \ + : : "r" (x) : "cc") + +#define get_cr() \ + ({ \ + unsigned int __val; \ + __asm__ __volatile__( \ + 
"mrc p15, 0, %0, c1, c0, 0 @ get CR" \ + : "=r" (__val) : : "cc"); \ + __val; \ + }) + +extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ +extern unsigned long cr_alignment; /* defined in entry-armv.S */ + +#if __LINUX_ARM_ARCH__ >= 4 +#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0) +#else +#define vectors_base() (0) +#endif #define mb() __asm__ __volatile__ ("" : : : "memory") #define rmb() mb() @@ -75,6 +123,102 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info mb(); \ } while (0) +/* + * Save the current interrupt enable state & disable IRQs + */ +#define local_irq_save(x) \ + ({ \ + unsigned long temp; \ + (void) (&temp == &x); \ + __asm__ __volatile__( \ + "mrs %0, cpsr @ local_irq_save\n" \ +" orr %1, %0, #128\n" \ +" msr cpsr_c, %1" \ + : "=r" (x), "=r" (temp) \ + : \ + : "memory", "cc"); \ + }) + +/* + * Enable IRQs + */ +#define local_irq_enable() \ + ({ \ + unsigned long temp; \ + __asm__ __volatile__( \ + "mrs %0, cpsr @ local_irq_enable\n" \ +" bic %0, %0, #128\n" \ +" msr cpsr_c, %0" \ + : "=r" (temp) \ + : \ + : "memory", "cc"); \ + }) + +/* + * Disable IRQs + */ +#define local_irq_disable() \ + ({ \ + unsigned long temp; \ + __asm__ __volatile__( \ + "mrs %0, cpsr @ local_irq_disable\n" \ +" orr %0, %0, #128\n" \ +" msr cpsr_c, %0" \ + : "=r" (temp) \ + : \ + : "memory", "cc"); \ + }) + +/* + * Enable FIQs + */ +#define __stf() \ + ({ \ + unsigned long temp; \ + __asm__ __volatile__( \ + "mrs %0, cpsr @ stf\n" \ +" bic %0, %0, #64\n" \ +" msr cpsr_c, %0" \ + : "=r" (temp) \ + : \ + : "memory", "cc"); \ + }) + +/* + * Disable FIQs + */ +#define __clf() \ + ({ \ + unsigned long temp; \ + __asm__ __volatile__( \ + "mrs %0, cpsr @ clf\n" \ +" orr %0, %0, #64\n" \ +" msr cpsr_c, %0" \ + : "=r" (temp) \ + : \ + : "memory", "cc"); \ + }) + +/* + * Save the current interrupt enable state. + */ +#define local_save_flags(x) \ + ({ \ + __asm__ __volatile__( \ + "mrs %0, cpsr @ local_save_flags" \ + : "=r" (x) : : "memory", "cc"); \ + }) + +/* + * restore saved IRQ & FIQ state + */ +#define local_irq_restore(x) \ + __asm__ __volatile__( \ + "msr cpsr_c, %0 @ local_irq_restore\n" \ + : \ + : "r" (x) \ + : "memory", "cc") + #ifdef CONFIG_SMP #error SMP not supported @@ -100,8 +244,67 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info flags & PSR_I_BIT; \ }) +#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) +/* + * On the StrongARM, "swp" is terminally broken since it bypasses the + * cache totally. This means that the cache becomes inconsistent, and, + * since we use normal loads/stores as well, this is really bad. + * Typically, this causes oopsen in filp_close, but could have other, + * more disasterous effects. There are two work-arounds: + * 1. Disable interrupts and emulate the atomic swap + * 2. Clean the cache, perform atomic swap, flush the cache + * + * We choose (1) since its the "easiest" to achieve here and is not + * dependent on the processor type. 
+ */ +#define swp_is_buggy +#endif + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + extern void __bad_xchg(volatile void *, int); + unsigned long ret; +#ifdef swp_is_buggy + unsigned long flags; +#endif + + switch (size) { +#ifdef swp_is_buggy + case 1: + local_irq_save(flags); + ret = *(volatile unsigned char *)ptr; + *(volatile unsigned char *)ptr = x; + local_irq_restore(flags); + break; + + case 4: + local_irq_save(flags); + ret = *(volatile unsigned long *)ptr; + *(volatile unsigned long *)ptr = x; + local_irq_restore(flags); + break; +#else + case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: __asm__ __volatile__ ("swp %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#endif + default: __bad_xchg(ptr, size), ret = 0; + } + + return ret; +} + #endif /* CONFIG_SMP */ +#endif /* __ASSEMBLY__ */ + #endif /* __KERNEL__ */ #endif diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h index 60bec9ec8c79..642810a9ed84 100644 --- a/include/asm-arm/thread_info.h +++ b/include/asm-arm/thread_info.h @@ -18,9 +18,9 @@ struct task_struct; struct exec_domain; #include <asm/fpstate.h> -#include <asm/proc/processor.h> #include <asm/ptrace.h> #include <asm/types.h> +#include <asm/domain.h> typedef unsigned long mm_segment_t; @@ -55,17 +55,19 @@ struct thread_info { union fp_state fpstate; }; -#define INIT_THREAD_INFO(tsk) \ -{ \ - .task = &tsk, \ - .exec_domain = &default_exec_domain, \ - .flags = 0, \ - .preempt_count = 1, \ - .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ - INIT_EXTRA_THREAD_INFO, \ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .preempt_count = 1, \ + .addr_limit = KERNEL_DS, \ + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT), \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/include/asm-arm/tlbflush.h b/include/asm-arm/tlbflush.h index 9011f00fca32..6fdcc6ffeb0c 100644 --- a/include/asm-arm/tlbflush.h +++ b/include/asm-arm/tlbflush.h @@ -1,7 +1,7 @@ /* * linux/include/asm-arm/tlbflush.h * - * Copyright (C) 2000-2002 Russell King + * Copyright (C) 1999-2003 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -10,6 +10,397 @@ #ifndef _ASMARM_TLBFLUSH_H #define _ASMARM_TLBFLUSH_H -#include <asm-arm/proc/tlbflush.h> +#include <linux/config.h> +#include <asm/glue.h> + +#define TLB_V3_PAGE (1 << 0) +#define TLB_V4_U_PAGE (1 << 1) +#define TLB_V4_D_PAGE (1 << 2) +#define TLB_V4_I_PAGE (1 << 3) +#define TLB_V6_U_PAGE (1 << 4) +#define TLB_V6_D_PAGE (1 << 5) +#define TLB_V6_I_PAGE (1 << 6) + +#define TLB_V3_FULL (1 << 8) +#define TLB_V4_U_FULL (1 << 9) +#define TLB_V4_D_FULL (1 << 10) +#define TLB_V4_I_FULL (1 << 11) +#define TLB_V6_U_FULL (1 << 12) +#define TLB_V6_D_FULL (1 << 13) +#define TLB_V6_I_FULL (1 << 14) + +#define TLB_V6_U_ASID (1 << 16) +#define TLB_V6_D_ASID (1 << 17) +#define TLB_V6_I_ASID (1 << 18) + +#define TLB_DCLEAN (1 << 30) +#define TLB_WB (1 << 31) + +/* + * MMU TLB Model + * ============= + * + * We have the following to choose from: + * v3 - ARMv3 + * v4 - ARMv4 without write buffer + * v4wb - ARMv4 
with write buffer without I TLB flush entry instruction + * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction + * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction + */ +#undef _TLB +#undef MULTI_TLB + +#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE) + +#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) +# define v3_possible_flags v3_tlb_flags +# define v3_always_flags v3_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v3 +# endif +#else +# define v3_possible_flags 0 +# define v3_always_flags (-1UL) +#endif + +#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE) + +#if defined(CONFIG_CPU_ARM720T) +# define v4_possible_flags v4_tlb_flags +# define v4_always_flags v4_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v4 +# endif +#else +# define v4_possible_flags 0 +# define v4_always_flags (-1UL) +#endif + +#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \ + TLB_V4_I_FULL | TLB_V4_D_FULL | \ + TLB_V4_I_PAGE | TLB_V4_D_PAGE) + +#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ + defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_ARM1020) || \ + defined(CONFIG_CPU_XSCALE) +# define v4wbi_possible_flags v4wbi_tlb_flags +# define v4wbi_always_flags v4wbi_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v4wbi +# endif +#else +# define v4wbi_possible_flags 0 +# define v4wbi_always_flags (-1UL) +#endif + +#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \ + TLB_V4_I_FULL | TLB_V4_D_FULL | \ + TLB_V4_D_PAGE) + +#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100) +# define v4wb_possible_flags v4wb_tlb_flags +# define v4wb_always_flags v4wb_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v4wb +# endif +#else +# define v4wb_possible_flags 0 +# define v4wb_always_flags (-1UL) +#endif + +#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \ + TLB_V6_I_FULL | TLB_V6_D_FULL | \ + TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ + TLB_V6_I_ASID | TLB_V6_D_ASID) + +#if defined(CONFIG_CPU_V6) +# define v6wbi_possible_flags v6wbi_tlb_flags +# define v6wbi_always_flags v6wbi_tlb_flags +# ifdef _TLB +# define MULTI_TLB 1 +# else +# define _TLB v6wbi +# endif +#else +# define v6wbi_possible_flags 0 +# define v6wbi_always_flags (-1UL) +#endif + +#ifndef _TLB +#error Unknown TLB model +#endif + +#ifndef __ASSEMBLY__ + +struct cpu_tlb_fns { + void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *); + void (*flush_kern_range)(unsigned long, unsigned long); + unsigned long tlb_flags; +}; + +/* + * Select the calling method + */ +#ifdef MULTI_TLB + +#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range +#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range + +#else + +#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range) +#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range) + +extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); +extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); + +#endif + +extern struct cpu_tlb_fns cpu_tlb; + +#define __cpu_tlb_flags cpu_tlb.tlb_flags + +/* + * TLB Management + * ============== + * + * The arch/arm/mm/tlb-*.S files implement these methods. + * + * The TLB specific code is expected to perform whatever tests it + * needs to determine if it should invalidate the TLB for each + * call. Start addresses are inclusive and end addresses are + * exclusive; it is safe to round these addresses down. 
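+ *	(e.g. a user range flush of [addr, addr + PAGE_SIZE), with addr
+ *	page aligned, covers exactly the one page containing addr)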
+ *
+ *	flush_tlb_all()
+ *
+ *		Invalidate the entire TLB.
+ *
+ *	flush_tlb_mm(mm)
+ *
+ *		Invalidate all TLB entries in a particular address
+ *		space.
+ *		- mm	- mm_struct describing address space
+ *
+ *	flush_tlb_range(mm,start,end)
+ *
+ *		Invalidate a range of TLB entries in the specified
+ *		address space.
+ *		- mm	- mm_struct describing address space
+ *		- start - start address (may not be aligned)
+ *		- end	- end address (exclusive, may not be aligned)
+ *
+ *	flush_tlb_page(vaddr,vma)
+ *
+ *		Invalidate the specified page in the specified address range.
+ *		- vaddr - virtual address (may not be aligned)
+ *		- vma	- vma_struct describing address range
+ *
+ *	flush_kern_tlb_page(kaddr)
+ *
+ *		Invalidate the TLB entry for the specified page.  The address
+ *		will be in the kernel's virtual memory space.  Current uses
+ *		only require the D-TLB to be invalidated.
+ *		- kaddr - Kernel virtual memory address
+ */

+/*
+ * We optimise the code below by:
+ *  - building a set of TLB flags that might be set in __cpu_tlb_flags
+ *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
+ *  - if we're going to need __cpu_tlb_flags, access it once and only once
+ *
+ * This allows us to build optimal assembly for the single-CPU type case,
+ * and as close to optimal given the compiler constraints for the multi-CPU
+ * case.  We could do better for the multi-CPU case if the compiler
+ * implemented the "%?" method, but this has been discontinued due to too
+ * many people getting it wrong.
+ */
+#define possible_tlb_flags	(v3_possible_flags | \
+				 v4_possible_flags | \
+				 v4wbi_possible_flags | \
+				 v4wb_possible_flags | \
+				 v6wbi_possible_flags)

+#define always_tlb_flags	(v3_always_flags & \
+				 v4_always_flags & \
+				 v4wbi_always_flags & \
+				 v4wb_always_flags & \
+				 v6wbi_always_flags)

+#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))

+static inline void flush_tlb_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;

+	if (tlb_flag(TLB_WB))
+		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

+	if (tlb_flag(TLB_V3_FULL))
+		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
+	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
+		asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
+	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
+		asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
+	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
+		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
+}

+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	const int zero = 0;
+	const int asid = ASID(mm);
+	const unsigned int __tlb_flag = __cpu_tlb_flags;

+	if (tlb_flag(TLB_WB))
+		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

+	if (mm == current->active_mm) {
+		if (tlb_flag(TLB_V3_FULL))
+			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
+		if (tlb_flag(TLB_V4_U_FULL))
+			asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
+		if (tlb_flag(TLB_V4_D_FULL))
+			asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
+		if (tlb_flag(TLB_V4_I_FULL))
+			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
+	}

+	if (tlb_flag(TLB_V6_U_ASID))
+		asm("mcr%? p15, 0, %0, c8, c7, 2" : : "r" (asid));
+	if (tlb_flag(TLB_V6_D_ASID))
+		asm("mcr%? p15, 0, %0, c8, c6, 2" : : "r" (asid));
+	if (tlb_flag(TLB_V6_I_ASID))
+		asm("mcr%?
p15, 0, %0, c8, c5, 2" : : "r" (asid));
+}

+static inline void
+flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;

+	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

+	if (tlb_flag(TLB_WB))
+		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

+	if (vma->vm_mm == current->active_mm) {
+		if (tlb_flag(TLB_V3_PAGE))
+			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr));
+		if (tlb_flag(TLB_V4_U_PAGE))
+			asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
+		if (tlb_flag(TLB_V4_D_PAGE))
+			asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
+		if (tlb_flag(TLB_V4_I_PAGE))
+			asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
+		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
+			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
+	}

+	if (tlb_flag(TLB_V6_U_PAGE))
+		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
+	if (tlb_flag(TLB_V6_D_PAGE))
+		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
+	if (tlb_flag(TLB_V6_I_PAGE))
+		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
+}

+static inline void flush_tlb_kernel_page(unsigned long kaddr)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;

+	kaddr &= PAGE_MASK;

+	if (tlb_flag(TLB_WB))
+		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

+	if (tlb_flag(TLB_V3_PAGE))
+		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (kaddr));
+	if (tlb_flag(TLB_V4_U_PAGE))
+		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
+	if (tlb_flag(TLB_V4_D_PAGE))
+		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
+	if (tlb_flag(TLB_V4_I_PAGE))
+		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
+	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
+		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));

+	if (tlb_flag(TLB_V6_U_PAGE))
+		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
+	if (tlb_flag(TLB_V6_D_PAGE))
+		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
+	if (tlb_flag(TLB_V6_I_PAGE))
+		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
+}

+/*
+ *	flush_pmd_entry
+ *
+ *	Flush a PMD entry (word aligned, or double-word aligned) to
+ *	RAM if the TLB for the CPU we are running on requires this.
+ *	This is typically used when we are creating PMD entries.
+ *
+ *	clean_pmd_entry
+ *
+ *	Clean (but don't drain the write buffer) if the CPU requires
+ *	these operations.  This is typically used when we are removing
+ *	PMD entries.
+ */
+static inline void flush_pmd_entry(pmd_t *pmd)
+{
+	const unsigned int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;

+	if (tlb_flag(TLB_DCLEAN))
+		asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
+			: : "r" (pmd));
+	if (tlb_flag(TLB_WB))
+		asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd"
+			: : "r" (zero));
+}

+static inline void clean_pmd_entry(pmd_t *pmd)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;

+	if (tlb_flag(TLB_DCLEAN))
+		asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
+			: : "r" (pmd));
+}

+#undef tlb_flag
+#undef always_tlb_flags
+#undef possible_tlb_flags

+/*
+ * Convert calls to our calling convention.
+ */
+#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
+#define flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)

+/*
+ * If PG_dcache_dirty is set for the page, we need to ensure that any
+ * cache entries for the kernel's virtual memory range are written
+ * back to the page.
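+ *
+ * update_mmu_cache() is the hook the generic VM calls once a PTE has
+ * been installed for "addr" in "vma"; doing the writeback there, rather
+ * than on every kernel store to the page, keeps the common path cheap.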
+ */ +extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); + +/* + * ARM processors do not cache TLB tables in RAM. + */ +#define flush_tlb_pgtables(mm,start,end) do { } while (0) + +#endif #endif diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h index 23bf0cbc1c76..634980801347 100644 --- a/include/asm-arm/uaccess.h +++ b/include/asm-arm/uaccess.h @@ -1,3 +1,10 @@ +/* + * linux/include/asm-arm/uaccess.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ #ifndef _ASMARM_UACCESS_H #define _ASMARM_UACCESS_H @@ -6,6 +13,8 @@ */ #include <linux/sched.h> #include <asm/errno.h> +#include <asm/arch/memory.h> +#include <asm/domain.h> #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -30,11 +39,39 @@ struct exception_table_entry extern int fixup_exception(struct pt_regs *regs); +/* + * Note that this is actually 0x1,0000,0000 + */ +#define KERNEL_DS 0x00000000 +#define USER_DS TASK_SIZE + #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) + +static inline void set_fs (mm_segment_t fs) +{ + current_thread_info()->addr_limit = fs; + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); +} + #define segment_eq(a,b) ((a) == (b)) -#include <asm/proc/uaccess.h> +#define __addr_ok(addr) ({ \ + unsigned long flag; \ + __asm__("cmp %2, %0; movlo %0, #0" \ + : "=&r" (flag) \ + : "0" (current_thread_info()->addr_limit), "r" (addr) \ + : "cc"); \ + (flag == 0); }) + +/* We use 33-bit arithmetic here... */ +#define __range_ok(addr,size) ({ \ + unsigned long flag, sum; \ + __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ + : "=&r" (flag), "=&r" (sum) \ + : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \ + : "cc"); \ + flag; }) #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) @@ -125,17 +162,71 @@ do { \ (x) = (__typeof__(*(ptr)))__gu_val; \ } while (0) +#define __get_user_asm_byte(x,addr,err) \ + __asm__ __volatile__( \ + "1: ldrbt %1,[%2],#0\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: mov %0, %3\n" \ + " mov %1, #0\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 3b\n" \ + " .previous" \ + : "+r" (err), "=&r" (x) \ + : "r" (addr), "i" (-EFAULT) \ + : "cc") + +#ifndef __ARMEB__ +#define __get_user_asm_half(x,__gu_addr,err) \ +({ \ + unsigned long __b1, __b2; \ + __get_user_asm_byte(__b1, __gu_addr, err); \ + __get_user_asm_byte(__b2, __gu_addr + 1, err); \ + (x) = __b1 | (__b2 << 8); \ +}) +#else +#define __get_user_asm_half(x,__gu_addr,err) \ +({ \ + unsigned long __b1, __b2; \ + __get_user_asm_byte(__b1, __gu_addr, err); \ + __get_user_asm_byte(__b2, __gu_addr + 1, err); \ + (x) = (__b1 << 8) | __b2; \ +}) +#endif + +#define __get_user_asm_word(x,addr,err) \ + __asm__ __volatile__( \ + "1: ldrt %1,[%2],#0\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: mov %0, %3\n" \ + " mov %1, #0\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 3b\n" \ + " .previous" \ + : "+r" (err), "=&r" (x) \ + : "r" (addr), "i" (-EFAULT) \ + : "cc") + extern int __put_user_1(void *, unsigned int); extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); extern int __put_user_8(void *, unsigned long long); extern int __put_user_bad(void); -#define 
__put_user_x(__r1,__p,__e,__s,__i...) \ +#define __put_user_x(__r1,__p,__e,__s) \ __asm__ __volatile__ ("bl __put_user_" #__s \ : "=&r" (__e) \ : "0" (__p), "r" (__r1) \ - : __i, "cc") + : "ip", "lr", "cc") #define put_user(x,p) \ ({ \ @@ -144,16 +235,16 @@ extern int __put_user_bad(void); register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __put_user_x(__r1, __p, __e, 1, "ip", "lr"); \ + __put_user_x(__r1, __p, __e, 1); \ break; \ case 2: \ - __put_user_x(__r1, __p, __e, 2, "ip", "lr"); \ + __put_user_x(__r1, __p, __e, 2); \ break; \ case 4: \ - __put_user_x(__r1, __p, __e, 4, "ip", "lr"); \ + __put_user_x(__r1, __p, __e, 4); \ break; \ case 8: \ - __put_user_x(__r1, __p, __e, 8, "ip", "lr"); \ + __put_user_x(__r1, __p, __e, 8); \ break; \ default: __e = __put_user_bad(); break; \ } \ @@ -186,10 +277,93 @@ do { \ } \ } while (0) +#define __put_user_asm_byte(x,__pu_addr,err) \ + __asm__ __volatile__( \ + "1: strbt %1,[%2],#0\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: mov %0, %3\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 3b\n" \ + " .previous" \ + : "+r" (err) \ + : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ + : "cc") + +#ifndef __ARMEB__ +#define __put_user_asm_half(x,__pu_addr,err) \ +({ \ + unsigned long __temp = (unsigned long)(x); \ + __put_user_asm_byte(__temp, __pu_addr, err); \ + __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ +}) +#else +#define __put_user_asm_half(x,__pu_addr,err) \ +({ \ + unsigned long __temp = (unsigned long)(x); \ + __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ + __put_user_asm_byte(__temp, __pu_addr + 1, err); \ +}) +#endif + +#define __put_user_asm_word(x,__pu_addr,err) \ + __asm__ __volatile__( \ + "1: strt %1,[%2],#0\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: mov %0, %3\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 3b\n" \ + " .previous" \ + : "+r" (err) \ + : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ + : "cc") + +#ifndef __ARMEB__ +#define __reg_oper0 "%R2" +#define __reg_oper1 "%Q2" +#else +#define __reg_oper0 "%Q2" +#define __reg_oper1 "%R2" +#endif + +#define __put_user_asm_dword(x,__pu_addr,err) \ + __asm__ __volatile__( \ + "1: strt " __reg_oper1 ", [%1], #4\n" \ + "2: strt " __reg_oper0 ", [%1], #0\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "4: mov %0, %3\n" \ + " b 3b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4b\n" \ + " .long 2b, 4b\n" \ + " .previous" \ + : "+r" (err), "+r" (__pu_addr) \ + : "r" (x), "i" (-EFAULT) \ + : "cc") + +extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n); +extern unsigned long __arch_copy_to_user(void *to, const void *from, unsigned long n); +extern unsigned long __arch_clear_user(void *addr, unsigned long n); +extern unsigned long __arch_strncpy_from_user(char *to, const char *from, unsigned long count); +extern unsigned long __arch_strnlen_user(const char *s, long n); + static __inline__ unsigned long copy_from_user(void *to, const void *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) - __do_copy_from_user(to, from, n); + n = __arch_copy_from_user(to, from, n); else /* security hole - plug it */ memzero(to, n); return n; @@ -197,49 +371,44 @@ static __inline__ unsigned long copy_from_user(void *to, const void *from, unsig static __inline__ unsigned long __copy_from_user(void *to, const void 
*from, unsigned long n) { - __do_copy_from_user(to, from, n); - return n; + return __arch_copy_from_user(to, from, n); } static __inline__ unsigned long copy_to_user(void *to, const void *from, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - __do_copy_to_user(to, from, n); + n = __arch_copy_to_user(to, from, n); return n; } static __inline__ unsigned long __copy_to_user(void *to, const void *from, unsigned long n) { - __do_copy_to_user(to, from, n); - return n; + return __arch_copy_to_user(to, from, n); } static __inline__ unsigned long clear_user (void *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - __do_clear_user(to, n); + n = __arch_clear_user(to, n); return n; } static __inline__ unsigned long __clear_user (void *to, unsigned long n) { - __do_clear_user(to, n); - return n; + return __arch_clear_user(to, n); } static __inline__ long strncpy_from_user (char *dst, const char *src, long count) { long res = -EFAULT; if (access_ok(VERIFY_READ, src, 1)) - __do_strncpy_from_user(dst, src, count, res); + res = __arch_strncpy_from_user(dst, src, count); return res; } static __inline__ long __strncpy_from_user (char *dst, const char *src, long count) { - long res; - __do_strncpy_from_user(dst, src, count, res); - return res; + return __arch_strncpy_from_user(dst, src, count); } #define strlen_user(s) strnlen_user(s, ~0UL >> 1) @@ -249,7 +418,7 @@ static inline long strnlen_user(const char *s, long n) unsigned long res = 0; if (__addr_ok(s)) - __do_strnlen_user(s, n, res); + res = __arch_strnlen_user(s, n); return res; } diff --git a/include/asm-arm26/semaphore.h b/include/asm-arm26/semaphore.h index a7d94526309d..49946274a67b 100644 --- a/include/asm-arm26/semaphore.h +++ b/include/asm-arm26/semaphore.h @@ -84,7 +84,7 @@ static inline void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); __down_op(sem, __down_failed); } @@ -97,7 +97,7 @@ static inline int down_interruptible (struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); return __down_op_ret(sem, __down_interruptible_failed); } diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h index 011a3bdc9ca7..d0821f6cef7f 100644 --- a/include/asm-cris/semaphore.h +++ b/include/asm-cris/semaphore.h @@ -79,6 +79,7 @@ extern inline void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); /* atomically decrement the semaphores count, and if its negative, we wait */ local_save_flags(flags); @@ -104,6 +105,7 @@ extern inline int down_interruptible(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); /* atomically decrement the semaphores count, and if its negative, we wait */ local_save_flags(flags); diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h index 962f5eb32d16..62e6e9ce669e 100644 --- a/include/asm-h8300/semaphore.h +++ b/include/asm-h8300/semaphore.h @@ -90,6 +90,7 @@ static inline void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); count = &(sem->count); __asm__ __volatile__( @@ -117,6 +118,7 @@ static inline int down_interruptible(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); count = &(sem->count); __asm__ __volatile__( diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h index 8890ea1fdb4e..ffc472ff5418 100644 --- a/include/asm-i386/hpet.h +++ b/include/asm-i386/hpet.h @@ -102,8 
+102,7 @@ extern int is_hpet_capable(void); extern int hpet_readl(unsigned long a); extern void hpet_writel(unsigned long d, unsigned long a); -#ifdef CONFIG_RTC -#define CONFIG_HPET_EMULATE_RTC 1 +#ifdef CONFIG_HPET_EMULATE_RTC extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec); @@ -111,6 +110,6 @@ extern int hpet_set_periodic_freq(unsigned long freq); extern int hpet_rtc_dropped_irq(void); extern int hpet_rtc_timer_init(void); extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs); -#endif /* CONFIG_RTC */ +#endif /* CONFIG_HPET_EMULATE_RTC */ #endif /* CONFIG_HPET_TIMER */ #endif /* _I386_HPET_H */ diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h index c4cc5c8168aa..c0fded995489 100644 --- a/include/asm-i386/termios.h +++ b/include/asm-i386/termios.h @@ -58,6 +58,7 @@ struct termio { #define N_HCI 15 /* Bluetooth HCI UART */ #ifdef __KERNEL__ +#include <linux/module.h> /* intr=^C quit=^\ erase=del kill=^U eof=^D vtime=\0 vmin=\1 sxtc=\0 @@ -101,6 +102,8 @@ struct termio { #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) +#define MODULE_ALIAS_LDISC(ldisc) \ + MODULE_ALIAS("tty-ldisc-" __stringify(ldisc)) #endif /* __KERNEL__ */ #endif /* _I386_TERMIOS_H */ diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h index dd0eade35495..44165a637add 100644 --- a/include/asm-ia64/semaphore.h +++ b/include/asm-ia64/semaphore.h @@ -73,6 +73,7 @@ down (struct semaphore *sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); if (atomic_dec_return(&sem->count) < 0) __down(sem); } @@ -89,6 +90,7 @@ down_interruptible (struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); if (atomic_dec_return(&sem->count) < 0) ret = __down_interruptible(sem); return ret; diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h index 8581f361c52d..cc844ef36a83 100644 --- a/include/asm-m68k/semaphore.h +++ b/include/asm-m68k/semaphore.h @@ -89,7 +89,7 @@ extern inline void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); __asm__ __volatile__( "| atomic down operation\n\t" "subql #1,%0@\n\t" @@ -112,7 +112,7 @@ extern inline int down_interruptible(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); __asm__ __volatile__( "| atomic interruptible down operation\n\t" "subql #1,%1@\n\t" diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h index 4720a09e6191..f85d311bc2d8 100644 --- a/include/asm-m68knommu/semaphore.h +++ b/include/asm-m68knommu/semaphore.h @@ -88,7 +88,7 @@ extern inline void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); __asm__ __volatile__( "| atomic down operation\n\t" "movel %0, %%a1\n\t" @@ -108,7 +108,7 @@ extern inline int down_interruptible(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); __asm__ __volatile__( "| atomic down operation\n\t" "movel %1, %%a1\n\t" diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h index 34fc00d60460..a867bb1014be 100644 --- a/include/asm-mips/semaphore.h +++ b/include/asm-mips/semaphore.h @@ -88,6 +88,7 @@ 
static inline void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); if (atomic_dec_return(&sem->count) < 0) __down(sem); } @@ -103,6 +104,7 @@ static inline int down_interruptible(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); if (atomic_dec_return(&sem->count) < 0) ret = __down_interruptible(sem); return ret; diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h index 80686a7ab250..f01554398f1a 100644 --- a/include/asm-parisc/semaphore.h +++ b/include/asm-parisc/semaphore.h @@ -84,7 +84,7 @@ extern __inline__ void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); spin_lock_irq(&sem->sentry); if (sem->count > 0) { sem->count--; @@ -100,7 +100,7 @@ extern __inline__ int down_interruptible(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif - + might_sleep(); spin_lock_irq(&sem->sentry); if (sem->count > 0) { sem->count--; diff --git a/include/asm-ppc/ibm44x.h b/include/asm-ppc/ibm44x.h new file mode 100644 index 000000000000..ff6b72df2c10 --- /dev/null +++ b/include/asm-ppc/ibm44x.h @@ -0,0 +1,435 @@ +/* + * include/asm-ppc/ibm44x.h + * + * PPC44x definitions + * + * Matt Porter <mporter@mvista.com> + * + * Copyright 2002-2003 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifdef __KERNEL__ +#ifndef __ASM_IBM44x_H__ +#define __ASM_IBM44x_H__ + +#include <linux/config.h> + +#ifndef __ASSEMBLY__ +/* + * Data structure defining board information maintained by the boot + * ROM on IBM's "Ebony" evaluation board. An effort has been made to + * keep the field names consistent with the 8xx 'bd_t' board info + * structures. + * + * Ebony firmware stores MAC addresses in the F/W VPD area. The + * firmware must store the other dynamic values in NVRAM like on + * the previous 40x systems so they should be accessible if we + * really want them. + */ +typedef struct board_info { + unsigned char bi_enetaddr[2][6]; /* EMAC addresses */ + unsigned int bi_opb_busfreq; /* OPB clock in Hz */ + int bi_iic_fast[2]; /* Use fast i2c mode */ +} bd_t; +#endif /* __ASSEMBLY__ */ + +#ifndef NR_BOARD_IRQS +#define NR_BOARD_IRQS 0 +#endif + +#define _IO_BASE isa_io_base +#define _ISA_MEM_BASE isa_mem_base +#define PCI_DRAM_OFFSET pci_dram_offset + +/* TLB entry offset/size used for pinning kernel lowmem */ +#define PPC44x_PIN_SHIFT 28 +#define PPC44x_PIN_SIZE (1 << PPC44x_PIN_SHIFT) + +/* Lowest TLB slot consumed by the default pinned TLBs */ +#define PPC44x_LOW_SLOT 62 + +/* + * Standard 4GB "page" definitions + */ +#define PPC44x_IO_PAGE 0x0000000100000000ULL +#define PPC44x_PCICFG_PAGE 0x0000000200000000ULL +#define PPC44x_PCIIO_PAGE PPC44x_PCICFG_PAGE +#define PPC44x_PCIMEM_PAGE 0x0000000300000000ULL + +/* + * 36-bit trap ranges + */ +#define PPC44x_IO_LO 0x40000000 +#define PPC44x_IO_HI 0x40001000 +#define PPC44x_PCICFG_LO 0x0ec00000 +#define PPC44x_PCICFG_HI 0x0ec7ffff +#define PPC44x_PCIMEM_LO 0x80002000 +#define PPC44x_PCIMEM_HI 0xffffffff + +/* + * The "residual" board information structure the boot loader passes + * into the kernel. 
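+ *
+ * Board support reads it through the __res symbol declared in
+ * <asm/ibm4xx.h>; a typical (illustrative) access looks like:
+ *
+ *	bd_t *bip = &__res;
+ *	unsigned char *mac0 = bip->bi_enetaddr[0];	/* EMAC0 MAC */
+ *	unsigned int opb_hz = bip->bi_opb_busfreq;	/* OPB clock */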
+ */ +#ifndef __ASSEMBLY__ + +/* + * SPRN definitions + */ +#define SPRN_CPC0_GPIO 0xe5/BEARLRL + +/* + * DCRN definitions + */ + +#ifdef CONFIG_440GX +/* SDRs */ +#define DCRN_SDR_CONFIG_ADDR 0xe +#define DCRN_SDR_CONFIG_DATA 0xf +#define DCRN_SDR_PFC0 0x4100 +#define DCRN_SDR_PFC1 0x4101 +#define DCRN_SDR_MFR 0x4300 +#define DCRN_SDR_MFR_TAH0 0x80000000 /* TAHOE0 Enable */ +#define DCRN_SDR_MFR_TAH1 0x40000000 /* TAHOE1 Enable */ +#define DCRN_SDR_MFR_PCM 0x10000000 /* PPC440GP irq compat mode */ +#define DCRN_SDR_MFR_ECS 0x08000000 /* EMAC int clk */ +#define DCRN_SDR_MFR_T0TXFL 0x00080000 +#define DCRN_SDR_MFR_T0TXFH 0x00040000 +#define DCRN_SDR_MFR_T1TXFL 0x00020000 +#define DCRN_SDR_MFR_T1TXFH 0x00010000 +#define DCRN_SDR_MFR_E0TXFL 0x00008000 +#define DCRN_SDR_MFR_E0TXFH 0x00004000 +#define DCRN_SDR_MFR_E0RXFL 0x00002000 +#define DCRN_SDR_MFR_E0RXFH 0x00001000 +#define DCRN_SDR_MFR_E1TXFL 0x00000800 +#define DCRN_SDR_MFR_E1TXFH 0x00000400 +#define DCRN_SDR_MFR_E1RXFL 0x00000200 +#define DCRN_SDR_MFR_E1RXFH 0x00000100 +#define DCRN_SDR_MFR_E2TXFL 0x00000080 +#define DCRN_SDR_MFR_E2TXFH 0x00000040 +#define DCRN_SDR_MFR_E2RXFL 0x00000020 +#define DCRN_SDR_MFR_E2RXFH 0x00000010 +#define DCRN_SDR_MFR_E3TXFL 0x00000008 +#define DCRN_SDR_MFR_E3TXFH 0x00000004 +#define DCRN_SDR_MFR_E3RXFL 0x00000002 +#define DCRN_SDR_MFR_E3RXFH 0x00000001 + +/* SDR read/write helper macros */ +#define SDR_READ(offset) ({\ + mtdcr(DCRN_SDR_CONFIG_ADDR, offset); \ + mfdcr(DCRN_SDR_CONFIG_DATA);}) +#define SDR_WRITE(offset, data) ({\ + mtdcr(DCRN_SDR_CONFIG_ADDR, offset); \ + mtdcr(DCRN_SDR_CONFIG_DATA,data);}) +#endif /* CONFIG_440GX */ + +/* Base DCRNs */ +#define DCRN_DMA0_BASE 0x100 +#define DCRN_DMA1_BASE 0x108 +#define DCRN_DMA2_BASE 0x110 +#define DCRN_DMA3_BASE 0x118 +#define DCRN_DMASR_BASE 0x120 +#define DCRNCAP_DMA_SG 1 /* have DMA scatter/gather capability */ +#define DCRN_MAL_BASE 0x180 + +/* UIC */ +#define DCRN_UIC0_BASE 0xc0 +#define DCRN_UIC1_BASE 0xd0 +#define UIC0 DCRN_UIC0_BASE +#define UIC1 DCRN_UIC1_BASE + +#define DCRN_UIC_SR(base) (base + 0x0) +#define DCRN_UIC_ER(base) (base + 0x2) +#define DCRN_UIC_CR(base) (base + 0x3) +#define DCRN_UIC_PR(base) (base + 0x4) +#define DCRN_UIC_TR(base) (base + 0x5) +#define DCRN_UIC_MSR(base) (base + 0x6) +#define DCRN_UIC_VR(base) (base + 0x7) +#define DCRN_UIC_VCR(base) (base + 0x8) + +#define UIC0_UIC1NC 30 /* UIC1 non-critical interrupt */ +#define UIC0_UIC1CR 31 /* UIC1 critical interrupt */ + +/* 440GP MAL DCRs */ +#define DCRN_MALCR(base) (base + 0x0) /* Configuration */ +#define DCRN_MALESR(base) (base + 0x1) /* Error Status */ +#define DCRN_MALIER(base) (base + 0x2) /* Interrupt Enable */ +#define DCRN_MALTXCASR(base) (base + 0x4) /* Tx Channel Active Set */ +#define DCRN_MALTXCARR(base) (base + 0x5) /* Tx Channel Active Reset */ +#define DCRN_MALTXEOBISR(base) (base + 0x6) /* Tx End of Buffer Interrupt Status */ +#define DCRN_MALTXDEIR(base) (base + 0x7) /* Tx Descriptor Error Interrupt */ +#define DCRN_MALRXCASR(base) (base + 0x10) /* Rx Channel Active Set */ +#define DCRN_MALRXCARR(base) (base + 0x11) /* Rx Channel Active Reset */ +#define DCRN_MALRXEOBISR(base) (base + 0x12) /* Rx End of Buffer Interrupt Status */ +#define DCRN_MALRXDEIR(base) (base + 0x13) /* Rx Descriptor Error Interrupt */ +#define DCRN_MALTXCTP0R(base) (base + 0x20) /* Channel Tx 0 Channel Table Pointer */ +#define DCRN_MALTXCTP1R(base) (base + 0x21) /* Channel Tx 1 Channel Table Pointer */ +#define DCRN_MALTXCTP2R(base) (base + 0x22) /* Channel Tx 2 Channel Table 
Pointer */
+#define DCRN_MALTXCTP3R(base)	(base + 0x23)	/* Channel Tx 3 Channel Table Pointer */
+#define DCRN_MALRXCTP0R(base)	(base + 0x40)	/* Channel Rx 0 Channel Table Pointer */
+#define DCRN_MALRXCTP1R(base)	(base + 0x41)	/* Channel Rx 1 Channel Table Pointer */
+#define DCRN_MALRCBS0(base)	(base + 0x60)	/* Channel Rx 0 Channel Buffer Size */
+#define DCRN_MALRCBS1(base)	(base + 0x61)	/* Channel Rx 1 Channel Buffer Size */

+/* Compatibility DCRN's */
+#define DCRN_MALRXCTP2R(base)	((base) + 0x42)	/* Channel Rx 2 Channel Table Pointer */
+#define DCRN_MALRXCTP3R(base)	((base) + 0x43)	/* Channel Rx 3 Channel Table Pointer */
+#define DCRN_MALTXCTP4R(base)	((base) + 0x24)	/* Channel Tx 4 Channel Table Pointer */
+#define DCRN_MALTXCTP5R(base)	((base) + 0x25)	/* Channel Tx 5 Channel Table Pointer */
+#define DCRN_MALTXCTP6R(base)	((base) + 0x26)	/* Channel Tx 6 Channel Table Pointer */
+#define DCRN_MALTXCTP7R(base)	((base) + 0x27)	/* Channel Tx 7 Channel Table Pointer */
+#define DCRN_MALRCBS2(base)	((base) + 0x62)	/* Channel Rx 2 Channel Buffer Size */
+#define DCRN_MALRCBS3(base)	((base) + 0x63)	/* Channel Rx 3 Channel Buffer Size */


+#define MALCR_MMSR		0x80000000	/* MAL Software reset */
+#define MALCR_PLBP_1		0x00400000	/* MAL request priority: */
+#define MALCR_PLBP_2		0x00800000	/* lowest is 00 */
+#define MALCR_PLBP_3		0x00C00000	/* highest */
+#define MALCR_GA		0x00200000	/* Guarded Active Bit */
+#define MALCR_OA		0x00100000	/* Ordered Active Bit */
+#define MALCR_PLBLE		0x00080000	/* PLB Lock Error Bit */
+#define MALCR_PLBLT_1		0x00040000	/* PLB Latency Timer */
+#define MALCR_PLBLT_2		0x00020000
+#define MALCR_PLBLT_3		0x00010000
+#define MALCR_PLBLT_4		0x00008000
+#ifdef CONFIG_440GP
+#define MALCR_PLBLT_DEFAULT	0x00330000	/* PLB Latency Timer default */
+#else
+#define MALCR_PLBLT_DEFAULT	0x00ff0000	/* PLB Latency Timer default */
+#endif
+#define MALCR_PLBB		0x00004000	/* PLB Burst Deactivation Bit */
+#define MALCR_OPBBL		0x00000080	/* OPB Lock Bit */
+#define MALCR_EOPIE		0x00000004	/* End Of Packet Interrupt Enable */
+#define MALCR_LEA		0x00000002	/* Locked Error Active */
+#define MALCR_MSD		0x00000001	/* MAL Scroll Descriptor Bit */
+/* DCRN_MALESR */
+#define MALESR_EVB		0x80000000	/* Error Valid Bit */
+#define MALESR_CIDRX		0x40000000	/* Channel ID Receive */
+#define MALESR_DE		0x00100000	/* Descriptor Error */
+#define MALESR_OEN		0x00080000	/* OPB Non-Fullword Error */
+#define MALESR_OTE		0x00040000	/* OPB Timeout Error */
+#define MALESR_OSE		0x00020000	/* OPB Slave Error */
+#define MALESR_PEIN		0x00010000	/* PLB Bus Error Indication */
+#define MALESR_DEI		0x00000010	/* Descriptor Error Interrupt */
+#define MALESR_ONEI		0x00000008	/* OPB Non-Fullword Error Interrupt */
+#define MALESR_OTEI		0x00000004	/* OPB Timeout Error Interrupt */
+#define MALESR_OSEI		0x00000002	/* OPB Slave Error Interrupt */
+#define MALESR_PBEI		0x00000001	/* PLB Bus Error Interrupt */
+/* DCRN_MALIER */
+#define MALIER_DE		0x00000010	/* Descriptor Error Interrupt Enable */
+#define MALIER_NE		0x00000008	/* OPB Non-word Transfer Int Enable */
+#define MALIER_TE		0x00000004	/* OPB Time Out Error Interrupt Enable */
+#define MALIER_OPBE		0x00000002	/* OPB Slave Error Interrupt Enable */
+#define MALIER_PLBE		0x00000001	/* PLB Error Interrupt Enable */
+/* DCRN_MALTXEOBISR */
+#define MALOBISR_CH0		0x80000000	/* EOB channel 1 bit */
+#define MALOBISR_CH2		0x40000000	/* EOB channel 2 bit */

+/* 440GP PLB Arbiter DCRs */
+#define DCRN_PLB0_REVID		0x082		/* PLB Arbiter Revision ID */
+#define DCRN_PLB0_ACR		0x083
/* PLB Arbiter Control */ +#define DCRN_PLB0_BESR 0x084 /* PLB Error Status */ +#define DCRN_PLB0_BEARL 0x086 /* PLB Error Address Low */ +#define DCRN_PLB0_BEAR DCRN_PLB0_BEARL /* 40x compatibility */ +#define DCRN_PLB0_BEARH 0x087 /* PLB Error Address High */ + +/* 440GP Clock, PM, chip control */ +#define DCRN_CPC0_SR 0x0b0 +#define DCRN_CPC0_ER 0x0b1 +#define DCRN_CPC0_FR 0x0b2 +#define DCRN_CPC0_SYS0 0x0e0 +#define DCRN_CPC0_SYS1 0x0e1 +#define DCRN_CPC0_CUST0 0x0e2 +#define DCRN_CPC0_CUST1 0x0e3 +#define DCRN_CPC0_STRP0 0x0e4 +#define DCRN_CPC0_STRP1 0x0e5 +#define DCRN_CPC0_STRP2 0x0e6 +#define DCRN_CPC0_STRP3 0x0e7 +#define DCRN_CPC0_GPIO 0x0e8 +#define DCRN_CPC0_PLB 0x0e9 +#define DCRN_CPC0_CR1 0x0ea +#define DCRN_CPC0_CR0 0x0eb +#define DCRN_CPC0_MIRQ0 0x0ec +#define DCRN_CPC0_MIRQ1 0x0ed +#define DCRN_CPC0_JTAGID 0x0ef + +/* 440GP DMA controller DCRs */ +#define DCRN_DMACR0 (DCRN_DMA0_BASE + 0x0) /* DMA Channel Control 0 */ +#define DCRN_DMACT0 (DCRN_DMA0_BASE + 0x1) /* DMA Count 0 */ +#define DCRN_DMASAH0 (DCRN_DMA0_BASE + 0x2) /* DMA Src Addr High 0 */ +#define DCRN_DMASA0 (DCRN_DMA0_BASE + 0x3) /* DMA Src Addr Low 0 */ +#define DCRN_DMADAH0 (DCRN_DMA0_BASE + 0x4) /* DMA Dest Addr High 0 */ +#define DCRN_DMADA0 (DCRN_DMA0_BASE + 0x5) /* DMA Dest Addr Low 0 */ +#define DCRN_ASGH0 (DCRN_DMA0_BASE + 0x6) /* DMA SG Desc Addr High 0 */ +#define DCRN_ASG0 (DCRN_DMA0_BASE + 0x7) /* DMA SG Desc Addr Low 0 */ + +#define DCRN_DMACR1 (DCRN_DMA1_BASE + 0x0) /* DMA Channel Control 1 */ +#define DCRN_DMACT1 (DCRN_DMA1_BASE + 0x1) /* DMA Count 1 */ +#define DCRN_DMASAH1 (DCRN_DMA1_BASE + 0x2) /* DMA Src Addr High 1 */ +#define DCRN_DMASA1 (DCRN_DMA1_BASE + 0x3) /* DMA Src Addr Low 1 */ +#define DCRN_DMADAH1 (DCRN_DMA1_BASE + 0x4) /* DMA Dest Addr High 1 */ +#define DCRN_DMADA1 (DCRN_DMA1_BASE + 0x5) /* DMA Dest Addr Low 1 */ +#define DCRN_ASGH1 (DCRN_DMA1_BASE + 0x6) /* DMA SG Desc Addr High 1 */ +#define DCRN_ASG1 (DCRN_DMA1_BASE + 0x7) /* DMA SG Desc Addr Low 1 */ + +#define DCRN_DMACR2 (DCRN_DMA2_BASE + 0x0) /* DMA Channel Control 2 */ +#define DCRN_DMACT2 (DCRN_DMA2_BASE + 0x1) /* DMA Count 2 */ +#define DCRN_DMASAH2 (DCRN_DMA2_BASE + 0x2) /* DMA Src Addr High 2 */ +#define DCRN_DMASA2 (DCRN_DMA2_BASE + 0x3) /* DMA Src Addr Low 2 */ +#define DCRN_DMADAH2 (DCRN_DMA2_BASE + 0x4) /* DMA Dest Addr High 2 */ +#define DCRN_DMADA2 (DCRN_DMA2_BASE + 0x5) /* DMA Dest Addr Low 2 */ +#define DCRN_ASGH2 (DCRN_DMA2_BASE + 0x6) /* DMA SG Desc Addr High 2 */ +#define DCRN_ASG2 (DCRN_DMA2_BASE + 0x7) /* DMA SG Desc Addr Low 2 */ + +#define DCRN_DMACR3 (DCRN_DMA3_BASE + 0x0) /* DMA Channel Control 3 */ +#define DCRN_DMACT3 (DCRN_DMA3_BASE + 0x1) /* DMA Count 3 */ +#define DCRN_DMASAH3 (DCRN_DMA3_BASE + 0x2) /* DMA Src Addr High 3 */ +#define DCRN_DMASA3 (DCRN_DMA3_BASE + 0x3) /* DMA Src Addr Low 3 */ +#define DCRN_DMADAH3 (DCRN_DMA3_BASE + 0x4) /* DMA Dest Addr High 3 */ +#define DCRN_DMADA3 (DCRN_DMA3_BASE + 0x5) /* DMA Dest Addr Low 3 */ +#define DCRN_ASGH3 (DCRN_DMA3_BASE + 0x6) /* DMA SG Desc Addr High 3 */ +#define DCRN_ASG3 (DCRN_DMA3_BASE + 0x7) /* DMA SG Desc Addr Low 3 */ + +#define DCRN_DMASR (DCRN_DMASR_BASE + 0x0) /* DMA Status Register */ +#define DCRN_ASGC (DCRN_DMASR_BASE + 0x3) /* DMA Scatter/Gather Command */ +#define DCRN_SLP (DCRN_DMASR_BASE + 0x5) /* DMA Sleep Register */ +#define DCRN_POL (DCRN_DMASR_BASE + 0x6) /* DMA Polarity Register */ + +/* 440GP DRAM controller DCRs */ +#define DCRN_SDRAM0_CFGADDR 0x010 +#define DCRN_SDRAM0_CFGDATA 0x011 + +#define SDRAM0_B0CR 0x40 +#define 
SDRAM0_B1CR 0x44 +#define SDRAM0_B2CR 0x48 +#define SDRAM0_B3CR 0x4c + +#define SDRAM_CONFIG_BANK_ENABLE 0x00000001 +#define SDRAM_CONFIG_SIZE_MASK 0x000e0000 +#define SDRAM_CONFIG_BANK_SIZE(reg) ((reg & SDRAM_CONFIG_SIZE_MASK) >> 17) +#define SDRAM_CONFIG_SIZE_8M 0x00000001 +#define SDRAM_CONFIG_SIZE_16M 0x00000002 +#define SDRAM_CONFIG_SIZE_32M 0x00000003 +#define SDRAM_CONFIG_SIZE_64M 0x00000004 +#define SDRAM_CONFIG_SIZE_128M 0x00000005 +#define SDRAM_CONFIG_SIZE_256M 0x00000006 +#define SDRAM_CONFIG_SIZE_512M 0x00000007 +#define PPC44x_MEM_SIZE_8M 0x00800000 +#define PPC44x_MEM_SIZE_16M 0x01000000 +#define PPC44x_MEM_SIZE_32M 0x02000000 +#define PPC44x_MEM_SIZE_64M 0x04000000 +#define PPC44x_MEM_SIZE_128M 0x08000000 +#define PPC44x_MEM_SIZE_256M 0x10000000 +#define PPC44x_MEM_SIZE_512M 0x20000000 + +/* + * PCI-X definitions + */ +#define PCIX0_REG_BASE 0x20ec80000ULL +#define PCIX0_REG_SIZE 0x200 + +#define PCIX0_VENDID 0x000 +#define PCIX0_DEVID 0x002 +#define PCIX0_COMMAND 0x004 +#define PCIX0_STATUS 0x006 +#define PCIX0_REVID 0x008 +#define PCIX0_CLS 0x009 +#define PCIX0_CACHELS 0x00c +#define PCIX0_LATTIM 0x00d +#define PCIX0_HDTYPE 0x00e +#define PCIX0_BIST 0x00f +#define PCIX0_BAR0L 0x010 +#define PCIX0_BAR0H 0x014 +#define PCIX0_BAR1 0x018 +#define PCIX0_BAR2L 0x01c +#define PCIX0_BAR2H 0x020 +#define PCIX0_BAR3 0x024 +#define PCIX0_CISPTR 0x028 +#define PCIX0_SBSYSVID 0x02c +#define PCIX0_SBSYSID 0x02e +#define PCIX0_EROMBA 0x030 +#define PCIX0_CAP 0x034 +#define PCIX0_RES0 0x035 +#define PCIX0_RES1 0x036 +#define PCIX0_RES2 0x038 +#define PCIX0_INTLN 0x03c +#define PCIX0_INTPN 0x03d +#define PCIX0_MINGNT 0x03e +#define PCIX0_MAXLTNCY 0x03f +#define PCIX0_BRDGOPT1 0x040 +#define PCIX0_BRDGOPT2 0x044 +#define PCIX0_ERREN 0x050 +#define PCIX0_ERRSTS 0x054 +#define PCIX0_PLBBESR 0x058 +#define PCIX0_PLBBEARL 0x05c +#define PCIX0_PLBBEARH 0x060 +#define PCIX0_POM0LAL 0x068 +#define PCIX0_POM0LAH 0x06c +#define PCIX0_POM0SA 0x070 +#define PCIX0_POM0PCIAL 0x074 +#define PCIX0_POM0PCIAH 0x078 +#define PCIX0_POM1LAL 0x07c +#define PCIX0_POM1LAH 0x080 +#define PCIX0_POM1SA 0x084 +#define PCIX0_POM1PCIAL 0x088 +#define PCIX0_POM1PCIAH 0x08c +#define PCIX0_POM2SA 0x090 +#define PCIX0_PIM0SAL 0x098 +#define PCIX0_PIM0SA PCIX0_PIM0SAL +#define PCIX0_PIM0LAL 0x09c +#define PCIX0_PIM0LAH 0x0a0 +#define PCIX0_PIM1SA 0x0a4 +#define PCIX0_PIM1LAL 0x0a8 +#define PCIX0_PIM1LAH 0x0ac +#define PCIX0_PIM2SAL 0x0b0 +#define PCIX0_PIM2SA PCIX0_PIM2SAL +#define PCIX0_PIM2LAL 0x0b4 +#define PCIX0_PIM2LAH 0x0b8 +#define PCIX0_OMCAPID 0x0c0 +#define PCIX0_OMNIPTR 0x0c1 +#define PCIX0_OMMC 0x0c2 +#define PCIX0_OMMA 0x0c4 +#define PCIX0_OMMUA 0x0c8 +#define PCIX0_OMMDATA 0x0cc +#define PCIX0_OMMEOI 0x0ce +#define PCIX0_PMCAPID 0x0d0 +#define PCIX0_PMNIPTR 0x0d1 +#define PCIX0_PMC 0x0d2 +#define PCIX0_PMCSR 0x0d4 +#define PCIX0_PMCSRBSE 0x0d6 +#define PCIX0_PMDATA 0x0d7 +#define PCIX0_PMSCRR 0x0d8 +#define PCIX0_CAPID 0x0dc +#define PCIX0_NIPTR 0x0dd +#define PCIX0_CMD 0x0de +#define PCIX0_STS 0x0e0 +#define PCIX0_IDR 0x0e4 +#define PCIX0_CID 0x0e8 +#define PCIX0_RID 0x0ec +#define PCIX0_PIM0SAH 0x0f8 +#define PCIX0_PIM2SAH 0x0fc +#define PCIX0_MSGIL 0x100 +#define PCIX0_MSGIH 0x104 +#define PCIX0_MSGOL 0x108 +#define PCIX0_MSGOH 0x10c +#define PCIX0_IM 0x1f8 + +#define IIC_OWN 0x55 +#define IIC_CLOCK 50 + +#undef NR_UICS +#define NR_UICS 2 +#define UIC_CASCADE_MASK 0x0003 /* bits 30 & 31 */ + +#define BD_EMAC_ADDR(e,i) bi_enetaddr[e][i] + +#include <asm/ibm4xx.h> + +#endif /* __ASSEMBLY__ */ +#endif /* 
__ASM_IBM44x_H__ */ +#endif /* __KERNEL__ */ diff --git a/include/asm-ppc/ibm4xx.h b/include/asm-ppc/ibm4xx.h index 4884067dbcd9..c8da43c8512f 100644 --- a/include/asm-ppc/ibm4xx.h +++ b/include/asm-ppc/ibm4xx.h @@ -122,7 +122,25 @@ void ppc4xx_init(unsigned long r3, unsigned long r4, unsigned long r5, #define PCI_DRAM_OFFSET 0 #endif +#elif CONFIG_44x + +#if defined(CONFIG_EBONY) +#include <platforms/4xx/ebony.h> +#endif + +#if defined(CONFIG_OCOTEA) +#include <platforms/4xx/ocotea.h> +#endif #endif /* CONFIG_40x */ + +#ifndef __ASSEMBLY__ +/* + * The "residual" board information structure the boot loader passes + * into the kernel. + */ +extern bd_t __res; +#endif + #endif /* __ASM_IBM4XX_H__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h index b67f27323f46..55e67291c3d2 100644 --- a/include/asm-ppc/io.h +++ b/include/asm-ppc/io.h @@ -4,9 +4,11 @@ #include <linux/config.h> #include <linux/types.h> +#include <linux/mm.h> #include <asm/page.h> #include <asm/byteorder.h> +#include <asm/mmu.h> #define SIO_CONFIG_RA 0x398 #define SIO_CONFIG_RD 0x399 @@ -22,7 +24,7 @@ #define PREP_ISA_MEM_BASE 0xc0000000 #define PREP_PCI_DRAM_OFFSET 0x80000000 -#if defined(CONFIG_40x) +#if defined(CONFIG_4xx) #include <asm/ibm4xx.h> #elif defined(CONFIG_8xx) #include <asm/mpc8xx.h> @@ -197,14 +199,17 @@ extern void _outsl_ns(volatile u32 *port, const void *buf, int nl); * Map in an area of physical address space, for accessing * I/O devices etc. */ -extern void *__ioremap(unsigned long address, unsigned long size, +extern void *__ioremap(phys_addr_t address, unsigned long size, unsigned long flags); -extern void *ioremap(unsigned long address, unsigned long size); +extern void *ioremap(phys_addr_t address, unsigned long size); +#ifdef CONFIG_44x +extern void *ioremap64(unsigned long long address, unsigned long size); +#endif #define ioremap_nocache(addr, size) ioremap((addr), (size)) extern void iounmap(void *addr); extern unsigned long iopa(unsigned long addr); extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const)); -extern void io_block_mapping(unsigned long virt, unsigned long phys, +extern void io_block_mapping(unsigned long virt, phys_addr_t phys, unsigned int size, int flags); /* diff --git a/include/asm-ppc/irq.h b/include/asm-ppc/irq.h index 6ca586ab8f4e..02b993f6c3c7 100644 --- a/include/asm-ppc/irq.h +++ b/include/asm-ppc/irq.h @@ -71,11 +71,11 @@ irq_canonicalize(int irq) return (irq); } -#elif defined(CONFIG_440) -#include <asm/ibm440.h> +#elif defined(CONFIG_44x) +#include <asm/ibm44x.h> -#define NR_UIC_IRQS 64 -#define NR_IRQS (NR_UIC_IRQS + NR_BOARD_IRQS) +#define NR_UIC_IRQS 32 +#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS) static __inline__ int irq_canonicalize(int irq) diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h index 7b729cdf52f4..b75b24e4c056 100644 --- a/include/asm-ppc/mmu.h +++ b/include/asm-ppc/mmu.h @@ -10,6 +10,18 @@ #ifndef __ASSEMBLY__ +/* + * Define physical address type. Machines using split size + * virtual/physical addressing like 32-bit virtual / 36-bit + * physical need a larger than native word size type. 
-Matt + */ +#ifndef CONFIG_PTE_64BIT +typedef unsigned long phys_addr_t; +#else +typedef unsigned long long phys_addr_t; +extern phys_addr_t fixup_bigphys_addr(phys_addr_t, phys_addr_t); +#endif + /* Default "unsigned long" context */ typedef unsigned long mm_context_t; @@ -320,5 +332,55 @@ typedef struct _P601_BAT { #define TLB_M 0x00000002 /* Memory is coherent */ #define TLB_G 0x00000001 /* Memory is guarded from prefetch */ +/* + * PPC440 support + */ +#define PPC44x_MMUCR_TID 0x000000ff +#define PPC44x_MMUCR_STS 0x00010000 + +#define PPC44x_TLB_PAGEID 0 +#define PPC44x_TLB_XLAT 1 +#define PPC44x_TLB_ATTRIB 2 + +/* Page identification fields */ +#define PPC44x_TLB_EPN_MASK 0xfffffc00 /* Effective Page Number */ +#define PPC44x_TLB_VALID 0x00000200 /* Valid flag */ +#define PPC44x_TLB_TS 0x00000100 /* Translation address space */ +#define PPC44x_TLB_PAGESZ_MASK 0x000000f0 +#define PPC44x_TLB_PAGESZ(x) (x << 4) +#define PPC44x_PAGESZ_1K 0 +#define PPC44x_PAGESZ_4K 1 +#define PPC44x_PAGESZ_16K 2 +#define PPC44x_PAGESZ_64K 3 +#define PPC44x_PAGESZ_256K 4 +#define PPC44x_PAGESZ_1M 5 +#define PPC44x_PAGESZ_16M 7 +#define PPC44x_PAGESZ_256M 9 + +/* Translation fields */ +#define PPC44x_TLB_RPN_MASK 0xfffffc00 /* Real Page Number */ +#define PPC44x_TLB_ERPN_MASK 0x0000000f + +/* Storage attribute and access control fields */ +#define PPC44x_TLB_ATTR_MASK 0x0000ff80 +#define PPC44x_TLB_U0 0x00008000 /* User 0 */ +#define PPC44x_TLB_U1 0x00004000 /* User 1 */ +#define PPC44x_TLB_U2 0x00002000 /* User 2 */ +#define PPC44x_TLB_U3 0x00001000 /* User 3 */ +#define PPC44x_TLB_W 0x00000800 /* Caching is write-through */ +#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */ +#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */ +#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */ +#define PPC44x_TLB_E 0x00000080 /* Memory is guarded */ + +#define PPC44x_TLB_PERM_MASK 0x0000003f +#define PPC44x_TLB_UX 0x00000020 /* User execution */ +#define PPC44x_TLB_UW 0x00000010 /* User write */ +#define PPC44x_TLB_UR 0x00000008 /* User read */ +#define PPC44x_TLB_SX 0x00000004 /* Super execution */ +#define PPC44x_TLB_SW 0x00000002 /* Super write */ +#define PPC44x_TLB_SR 0x00000001 /* Super read */ + + #endif /* _PPC_MMU_H_ */ #endif /* __KERNEL__ */ diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h index b8f25854778d..6d2a8a40368f 100644 --- a/include/asm-ppc/page.h +++ b/include/asm-ppc/page.h @@ -4,7 +4,12 @@ /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) + +/* + * Subtle: this is an int (not an unsigned long) and so it + * gets extended to 64 bits the way want (i.e. with 1s). -- paulus + */ +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) #ifdef __KERNEL__ #include <linux/config.h> @@ -15,13 +20,27 @@ #ifndef __ASSEMBLY__ -#define STRICT_MM_TYPECHECKS +/* + * The basic type of a PTE - 64 bits for those CPUs with > 32 bit + * physical addressing. For now this just the IBM PPC440. + */ +#ifdef CONFIG_PTE_64BIT +typedef unsigned long long pte_basic_t; +#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */ +#define PTE_FMT "%16Lx" +#else +typedef unsigned long pte_basic_t; +#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */ +#define PTE_FMT "%.8lx" +#endif + +#undef STRICT_MM_TYPECHECKS #ifdef STRICT_MM_TYPECHECKS /* * These are used to make use of C type-checking.. 
-typedef struct { unsigned long pte; } pte_t; +typedef struct { pte_basic_t pte; } pte_t; typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; @@ -40,7 +59,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; /* * .. while these make it easier on the compiler */ -typedef unsigned long pte_t; +typedef pte_basic_t pte_t; typedef unsigned long pmd_t; typedef unsigned long pgd_t; typedef unsigned long pgprot_t; @@ -123,6 +142,7 @@ static inline void* ___va(unsigned long p) #define pfn_to_page(pfn) (mem_map + ((pfn) - PPC_PGSTART)) #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PPC_PGSTART) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) #define pfn_valid(pfn) (((pfn) - PPC_PGSTART) < max_mapnr) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) diff --git a/include/asm-ppc/pgalloc.h b/include/asm-ppc/pgalloc.h index 7d5d0455cc00..faf91bc03022 100644 --- a/include/asm-ppc/pgalloc.h +++ b/include/asm-ppc/pgalloc.h @@ -20,10 +20,17 @@ extern void pgd_free(pgd_t *pgd); #define __pmd_free_tlb(tlb,x) do { } while (0) #define pgd_populate(mm, pmd, pte) BUG() +#ifndef CONFIG_BOOKE #define pmd_populate_kernel(mm, pmd, pte) \ (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT) #define pmd_populate(mm, pmd, pte) \ (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT) +#else +#define pmd_populate_kernel(mm, pmd, pte) \ + (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT) +#define pmd_populate(mm, pmd, pte) \ + (pmd_val(*(pmd)) = (unsigned long)page_to_virt(pte) | _PMD_PRESENT) +#endif extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr); diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index 73a8f18d22cf..0399c8cfbcab 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h @@ -65,13 +65,23 @@ extern unsigned long ioremap_bot, ioremap_base; * and ITLB, respectively (see "mmu.h" for definitions). */ -/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */ -#define PMD_SHIFT 22 +/* + * The normal case is that PTEs are 32-bits and we have a 1-page + * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus + * + * For any >32-bit physical address platform, we can use the following + * two-level page table layout where the pgdir is 8KB and the MS 11 bits + * are an index to the second level table. The combined pgdir/pmd first + * level has 2048 entries and the second level has 512 64-bit PTE entries. + * -Matt + */ +/* PMD_SHIFT determines the size of the area mapped by the PTE pages */ +#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -/* PGDIR_SHIFT determines what a third-level page table entry can map */ -#define PGDIR_SHIFT 22 +/* PGDIR_SHIFT determines what a top-level page table entry can map */ +#define PGDIR_SHIFT PMD_SHIFT #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) @@ -79,9 +89,10 @@ extern unsigned long ioremap_bot, ioremap_base; * entries per page directory level: our page-table tree is two-level, so * we don't really have any PMD directory. */
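The geometry described in the pgtable.h comment above can be checked mechanically. A standalone sketch (plain C, no kernel headers; PAGE_SHIFT = 12 as in asm-ppc/page.h):

/* Sketch: page-table geometry for 32-bit vs 64-bit PTEs. */
#include <stdio.h>

#define PAGE_SHIFT 12

static void geometry(int pte_shift, const char *label)
{
        int pmd_shift    = PAGE_SHIFT + pte_shift;   /* span of one PTE page */
        int ptrs_per_pte = 1 << pte_shift;
        int ptrs_per_pgd = 1 << (32 - pmd_shift);    /* 4-byte pgd entries   */

        printf("%s: pgdir %d entries (%d KB), %d PTEs/page, "
               "each PTE page maps %d MB\n",
               label, ptrs_per_pgd, ptrs_per_pgd * 4 / 1024,
               ptrs_per_pte, (1 << pmd_shift) >> 20);
}

int main(void)
{
        geometry(PAGE_SHIFT - 2, "32-bit PTEs");   /* 1024-entry, 4 KB pgdir */
        geometry(PAGE_SHIFT - 3, "64-bit PTEs");   /* 2048-entry, 8 KB pgdir */
        return 0;
}

The output confirms the figures in the comment: 32-bit PTEs give a 1024-entry, 4 KB pgdir with each PTE page mapping 4 MB; 64-bit PTEs give a 2048-entry, 8 KB pgdir with 512 PTEs per page, each PTE page mapping 2 MB.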
-#define PTRS_PER_PTE 1024 +#define PTRS_PER_PTE (1 << PTE_SHIFT) #define PTRS_PER_PMD 1 -#define PTRS_PER_PGD 1024 +#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) + #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) #define FIRST_USER_PGD_NR 0 @@ -89,7 +100,7 @@ extern unsigned long ioremap_bot, ioremap_base; #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) #define pte_ERROR(e) \ - printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) + printk("%s:%d: bad pte "PTE_FMT".\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) #define pgd_ERROR(e) \ @@ -113,7 +124,11 @@ extern unsigned long ioremap_bot, ioremap_base; * of RAM. -- Cort */ #define VMALLOC_OFFSET (0x1000000) /* 16M */ +#ifdef CONFIG_44x +#define VMALLOC_START (((_ALIGN((long)high_memory, PPC44x_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) +#else #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) +#endif #define VMALLOC_VMADDR(x) ((unsigned long)(x)) #define VMALLOC_END ioremap_bot @@ -170,6 +185,44 @@ extern unsigned long ioremap_bot, ioremap_base; #define _PMD_SIZE_16M 0x0e0 #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) +#elif defined(CONFIG_44x) +/* + * Definitions for PPC440 + * + * Because of the 3-word TLB entries to support 36-bit addressing, + * the attributes are difficult to map in such a fashion that they + * are easily loaded during exception processing. I decided to + * organize the entry so the ERPN is the only portion in the + * upper word of the PTE and the attribute bits below are packed + * in as sensibly as they can be in the area below a 4KB page size + * oriented RPN. This at least makes it easy to load the RPN and + * ERPN fields in the TLB. -Matt + * + * Note that these bits preclude future use of a page size + * less than 4KB. + */ +#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ +#define _PAGE_RW 0x00000002 /* S: Write permission */ +#define _PAGE_DIRTY 0x00000004 /* S: Page dirty */ +#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ +#define _PAGE_HWWRITE 0x00000010 /* H: Dirty & RW */ +#define _PAGE_HWEXEC 0x00000020 /* H: Execute permission */ +#define _PAGE_USER 0x00000040 /* S: User page */ +#define _PAGE_ENDIAN 0x00000080 /* H: E bit */ +#define _PAGE_GUARDED 0x00000100 /* H: G bit */ +#define _PAGE_COHERENT 0x00000200 /* H: M bit */ +#define _PAGE_FILE 0x00000400 /* S: nonlinear file mapping */ +#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ +#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ + +/* TODO: Add large page lowmem mapping support */ +#define _PMD_PRESENT 0 +#define _PMD_PRESENT_MASK (PAGE_MASK) +#define _PMD_BAD (~PAGE_MASK) + +/* ERPN in a PTE never gets cleared, ignore it */ +#define _PTE_NONE_MASK 0xffffffff00000000ULL + #elif defined(CONFIG_8xx) /* Definitions for 8xx embedded chips.
*/ #define _PAGE_PRESENT 0x0001 /* Page is valid */ @@ -270,7 +323,11 @@ extern unsigned long ioremap_bot, ioremap_base; #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) +#ifndef CONFIG_44x #define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE) +#else +#define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE | _PAGE_GUARDED) +#endif #ifdef CONFIG_PPC_STD_MMU /* On standard PPC MMU, no user access implies kernel read/write access, @@ -283,7 +340,7 @@ extern unsigned long ioremap_bot, ioremap_base; #define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED) #define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC) -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) +#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) /* We want the debuggers to be able to set breakpoints anywhere, so * don't write protect the kernel text */ #define _PAGE_RAM_TEXT _PAGE_RAM @@ -420,6 +477,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * * pte_update clears and sets bits atomically, and returns * the old pte value. + * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant + * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits. */ static inline unsigned long pte_update(pte_t *p, unsigned long clr, unsigned long set) @@ -434,7 +493,7 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr, " stwcx. %1,0,%3\n\ bne- 1b" : "=&r" (old), "=&r" (tmp), "=m" (*p) - : "r" (p), "r" (clr), "r" (set), "m" (*p) + : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p) : "cc" ); return old; } @@ -485,11 +544,25 @@ static inline void ptep_mkdirty(pte_t *ptep) #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) +/* + * Note that on Book E processors, the pmd contains the kernel virtual + * (lowmem) address of the pte page. The physical address is less useful + * because everything runs with translation enabled (even the TLB miss + * handler). On everything else the pmd contains the physical address + * of the pte page. -- paulus + */ +#ifndef CONFIG_BOOKE #define pmd_page_kernel(pmd) \ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) #define pmd_page(pmd) \ (mem_map + (pmd_val(pmd) >> PAGE_SHIFT)) - +#else +#define pmd_page_kernel(pmd) \ + ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) +#define pmd_page(pmd) \ + (mem_map + (__pa(pmd_val(pmd)) >> PAGE_SHIFT)) +#endif + /* to find an entry in a kernel page-table-directory */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) @@ -516,7 +589,8 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) -extern pgd_t swapper_pg_dir[1024]; +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + extern void paging_init(void); /* diff --git a/include/asm-ppc/ppc_asm.h b/include/asm-ppc/ppc_asm.h index 39049bcac7fc..afd99032491c 100644 --- a/include/asm-ppc/ppc_asm.h +++ b/include/asm-ppc/ppc_asm.h @@ -107,7 +107,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) bdnz 0b #endif -#if !defined(CONFIG_440) +#if !defined(CONFIG_44x) /* * On APUS (Amiga PowerPC cpu upgrade board), we don't know the * physical base address of RAM at compile time.
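The pte_update() change above deserves a concrete illustration. lwarx/stwcx. operate on 32-bit words, and on a big-endian CPU the low-order 32 bits of a 64-bit PTE are the second word in memory, so pointing the asm at (unsigned long)(p+1) - 4 hits the low word for both PTE sizes. A user-space sketch (not kernel code) of the address arithmetic:

/* Sketch: where (p + 1) minus 4 bytes lands for 32- and 64-bit PTEs. */
#include <stdio.h>

int main(void)
{
        unsigned long      pte32 = 0;
        unsigned long long pte64 = 0;

        /* 32-bit PTE: one step forward (4 bytes), 4 bytes back: p itself. */
        printf("32-bit: p = %p, low word = %p\n",
               (void *)&pte32, (void *)((unsigned long)(&pte32 + 1) - 4));

        /* 64-bit PTE: one step forward (8 bytes), 4 bytes back: the second
         * word, which on a big-endian CPU holds the low-order 32 bits.    */
        printf("64-bit: p = %p, low word = %p\n",
               (void *)&pte64, (void *)((unsigned long)(&pte64 + 1) - 4));
        return 0;
}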
@@ -125,7 +125,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) .align 1; \ .long 0b; \ .previous -#else /* CONFIG_440 */ +#else /* CONFIG_44x */ #define tophys(rd,rs) \ mr rd,rs @@ -133,7 +133,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) #define tovirt(rd,rs) \ mr rd,rs -#endif /* CONFIG_440 */ +#endif /* CONFIG_44x */ /* * On 64-bit cpus, we use the rfid instruction instead of rfi, but diff --git a/include/asm-ppc/processor.h b/include/asm-ppc/processor.h index 21c2c819b98f..e13d0a0b1f45 100644 --- a/include/asm-ppc/processor.h +++ b/include/asm-ppc/processor.h @@ -3,6 +3,12 @@ #define __ASM_PPC_PROCESSOR_H /* + * The Book E definitions are hacked into here for 440 right + * now. This whole thing needs reorganizing (maybe two files) + * so that it becomes readable. -Matt + */ + +/* * Default implementation of macro that returns current * instruction pointer ("program counter"). */ @@ -45,6 +51,11 @@ #define MSR_RI (1<<1) /* Recoverable Exception */ #define MSR_LE (1<<0) /* Little Endian */ +#ifdef CONFIG_BOOKE +#define MSR_IS MSR_IR /* Instruction Space */ +#define MSR_DS MSR_DR /* Data Space */ +#endif + #ifdef CONFIG_APUS_FAST_EXCEPT #define MSR_ (MSR_ME|MSR_IP|MSR_RI) #else @@ -93,8 +104,13 @@ #define SPRN_CDBCR 0x3D7 /* Cache Debug Control Register */ #define SPRN_CTR 0x009 /* Count Register */ #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ +#ifndef CONFIG_BOOKE #define SPRN_DAC1 0x3F6 /* Data Address Compare 1 */ #define SPRN_DAC2 0x3F7 /* Data Address Compare 2 */ +#else +#define SPRN_DAC1 0x13C /* Book E Data Address Compare 1 */ +#define SPRN_DAC2 0x13D /* Book E Data Address Compare 2 */ +#endif /* CONFIG_BOOKE */ #define SPRN_DAR 0x013 /* Data Address Register */ #define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ #define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ @@ -146,7 +162,11 @@ #define DBCR_SDA 0x00000004 /* Second DAC Enable */ #define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */ #define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int.
Enable */ +#ifndef CONFIG_BOOKE #define SPRN_DBCR0 0x3F2 /* Debug Control Register 0 */ +#else +#define SPRN_DBCR0 0x134 /* Book E Debug Control Register 0 */ +#endif /* CONFIG_BOOKE */ #define DBCR0_EDM 0x80000000 /* External Debug Mode */ #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ #define DBCR0_RST 0x30000000 /* all the bits in the RST field */ @@ -169,11 +189,18 @@ #define DBCR0_IA12T 0x00008000 /* Instr Addr 1-2 range Toggle */ #define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ +#ifndef CONFIG_BOOKE #define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */ #define SPRN_DBSR 0x3F0 /* Debug Status Register */ #define DBSR_IC 0x80000000 /* Instruction Completion */ #define DBSR_BT 0x40000000 /* Branch taken */ #define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */ +#else +#define SPRN_DBCR1 0x135 /* Book E Debug Control Register 1 */ +#define SPRN_DBSR 0x130 /* Book E Debug Status Register */ +#define DBSR_IC 0x08000000 /* Book E Instruction Completion */ +#define DBSR_TIE 0x01000000 /* Book E Trap Instruction Event */ +#endif /* CONFIG_BOOKE */ #define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */ #define DCCR_NOCACHE 0 /* Noncacheable */ #define DCCR_CACHE 1 /* Cacheable */ @@ -181,7 +208,11 @@ #define SPRN_DCWR 0x3BA /* Data Cache Write-thru Register */ #define DCWR_COPY 0 /* Copy-back */ #define DCWR_WRITE 1 /* Write-through */ +#ifndef CONFIG_BOOKE #define SPRN_DEAR 0x3D5 /* Data Error Address Register */ +#else +#define SPRN_DEAR 0x03D /* Book E Data Error Address Register */ +#endif /* CONFIG_BOOKE */ #define SPRN_DEC 0x016 /* Decrement Register */ #define SPRN_DER 0x095 /* Debug Enable Register */ #define DER_RSTE 0x40000000 /* Reset Interrupt */ @@ -206,12 +237,16 @@ #define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ #define SPRN_EAR 0x11A /* External Address Register */ +#ifndef CONFIG_BOOKE #define SPRN_ESR 0x3D4 /* Exception Syndrome Register */ +#else +#define SPRN_ESR 0x03E /* Book E Exception Syndrome Register */ +#endif /* CONFIG_BOOKE */ #define ESR_MCI 0x80000000 /* 405 Machine Check - Instruction */ -#define ESR_IMCP 0x80000000 /* 403 Inst. Mach. Check - Protection */ -#define ESR_IMCN 0x40000000 /* 403 Inst. Mach. Check - Non-config */ -#define ESR_IMCB 0x20000000 /* 403 Inst. Mach. Check - Bus error */ -#define ESR_IMCT 0x10000000 /* 403 Inst. Mach. Check - Timeout */ +#define ESR_IMCP 0x80000000 /* Instr. Machine Check - Protection */ +#define ESR_IMCN 0x40000000 /* Instr. Machine Check - Non-config */ +#define ESR_IMCB 0x20000000 /* Instr. Machine Check - Bus error */ +#define ESR_IMCT 0x10000000 /* Instr.
Machine Check - Timeout */ #define ESR_PIL 0x08000000 /* Program Exception - Illegal */ #define ESR_PPR 0x04000000 /* Program Exception - Privileged */ #define ESR_PTR 0x02000000 /* Program Exception - Trap */ @@ -266,8 +301,13 @@ #define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */ #define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ +#ifndef CONFIG_BOOKE #define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */ #define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */ +#else +#define SPRN_IAC1 0x138 /* Book E Instruction Address Compare 1 */ +#define SPRN_IAC2 0x139 /* Book E Instruction Address Compare 2 */ +#endif /* CONFIG_BOOKE */ #define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */ #define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */ #define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */ @@ -358,8 +398,13 @@ #define SPRN_PBL2 0x3FE /* Protection Bound Lower 2 */ #define SPRN_PBU1 0x3FD /* Protection Bound Upper 1 */ #define SPRN_PBU2 0x3FF /* Protection Bound Upper 2 */ +#ifndef CONFIG_BOOKE #define SPRN_PID 0x3B1 /* Process ID */ #define SPRN_PIR 0x3FF /* Processor Identification Register */ +#else +#define SPRN_PID 0x030 /* Book E Process ID */ +#define SPRN_PIR 0x11E /* Book E Processor Identification Register */ +#endif /* CONFIG_BOOKE */ #define SPRN_PIT 0x3DB /* Programmable Interval Timer */ #define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */ #define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */ @@ -375,6 +420,7 @@ #define SGR_NORMAL 0 #define SGR_GUARDED 1 #define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */ +#define SPRN_SLER 0x3BB /* Little-endian real mode */ #define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */ #define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */ #define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */ @@ -387,6 +433,7 @@ #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ #define SPRN_SRR2 0x3DE /* Save/Restore Register 2 */ #define SPRN_SRR3 0x3DF /* Save/Restore Register 3 */ +#define SPRN_SU0R 0x3BC /* "User 0" real mode */ #define SPRN_TBHI 0x3DC /* Time Base High (4xx) */ #define SPRN_TBHU 0x3CC /* Time Base High User-mode (4xx) */ #define SPRN_TBLO 0x3DD /* Time Base Low (4xx) */ @@ -395,7 +442,11 @@ #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ +#ifndef CONFIG_BOOKE #define SPRN_TCR 0x3DA /* Timer Control Register */ +#else +#define SPRN_TCR 0x154 /* Book E Timer Control Register */ +#endif #define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */ #define TCR_WP_MASK TCR_WP(3) #define WP_2_17 0 /* 2^17 clocks */ @@ -410,6 +461,7 @@ #define WRC_SYSTEM 3 /* System reset will occur */ #define TCR_WIE 0x08000000 /* WDT Interrupt Enable */ #define TCR_PIE 0x04000000 /* PIT Interrupt Enable */ +#define TCR_DIE TCR_PIE /* DEC Interrupt Enable */ #define TCR_FP(x) (((x)&0x3)<<24) /* FIT Period */ #define TCR_FP_MASK TCR_FP(3) #define FP_2_9 0 /* 2^9 clocks */ @@ -431,7 +483,11 @@ #define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */ #define THRM3_E (1<<0) #define SPRN_TLBMISS 0x3D4 /* 980 7450 TLB Miss Register */ +#ifndef CONFIG_BOOKE #define SPRN_TSR 0x3D8 /* Timer Status Register */ +#else +#define SPRN_TSR 0x150 /* Book E Timer Status Register */ +#endif /* CONFIG_BOOKE */ #define TSR_ENW 0x80000000 /*
Enable Next Watchdog */ #define TSR_WIS 0x40000000 /* WDT Interrupt Status */ #define TSR_WRS(x) (((x)&0x3)<<28) /* WDT Reset Status */ @@ -440,6 +496,7 @@ #define WRS_CHIP 2 /* WDT forced chip reset */ #define WRS_SYSTEM 3 /* WDT forced system reset */ #define TSR_PIS 0x08000000 /* PIT Interrupt Status */ +#define TSR_DIS TSR_PIS /* DEC Interrupt Status */ #define TSR_FIS 0x04000000 /* FIT Interrupt Status */ #define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */ #define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */ @@ -452,6 +509,45 @@ #define SPRN_XER 0x001 /* Fixed Point Exception Register */ #define SPRN_ZPR 0x3B0 /* Zone Protection Register */ +/* Book E definitions */ +#define SPRN_DECAR 0x036 /* Decrementer Auto Reload Register */ +#define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */ +#define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */ +#define SPRN_IVPR 0x03F /* Interrupt Vector Prefix Register */ +#define SPRN_USPRG0 0x100 /* User Special Purpose Register General 0 */ +#define SPRN_SPRG4R 0x104 /* Special Purpose Register General 4 Read */ +#define SPRN_SPRG5R 0x105 /* Special Purpose Register General 5 Read */ +#define SPRN_SPRG6R 0x106 /* Special Purpose Register General 6 Read */ +#define SPRN_SPRG7R 0x107 /* Special Purpose Register General 7 Read */ +#define SPRN_SPRG4W 0x114 /* Special Purpose Register General 4 Write */ +#define SPRN_SPRG5W 0x115 /* Special Purpose Register General 5 Write */ +#define SPRN_SPRG6W 0x116 /* Special Purpose Register General 6 Write */ +#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */ +#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */ +#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */ +#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */ +#define SPRN_DVC1 0x13E /* Data Value Compare 1 */ +#define SPRN_DVC2 0x13F /* Data Value Compare 2 */ +#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */ +#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */ +#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */ +#define SPRN_IVOR3 0x193 /* Interrupt Vector Offset Register 3 */ +#define SPRN_IVOR4 0x194 /* Interrupt Vector Offset Register 4 */ +#define SPRN_IVOR5 0x195 /* Interrupt Vector Offset Register 5 */ +#define SPRN_IVOR6 0x196 /* Interrupt Vector Offset Register 6 */ +#define SPRN_IVOR7 0x197 /* Interrupt Vector Offset Register 7 */ +#define SPRN_IVOR8 0x198 /* Interrupt Vector Offset Register 8 */ +#define SPRN_IVOR9 0x199 /* Interrupt Vector Offset Register 9 */ +#define SPRN_IVOR10 0x19a /* Interrupt Vector Offset Register 10 */ +#define SPRN_IVOR11 0x19b /* Interrupt Vector Offset Register 11 */ +#define SPRN_IVOR12 0x19c /* Interrupt Vector Offset Register 12 */ +#define SPRN_IVOR13 0x19d /* Interrupt Vector Offset Register 13 */ +#define SPRN_IVOR14 0x19e /* Interrupt Vector Offset Register 14 */ +#define SPRN_IVOR15 0x19f /* Interrupt Vector Offset Register 15 */ +#define SPRN_MMUCR 0x3b2 /* MMU Control Register */ + +#define ESR_ST 0x00800000 /* Store Operation */ + /* Short-hand versions for a number of the above SPRNs */ #define CTR SPRN_CTR /* Counter Register */ @@ -524,6 +620,16 @@ #define SPRG5 SPRN_SPRG5 #define SPRG6 SPRN_SPRG6 #define SPRG7 SPRN_SPRG7 +#define SPRG4R SPRN_SPRG4R /* Book E Supervisor Private Registers */ +#define SPRG5R SPRN_SPRG5R +#define SPRG6R SPRN_SPRG6R +#define SPRG7R SPRN_SPRG7R +#define SPRG4W SPRN_SPRG4W +#define SPRG5W SPRN_SPRG5W +#define SPRG6W SPRN_SPRG6W +#define SPRG7W SPRN_SPRG7W +#define CSRR0
SPRN_CSRR0 /* Critical Save and Restore Register 0 */ +#define CSRR1 SPRN_CSRR1 /* Critical Save and Restore Register 1 */ #define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */ #define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */ #define SRR2 SPRN_SRR2 /* Save and Restore Register 2 */ @@ -557,6 +663,51 @@ #define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */ #define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */ +/* Processor Version Numbers */ + +#define PVR_403GA 0x00200000 +#define PVR_403GB 0x00200100 +#define PVR_403GC 0x00200200 +#define PVR_403GCX 0x00201400 +#define PVR_405GP 0x40110000 +#define PVR_STB03XXX 0x40310000 +#define PVR_NP405H 0x41410000 +#define PVR_NP405L 0x41610000 +#define PVR_440GP_RB 0x40120440 +#define PVR_440GP_RC1 0x40120481 +#define PVR_440GP_RC2 0x40200481 +#define PVR_440GX_RC1 0x51b21850 +#define PVR_601 0x00010000 +#define PVR_602 0x00050000 +#define PVR_603 0x00030000 +#define PVR_603e 0x00060000 +#define PVR_603ev 0x00070000 +#define PVR_603r 0x00071000 +#define PVR_604 0x00040000 +#define PVR_604e 0x00090000 +#define PVR_604r 0x000A0000 +#define PVR_620 0x00140000 +#define PVR_740 0x00080000 +#define PVR_750 PVR_740 +#define PVR_740P 0x10080000 +#define PVR_750P PVR_740P +#define PVR_7400 0x000C0000 +#define PVR_7410 0x800C0000 +#define PVR_7450 0x80000000 +/* + * For the 8xx processors, all of them report the same PVR family for + * the PowerPC core. The various versions of these processors must be + * differentiated by the version number in the Communication Processor + * Module (CPM). + */ +#define PVR_821 0x00500000 +#define PVR_823 PVR_821 +#define PVR_850 PVR_821 +#define PVR_860 PVR_821 +#define PVR_8240 0x00810100 +#define PVR_8245 0x80811014 +#define PVR_8260 PVR_8240 + /* We only need to define a new _MACH_xxx for machines which are part of * a configuration which supports more than one type of different machine. * This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac. -- Tom
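A note on why the Book E renumbering in the processor.h hunks above is done with compile-time #ifdefs rather than run-time selection: the SPR number is an immediate field of the mfspr/mtspr instructions, so it has to be a constant when the code is assembled. The fragment below is illustrative only; mfspr_demo mirrors the mfspr() macro the kernel already provides (renamed here to avoid clashing with it), __stringify comes from <linux/stringify.h>, and SPRN_DBSR is the define from the hunks above.

/* Illustrative sketch: the SPR number must be an assemble-time constant. */
#include <linux/stringify.h>
#include <asm/processor.h>

#define mfspr_demo(rn) ({ unsigned long rval; \
        asm volatile("mfspr %0," __stringify(rn) : "=r" (rval)); \
        rval; })

static unsigned long read_debug_status(void)
{
        /* Assembles to SPR 0x3F0 on 4xx parts, SPR 0x130 under CONFIG_BOOKE. */
        return mfspr_demo(SPRN_DBSR);
}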
diff --git a/include/asm-ppc/serial.h b/include/asm-ppc/serial.h index 5d121448d3c1..2d9e8642cc87 100644 --- a/include/asm-ppc/serial.h +++ b/include/asm-ppc/serial.h @@ -28,7 +28,7 @@ #include <platforms/sandpoint.h> #elif defined(CONFIG_SPRUCE) #include <platforms/spruce_serial.h> -#elif defined(CONFIG_40x) +#elif defined(CONFIG_4xx) #include <asm/ibm4xx.h> #else diff --git a/include/asm-ppc/tlbflush.h b/include/asm-ppc/tlbflush.h index 2ccb0b20d2af..a441eb07e0e5 100644 --- a/include/asm-ppc/tlbflush.h +++ b/include/asm-ppc/tlbflush.h @@ -19,17 +19,23 @@ extern void _tlbia(void); #if defined(CONFIG_4xx) +#ifndef CONFIG_44x +#define __tlbia() asm volatile ("sync; tlbia; isync" : : : "memory") +#else +#define __tlbia _tlbia +#endif + static inline void flush_tlb_mm(struct mm_struct *mm) - { _tlbia(); } + { __tlbia(); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { _tlbie(vmaddr); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) - { _tlbia(); } + { __tlbia(); } static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) - { _tlbia(); } + { __tlbia(); } #elif defined(CONFIG_8xx) #define __tlbia() asm volatile ("tlbia; sync" : : : "memory") diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h index 474abcd85b34..5720dea2459f 100644 --- a/include/asm-s390/semaphore.h +++ b/include/asm-s390/semaphore.h @@ -60,6 +60,7 @@ asmlinkage void __up(struct semaphore * sem); static inline void down(struct semaphore * sem) { + might_sleep(); if (atomic_dec_return(&sem->count) < 0) __down(sem); } @@ -68,6 +69,7 @@ static inline int down_interruptible(struct semaphore * sem) { int ret = 0; + might_sleep(); if (atomic_dec_return(&sem->count) < 0) ret = __down_interruptible(sem); return ret; diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h index 2ba0930035f6..dc8955e3448c 100644 --- a/include/asm-sh/semaphore.h +++ b/include/asm-sh/semaphore.h @@ -107,6 +107,7 @@ static inline int down_interruptible(struct semaphore * sem) CHECK_MAGIC(sem->__magic); #endif + might_sleep(); if (atomic_dec_return(&sem->count) < 0) ret = __down_interruptible(sem); return ret; diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h index cf3135b928a0..0e6122ae3058 100644 --- a/include/asm-sparc/semaphore.h +++ b/include/asm-sparc/semaphore.h @@ -71,6 +71,7 @@ static inline void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); ptr = &(sem->count.counter); increment = 1; @@ -107,6 +108,7 @@ static inline int down_interruptible(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); ptr = &(sem->count.counter); increment = 1; diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h index 0d6560f3ba98..c514062bb69e 100644 --- a/include/asm-v850/semaphore.h +++ b/include/asm-v850/semaphore.h @@ -57,6 +57,7 @@ extern void __up (struct semaphore * sem); extern inline void down (struct semaphore * sem) { + might_sleep(); if (atomic_dec_return (&sem->count) < 0) __down (sem); } @@ -64,6 +65,7 @@ extern inline void down (struct semaphore * sem) extern inline int down_interruptible (struct semaphore * sem) { int ret = 0; + might_sleep(); if (atomic_dec_return (&sem->count) < 0) ret = __down_interruptible (sem); return ret; diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h index 6f42c7af790b..5fe25482facc 100644 --- 
a/include/asm-x86_64/semaphore.h +++ b/include/asm-x86_64/semaphore.h @@ -118,6 +118,7 @@ static inline void down(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); __asm__ __volatile__( "# atomic down operation\n\t" @@ -144,6 +145,7 @@ static inline int down_interruptible(struct semaphore * sem) #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif + might_sleep(); __asm__ __volatile__( "# atomic interruptible down operation\n\t" diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 99dbf0d24eaf..9c7e6d86e810 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -12,6 +12,8 @@ #include <linux/wait.h> #include <linux/mempool.h> #include <linux/bio.h> +#include <linux/module.h> +#include <linux/stringify.h> #include <asm/scatterlist.h> @@ -671,6 +673,11 @@ void kblockd_flush(void); } \ ) #endif - + +#define MODULE_ALIAS_BLOCKDEV(major,minor) \ + MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) +#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ + MODULE_ALIAS("block-major-" __stringify(major) "-*") + #endif diff --git a/include/linux/device.h b/include/linux/device.h index 8d6266f2e3c3..96043c0f46c5 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -398,4 +398,9 @@ extern void firmware_unregister(struct subsystem *); #define dev_warn(dev, format, arg...) \ dev_printk(KERN_WARNING , dev , format , ## arg) +/* Create alias, so I can be autoloaded. */ +#define MODULE_ALIAS_CHARDEV(major,minor) \ + MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) +#define MODULE_ALIAS_CHARDEV_MAJOR(major) \ + MODULE_ALIAS("char-major-" __stringify(major) "-*") #endif /* _DEVICE_H_ */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 460ef0362ab5..13e0933ccbc7 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -75,7 +75,6 @@ struct disk_stats { unsigned read_merges, write_merges; unsigned read_ticks, write_ticks; unsigned io_ticks; - int in_flight; unsigned time_in_queue; }; @@ -101,6 +100,7 @@ struct gendisk { unsigned sync_io; /* RAID */ unsigned long stamp, stamp_idle; + int in_flight; #ifdef CONFIG_SMP struct disk_stats *dkstats; #else diff --git a/include/linux/isdn.h b/include/linux/isdn.h index ecd0f5394e9e..35a2ef6e2452 100644 --- a/include/linux/isdn.h +++ b/include/linux/isdn.h @@ -258,13 +258,13 @@ typedef struct { * variables. Of course, we need to check skb_headroom prior to * any access. 
*/ -typedef struct isdn_audio_skb { unsigned short dle_count; unsigned char lock; -} isdn_audio_skb; +typedef struct _isdnaudio_header { unsigned short dle_count; unsigned char lock; +} isdnaudio_header; -#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdn_audio_skb*)skb->head)->dle_count) -#define ISDN_AUDIO_SKB_LOCK(skb) (((isdn_audio_skb*)skb->head)->lock) +#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdnaudio_header*)skb->head)->dle_count) +#define ISDN_AUDIO_SKB_LOCK(skb) (((isdnaudio_header*)skb->head)->lock) #endif /* Private data of AT-command-interpreter */ @@ -291,6 +291,7 @@ typedef struct atemu { /* Private data (similar to async_struct in <linux/serial.h>) */ typedef struct modem_info { int magic; + struct module *owner; int flags; /* defined in tty.h */ int x_char; /* xon/xoff character */ int mcr; /* Modem control register */ diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h index cf501356aba4..d4747a9cee9c 100644 --- a/include/linux/isdn/capilli.h +++ b/include/linux/isdn/capilli.h @@ -54,7 +54,7 @@ struct capi_ctr { int count, int *eof, struct capi_ctr *card); /* filled in before calling ready callback */ - u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */ + u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */ capi_version version; /* CAPI_GET_VERSION */ capi_profile profile; /* CAPI_GET_PROFILE */ u8 serial[CAPI_SERIAL_LEN]; /* CAPI_GET_SERIAL */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index b0aebfeda888..b2d17ea0dac0 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -52,8 +52,10 @@ struct completion; #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line); #define might_sleep() __might_sleep(__FILE__, __LINE__) +#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0) #else #define might_sleep() do {} while(0) +#define might_sleep_if(cond) do {} while (0) #endif extern struct notifier_block *panic_notifier_list; diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 159f8ab3df80..23b095889111 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -1,5 +1,7 @@ #ifndef _LINUX_MISCDEVICE_H #define _LINUX_MISCDEVICE_H +#include <linux/module.h> +#include <linux/major.h> #define BUSMOUSE_MINOR 0 #define PSMOUSE_MINOR 1 @@ -48,4 +50,7 @@ struct miscdevice extern int misc_register(struct miscdevice * misc); extern int misc_deregister(struct miscdevice * misc); +#define MODULE_ALIAS_MISCDEV(minor) \ + MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \ + "-" __stringify(minor)) #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index d4fd6bb3ca43..006fde0295d5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -191,6 +191,8 @@ extern void get_full_page_state(struct page_state *ret); #define PageSlab(page) test_bit(PG_slab, &(page)->flags) #define SetPageSlab(page) set_bit(PG_slab, &(page)->flags) #define ClearPageSlab(page) clear_bit(PG_slab, &(page)->flags) +#define TestClearPageSlab(page) test_and_clear_bit(PG_slab, &(page)->flags) +#define TestSetPageSlab(page) test_and_set_bit(PG_slab, &(page)->flags) #ifdef CONFIG_HIGHMEM #define PageHighMem(page) test_bit(PG_highmem, &(page)->flags) diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 27c38b240736..e1aaf1fac8e0 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -101,6 +101,7 @@ typedef struct rtc_task { int rtc_register(rtc_task_t *task); int rtc_unregister(rtc_task_t *task); int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); +void rtc_get_rtc_time(struct rtc_time *rtc_tm);
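The might_sleep_if() addition in the linux/kernel.h hunk above pairs with the skbuff.h hunk below: a function whose slow path may sleep, but only when the caller's flags permit blocking, can assert that up front, so CONFIG_DEBUG_SPINLOCK_SLEEP flags atomic-context callers even on runs where only the fast path executes. A hypothetical illustration of the pattern (struct buf, buf_reserve() and buf_grow() are invented for this sketch, not part of the patch):

/* Hypothetical usage sketch -- not from this patch. */
#include <linux/kernel.h>       /* might_sleep_if() */
#include <linux/types.h>
#include <linux/gfp.h>          /* __GFP_WAIT */

struct buf { size_t space; void *tail; };
extern void *buf_grow(struct buf *b, size_t len, int gfp_mask);

static void *buf_reserve(struct buf *b, size_t len, int gfp_mask)
{
        /* Complain (under CONFIG_DEBUG_SPINLOCK_SLEEP) about atomic-context
         * callers even when the fast path below means we never sleep. */
        might_sleep_if(gfp_mask & __GFP_WAIT);

        if (len <= b->space)                    /* fast path: no allocation */
                return b->tail;

        return buf_grow(b, len, gfp_mask);      /* slow path: may sleep */
}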
#endif /* __KERNEL__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a372186cb6d6..4b5f057f8bfb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -389,6 +389,7 @@ static inline int skb_shared(struct sk_buff *skb) */ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri) { + might_sleep_if(pri & __GFP_WAIT); if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, pri); kfree_skb(skb); @@ -419,6 +420,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri) */ static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri) { + might_sleep_if(pri & __GFP_WAIT); if (skb_cloned(skb)) { struct sk_buff *nskb = skb_copy(skb, pri); kfree_skb(skb); /* Free our shared copy */ diff --git a/include/linux/usb.h b/include/linux/usb.h index 471c37f5188b..a20b33d953aa 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -329,7 +329,7 @@ static inline int usb_make_path (struct usb_device *dev, char *buf, size_t size) { int actual; actual = snprintf (buf, size, "usb-%s-%s", dev->bus->bus_name, dev->devpath); - return (actual >= size) ? -1 : actual; + return (actual >= (int)size) ? -1 : actual; } /*-------------------------------------------------------------------------*/
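A closing note on the usb_make_path() cast above: actual is an int while size is a size_t, so in the old comparison the usual arithmetic conversions silently turned actual into an unsigned value; the cast keeps the comparison signed (and, presumably, quiets the sign-compare warning). The conversion trap is easy to demonstrate in isolation (user-space C):

/* Standalone demonstration (not kernel code) of the signed/unsigned
 * comparison the cast above sidesteps: the int is converted to size_t,
 * so a negative value compares as huge. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
        int actual = -1;           /* e.g. an snprintf() error return */
        size_t size = 64;

        if (actual >= size)        /* -1 converts to SIZE_MAX: true!  */
                printf("unsigned compare: -1 >= 64 is true\n");

        if (actual >= (int)size)   /* the patched form stays signed   */
                printf("never printed\n");
        else
                printf("signed compare: -1 >= 64 is false\n");
        return 0;
}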
