| field | value | date |
|---|---|---|
| author | Richard Henderson <rth@kanga.twiddle.home> | 2004-09-22 08:59:47 -0700 |
| committer | Richard Henderson <rth@kanga.twiddle.home> | 2004-09-22 08:59:47 -0700 |
| commit | 59f9f96e64d1af23447505eca8f20cae0b704d6e | |
| tree | 376ff41a1f5b29add3795ed264203d2efacdb856 /include | |
| parent | 1cd1bcee4088e02d1db850caf7d1d1b4afbf11f8 | |
| parent | 63dd622b89e8c0b5ca435f937f55f71b3eb2d7b0 | |
Merge kanga.twiddle.home:/home/rth/work/linux/linus-2.6
into kanga.twiddle.home:/home/rth/work/linux/axp-2.6
Diffstat (limited to 'include')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/asm-alpha/compiler.h | 10 |
| -rw-r--r-- | include/asm-alpha/core_apecs.h | 224 |
| -rw-r--r-- | include/asm-alpha/core_cia.h | 303 |
| -rw-r--r-- | include/asm-alpha/core_irongate.h | 134 |
| -rw-r--r-- | include/asm-alpha/core_lca.h | 223 |
| -rw-r--r-- | include/asm-alpha/core_marvel.h | 243 |
| -rw-r--r-- | include/asm-alpha/core_mcpcia.h | 267 |
| -rw-r--r-- | include/asm-alpha/core_polaris.h | 146 |
| -rw-r--r-- | include/asm-alpha/core_t2.h | 110 |
| -rw-r--r-- | include/asm-alpha/core_titan.h | 146 |
| -rw-r--r-- | include/asm-alpha/core_tsunami.h | 149 |
| -rw-r--r-- | include/asm-alpha/core_wildfire.h | 149 |
| -rw-r--r-- | include/asm-alpha/io.h | 686 |
| -rw-r--r-- | include/asm-alpha/io_trivial.h | 127 |
| -rw-r--r-- | include/asm-alpha/jensen.h | 99 |
| -rw-r--r-- | include/asm-alpha/machvec.h | 42 |
| -rw-r--r-- | include/asm-alpha/mmu_context.h | 1 |
| -rw-r--r-- | include/asm-alpha/spinlock.h | 4 |
| -rw-r--r-- | include/asm-alpha/system.h | 6 |
| -rw-r--r-- | include/asm-alpha/tlbflush.h | 1 |
| -rw-r--r-- | include/asm-alpha/vga.h | 18 |
21 files changed, 1158 insertions, 1930 deletions
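
The bulk of the patch below converts each Alpha chipset header from the old `__WANT_IO_DEF` blocks of `*_inb`/`*_readb` helpers taking raw `unsigned long` addresses to `ioread*`/`iowrite*`-style accessors taking `void __iomem *` cookies; accessors that reduce to plain loads and stores are declared "trivial" via `__IO_PREFIX` and the `*_trivial_*` flags and generated by `<asm/io_trivial.h>`. As a rough orientation, the sketch below shows the shape of that pattern. It is a simplified stand-in, not the kernel header: the `example_*` and `EXAMPLE_IO` names are invented, and plain `void *` stands in for the kernel's `void __iomem *`.

```c
#include <stdint.h>

/* Invented for this sketch: a hypothetical chipset whose PCI I/O window
 * is a simple linear region starting at EXAMPLE_IO. */
#define EXAMPLE_IO 0x8000000000UL

/* Old interface (removed by this merge): byte port I/O takes a raw port
 * number and folds in the window base on every access. */
static inline uint8_t example_inb(unsigned long port)
{
        return *(volatile uint8_t *)(port + EXAMPLE_IO);
}

/* New interface: ioportmap() turns the port number into an opaque cookie
 * once (the kernel type is void __iomem *), and ioread8()/iowrite8() just
 * dereference it.  When the access really is a plain load/store, the
 * chipset header marks it "trivial" and <asm/io_trivial.h> stamps bodies
 * like these out from the __IO_PREFIX name. */
static inline void *example_ioportmap(unsigned long port)
{
        return (void *)(port + EXAMPLE_IO);
}

static inline unsigned int example_ioread8(void *xaddr)
{
        return *(volatile uint8_t *)xaddr;
}

static inline void example_iowrite8(uint8_t b, void *xaddr)
{
        *(volatile uint8_t *)xaddr = b;
}
```

Chipsets with sparse-space windows (APECS, LCA, CIA, MCPCIA in the diff) cannot use the trivial bodies for byte/word access, which is why their headers set `*_trivial_io_bw` to 0 and keep hand-written `ioread8`/`iowrite8` routines that shift the address by 5 and extract the byte lane with `__kernel_extbl`/`__kernel_extwl`.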
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h index 5be6bfcbfa86..399c33b7be51 100644 --- a/include/asm-alpha/compiler.h +++ b/include/asm-alpha/compiler.h @@ -90,4 +90,14 @@ __asm__("stw %1,%0" : "=m"(mem) : "r"(val)) #endif +/* Some idiots over in <linux/compiler.h> thought inline should imply + always_inline. This breaks stuff. We'll include this file whenever + we run into such problems. */ + +#include <linux/compiler.h> +#undef inline +#undef __inline__ +#undef __inline + + #endif /* __ALPHA_COMPILER_H */ diff --git a/include/asm-alpha/core_apecs.h b/include/asm-alpha/core_apecs.h index b32084e51c98..6785ff7e02bc 100644 --- a/include/asm-alpha/core_apecs.h +++ b/include/asm-alpha/core_apecs.h @@ -370,178 +370,142 @@ struct el_apecs_procdata * data to/from the right byte-lanes. */ -#define vip volatile int * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * - -__EXTERN_INLINE u8 apecs_inb(unsigned long addr) +#define vip volatile int __force * +#define vuip volatile unsigned int __force * +#define vulp volatile unsigned long __force * + +#define APECS_SET_HAE \ + do { \ + if (addr >= (1UL << 24)) { \ + unsigned long msb = addr & 0xf8000000; \ + addr -= msb; \ + set_hae(msb); \ + } \ + } while (0) + +__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr) { - long result = *(vip) ((addr << 5) + APECS_IO + 0x00); + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= APECS_DENSE_MEM) { + addr -= APECS_DENSE_MEM; + APECS_SET_HAE; + base_and_type = APECS_SPARSE_MEM + 0x00; + } else { + addr -= APECS_IO; + base_and_type = APECS_IO + 0x00; + } + + result = *(vip) ((addr << 5) + base_and_type); return __kernel_extbl(result, addr & 3); } -__EXTERN_INLINE void apecs_outb(u8 b, unsigned long addr) +__EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr) { - unsigned long w; + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= APECS_DENSE_MEM) { + addr -= APECS_DENSE_MEM; + APECS_SET_HAE; + base_and_type = APECS_SPARSE_MEM + 0x00; + } else { + addr -= APECS_IO; + base_and_type = APECS_IO + 0x00; + } w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + APECS_IO + 0x00) = w; - mb(); -} - -__EXTERN_INLINE u16 apecs_inw(unsigned long addr) -{ - long result = *(vip) ((addr << 5) + APECS_IO + 0x08); - return __kernel_extwl(result, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE void apecs_outw(u16 b, unsigned long addr) -{ - unsigned long w; - - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + APECS_IO + 0x08) = w; - mb(); -} - -__EXTERN_INLINE u32 apecs_inl(unsigned long addr) -{ - return *(vuip) ((addr << 5) + APECS_IO + 0x18); -} - -__EXTERN_INLINE void apecs_outl(u32 b, unsigned long addr) -{ - *(vuip) ((addr << 5) + APECS_IO + 0x18) = b; - mb(); -} - - -/* - * Memory functions. 64-bit and 32-bit accesses are done through - * dense memory space, everything else through sparse space. 
- */ - -__EXTERN_INLINE u8 apecs_readb(unsigned long addr) +__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr) { - unsigned long result, msb; - - addr -= APECS_DENSE_MEM; - if (addr >= (1UL << 24)) { - msb = addr & 0xf8000000; - addr -= msb; - set_hae(msb); + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= APECS_DENSE_MEM) { + addr -= APECS_DENSE_MEM; + APECS_SET_HAE; + base_and_type = APECS_SPARSE_MEM + 0x08; + } else { + addr -= APECS_IO; + base_and_type = APECS_IO + 0x08; } - result = *(vip) ((addr << 5) + APECS_SPARSE_MEM + 0x00); - return __kernel_extbl(result, addr & 3); -} -__EXTERN_INLINE u16 apecs_readw(unsigned long addr) -{ - unsigned long result, msb; - - addr -= APECS_DENSE_MEM; - if (addr >= (1UL << 24)) { - msb = addr & 0xf8000000; - addr -= msb; - set_hae(msb); - } - result = *(vip) ((addr << 5) + APECS_SPARSE_MEM + 0x08); + result = *(vip) ((addr << 5) + base_and_type); return __kernel_extwl(result, addr & 3); } -__EXTERN_INLINE u32 apecs_readl(unsigned long addr) -{ - return (*(vuip)addr) & 0xffffffff; -} - -__EXTERN_INLINE u64 apecs_readq(unsigned long addr) -{ - return *(vulp)addr; -} - -__EXTERN_INLINE void apecs_writeb(u8 b, unsigned long addr) +__EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr) { - unsigned long msb; - - addr -= APECS_DENSE_MEM; - if (addr >= (1UL << 24)) { - msb = addr & 0xf8000000; - addr -= msb; - set_hae(msb); + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= APECS_DENSE_MEM) { + addr -= APECS_DENSE_MEM; + APECS_SET_HAE; + base_and_type = APECS_SPARSE_MEM + 0x08; + } else { + addr -= APECS_IO; + base_and_type = APECS_IO + 0x08; } - *(vuip) ((addr << 5) + APECS_SPARSE_MEM + 0x00) = b * 0x01010101; + + w = __kernel_inswl(b, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE void apecs_writew(u16 b, unsigned long addr) +__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr) { - unsigned long msb; - - addr -= APECS_DENSE_MEM; - if (addr >= (1UL << 24)) { - msb = addr & 0xf8000000; - addr -= msb; - set_hae(msb); - } - *(vuip) ((addr << 5) + APECS_SPARSE_MEM + 0x08) = b * 0x00010001; + unsigned long addr = (unsigned long) xaddr; + if (addr < APECS_DENSE_MEM) + addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18; + return *(vuip)addr; } -__EXTERN_INLINE void apecs_writel(u32 b, unsigned long addr) +__EXTERN_INLINE void apecs_iowrite32(u32 b, void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; + if (addr < APECS_DENSE_MEM) + addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18; *(vuip)addr = b; } -__EXTERN_INLINE void apecs_writeq(u64 b, unsigned long addr) +__EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr) { - *(vulp)addr = b; + return (void __iomem *)(addr + APECS_IO); } -__EXTERN_INLINE unsigned long apecs_ioremap(unsigned long addr, - unsigned long size - __attribute__((unused))) +__EXTERN_INLINE void __iomem *apecs_ioremap(unsigned long addr, + unsigned long size) { - return addr + APECS_DENSE_MEM; + return (void __iomem *)(addr + APECS_DENSE_MEM); } -__EXTERN_INLINE void apecs_iounmap(unsigned long addr) +__EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr) { - return; + return addr >= IDENT_ADDR + 0x180000000UL; } -__EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr) +__EXTERN_INLINE int apecs_is_mmio(const volatile void __iomem *addr) { - return addr >= IDENT_ADDR + 0x180000000UL; + return (unsigned long)addr >= APECS_DENSE_MEM; } +#undef APECS_SET_HAE + 
#undef vip #undef vuip #undef vulp -#ifdef __WANT_IO_DEF - -#define __inb(p) apecs_inb((unsigned long)(p)) -#define __inw(p) apecs_inw((unsigned long)(p)) -#define __inl(p) apecs_inl((unsigned long)(p)) -#define __outb(x,p) apecs_outb((x),(unsigned long)(p)) -#define __outw(x,p) apecs_outw((x),(unsigned long)(p)) -#define __outl(x,p) apecs_outl((x),(unsigned long)(p)) -#define __readb(a) apecs_readb((unsigned long)(a)) -#define __readw(a) apecs_readw((unsigned long)(a)) -#define __readl(a) apecs_readl((unsigned long)(a)) -#define __readq(a) apecs_readq((unsigned long)(a)) -#define __writeb(x,a) apecs_writeb((x),(unsigned long)(a)) -#define __writew(x,a) apecs_writew((x),(unsigned long)(a)) -#define __writel(x,a) apecs_writel((x),(unsigned long)(a)) -#define __writeq(x,a) apecs_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) apecs_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) apecs_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) apecs_is_ioaddr((unsigned long)(a)) - -#define __raw_readl(a) __readl(a) -#define __raw_readq(a) __readq(a) -#define __raw_writel(v,a) __writel((v),(a)) -#define __raw_writeq(v,a) __writeq((v),(a)) - -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX apecs +#define apecs_trivial_io_bw 0 +#define apecs_trivial_io_lq 0 +#define apecs_trivial_rw_bw 2 +#define apecs_trivial_rw_lq 1 +#define apecs_trivial_iounmap 1 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_cia.h b/include/asm-alpha/core_cia.h index e5eca2dbfdf0..549550feeee4 100644 --- a/include/asm-alpha/core_cia.h +++ b/include/asm-alpha/core_cia.h @@ -306,90 +306,6 @@ struct el_CIA_sysdata_mcheck { * get at PCI memory and I/O. */ -#define vucp volatile unsigned char * -#define vusp volatile unsigned short * -#define vip volatile int * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * - -__EXTERN_INLINE u8 cia_inb(unsigned long addr) -{ - long result; - result = *(vip) ((addr << 5) + CIA_IO + 0x00); - return __kernel_extbl(result, addr & 3); -} - -__EXTERN_INLINE void cia_outb(u8 b, unsigned long addr) -{ - unsigned long w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + CIA_IO + 0x00) = w; - mb(); -} - -__EXTERN_INLINE u16 cia_inw(unsigned long addr) -{ - long result; - result = *(vip) ((addr << 5) + CIA_IO + 0x08); - return __kernel_extwl(result, addr & 3); -} - -__EXTERN_INLINE void cia_outw(u16 b, unsigned long addr) -{ - unsigned long w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + CIA_IO + 0x08) = w; - mb(); -} - -__EXTERN_INLINE u32 cia_inl(unsigned long addr) -{ - return *(vuip) ((addr << 5) + CIA_IO + 0x18); -} - -__EXTERN_INLINE void cia_outl(u32 b, unsigned long addr) -{ - *(vuip) ((addr << 5) + CIA_IO + 0x18) = b; - mb(); -} - -__EXTERN_INLINE u8 cia_bwx_inb(unsigned long addr) -{ - /* ??? I wish I could get rid of this. But there's no ioremap - equivalent for I/O space. PCI I/O can be forced into the - CIA BWX I/O region, but that doesn't take care of legacy - ISA crap. 
*/ - - return __kernel_ldbu(*(vucp)(addr+CIA_BW_IO)); -} - -__EXTERN_INLINE void cia_bwx_outb(u8 b, unsigned long addr) -{ - __kernel_stb(b, *(vucp)(addr+CIA_BW_IO)); - mb(); -} - -__EXTERN_INLINE u16 cia_bwx_inw(unsigned long addr) -{ - return __kernel_ldwu(*(vusp)(addr+CIA_BW_IO)); -} - -__EXTERN_INLINE void cia_bwx_outw(u16 b, unsigned long addr) -{ - __kernel_stw(b, *(vusp)(addr+CIA_BW_IO)); - mb(); -} - -__EXTERN_INLINE u32 cia_bwx_inl(unsigned long addr) -{ - return *(vuip)(addr+CIA_BW_IO); -} - -__EXTERN_INLINE void cia_bwx_outl(u32 b, unsigned long addr) -{ - *(vuip)(addr+CIA_BW_IO) = b; - mb(); -} - - /* * Memory functions. 64-bit and 32-bit accesses are done through * dense memory space, everything else through sparse space. @@ -422,195 +338,158 @@ __EXTERN_INLINE void cia_bwx_outl(u32 b, unsigned long addr) * */ -__EXTERN_INLINE u8 cia_readb(unsigned long addr) +#define vip volatile int __force * +#define vuip volatile unsigned int __force * +#define vulp volatile unsigned long __force * + +__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr) { - unsigned long result; + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + /* We can use CIA_MEM_R1_MASK for io ports too, since it is large + enough to cover all io ports, and smaller than CIA_IO. */ addr &= CIA_MEM_R1_MASK; - result = *(vip) ((addr << 5) + CIA_SPARSE_MEM + 0x00); + if (addr >= CIA_DENSE_MEM) + base_and_type = CIA_SPARSE_MEM + 0x00; + else + base_and_type = CIA_IO + 0x00; + + result = *(vip) ((addr << 5) + base_and_type); return __kernel_extbl(result, addr & 3); } -__EXTERN_INLINE u16 cia_readw(unsigned long addr) +__EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr) { - unsigned long result; + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; addr &= CIA_MEM_R1_MASK; - result = *(vip) ((addr << 5) + CIA_SPARSE_MEM + 0x08); - return __kernel_extwl(result, addr & 3); -} + if (addr >= CIA_DENSE_MEM) + base_and_type = CIA_SPARSE_MEM + 0x00; + else + base_and_type = CIA_IO + 0x00; -__EXTERN_INLINE void cia_writeb(u8 b, unsigned long addr) -{ - unsigned long w; - - addr &= CIA_MEM_R1_MASK; w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x00) = w; + *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE void cia_writew(u16 b, unsigned long addr) +__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr) { - unsigned long w; + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; addr &= CIA_MEM_R1_MASK; - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x08) = w; -} - -__EXTERN_INLINE u32 cia_readl(unsigned long addr) -{ - return *(vuip)addr; -} - -__EXTERN_INLINE u64 cia_readq(unsigned long addr) -{ - return *(vulp)addr; -} - -__EXTERN_INLINE void cia_writel(u32 b, unsigned long addr) -{ - *(vuip)addr = b; -} + if (addr >= CIA_DENSE_MEM) + base_and_type = CIA_SPARSE_MEM + 0x08; + else + base_and_type = CIA_IO + 0x08; -__EXTERN_INLINE void cia_writeq(u64 b, unsigned long addr) -{ - *(vulp)addr = b; + result = *(vip) ((addr << 5) + base_and_type); + return __kernel_extwl(result, addr & 3); } -__EXTERN_INLINE unsigned long cia_ioremap(unsigned long addr, - unsigned long size - __attribute__((unused))) +__EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr) { - return addr + CIA_DENSE_MEM; -} + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; -__EXTERN_INLINE void cia_iounmap(unsigned long addr) -{ - return; -} + addr 
&= CIA_MEM_R1_MASK; + if (addr >= CIA_DENSE_MEM) + base_and_type = CIA_SPARSE_MEM + 0x08; + else + base_and_type = CIA_IO + 0x08; -__EXTERN_INLINE u8 cia_bwx_readb(unsigned long addr) -{ - return __kernel_ldbu(*(vucp)addr); + w = __kernel_inswl(b, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE u16 cia_bwx_readw(unsigned long addr) +__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr) { - return __kernel_ldwu(*(vusp)addr); + unsigned long addr = (unsigned long) xaddr; + if (addr < CIA_DENSE_MEM) + addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; + return *(vuip)addr; } -__EXTERN_INLINE u32 cia_bwx_readl(unsigned long addr) +__EXTERN_INLINE void cia_iowrite32(u32 b, void __iomem *xaddr) { - return *(vuip)addr; + unsigned long addr = (unsigned long) xaddr; + if (addr < CIA_DENSE_MEM) + addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; + *(vuip)addr = b; } -__EXTERN_INLINE u64 cia_bwx_readq(unsigned long addr) +__EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr) { - return *(vulp)addr; + return (void __iomem *)(addr + CIA_IO); } -__EXTERN_INLINE void cia_bwx_writeb(u8 b, unsigned long addr) +__EXTERN_INLINE void __iomem *cia_ioremap(unsigned long addr, + unsigned long size) { - __kernel_stb(b, *(vucp)addr); + return (void __iomem *)(addr + CIA_DENSE_MEM); } -__EXTERN_INLINE void cia_bwx_writew(u16 b, unsigned long addr) +__EXTERN_INLINE int cia_is_ioaddr(unsigned long addr) { - __kernel_stw(b, *(vusp)addr); + return addr >= IDENT_ADDR + 0x8000000000UL; } -__EXTERN_INLINE void cia_bwx_writel(u32 b, unsigned long addr) +__EXTERN_INLINE int cia_is_mmio(const volatile void __iomem *addr) { - *(vuip)addr = b; + return (unsigned long)addr >= CIA_DENSE_MEM; } -__EXTERN_INLINE void cia_bwx_writeq(u64 b, unsigned long addr) +__EXTERN_INLINE void __iomem *cia_bwx_ioportmap(unsigned long addr) { - *(vulp)addr = b; + return (void __iomem *)(addr + CIA_BW_IO); } -__EXTERN_INLINE unsigned long cia_bwx_ioremap(unsigned long addr, +__EXTERN_INLINE void __iomem *cia_bwx_ioremap(unsigned long addr, unsigned long size) { - return addr + CIA_BW_MEM; + return (void __iomem *)(addr + CIA_BW_MEM); } -__EXTERN_INLINE void cia_bwx_iounmap(unsigned long addr) +__EXTERN_INLINE int cia_bwx_is_ioaddr(unsigned long addr) { - return; + return addr >= IDENT_ADDR + 0x8000000000UL; } -__EXTERN_INLINE int cia_is_ioaddr(unsigned long addr) +__EXTERN_INLINE int cia_bwx_is_mmio(const volatile void __iomem *addr) { - return addr >= IDENT_ADDR + 0x8000000000UL; + return (unsigned long)addr < CIA_BW_IO; } -#undef vucp -#undef vusp #undef vip #undef vuip #undef vulp -#ifdef __WANT_IO_DEF - +#undef __IO_PREFIX +#define __IO_PREFIX cia +#define cia_trivial_rw_bw 2 +#define cia_trivial_rw_lq 1 +#define cia_trivial_io_bw 0 +#define cia_trivial_io_lq 0 +#define cia_trivial_iounmap 1 +#include <asm/io_trivial.h> + +#undef __IO_PREFIX +#define __IO_PREFIX cia_bwx +#define cia_bwx_trivial_rw_bw 1 +#define cia_bwx_trivial_rw_lq 1 +#define cia_bwx_trivial_io_bw 1 +#define cia_bwx_trivial_io_lq 1 +#define cia_bwx_trivial_iounmap 1 +#include <asm/io_trivial.h> + +#undef __IO_PREFIX #ifdef CONFIG_ALPHA_PYXIS -# define __inb(p) cia_bwx_inb((unsigned long)(p)) -# define __inw(p) cia_bwx_inw((unsigned long)(p)) -# define __inl(p) cia_bwx_inl((unsigned long)(p)) -# define __outb(x,p) cia_bwx_outb((x),(unsigned long)(p)) -# define __outw(x,p) cia_bwx_outw((x),(unsigned long)(p)) -# define __outl(x,p) cia_bwx_outl((x),(unsigned long)(p)) -# define __readb(a) cia_bwx_readb((unsigned long)(a)) -# define 
__readw(a) cia_bwx_readw((unsigned long)(a)) -# define __readl(a) cia_bwx_readl((unsigned long)(a)) -# define __readq(a) cia_bwx_readq((unsigned long)(a)) -# define __writeb(x,a) cia_bwx_writeb((x),(unsigned long)(a)) -# define __writew(x,a) cia_bwx_writew((x),(unsigned long)(a)) -# define __writel(x,a) cia_bwx_writel((x),(unsigned long)(a)) -# define __writeq(x,a) cia_bwx_writeq((x),(unsigned long)(a)) -# define __ioremap(a,s) cia_bwx_ioremap((unsigned long)(a),(s)) -# define __iounmap(a) cia_bwx_iounmap((unsigned long)(a)) -# define inb(p) __inb(p) -# define inw(p) __inw(p) -# define inl(p) __inl(p) -# define outb(x,p) __outb((x),(p)) -# define outw(x,p) __outw((x),(p)) -# define outl(x,p) __outl((x),(p)) -# define __raw_readb(a) __readb(a) -# define __raw_readw(a) __readw(a) -# define __raw_readl(a) __readl(a) -# define __raw_readq(a) __readq(a) -# define __raw_writeb(x,a) __writeb((x),(a)) -# define __raw_writew(x,a) __writew((x),(a)) -# define __raw_writel(x,a) __writel((x),(a)) -# define __raw_writeq(x,a) __writeq((x),(a)) +#define __IO_PREFIX cia_bwx #else -# define __inb(p) cia_inb((unsigned long)(p)) -# define __inw(p) cia_inw((unsigned long)(p)) -# define __inl(p) cia_inl((unsigned long)(p)) -# define __outb(x,p) cia_outb((x),(unsigned long)(p)) -# define __outw(x,p) cia_outw((x),(unsigned long)(p)) -# define __outl(x,p) cia_outl((x),(unsigned long)(p)) -# define __readb(a) cia_readb((unsigned long)(a)) -# define __readw(a) cia_readw((unsigned long)(a)) -# define __readl(a) cia_readl((unsigned long)(a)) -# define __readq(a) cia_readq((unsigned long)(a)) -# define __writeb(x,a) cia_writeb((x),(unsigned long)(a)) -# define __writew(x,a) cia_writew((x),(unsigned long)(a)) -# define __writel(x,a) cia_writel((x),(unsigned long)(a)) -# define __writeq(x,a) cia_writeq((x),(unsigned long)(a)) -# define __ioremap(a,s) cia_ioremap((unsigned long)(a),(s)) -# define __iounmap(a) cia_iounmap((unsigned long)(a)) -# define __raw_readl(a) __readl(a) -# define __raw_readq(a) __readq(a) -# define __raw_writel(v,a) __writel((v),(a)) -# define __raw_writeq(v,a) __writeq((v),(a)) -#endif /* PYXIS */ - -#define __is_ioaddr(a) cia_is_ioaddr((unsigned long)(a)) - -#endif /* __WANT_IO_DEF */ +#define __IO_PREFIX cia +#endif #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_irongate.h b/include/asm-alpha/core_irongate.h index 9b9a49feb51b..24b2db541501 100644 --- a/include/asm-alpha/core_irongate.h +++ b/include/asm-alpha/core_irongate.h @@ -190,137 +190,37 @@ struct el_IRONGATE_sysdata_mcheck { * K7 can only use linear accesses to get at PCI memory and I/O spaces. */ -#define vucp volatile unsigned char * -#define vusp volatile unsigned short * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * - -__EXTERN_INLINE u8 irongate_inb(unsigned long addr) -{ - return __kernel_ldbu(*(vucp)(addr + IRONGATE_IO)); -} - -__EXTERN_INLINE void irongate_outb(u8 b, unsigned long addr) -{ - __kernel_stb(b, *(vucp)(addr + IRONGATE_IO)); - mb(); -} - -__EXTERN_INLINE u16 irongate_inw(unsigned long addr) -{ - return __kernel_ldwu(*(vusp)(addr + IRONGATE_IO)); -} - -__EXTERN_INLINE void irongate_outw(u16 b, unsigned long addr) -{ - __kernel_stw(b, *(vusp)(addr + IRONGATE_IO)); - mb(); -} - -__EXTERN_INLINE u32 irongate_inl(unsigned long addr) -{ - return *(vuip)(addr + IRONGATE_IO); -} - -__EXTERN_INLINE void irongate_outl(u32 b, unsigned long addr) -{ - *(vuip)(addr + IRONGATE_IO) = b; - mb(); -} - /* * Memory functions. All accesses are done through linear space. 
*/ -__EXTERN_INLINE u8 irongate_readb(unsigned long addr) +__EXTERN_INLINE void __iomem *irongate_ioportmap(unsigned long addr) { - return __kernel_ldbu(*(vucp)addr); + return (void __iomem *)(addr + IRONGATE_IO); } -__EXTERN_INLINE u16 irongate_readw(unsigned long addr) -{ - return __kernel_ldwu(*(vusp)addr); -} +extern void __iomem *irongate_ioremap(unsigned long addr, unsigned long size); +extern void irongate_iounmap(volatile void __iomem *addr); -__EXTERN_INLINE u32 irongate_readl(unsigned long addr) -{ - return (*(vuip)addr) & 0xffffffff; -} - -__EXTERN_INLINE u64 irongate_readq(unsigned long addr) -{ - return *(vulp)addr; -} - -__EXTERN_INLINE void irongate_writeb(u8 b, unsigned long addr) -{ - __kernel_stb(b, *(vucp)addr); -} - -__EXTERN_INLINE void irongate_writew(u16 b, unsigned long addr) -{ - __kernel_stw(b, *(vusp)addr); -} - -__EXTERN_INLINE void irongate_writel(u32 b, unsigned long addr) -{ - *(vuip)addr = b; -} - -__EXTERN_INLINE void irongate_writeq(u64 b, unsigned long addr) +__EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr) { - *(vulp)addr = b; + return addr >= IRONGATE_MEM; } -extern unsigned long irongate_ioremap(unsigned long addr, unsigned long size); -extern void irongate_iounmap(unsigned long addr); - -__EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr) +__EXTERN_INLINE int irongate_is_mmio(const volatile void __iomem *xaddr) { - return addr >= IRONGATE_MEM; + unsigned long addr = (unsigned long)xaddr; + return addr < IRONGATE_IO || addr >= IRONGATE_CONF; } -#undef vucp -#undef vusp -#undef vuip -#undef vulp - -#ifdef __WANT_IO_DEF - -#define __inb(p) irongate_inb((unsigned long)(p)) -#define __inw(p) irongate_inw((unsigned long)(p)) -#define __inl(p) irongate_inl((unsigned long)(p)) -#define __outb(x,p) irongate_outb((x),(unsigned long)(p)) -#define __outw(x,p) irongate_outw((x),(unsigned long)(p)) -#define __outl(x,p) irongate_outl((x),(unsigned long)(p)) -#define __readb(a) irongate_readb((unsigned long)(a)) -#define __readw(a) irongate_readw((unsigned long)(a)) -#define __readl(a) irongate_readl((unsigned long)(a)) -#define __readq(a) irongate_readq((unsigned long)(a)) -#define __writeb(x,a) irongate_writeb((x),(unsigned long)(a)) -#define __writew(x,a) irongate_writew((x),(unsigned long)(a)) -#define __writel(x,a) irongate_writel((x),(unsigned long)(a)) -#define __writeq(x,a) irongate_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) irongate_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) irongate_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) irongate_is_ioaddr((unsigned long)(a)) - -#define inb(p) __inb(p) -#define inw(p) __inw(p) -#define inl(p) __inl(p) -#define outb(x,p) __outb((x),(p)) -#define outw(x,p) __outw((x),(p)) -#define outl(x,p) __outl((x),(p)) -#define __raw_readb(a) __readb(a) -#define __raw_readw(a) __readw(a) -#define __raw_readl(a) __readl(a) -#define __raw_readq(a) __readq(a) -#define __raw_writeb(v,a) __writeb((v),(a)) -#define __raw_writew(v,a) __writew((v),(a)) -#define __raw_writel(v,a) __writel((v),(a)) -#define __raw_writeq(v,a) __writeq((v),(a)) - -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX irongate +#define irongate_trivial_rw_bw 1 +#define irongate_trivial_rw_lq 1 +#define irongate_trivial_io_bw 1 +#define irongate_trivial_io_lq 1 +#define irongate_trivial_iounmap 0 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_lca.h b/include/asm-alpha/core_lca.h index a2a55b54a72c..f7cb4b460954 100644 --- 
a/include/asm-alpha/core_lca.h +++ b/include/asm-alpha/core_lca.h @@ -215,145 +215,117 @@ union el_lca { * data to/from the right byte-lanes. */ -#define vip volatile int * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * +#define vip volatile int __force * +#define vuip volatile unsigned int __force * +#define vulp volatile unsigned long __force * -__EXTERN_INLINE u8 lca_inb(unsigned long addr) -{ - long result = *(vip) ((addr << 5) + LCA_IO + 0x00); - return __kernel_extbl(result, addr & 3); -} +#define LCA_SET_HAE \ + do { \ + if (addr >= (1UL << 24)) { \ + unsigned long msb = addr & 0xf8000000; \ + addr -= msb; \ + set_hae(msb); \ + } \ + } while (0) -__EXTERN_INLINE void lca_outb(u8 b, unsigned long addr) -{ - unsigned long w; - - w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + LCA_IO + 0x00) = w; - mb(); -} - -__EXTERN_INLINE u16 lca_inw(unsigned long addr) -{ - long result = *(vip) ((addr << 5) + LCA_IO + 0x08); - return __kernel_extwl(result, addr & 3); -} -__EXTERN_INLINE void lca_outw(u16 b, unsigned long addr) +__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr) { - unsigned long w; + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= LCA_DENSE_MEM) { + addr -= LCA_DENSE_MEM; + LCA_SET_HAE; + base_and_type = LCA_SPARSE_MEM + 0x00; + } else { + addr -= LCA_IO; + base_and_type = LCA_IO + 0x00; + } - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + LCA_IO + 0x08) = w; - mb(); + result = *(vip) ((addr << 5) + base_and_type); + return __kernel_extbl(result, addr & 3); } -__EXTERN_INLINE u32 lca_inl(unsigned long addr) +__EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr) { - return *(vuip) ((addr << 5) + LCA_IO + 0x18); -} + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= LCA_DENSE_MEM) { + addr -= LCA_DENSE_MEM; + LCA_SET_HAE; + base_and_type = LCA_SPARSE_MEM + 0x00; + } else { + addr -= LCA_IO; + base_and_type = LCA_IO + 0x00; + } -__EXTERN_INLINE void lca_outl(u32 b, unsigned long addr) -{ - *(vuip) ((addr << 5) + LCA_IO + 0x18) = b; - mb(); + w = __kernel_insbl(b, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; } - -/* - * Memory functions. 64-bit and 32-bit accesses are done through - * dense memory space, everything else through sparse space. 
- */ - -__EXTERN_INLINE u8 lca_readb(unsigned long addr) +__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr) { - unsigned long result, msb; - - addr -= LCA_DENSE_MEM; - if (addr >= (1UL << 24)) { - msb = addr & 0xf8000000; - addr -= msb; - set_hae(msb); + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= LCA_DENSE_MEM) { + addr -= LCA_DENSE_MEM; + LCA_SET_HAE; + base_and_type = LCA_SPARSE_MEM + 0x08; + } else { + addr -= LCA_IO; + base_and_type = LCA_IO + 0x08; } - result = *(vip) ((addr << 5) + LCA_SPARSE_MEM + 0x00); - return __kernel_extbl(result, addr & 3); -} -__EXTERN_INLINE u16 lca_readw(unsigned long addr) -{ - unsigned long result, msb; - - addr -= LCA_DENSE_MEM; - if (addr >= (1UL << 24)) { - msb = addr & 0xf8000000; - addr -= msb; - set_hae(msb); - } - result = *(vip) ((addr << 5) + LCA_SPARSE_MEM + 0x08); + result = *(vip) ((addr << 5) + base_and_type); return __kernel_extwl(result, addr & 3); } -__EXTERN_INLINE u32 lca_readl(unsigned long addr) +__EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr) { - return (*(vuip)addr) & 0xffffffff; -} - -__EXTERN_INLINE u64 lca_readq(unsigned long addr) -{ - return *(vulp)addr; -} - -__EXTERN_INLINE void lca_writeb(u8 b, unsigned long addr) -{ - unsigned long msb; - unsigned long w; - - addr -= LCA_DENSE_MEM; - if (addr >= (1UL << 24)) { - msb = addr & 0xf8000000; - addr -= msb; - set_hae(msb); + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= LCA_DENSE_MEM) { + addr -= LCA_DENSE_MEM; + LCA_SET_HAE; + base_and_type = LCA_SPARSE_MEM + 0x08; + } else { + addr -= LCA_IO; + base_and_type = LCA_IO + 0x08; } - w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x00) = w; -} -__EXTERN_INLINE void lca_writew(u16 b, unsigned long addr) -{ - unsigned long msb; - unsigned long w; - - addr -= LCA_DENSE_MEM; - if (addr >= (1UL << 24)) { - msb = addr & 0xf8000000; - addr -= msb; - set_hae(msb); - } w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x08) = w; + *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE void lca_writel(u32 b, unsigned long addr) +__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr) { - *(vuip)addr = b; + unsigned long addr = (unsigned long) xaddr; + if (addr < LCA_DENSE_MEM) + addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; + return *(vuip)addr; } -__EXTERN_INLINE void lca_writeq(u64 b, unsigned long addr) +__EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr) { - *(vulp)addr = b; + unsigned long addr = (unsigned long) xaddr; + if (addr < LCA_DENSE_MEM) + addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; + *(vuip)addr = b; } -__EXTERN_INLINE unsigned long lca_ioremap(unsigned long addr, - unsigned long size - __attribute__((unused))) +__EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr) { - return addr + LCA_DENSE_MEM; + return (void __iomem *)(addr + LCA_IO); } -__EXTERN_INLINE void lca_iounmap(unsigned long addr) +__EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr, + unsigned long size) { - return; + return (void __iomem *)(addr + LCA_DENSE_MEM); } __EXTERN_INLINE int lca_is_ioaddr(unsigned long addr) @@ -361,36 +333,23 @@ __EXTERN_INLINE int lca_is_ioaddr(unsigned long addr) return addr >= IDENT_ADDR + 0x120000000UL; } +__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr) +{ + return (unsigned long)addr >= LCA_DENSE_MEM; +} + #undef vip #undef vuip #undef vulp -#ifdef __WANT_IO_DEF - -#define __inb(p) 
lca_inb((unsigned long)(p)) -#define __inw(p) lca_inw((unsigned long)(p)) -#define __inl(p) lca_inl((unsigned long)(p)) -#define __outb(x,p) lca_outb((x),(unsigned long)(p)) -#define __outw(x,p) lca_outw((x),(unsigned long)(p)) -#define __outl(x,p) lca_outl((x),(unsigned long)(p)) -#define __readb(a) lca_readb((unsigned long)(a)) -#define __readw(a) lca_readw((unsigned long)(a)) -#define __readl(a) lca_readl((unsigned long)(a)) -#define __readq(a) lca_readq((unsigned long)(a)) -#define __writeb(x,a) lca_writeb((x),(unsigned long)(a)) -#define __writew(x,a) lca_writew((x),(unsigned long)(a)) -#define __writel(x,a) lca_writel((x),(unsigned long)(a)) -#define __writeq(x,a) lca_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) lca_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) lca_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) lca_is_ioaddr((unsigned long)(a)) - -#define __raw_readl(a) __readl(a) -#define __raw_readq(a) __readq(a) -#define __raw_writel(v,a) __writel((v),(a)) -#define __raw_writeq(v,a) __writeq((v),(a)) - -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX lca +#define lca_trivial_rw_bw 2 +#define lca_trivial_rw_lq 1 +#define lca_trivial_io_bw 0 +#define lca_trivial_io_lq 0 +#define lca_trivial_iounmap 1 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_marvel.h b/include/asm-alpha/core_marvel.h index a1f2482cf3a4..30d55fe7aaf6 100644 --- a/include/asm-alpha/core_marvel.h +++ b/include/asm-alpha/core_marvel.h @@ -325,249 +325,48 @@ struct io7 { * I/O functions. All access through linear space. */ -#define vucp volatile unsigned char * -#define vusp volatile unsigned short * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * - -#ifdef CONFIG_VGA_HOSE -extern struct pci_controller *pci_vga_hose; - -# define __marvel_is_port_vga(a) \ - (((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3)) -# define __marvel_is_mem_vga(a) (((a) >= 0xa0000) && ((a) <= 0xc0000)) -# define FIXUP_IOADDR_VGA(a) do { \ - if (pci_vga_hose && __marvel_is_port_vga(a)) \ - a += pci_vga_hose->io_space->start; \ - } while(0) -#else -# define FIXUP_IOADDR_VGA(a) -#endif - -#define __marvel_is_port_kbd(a) (((a) == 0x60) || ((a) == 0x64)) -#define __marvel_is_port_rtc(a) (((a) == 0x70) || ((a) == 0x71)) - -#define FIXUP_IOADDR_LEGACY(a) - -#define FIXUP_IOADDR(a) do { \ - FIXUP_IOADDR_VGA(a); \ - FIXUP_IOADDR_LEGACY(a); \ - } while(0) - -#if 0 -# define IOBUG(x) printk x -# define IOBUG_FILTER_IOADDR(a, x) \ - if (!__marvel_is_port_kbd(a) && !__marvel_is_port_rtc(a)) IOBUG(x) -#else -# define IOBUG(x) -# define IOBUG_FILTER_IOADDR(a, x) -#endif - -extern u8 __marvel_rtc_io(int write, u8 b, unsigned long addr); -#define __marvel_rtc_inb(a) __marvel_rtc_io(0, 0, (a)) -#define __marvel_rtc_outb(b, a) __marvel_rtc_io(1, (b), (a)) - -__EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr) -{ - return (addr & (1UL << 40)) != 0; /*FIXME - hardwire*/ -} - -__EXTERN_INLINE u8 marvel_inb(unsigned long addr) -{ - FIXUP_IOADDR(addr); - if (!marvel_is_ioaddr(addr)) { - if (__marvel_is_port_kbd(addr)) - return (u8)0; - if (__marvel_is_port_rtc(addr)) - return __marvel_rtc_inb(addr); - IOBUG_FILTER_IOADDR(addr, - ("Bad IO addr %lx - reading -1\n", addr)); - return (u8)-1; - } - return __kernel_ldbu(*(vucp)addr); -} - -__EXTERN_INLINE void marvel_outb(u8 b, unsigned long addr) -{ - FIXUP_IOADDR(addr); - if (!marvel_is_ioaddr(addr)) { - if (__marvel_is_port_rtc(addr)) - return 
(void)__marvel_rtc_outb(b, addr); - IOBUG_FILTER_IOADDR(addr, - ("Bad IO addr %lx - reading -1\n", addr)); - return; - } - __kernel_stb(b, *(vucp)addr); - mb(); -} - -__EXTERN_INLINE u16 marvel_inw(unsigned long addr) -{ - FIXUP_IOADDR(addr); - if (!marvel_is_ioaddr(addr)) { - IOBUG_FILTER_IOADDR(addr, - ("Bad IO addr %lx - reading -1\n", addr)); - return (u16)-1; - } - return __kernel_ldwu(*(vusp)addr); -} - -__EXTERN_INLINE void marvel_outw(u16 w, unsigned long addr) -{ - FIXUP_IOADDR(addr); - if (!marvel_is_ioaddr(addr)) { - IOBUG_FILTER_IOADDR(addr, - ("Bad IO addr %lx - reading -1\n", addr)); - return; - } - __kernel_stw(w, *(vusp)addr); - mb(); -} - -__EXTERN_INLINE u32 marvel_inl(unsigned long addr) -{ - FIXUP_IOADDR(addr); - if (!marvel_is_ioaddr(addr)) { - IOBUG_FILTER_IOADDR(addr, - ("Bad IO addr %lx - reading -1\n", addr)); - return (u32)-1; - } - return *(vuip)addr; -} - -__EXTERN_INLINE void marvel_outl(u32 l, unsigned long addr) -{ - FIXUP_IOADDR(addr); - if (!marvel_is_ioaddr(addr)) { - IOBUG_FILTER_IOADDR(addr, - ("Bad IO addr %lx - reading -1\n", addr)); - return; - } - *(vuip)addr = l; - mb(); -} - /* * Memory functions. All accesses through linear space. */ -extern unsigned long marvel_ioremap(unsigned long addr, unsigned long size); -extern void marvel_iounmap(unsigned long addr); +#define vucp volatile unsigned char __force * +#define vusp volatile unsigned short __force * -__EXTERN_INLINE u8 marvel_readb(unsigned long addr) -{ - if (!marvel_is_ioaddr(addr)) { - IOBUG(("Bad MEM addr %lx - reading -1\n", addr)); - return (u8)-1; - } - return __kernel_ldbu(*(vucp)addr); -} +extern unsigned int marvel_ioread8(void __iomem *); +extern void marvel_iowrite8(u8 b, void __iomem *); -__EXTERN_INLINE u16 marvel_readw(unsigned long addr) +__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr) { - if (!marvel_is_ioaddr(addr)) { - IOBUG(("Bad MEM addr %lx - reading -1\n", addr)); - return (u16)-1; - } return __kernel_ldwu(*(vusp)addr); } -__EXTERN_INLINE u32 marvel_readl(unsigned long addr) -{ - if (!marvel_is_ioaddr(addr)) { - IOBUG(("Bad MEM addr %lx - reading -1\n", addr)); - return (u32)-1; - } - return *(vuip)addr; -} - -__EXTERN_INLINE u64 marvel_readq(unsigned long addr) +__EXTERN_INLINE void marvel_iowrite16(u16 b, void __iomem *addr) { - if (!marvel_is_ioaddr(addr)) { - IOBUG(("Bad MEM addr %lx - reading -1\n", addr)); - return (u64)-1; - } - return *(vulp)addr; + __kernel_stw(b, *(vusp)addr); } -__EXTERN_INLINE void marvel_writeb(u8 b, unsigned long addr) -{ - if (!marvel_is_ioaddr(addr)) { - IOBUG(("Bad MEM addr %lx - dropping store\n", addr)); - return; - } - __kernel_stb(b, *(vucp)addr); -} - -__EXTERN_INLINE void marvel_writew(u16 w, unsigned long addr) -{ - if (!marvel_is_ioaddr(addr)) { - IOBUG(("Bad MEM addr %lx - dropping store\n", addr)); - return; - } - __kernel_stw(w, *(vusp)addr); -} +extern void __iomem *marvel_ioremap(unsigned long addr, unsigned long size); +extern void marvel_iounmap(volatile void __iomem *addr); +extern void __iomem *marvel_ioportmap (unsigned long addr); -__EXTERN_INLINE void marvel_writel(u32 l, unsigned long addr) -{ - if (!marvel_is_ioaddr(addr)) { - IOBUG(("Bad MEM addr %lx - dropping store\n", addr)); - return; - } - *(vuip)addr = l; -} - -__EXTERN_INLINE void marvel_writeq(u64 q, unsigned long addr) +__EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr) { - if (!marvel_is_ioaddr(addr)) { - IOBUG(("Bad MEM addr %lx - dropping store\n", addr)); - return; - } - *(vulp)addr = q; + return (addr >> 40) & 1; } -#undef 
FIXUP_IOADDR -#undef FIXUP_IOADDR_LEGACY -#undef FIXUP_IOADDR_VGA +extern int marvel_is_mmio(const volatile void __iomem *); #undef vucp #undef vusp -#undef vuip -#undef vulp - -#ifdef __WANT_IO_DEF - -#define __inb(p) marvel_inb((unsigned long)(p)) -#define __inw(p) marvel_inw((unsigned long)(p)) -#define __inl(p) marvel_inl((unsigned long)(p)) -#define __outb(x,p) marvel_outb((x),(unsigned long)(p)) -#define __outw(x,p) marvel_outw((x),(unsigned long)(p)) -#define __outl(x,p) marvel_outl((x),(unsigned long)(p)) -#define __readb(a) marvel_readb((unsigned long)(a)) -#define __readw(a) marvel_readw((unsigned long)(a)) -#define __readl(a) marvel_readl((unsigned long)(a)) -#define __readq(a) marvel_readq((unsigned long)(a)) -#define __writeb(x,a) marvel_writeb((x),(unsigned long)(a)) -#define __writew(x,a) marvel_writew((x),(unsigned long)(a)) -#define __writel(x,a) marvel_writel((x),(unsigned long)(a)) -#define __writeq(x,a) marvel_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) marvel_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) marvel_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) marvel_is_ioaddr((unsigned long)(a)) - -/* Disable direct inlining of these calls with the debug checks present. */ -#if 0 -#define __raw_readb(a) __readb(a) -#define __raw_readw(a) __readw(a) -#define __raw_readl(a) __readl(a) -#define __raw_readq(a) __readq(a) -#define __raw_writeb(v,a) __writeb(v,a) -#define __raw_writew(v,a) __writew(v,a) -#define __raw_writel(v,a) __writel(v,a) -#define __raw_writeq(v,a) __writeq(v,a) -#endif -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX marvel +#define marvel_trivial_rw_bw 1 +#define marvel_trivial_rw_lq 1 +#define marvel_trivial_io_bw 0 +#define marvel_trivial_io_lq 1 +#define marvel_trivial_iounmap 0 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE # undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h index db03753ed209..980a3c51b18e 100644 --- a/include/asm-alpha/core_mcpcia.h +++ b/include/asm-alpha/core_mcpcia.h @@ -211,91 +211,6 @@ struct el_MCPCIA_uncorrected_frame_mcheck { * Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE. */ -#define vucp volatile unsigned char * -#define vusp volatile unsigned short * -#define vip volatile int * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * - -__EXTERN_INLINE u8 mcpcia_inb(unsigned long in_addr) -{ - unsigned long addr, hose, result; - - addr = in_addr & 0xffffUL; - hose = in_addr & ~0xffffUL; - - /* ??? I wish I could get rid of this. But there's no ioremap - equivalent for I/O space. PCI I/O can be forced into the - correct hose's I/O region, but that doesn't take care of - legacy ISA crap. 
*/ - hose += MCPCIA_IO_BIAS; - - result = *(vip) ((addr << 5) + hose + 0x00); - return __kernel_extbl(result, addr & 3); -} - -__EXTERN_INLINE void mcpcia_outb(u8 b, unsigned long in_addr) -{ - unsigned long addr, hose, w; - - addr = in_addr & 0xffffUL; - hose = in_addr & ~0xffffUL; - hose += MCPCIA_IO_BIAS; - - w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + hose + 0x00) = w; - mb(); -} - -__EXTERN_INLINE u16 mcpcia_inw(unsigned long in_addr) -{ - unsigned long addr, hose, result; - - addr = in_addr & 0xffffUL; - hose = in_addr & ~0xffffUL; - hose += MCPCIA_IO_BIAS; - - result = *(vip) ((addr << 5) + hose + 0x08); - return __kernel_extwl(result, addr & 3); -} - -__EXTERN_INLINE void mcpcia_outw(u16 b, unsigned long in_addr) -{ - unsigned long addr, hose, w; - - addr = in_addr & 0xffffUL; - hose = in_addr & ~0xffffUL; - hose += MCPCIA_IO_BIAS; - - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + hose + 0x08) = w; - mb(); -} - -__EXTERN_INLINE u32 mcpcia_inl(unsigned long in_addr) -{ - unsigned long addr, hose; - - addr = in_addr & 0xffffUL; - hose = in_addr & ~0xffffUL; - hose += MCPCIA_IO_BIAS; - - return *(vuip) ((addr << 5) + hose + 0x18); -} - -__EXTERN_INLINE void mcpcia_outl(u32 b, unsigned long in_addr) -{ - unsigned long addr, hose; - - addr = in_addr & 0xffffUL; - hose = in_addr & ~0xffffUL; - hose += MCPCIA_IO_BIAS; - - *(vuip) ((addr << 5) + hose + 0x18) = b; - mb(); -} - - /* * Memory functions. 64-bit and 32-bit accesses are done through * dense memory space, everything else through sparse space. @@ -328,149 +243,131 @@ __EXTERN_INLINE void mcpcia_outl(u32 b, unsigned long in_addr) * */ -__EXTERN_INLINE unsigned long mcpcia_ioremap(unsigned long addr, - unsigned long size - __attribute__((unused))) -{ - return addr + MCPCIA_MEM_BIAS; -} +#define vip volatile int __force * +#define vuip volatile unsigned int __force * + +#ifdef MCPCIA_ONE_HAE_WINDOW +#define MCPCIA_FROB_MMIO \ + if (__mcpcia_is_mmio(hose)) { \ + set_hae(hose & 0xffffffff); \ + hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \ + } +#else +#define MCPCIA_FROB_MMIO \ + if (__mcpcia_is_mmio(hose)) { \ + hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \ + } +#endif -__EXTERN_INLINE void mcpcia_iounmap(unsigned long addr) +static inline int __mcpcia_is_mmio(unsigned long addr) { - return; + return (addr & 0x80000000UL) == 0; } -__EXTERN_INLINE int mcpcia_is_ioaddr(unsigned long addr) +__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr) { - return addr >= MCPCIA_SPARSE(0); + unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; + unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; + unsigned long result; + + MCPCIA_FROB_MMIO; + + result = *(vip) ((addr << 5) + hose + 0x00); + return __kernel_extbl(result, addr & 3); } -__EXTERN_INLINE u8 mcpcia_readb(unsigned long in_addr) +__EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr) { - unsigned long addr = in_addr & 0xffffffffUL; - unsigned long hose = in_addr & ~0xffffffffUL; - unsigned long result, work; + unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; + unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; + unsigned long w; -#ifndef MCPCIA_ONE_HAE_WINDOW - unsigned long msb; - msb = addr & ~MCPCIA_MEM_MASK; - set_hae(msb); -#endif - addr = addr & MCPCIA_MEM_MASK; + MCPCIA_FROB_MMIO; - hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); - work = ((addr << 5) + hose + 0x00); - result = *(vip) work; - return __kernel_extbl(result, addr & 3); + w = __kernel_insbl(b, addr & 3); + *(vuip) 
((addr << 5) + hose + 0x00) = w; } -__EXTERN_INLINE u16 mcpcia_readw(unsigned long in_addr) +__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr) { - unsigned long addr = in_addr & 0xffffffffUL; - unsigned long hose = in_addr & ~0xffffffffUL; - unsigned long result, work; + unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; + unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; + unsigned long result; -#ifndef MCPCIA_ONE_HAE_WINDOW - unsigned long msb; - msb = addr & ~MCPCIA_MEM_MASK; - set_hae(msb); -#endif - addr = addr & MCPCIA_MEM_MASK; + MCPCIA_FROB_MMIO; - hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); - work = ((addr << 5) + hose + 0x08); - result = *(vip) work; + result = *(vip) ((addr << 5) + hose + 0x08); return __kernel_extwl(result, addr & 3); } -__EXTERN_INLINE void mcpcia_writeb(u8 b, unsigned long in_addr) +__EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr) { - unsigned long addr = in_addr & 0xffffffffUL; - unsigned long hose = in_addr & ~0xffffffffUL; + unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; + unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; unsigned long w; -#ifndef MCPCIA_ONE_HAE_WINDOW - unsigned long msb; - msb = addr & ~MCPCIA_MEM_MASK; - set_hae(msb); -#endif - addr = addr & MCPCIA_MEM_MASK; + MCPCIA_FROB_MMIO; - w = __kernel_insbl(b, in_addr & 3); - hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); - *(vuip) ((addr << 5) + hose + 0x00) = w; + w = __kernel_inswl(b, addr & 3); + *(vuip) ((addr << 5) + hose + 0x08) = w; } -__EXTERN_INLINE void mcpcia_writew(u16 b, unsigned long in_addr) +__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr) { - unsigned long addr = in_addr & 0xffffffffUL; - unsigned long hose = in_addr & ~0xffffffffUL; - unsigned long w; + unsigned long addr = (unsigned long)xaddr; -#ifndef MCPCIA_ONE_HAE_WINDOW - unsigned long msb; - msb = addr & ~MCPCIA_MEM_MASK; - set_hae(msb); -#endif - addr = addr & MCPCIA_MEM_MASK; + if (!__mcpcia_is_mmio(addr)) + addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; - w = __kernel_inswl(b, in_addr & 3); - hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); - *(vuip) ((addr << 5) + hose + 0x08) = w; + return *(vuip)addr; +} + +__EXTERN_INLINE void mcpcia_iowrite32(u32 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long)xaddr; + + if (!__mcpcia_is_mmio(addr)) + addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; + + *(vuip)addr = b; } -__EXTERN_INLINE u32 mcpcia_readl(unsigned long addr) + +__EXTERN_INLINE void __iomem *mcpcia_ioportmap(unsigned long addr) { - return (*(vuip)addr) & 0xffffffff; + return (void __iomem *)(addr + MCPCIA_IO_BIAS); } -__EXTERN_INLINE u64 mcpcia_readq(unsigned long addr) +__EXTERN_INLINE void __iomem *mcpcia_ioremap(unsigned long addr, + unsigned long size) { - return *(vulp)addr; + return (void __iomem *)(addr + MCPCIA_MEM_BIAS); } -__EXTERN_INLINE void mcpcia_writel(u32 b, unsigned long addr) +__EXTERN_INLINE int mcpcia_is_ioaddr(unsigned long addr) { - *(vuip)addr = b; + return addr >= MCPCIA_SPARSE(0); } -__EXTERN_INLINE void mcpcia_writeq(u64 b, unsigned long addr) +__EXTERN_INLINE int mcpcia_is_mmio(const volatile void __iomem *xaddr) { - *(vulp)addr = b; + unsigned long addr = (unsigned long) xaddr; + return __mcpcia_is_mmio(addr); } -#undef vucp -#undef vusp +#undef MCPCIA_FROB_MMIO + #undef vip #undef vuip -#undef vulp - -#ifdef __WANT_IO_DEF - -#define __inb(p) mcpcia_inb((unsigned long)(p)) -#define __inw(p) mcpcia_inw((unsigned long)(p)) -#define __inl(p) 
mcpcia_inl((unsigned long)(p)) -#define __outb(x,p) mcpcia_outb((x),(unsigned long)(p)) -#define __outw(x,p) mcpcia_outw((x),(unsigned long)(p)) -#define __outl(x,p) mcpcia_outl((x),(unsigned long)(p)) -#define __readb(a) mcpcia_readb((unsigned long)(a)) -#define __readw(a) mcpcia_readw((unsigned long)(a)) -#define __readl(a) mcpcia_readl((unsigned long)(a)) -#define __readq(a) mcpcia_readq((unsigned long)(a)) -#define __writeb(x,a) mcpcia_writeb((x),(unsigned long)(a)) -#define __writew(x,a) mcpcia_writew((x),(unsigned long)(a)) -#define __writel(x,a) mcpcia_writel((x),(unsigned long)(a)) -#define __writeq(x,a) mcpcia_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) mcpcia_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) mcpcia_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) mcpcia_is_ioaddr((unsigned long)(a)) - -#define __raw_readl(a) __readl(a) -#define __raw_readq(a) __readq(a) -#define __raw_writel(v,a) __writel((v),(a)) -#define __raw_writeq(v,a) __writeq((v),(a)) - -#endif /* __WANT_IO_DEF */ + +#undef __IO_PREFIX +#define __IO_PREFIX mcpcia +#define mcpcia_trivial_rw_bw 2 +#define mcpcia_trivial_rw_lq 1 +#define mcpcia_trivial_io_bw 0 +#define mcpcia_trivial_io_lq 0 +#define mcpcia_trivial_iounmap 1 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_polaris.h b/include/asm-alpha/core_polaris.h index dca0341b0323..2f966b64659d 100644 --- a/include/asm-alpha/core_polaris.h +++ b/include/asm-alpha/core_polaris.h @@ -63,49 +63,6 @@ struct el_POLARIS_sysdata_mcheck { * However, we will support only the BWX form. */ -#define vucp volatile unsigned char * -#define vusp volatile unsigned short * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * - -__EXTERN_INLINE u8 polaris_inb(unsigned long addr) -{ - /* ??? I wish I could get rid of this. But there's no ioremap - equivalent for I/O space. PCI I/O can be forced into the - POLARIS I/O region, but that doesn't take care of legacy - ISA crap. */ - - return __kernel_ldbu(*(vucp)(addr + POLARIS_DENSE_IO_BASE)); -} - -__EXTERN_INLINE void polaris_outb(u8 b, unsigned long addr) -{ - __kernel_stb(b, *(vucp)(addr + POLARIS_DENSE_IO_BASE)); - mb(); -} - -__EXTERN_INLINE u16 polaris_inw(unsigned long addr) -{ - return __kernel_ldwu(*(vusp)(addr + POLARIS_DENSE_IO_BASE)); -} - -__EXTERN_INLINE void polaris_outw(u16 b, unsigned long addr) -{ - __kernel_stw(b, *(vusp)(addr + POLARIS_DENSE_IO_BASE)); - mb(); -} - -__EXTERN_INLINE u32 polaris_inl(unsigned long addr) -{ - return *(vuip)(addr + POLARIS_DENSE_IO_BASE); -} - -__EXTERN_INLINE void polaris_outl(u32 b, unsigned long addr) -{ - *(vuip)(addr + POLARIS_DENSE_IO_BASE) = b; - mb(); -} - /* * Memory functions. Polaris allows all accesses (byte/word * as well as long/quad) to be done through dense space. @@ -113,104 +70,35 @@ __EXTERN_INLINE void polaris_outl(u32 b, unsigned long addr) * We will only support DENSE access via BWX insns. 
*/ -__EXTERN_INLINE u8 polaris_readb(unsigned long addr) -{ - return __kernel_ldbu(*(vucp)addr); -} - -__EXTERN_INLINE u16 polaris_readw(unsigned long addr) +__EXTERN_INLINE void __iomem *polaris_ioportmap(unsigned long addr) { - return __kernel_ldwu(*(vusp)addr); + return (void __iomem *)(addr + POLARIS_DENSE_IO_BASE); } -__EXTERN_INLINE u32 polaris_readl(unsigned long addr) +__EXTERN_INLINE void __iomem *polaris_ioremap(unsigned long addr, + unsigned long size) { - return (*(vuip)addr) & 0xffffffff; + return (void __iomem *)(addr + POLARIS_DENSE_MEM_BASE); } -__EXTERN_INLINE u64 polaris_readq(unsigned long addr) -{ - return *(vulp)addr; -} - -__EXTERN_INLINE void polaris_writeb(u8 b, unsigned long addr) -{ - __kernel_stb(b, *(vucp)addr); -} - -__EXTERN_INLINE void polaris_writew(u16 b, unsigned long addr) -{ - __kernel_stw(b, *(vusp)addr); -} - -__EXTERN_INLINE void polaris_writel(u32 b, unsigned long addr) -{ - *(vuip)addr = b; -} - -__EXTERN_INLINE void polaris_writeq(u64 b, unsigned long addr) -{ - *(vulp)addr = b; -} - -__EXTERN_INLINE unsigned long polaris_ioremap(unsigned long addr, - unsigned long size - __attribute__((unused))) -{ - return addr + POLARIS_DENSE_MEM_BASE; -} - -__EXTERN_INLINE void polaris_iounmap(unsigned long addr) +__EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr) { - return; + return addr >= POLARIS_SPARSE_MEM_BASE; } -__EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr) +__EXTERN_INLINE int polaris_is_mmio(const volatile void __iomem *addr) { - return addr >= POLARIS_SPARSE_MEM_BASE; + return (unsigned long)addr < POLARIS_SPARSE_IO_BASE; } -#undef vucp -#undef vusp -#undef vuip -#undef vulp - -#ifdef __WANT_IO_DEF - -#define __inb(p) polaris_inb((unsigned long)(p)) -#define __inw(p) polaris_inw((unsigned long)(p)) -#define __inl(p) polaris_inl((unsigned long)(p)) -#define __outb(x,p) polaris_outb((x),(unsigned long)(p)) -#define __outw(x,p) polaris_outw((x),(unsigned long)(p)) -#define __outl(x,p) polaris_outl((x),(unsigned long)(p)) -#define __readb(a) polaris_readb((unsigned long)(a)) -#define __readw(a) polaris_readw((unsigned long)(a)) -#define __readl(a) polaris_readl((unsigned long)(a)) -#define __readq(a) polaris_readq((unsigned long)(a)) -#define __writeb(x,a) polaris_writeb((x),(unsigned long)(a)) -#define __writew(x,a) polaris_writew((x),(unsigned long)(a)) -#define __writel(x,a) polaris_writel((x),(unsigned long)(a)) -#define __writeq(x,a) polaris_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) polaris_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) polaris_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) polaris_is_ioaddr((unsigned long)(a)) - -#define inb(p) __inb(p) -#define inw(p) __inw(p) -#define inl(p) __inl(p) -#define outb(x,p) __outb((x),(p)) -#define outw(x,p) __outw((x),(p)) -#define outl(x,p) __outl((x),(p)) -#define __raw_readb(a) __readb(a) -#define __raw_readw(a) __readw(a) -#define __raw_readl(a) __readl(a) -#define __raw_readq(a) __readq(a) -#define __raw_writeb(v,a) __writeb((v),(a)) -#define __raw_writew(v,a) __writew((v),(a)) -#define __raw_writel(v,a) __writel((v),(a)) -#define __raw_writeq(v,a) __writeq((v),(a)) - -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX polaris +#define polaris_trivial_rw_bw 1 +#define polaris_trivial_rw_lq 1 +#define polaris_trivial_io_bw 1 +#define polaris_trivial_io_lq 1 +#define polaris_trivial_iounmap 1 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_t2.h 
b/include/asm-alpha/core_t2.h index ce5ae2a5c896..5c1c40338c82 100644 --- a/include/asm-alpha/core_t2.h +++ b/include/asm-alpha/core_t2.h @@ -199,8 +199,8 @@ struct el_t2_procdata_mcheck { struct el_t2_logout_header { unsigned int elfl_size; /* size in bytes of logout area. */ - int elfl_sbz1:31; /* Should be zero. */ - char elfl_retry:1; /* Retry flag. */ + unsigned int elfl_sbz1:31; /* Should be zero. */ + unsigned int elfl_retry:1; /* Retry flag. */ unsigned int elfl_procoffset; /* Processor-specific offset. */ unsigned int elfl_sysoffset; /* Offset of system-specific. */ unsigned int elfl_error_type; /* PAL error type code. */ @@ -357,13 +357,13 @@ struct el_t2_frame_corrected { #define vip volatile int * #define vuip volatile unsigned int * -__EXTERN_INLINE u8 t2_inb(unsigned long addr) +static inline u8 t2_inb(unsigned long addr) { long result = *(vip) ((addr << 5) + T2_IO + 0x00); return __kernel_extbl(result, addr & 3); } -__EXTERN_INLINE void t2_outb(u8 b, unsigned long addr) +static inline void t2_outb(u8 b, unsigned long addr) { unsigned long w; @@ -372,13 +372,13 @@ __EXTERN_INLINE void t2_outb(u8 b, unsigned long addr) mb(); } -__EXTERN_INLINE u16 t2_inw(unsigned long addr) +static inline u16 t2_inw(unsigned long addr) { long result = *(vip) ((addr << 5) + T2_IO + 0x08); return __kernel_extwl(result, addr & 3); } -__EXTERN_INLINE void t2_outw(u16 b, unsigned long addr) +static inline void t2_outw(u16 b, unsigned long addr) { unsigned long w; @@ -387,12 +387,12 @@ __EXTERN_INLINE void t2_outw(u16 b, unsigned long addr) mb(); } -__EXTERN_INLINE u32 t2_inl(unsigned long addr) +static inline u32 t2_inl(unsigned long addr) { return *(vuip) ((addr << 5) + T2_IO + 0x18); } -__EXTERN_INLINE void t2_outl(u32 b, unsigned long addr) +static inline void t2_outl(u32 b, unsigned long addr) { *(vuip) ((addr << 5) + T2_IO + 0x18) = b; mb(); @@ -438,8 +438,9 @@ __EXTERN_INLINE void t2_outl(u32 b, unsigned long addr) static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED; -__EXTERN_INLINE u8 t2_readb(unsigned long addr) +__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; unsigned long result, msb; unsigned long flags; spin_lock_irqsave(&t2_hae_lock, flags); @@ -451,8 +452,9 @@ __EXTERN_INLINE u8 t2_readb(unsigned long addr) return __kernel_extbl(result, addr & 3); } -__EXTERN_INLINE u16 t2_readw(unsigned long addr) +__EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; unsigned long result, msb; unsigned long flags; spin_lock_irqsave(&t2_hae_lock, flags); @@ -468,8 +470,9 @@ __EXTERN_INLINE u16 t2_readw(unsigned long addr) * On SABLE with T2, we must use SPARSE memory even for 32-bit access, * because we cannot access all of DENSE without changing its HAE. 
*/ -__EXTERN_INLINE u32 t2_readl(unsigned long addr) +__EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; unsigned long result, msb; unsigned long flags; spin_lock_irqsave(&t2_hae_lock, flags); @@ -481,8 +484,9 @@ __EXTERN_INLINE u32 t2_readl(unsigned long addr) return result & 0xffffffffUL; } -__EXTERN_INLINE u64 t2_readq(unsigned long addr) +__EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; unsigned long r0, r1, work, msb; unsigned long flags; spin_lock_irqsave(&t2_hae_lock, flags); @@ -496,8 +500,9 @@ __EXTERN_INLINE u64 t2_readq(unsigned long addr) return r1 << 32 | r0; } -__EXTERN_INLINE void t2_writeb(u8 b, unsigned long addr) +__EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; unsigned long msb, w; unsigned long flags; spin_lock_irqsave(&t2_hae_lock, flags); @@ -509,8 +514,9 @@ __EXTERN_INLINE void t2_writeb(u8 b, unsigned long addr) spin_unlock_irqrestore(&t2_hae_lock, flags); } -__EXTERN_INLINE void t2_writew(u16 b, unsigned long addr) +__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; unsigned long msb, w; unsigned long flags; spin_lock_irqsave(&t2_hae_lock, flags); @@ -526,8 +532,9 @@ __EXTERN_INLINE void t2_writew(u16 b, unsigned long addr) * On SABLE with T2, we must use SPARSE memory even for 32-bit access, * because we cannot access all of DENSE without changing its HAE. */ -__EXTERN_INLINE void t2_writel(u32 b, unsigned long addr) +__EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; unsigned long msb; unsigned long flags; spin_lock_irqsave(&t2_hae_lock, flags); @@ -538,8 +545,9 @@ __EXTERN_INLINE void t2_writel(u32 b, unsigned long addr) spin_unlock_irqrestore(&t2_hae_lock, flags); } -__EXTERN_INLINE void t2_writeq(u64 b, unsigned long addr) +__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; unsigned long msb, work; unsigned long flags; spin_lock_irqsave(&t2_hae_lock, flags); @@ -552,16 +560,15 @@ __EXTERN_INLINE void t2_writeq(u64 b, unsigned long addr) spin_unlock_irqrestore(&t2_hae_lock, flags); } -__EXTERN_INLINE unsigned long t2_ioremap(unsigned long addr, - unsigned long size - __attribute__((unused))) +__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr) { - return addr; + return (void __iomem *)(addr + T2_IO); } -__EXTERN_INLINE void t2_iounmap(unsigned long addr) +__EXTERN_INLINE void __iomem *t2_ioremap(unsigned long addr, + unsigned long size) { - return; + return (void __iomem *)(addr + T2_DENSE_MEM); } __EXTERN_INLINE int t2_is_ioaddr(unsigned long addr) @@ -569,30 +576,47 @@ __EXTERN_INLINE int t2_is_ioaddr(unsigned long addr) return (long)addr >= 0; } +__EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr) +{ + return (unsigned long)addr >= T2_DENSE_MEM; +} + +/* New-style ioread interface. The mmio routines are so ugly for T2 that + it doesn't make sense to merge the pio and mmio routines. 
*/ + +#define IOPORT(OS, NS) \ +__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \ +{ \ + if (t2_is_mmio(xaddr)) \ + return t2_read##OS(xaddr - T2_DENSE_MEM); \ + else \ + return t2_in##OS((unsigned long)xaddr - T2_IO); \ +} \ +__EXTERN_INLINE void t2_iowrite##NS(u##NS b, void __iomem *xaddr) \ +{ \ + if (t2_is_mmio(xaddr)) \ + t2_write##OS(b, xaddr - T2_DENSE_MEM); \ + else \ + t2_out##OS(b, (unsigned long)xaddr - T2_IO); \ +} + +IOPORT(b, 8) +IOPORT(w, 16) +IOPORT(l, 32) + +#undef IOPORT + #undef vip #undef vuip -#ifdef __WANT_IO_DEF - -#define __inb(p) t2_inb((unsigned long)(p)) -#define __inw(p) t2_inw((unsigned long)(p)) -#define __inl(p) t2_inl((unsigned long)(p)) -#define __outb(x,p) t2_outb((x),(unsigned long)(p)) -#define __outw(x,p) t2_outw((x),(unsigned long)(p)) -#define __outl(x,p) t2_outl((x),(unsigned long)(p)) -#define __readb(a) t2_readb((unsigned long)(a)) -#define __readw(a) t2_readw((unsigned long)(a)) -#define __readl(a) t2_readl((unsigned long)(a)) -#define __readq(a) t2_readq((unsigned long)(a)) -#define __writeb(x,a) t2_writeb((x),(unsigned long)(a)) -#define __writew(x,a) t2_writew((x),(unsigned long)(a)) -#define __writel(x,a) t2_writel((x),(unsigned long)(a)) -#define __writeq(x,a) t2_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) t2_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) t2_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) t2_is_ioaddr((unsigned long)(a)) - -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX t2 +#define t2_trivial_rw_bw 0 +#define t2_trivial_rw_lq 0 +#define t2_trivial_io_bw 0 +#define t2_trivial_io_lq 0 +#define t2_trivial_iounmap 1 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_titan.h b/include/asm-alpha/core_titan.h index 39959ed0e535..a64ccbff7d98 100644 --- a/include/asm-alpha/core_titan.h +++ b/include/asm-alpha/core_titan.h @@ -377,149 +377,33 @@ struct el_PRIVATEER_envdata_mcheck { * can only use linear accesses to get at PCI/AGP memory and I/O spaces. */ -#define vucp volatile unsigned char * -#define vusp volatile unsigned short * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * - -__EXTERN_INLINE u8 titan_inb(unsigned long addr) -{ - /* ??? I wish I could get rid of this. But there's no ioremap - equivalent for I/O space. PCI I/O can be forced into the - correct hose's I/O region, but that doesn't take care of - legacy ISA crap. */ - - addr += TITAN_IO_BIAS; - return __kernel_ldbu(*(vucp)addr); -} - -__EXTERN_INLINE void titan_outb(u8 b, unsigned long addr) -{ - addr += TITAN_IO_BIAS; - __kernel_stb(b, *(vucp)addr); - mb(); -} - -__EXTERN_INLINE u16 titan_inw(unsigned long addr) -{ - addr += TITAN_IO_BIAS; - return __kernel_ldwu(*(vusp)addr); -} - -__EXTERN_INLINE void titan_outw(u16 b, unsigned long addr) -{ - addr += TITAN_IO_BIAS; - __kernel_stw(b, *(vusp)addr); - mb(); -} - -__EXTERN_INLINE u32 titan_inl(unsigned long addr) -{ - addr += TITAN_IO_BIAS; - return *(vuip)addr; -} - -__EXTERN_INLINE void titan_outl(u32 b, unsigned long addr) -{ - addr += TITAN_IO_BIAS; - *(vuip)addr = b; - mb(); -} - /* * Memory functions. all accesses are done through linear space. 
*/ -extern unsigned long titan_ioremap(unsigned long addr, unsigned long size); -extern void titan_iounmap(unsigned long addr); - -__EXTERN_INLINE int titan_is_ioaddr(unsigned long addr) -{ - return addr >= TITAN_BASE; -} - -__EXTERN_INLINE u8 titan_readb(unsigned long addr) -{ - return __kernel_ldbu(*(vucp)addr); -} - -__EXTERN_INLINE u16 titan_readw(unsigned long addr) -{ - return __kernel_ldwu(*(vusp)addr); -} - -__EXTERN_INLINE u32 titan_readl(unsigned long addr) -{ - return (*(vuip)addr) & 0xffffffff; -} - -__EXTERN_INLINE u64 titan_readq(unsigned long addr) +__EXTERN_INLINE void __iomem *titan_ioportmap(unsigned long addr) { - return *(vulp)addr; + return (void __iomem *)(addr + TITAN_IO_BIAS); } -__EXTERN_INLINE void titan_writeb(u8 b, unsigned long addr) -{ - __kernel_stb(b, *(vucp)addr); -} +extern void __iomem *titan_ioremap(unsigned long addr, unsigned long size); +extern void titan_iounmap(volatile void __iomem *addr); -__EXTERN_INLINE void titan_writew(u16 b, unsigned long addr) -{ - __kernel_stw(b, *(vusp)addr); -} - -__EXTERN_INLINE void titan_writel(u32 b, unsigned long addr) +__EXTERN_INLINE int titan_is_ioaddr(unsigned long addr) { - *(vuip)addr = b; + return addr >= TITAN_BASE; } -__EXTERN_INLINE void titan_writeq(u64 b, unsigned long addr) -{ - *(vulp)addr = b; -} +extern int titan_is_mmio(const volatile void __iomem *addr); -#undef vucp -#undef vusp -#undef vuip -#undef vulp - -#ifdef __WANT_IO_DEF - -#define __inb(p) titan_inb((unsigned long)(p)) -#define __inw(p) titan_inw((unsigned long)(p)) -#define __inl(p) titan_inl((unsigned long)(p)) -#define __outb(x,p) titan_outb((x),(unsigned long)(p)) -#define __outw(x,p) titan_outw((x),(unsigned long)(p)) -#define __outl(x,p) titan_outl((x),(unsigned long)(p)) -#define __readb(a) titan_readb((unsigned long)(a)) -#define __readw(a) titan_readw((unsigned long)(a)) -#define __readl(a) titan_readl((unsigned long)(a)) -#define __readq(a) titan_readq((unsigned long)(a)) -#define __writeb(x,a) titan_writeb((x),(unsigned long)(a)) -#define __writew(x,a) titan_writew((x),(unsigned long)(a)) -#define __writel(x,a) titan_writel((x),(unsigned long)(a)) -#define __writeq(x,a) titan_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) titan_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) titan_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) titan_is_ioaddr((unsigned long)(a)) - -#define inb(port) __inb((port)) -#define inw(port) __inw((port)) -#define inl(port) __inl((port)) -#define outb(v, port) __outb((v),(port)) -#define outw(v, port) __outw((v),(port)) -#define outl(v, port) __outl((v),(port)) - -#define __raw_readb(a) __readb((unsigned long)(a)) -#define __raw_readw(a) __readw((unsigned long)(a)) -#define __raw_readl(a) __readl((unsigned long)(a)) -#define __raw_readq(a) __readq((unsigned long)(a)) -#define __raw_writeb(v,a) __writeb((v),(unsigned long)(a)) -#define __raw_writew(v,a) __writew((v),(unsigned long)(a)) -#define __raw_writel(v,a) __writel((v),(unsigned long)(a)) -#define __raw_writeq(v,a) __writeq((v),(unsigned long)(a)) - -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX titan +#define titan_trivial_rw_bw 1 +#define titan_trivial_rw_lq 1 +#define titan_trivial_io_bw 1 +#define titan_trivial_io_lq 1 +#define titan_trivial_iounmap 0 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_tsunami.h b/include/asm-alpha/core_tsunami.h index 7c815eb42c30..44e635d2c571 100644 --- a/include/asm-alpha/core_tsunami.h +++ 
b/include/asm-alpha/core_tsunami.h @@ -299,69 +299,19 @@ struct el_TSUNAMI_sysdata_mcheck { * can only use linear accesses to get at PCI memory and I/O spaces. */ -#define vucp volatile unsigned char * -#define vusp volatile unsigned short * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * - -__EXTERN_INLINE u8 tsunami_inb(unsigned long addr) -{ - /* ??? I wish I could get rid of this. But there's no ioremap - equivalent for I/O space. PCI I/O can be forced into the - correct hose's I/O region, but that doesn't take care of - legacy ISA crap. */ - - addr += TSUNAMI_IO_BIAS; - return __kernel_ldbu(*(vucp)addr); -} - -__EXTERN_INLINE void tsunami_outb(u8 b, unsigned long addr) -{ - addr += TSUNAMI_IO_BIAS; - __kernel_stb(b, *(vucp)addr); - mb(); -} - -__EXTERN_INLINE u16 tsunami_inw(unsigned long addr) -{ - addr += TSUNAMI_IO_BIAS; - return __kernel_ldwu(*(vusp)addr); -} - -__EXTERN_INLINE void tsunami_outw(u16 b, unsigned long addr) -{ - addr += TSUNAMI_IO_BIAS; - __kernel_stw(b, *(vusp)addr); - mb(); -} - -__EXTERN_INLINE u32 tsunami_inl(unsigned long addr) -{ - addr += TSUNAMI_IO_BIAS; - return *(vuip)addr; -} - -__EXTERN_INLINE void tsunami_outl(u32 b, unsigned long addr) -{ - addr += TSUNAMI_IO_BIAS; - *(vuip)addr = b; - mb(); -} - /* * Memory functions. all accesses are done through linear space. */ -__EXTERN_INLINE unsigned long tsunami_ioremap(unsigned long addr, - unsigned long size - __attribute__((unused))) +__EXTERN_INLINE void __iomem *tsunami_ioportmap(unsigned long addr) { - return addr + TSUNAMI_MEM_BIAS; + return (void __iomem *)(addr + TSUNAMI_IO_BIAS); } -__EXTERN_INLINE void tsunami_iounmap(unsigned long addr) +__EXTERN_INLINE void __iomem *tsunami_ioremap(unsigned long addr, + unsigned long size) { - return; + return (void __iomem *)(addr + TSUNAMI_MEM_BIAS); } __EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr) @@ -369,87 +319,20 @@ __EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr) return addr >= TSUNAMI_BASE; } -__EXTERN_INLINE u8 tsunami_readb(unsigned long addr) -{ - return __kernel_ldbu(*(vucp)addr); -} - -__EXTERN_INLINE u16 tsunami_readw(unsigned long addr) -{ - return __kernel_ldwu(*(vusp)addr); -} - -__EXTERN_INLINE u32 tsunami_readl(unsigned long addr) -{ - return *(vuip)addr; -} - -__EXTERN_INLINE u64 tsunami_readq(unsigned long addr) -{ - return *(vulp)addr; -} - -__EXTERN_INLINE void tsunami_writeb(u8 b, unsigned long addr) -{ - __kernel_stb(b, *(vucp)addr); -} - -__EXTERN_INLINE void tsunami_writew(u16 b, unsigned long addr) -{ - __kernel_stw(b, *(vusp)addr); -} - -__EXTERN_INLINE void tsunami_writel(u32 b, unsigned long addr) -{ - *(vuip)addr = b; -} - -__EXTERN_INLINE void tsunami_writeq(u64 b, unsigned long addr) +__EXTERN_INLINE int tsunami_is_mmio(const volatile void __iomem *xaddr) { - *(vulp)addr = b; + unsigned long addr = (unsigned long) xaddr; + return (addr & 0x100000000UL) == 0; } -#undef vucp -#undef vusp -#undef vuip -#undef vulp - -#ifdef __WANT_IO_DEF - -#define __inb(p) tsunami_inb((unsigned long)(p)) -#define __inw(p) tsunami_inw((unsigned long)(p)) -#define __inl(p) tsunami_inl((unsigned long)(p)) -#define __outb(x,p) tsunami_outb((x),(unsigned long)(p)) -#define __outw(x,p) tsunami_outw((x),(unsigned long)(p)) -#define __outl(x,p) tsunami_outl((x),(unsigned long)(p)) -#define __readb(a) tsunami_readb((unsigned long)(a)) -#define __readw(a) tsunami_readw((unsigned long)(a)) -#define __readl(a) tsunami_readl((unsigned long)(a)) -#define __readq(a) tsunami_readq((unsigned long)(a)) -#define 
__writeb(x,a) tsunami_writeb((x),(unsigned long)(a)) -#define __writew(x,a) tsunami_writew((x),(unsigned long)(a)) -#define __writel(x,a) tsunami_writel((x),(unsigned long)(a)) -#define __writeq(x,a) tsunami_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) tsunami_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) tsunami_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) tsunami_is_ioaddr((unsigned long)(a)) - -#define inb(p) __inb(p) -#define inw(p) __inw(p) -#define inl(p) __inl(p) -#define outb(x,p) __outb((x),(p)) -#define outw(x,p) __outw((x),(p)) -#define outl(x,p) __outl((x),(p)) -#define __raw_readb(a) __readb(a) -#define __raw_readw(a) __readw(a) -#define __raw_readl(a) __readl(a) -#define __raw_readq(a) __readq(a) -#define __raw_writeb(v,a) __writeb((v),(a)) -#define __raw_writew(v,a) __writew((v),(a)) -#define __raw_writel(v,a) __writel((v),(a)) -#define __raw_writeq(v,a) __writeq((v),(a)) - -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX tsunami +#define tsunami_trivial_rw_bw 1 +#define tsunami_trivial_rw_lq 1 +#define tsunami_trivial_io_bw 1 +#define tsunami_trivial_io_lq 1 +#define tsunami_trivial_iounmap 1 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/core_wildfire.h b/include/asm-alpha/core_wildfire.h index 7b8bbd11798f..12af803d445a 100644 --- a/include/asm-alpha/core_wildfire.h +++ b/include/asm-alpha/core_wildfire.h @@ -273,69 +273,19 @@ typedef struct { #define __IO_EXTERN_INLINE #endif -#define vucp volatile unsigned char * -#define vusp volatile unsigned short * -#define vuip volatile unsigned int * -#define vulp volatile unsigned long * - -__EXTERN_INLINE u8 wildfire_inb(unsigned long addr) -{ - /* ??? I wish I could get rid of this. But there's no ioremap - equivalent for I/O space. PCI I/O can be forced into the - correct hose's I/O region, but that doesn't take care of - legacy ISA crap. */ - - addr += WILDFIRE_IO_BIAS; - return __kernel_ldbu(*(vucp)addr); -} - -__EXTERN_INLINE void wildfire_outb(u8 b, unsigned long addr) -{ - addr += WILDFIRE_IO_BIAS; - __kernel_stb(b, *(vucp)addr); - mb(); -} - -__EXTERN_INLINE u16 wildfire_inw(unsigned long addr) -{ - addr += WILDFIRE_IO_BIAS; - return __kernel_ldwu(*(vusp)addr); -} - -__EXTERN_INLINE void wildfire_outw(u16 b, unsigned long addr) -{ - addr += WILDFIRE_IO_BIAS; - __kernel_stw(b, *(vusp)addr); - mb(); -} - -__EXTERN_INLINE u32 wildfire_inl(unsigned long addr) -{ - addr += WILDFIRE_IO_BIAS; - return *(vuip)addr; -} - -__EXTERN_INLINE void wildfire_outl(u32 b, unsigned long addr) -{ - addr += WILDFIRE_IO_BIAS; - *(vuip)addr = b; - mb(); -} - /* * Memory functions. all accesses are done through linear space. 
*/ -__EXTERN_INLINE unsigned long wildfire_ioremap(unsigned long addr, - unsigned long size - __attribute__((unused))) +__EXTERN_INLINE void __iomem *wildfire_ioportmap(unsigned long addr) { - return addr + WILDFIRE_MEM_BIAS; + return (void __iomem *)(addr + WILDFIRE_IO_BIAS); } -__EXTERN_INLINE void wildfire_iounmap(unsigned long addr) +__EXTERN_INLINE void __iomem *wildfire_ioremap(unsigned long addr, + unsigned long size) { - return; + return (void __iomem *)(addr + WILDFIRE_MEM_BIAS); } __EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr) @@ -343,87 +293,20 @@ __EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr) return addr >= WILDFIRE_BASE; } -__EXTERN_INLINE u8 wildfire_readb(unsigned long addr) -{ - return __kernel_ldbu(*(vucp)addr); -} - -__EXTERN_INLINE u16 wildfire_readw(unsigned long addr) -{ - return __kernel_ldwu(*(vusp)addr); -} - -__EXTERN_INLINE u32 wildfire_readl(unsigned long addr) -{ - return (*(vuip)addr) & 0xffffffff; -} - -__EXTERN_INLINE u64 wildfire_readq(unsigned long addr) -{ - return *(vulp)addr; -} - -__EXTERN_INLINE void wildfire_writeb(u8 b, unsigned long addr) -{ - __kernel_stb(b, *(vucp)addr); -} - -__EXTERN_INLINE void wildfire_writew(u16 b, unsigned long addr) -{ - __kernel_stw(b, *(vusp)addr); -} - -__EXTERN_INLINE void wildfire_writel(u32 b, unsigned long addr) -{ - *(vuip)addr = b; -} - -__EXTERN_INLINE void wildfire_writeq(u64 b, unsigned long addr) +__EXTERN_INLINE int wildfire_is_mmio(const volatile void __iomem *xaddr) { - *(vulp)addr = b; + unsigned long addr = (unsigned long)xaddr; + return (addr & 0x100000000UL) == 0; } -#undef vucp -#undef vusp -#undef vuip -#undef vulp - -#ifdef __WANT_IO_DEF - -#define __inb(p) wildfire_inb((unsigned long)(p)) -#define __inw(p) wildfire_inw((unsigned long)(p)) -#define __inl(p) wildfire_inl((unsigned long)(p)) -#define __outb(x,p) wildfire_outb((x),(unsigned long)(p)) -#define __outw(x,p) wildfire_outw((x),(unsigned long)(p)) -#define __outl(x,p) wildfire_outl((x),(unsigned long)(p)) -#define __readb(a) wildfire_readb((unsigned long)(a)) -#define __readw(a) wildfire_readw((unsigned long)(a)) -#define __readl(a) wildfire_readl((unsigned long)(a)) -#define __readq(a) wildfire_readq((unsigned long)(a)) -#define __writeb(x,a) wildfire_writeb((x),(unsigned long)(a)) -#define __writew(x,a) wildfire_writew((x),(unsigned long)(a)) -#define __writel(x,a) wildfire_writel((x),(unsigned long)(a)) -#define __writeq(x,a) wildfire_writeq((x),(unsigned long)(a)) -#define __ioremap(a,s) wildfire_ioremap((unsigned long)(a),(s)) -#define __iounmap(a) wildfire_iounmap((unsigned long)(a)) -#define __is_ioaddr(a) wildfire_is_ioaddr((unsigned long)(a)) - -#define inb(p) __inb(p) -#define inw(p) __inw(p) -#define inl(p) __inl(p) -#define outb(x,p) __outb((x),(p)) -#define outw(x,p) __outw((x),(p)) -#define outl(x,p) __outl((x),(p)) -#define __raw_readb(a) __readb(a) -#define __raw_readw(a) __readw(a) -#define __raw_readl(a) __readl(a) -#define __raw_readq(a) __readq(a) -#define __raw_writeb(v,a) __writeb((v),(a)) -#define __raw_writew(v,a) __writew((v),(a)) -#define __raw_writel(v,a) __writel((v),(a)) -#define __raw_writeq(v,a) __writeq((v),(a)) - -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX wildfire +#define wildfire_trivial_rw_bw 1 +#define wildfire_trivial_rw_lq 1 +#define wildfire_trivial_io_bw 1 +#define wildfire_trivial_io_lq 1 +#define wildfire_trivial_iounmap 1 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/io.h 
b/include/asm-alpha/io.h index 5f4a74357838..2b712ee0d1bb 100644 --- a/include/asm-alpha/io.h +++ b/include/asm-alpha/io.h @@ -1,6 +1,20 @@ #ifndef __ALPHA_IO_H #define __ALPHA_IO_H +#ifdef __KERNEL__ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <asm/compiler.h> +#include <asm/system.h> +#include <asm/pgtable.h> +#include <asm/machvec.h> +#include <asm/hwrpb.h> + +/* The generic header contains only prototypes. Including it ensures that + the implementation we have here matches that interface. */ +#include <asm-generic/iomap.h> + /* We don't use IO slowdowns on the Alpha, but.. */ #define __SLOW_DOWN_IO do { } while (0) #define SLOW_DOWN_IO do { } while (0) @@ -14,14 +28,6 @@ #define IDENT_ADDR 0xfffffc0000000000UL #endif -#ifdef __KERNEL__ -#include <linux/config.h> -#include <linux/kernel.h> -#include <asm/system.h> -#include <asm/pgtable.h> -#include <asm/machvec.h> -#include <asm/hwrpb.h> - /* * We try to avoid hae updates (thus the cache), but when we * do need to update the hae, we need to do it atomically, so @@ -88,6 +94,9 @@ static inline void * phys_to_virt(unsigned long address) /* This depends on working iommu. */ #define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0) +/* Maximum PIO space address supported? */ +#define IO_SPACE_LIMIT 0xffff + /* * Change addresses as seen by the kernel (virtual) to addresses as * seen by a device (bus), and vice versa. @@ -118,67 +127,81 @@ static inline void *bus_to_virt(unsigned long address) return (long)address <= 0 ? NULL : virt; } -#else /* !__KERNEL__ */ - -/* - * Define actual functions in private name-space so it's easier to - * accommodate things like XFree or svgalib that like to define their - * own versions of inb etc. - */ -extern void __sethae (unsigned long addr); /* syscall */ -extern void _sethae (unsigned long addr); /* cached version */ - -#endif /* !__KERNEL__ */ - /* * There are different chipsets to interface the Alpha CPUs to the world. */ -#ifdef __KERNEL__ +#define IO_CONCAT(a,b) _IO_CONCAT(a,b) +#define _IO_CONCAT(a,b) a ## _ ## b + #ifdef CONFIG_ALPHA_GENERIC /* In a generic kernel, we always go through the machine vector. 
*/ -# define __inb(p) alpha_mv.mv_inb((unsigned long)(p)) -# define __inw(p) alpha_mv.mv_inw((unsigned long)(p)) -# define __inl(p) alpha_mv.mv_inl((unsigned long)(p)) -# define __outb(x,p) alpha_mv.mv_outb((x),(unsigned long)(p)) -# define __outw(x,p) alpha_mv.mv_outw((x),(unsigned long)(p)) -# define __outl(x,p) alpha_mv.mv_outl((x),(unsigned long)(p)) - -# define __readb(a) alpha_mv.mv_readb((unsigned long)(a)) -# define __readw(a) alpha_mv.mv_readw((unsigned long)(a)) -# define __readl(a) alpha_mv.mv_readl((unsigned long)(a)) -# define __readq(a) alpha_mv.mv_readq((unsigned long)(a)) -# define __writeb(v,a) alpha_mv.mv_writeb((v),(unsigned long)(a)) -# define __writew(v,a) alpha_mv.mv_writew((v),(unsigned long)(a)) -# define __writel(v,a) alpha_mv.mv_writel((v),(unsigned long)(a)) -# define __writeq(v,a) alpha_mv.mv_writeq((v),(unsigned long)(a)) - -# define __ioremap(a,s) alpha_mv.mv_ioremap((unsigned long)(a),(s)) -# define __iounmap(a) alpha_mv.mv_iounmap((unsigned long)(a)) -# define __is_ioaddr(a) alpha_mv.mv_is_ioaddr((unsigned long)(a)) - -# define inb __inb -# define inw __inw -# define inl __inl -# define outb __outb -# define outw __outw -# define outl __outl - -# define __raw_readb __readb -# define __raw_readw __readw -# define __raw_readl __readl -# define __raw_readq __readq -# define __raw_writeb __writeb -# define __raw_writew __writew -# define __raw_writel __writel -# define __raw_writeq __writeq +#define REMAP1(TYPE, NAME, QUAL) \ +static inline TYPE generic_##NAME(QUAL void __iomem *addr) \ +{ \ + return alpha_mv.mv_##NAME(addr); \ +} + +#define REMAP2(TYPE, NAME, QUAL) \ +static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \ +{ \ + alpha_mv.mv_##NAME(b, addr); \ +} -#else +REMAP1(unsigned int, ioread8, /**/) +REMAP1(unsigned int, ioread16, /**/) +REMAP1(unsigned int, ioread32, /**/) +REMAP1(u8, readb, const volatile) +REMAP1(u16, readw, const volatile) +REMAP1(u32, readl, const volatile) +REMAP1(u64, readq, const volatile) + +REMAP2(u8, iowrite8, /**/) +REMAP2(u16, iowrite16, /**/) +REMAP2(u32, iowrite32, /**/) +REMAP2(u8, writeb, volatile) +REMAP2(u16, writew, volatile) +REMAP2(u32, writel, volatile) +REMAP2(u64, writeq, volatile) + +#undef REMAP1 +#undef REMAP2 + +static inline void __iomem *generic_ioportmap(unsigned long a) +{ + return alpha_mv.mv_ioportmap(a); +} + +static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s) +{ + return alpha_mv.mv_ioremap(a, s); +} + +static inline void generic_iounmap(volatile void __iomem *a) +{ + return alpha_mv.mv_iounmap(a); +} + +static inline int generic_is_ioaddr(unsigned long a) +{ + return alpha_mv.mv_is_ioaddr(a); +} -/* Control how and what gets defined within the core logic headers. */ -#define __WANT_IO_DEF +static inline int generic_is_mmio(const volatile void __iomem *a) +{ + return alpha_mv.mv_is_mmio(a); +} + +#define __IO_PREFIX generic +#define generic_trivial_rw_bw 0 +#define generic_trivial_rw_lq 0 +#define generic_trivial_io_bw 0 +#define generic_trivial_io_lq 0 +#define generic_trivial_iounmap 0 + +#else #if defined(CONFIG_ALPHA_APECS) # include <asm/core_apecs.h> @@ -208,245 +231,280 @@ extern void _sethae (unsigned long addr); /* cached version */ #error "What system is this?" #endif -#undef __WANT_IO_DEF - #endif /* GENERIC */ -#endif /* __KERNEL__ */ /* - * The convention used for inb/outb etc. 
is that names starting with - * two underscores are the inline versions, names starting with a - * single underscore are proper functions, and names starting with a - * letter are macros that map in some way to inline or proper function - * versions. Not all that pretty, but before you change it, be sure - * to convince yourself that it won't break anything (in particular - * module support). + * We always have external versions of these routines. */ -extern u8 _inb (unsigned long port); -extern u16 _inw (unsigned long port); -extern u32 _inl (unsigned long port); -extern void _outb (u8 b,unsigned long port); -extern void _outw (u16 w,unsigned long port); -extern void _outl (u32 l,unsigned long port); -extern u8 _readb(unsigned long addr); -extern u16 _readw(unsigned long addr); -extern u32 _readl(unsigned long addr); -extern u64 _readq(unsigned long addr); -extern void _writeb(u8 b, unsigned long addr); -extern void _writew(u16 b, unsigned long addr); -extern void _writel(u32 b, unsigned long addr); -extern void _writeq(u64 b, unsigned long addr); +extern u8 inb(unsigned long port); +extern u16 inw(unsigned long port); +extern u32 inl(unsigned long port); +extern void outb(u8 b, unsigned long port); +extern void outw(u16 b, unsigned long port); +extern void outl(u32 b, unsigned long port); + +extern u8 readb(const volatile void __iomem *addr); +extern u16 readw(const volatile void __iomem *addr); +extern u32 readl(const volatile void __iomem *addr); +extern u64 readq(const volatile void __iomem *addr); +extern void writeb(u8 b, volatile void __iomem *addr); +extern void writew(u16 b, volatile void __iomem *addr); +extern void writel(u32 b, volatile void __iomem *addr); +extern void writeq(u64 b, volatile void __iomem *addr); + +extern u8 __raw_readb(const volatile void __iomem *addr); +extern u16 __raw_readw(const volatile void __iomem *addr); +extern u32 __raw_readl(const volatile void __iomem *addr); +extern u64 __raw_readq(const volatile void __iomem *addr); +extern void __raw_writeb(u8 b, volatile void __iomem *addr); +extern void __raw_writew(u16 b, volatile void __iomem *addr); +extern void __raw_writel(u32 b, volatile void __iomem *addr); +extern void __raw_writeq(u64 b, volatile void __iomem *addr); -#ifdef __KERNEL__ /* - * The platform header files may define some of these macros to use - * the inlined versions where appropriate. These macros may also be - * redefined by userlevel programs. + * Mapping from port numbers to __iomem space is pretty easy. */ -#ifndef inb -# define inb(p) _inb(p) -#endif -#ifndef inw -# define inw(p) _inw(p) -#endif -#ifndef inl -# define inl(p) _inl(p) -#endif -#ifndef outb -# define outb(b,p) _outb((b),(p)) -#endif -#ifndef outw -# define outw(w,p) _outw((w),(p)) -#endif -#ifndef outl -# define outl(l,p) _outl((l),(p)) -#endif -#ifndef inb_p -# define inb_p inb -#endif -#ifndef inw_p -# define inw_p inw -#endif -#ifndef inl_p -# define inl_p inl -#endif +/* These two have to be extern inline so that we don't get redefinition + errors building lib/iomap.c. Which we don't want anyway, but... 
*/ +extern inline void __iomem *ioport_map(unsigned long port, unsigned int size) +{ + return IO_CONCAT(__IO_PREFIX,ioportmap) (port); +} -#ifndef outb_p -# define outb_p outb -#endif -#ifndef outw_p -# define outw_p outw -#endif -#ifndef outl_p -# define outl_p outl -#endif +extern inline void ioport_unmap(void __iomem *addr) +{ +} -#define IO_SPACE_LIMIT 0xffff +static inline void __iomem *ioremap(unsigned long port, unsigned long size) +{ + return IO_CONCAT(__IO_PREFIX,ioremap) (port, size); +} -#else +static inline void __iomem * ioremap_nocache(unsigned long offset, + unsigned long size) +{ + return ioremap(offset, size); +} -/* Userspace declarations. Kill in 2.5. */ +static inline void iounmap(volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iounmap)(addr); +} -extern unsigned int inb(unsigned long port); -extern unsigned int inw(unsigned long port); -extern unsigned int inl(unsigned long port); -extern void outb(unsigned char b,unsigned long port); -extern void outw(unsigned short w,unsigned long port); -extern void outl(unsigned int l,unsigned long port); -extern unsigned long readb(unsigned long addr); -extern unsigned long readw(unsigned long addr); -extern unsigned long readl(unsigned long addr); -extern void writeb(unsigned char b, unsigned long addr); -extern void writew(unsigned short b, unsigned long addr); -extern void writel(unsigned int b, unsigned long addr); +static inline int __is_ioaddr(unsigned long addr) +{ + return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr); +} +#define __is_ioaddr(a) __is_ioaddr((unsigned long)(a)) -#endif /* __KERNEL__ */ +static inline int __is_mmio(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,is_mmio)(addr); +} -#ifdef __KERNEL__ /* - * On Alpha, we have the whole of I/O space mapped at all times, but - * at odd and sometimes discontinuous addresses. Note that the - * discontinuities are all across busses, so we need not care for that - * for any one device. - * - * The DRM drivers need to be able to map contiguously a (potentially) - * discontiguous set of I/O pages. This set of pages is scatter-gather - * mapped contiguously from the perspective of the bus, but we can't - * directly access DMA addresses from the CPU, these addresses need to - * have a real ioremap. Therefore, iounmap and the size argument to - * ioremap are needed to give the platforms the ability to fully implement - * ioremap. - * - * Map the I/O space address into the kernel's virtual address space. + * If the actual I/O bits are sufficiently trivial, then expand inline. */ -static inline void * ioremap(unsigned long offset, unsigned long size) + +#if IO_CONCAT(__IO_PREFIX,trivial_io_bw) +extern inline unsigned int ioread8(void __iomem *addr) { - return (void *) __ioremap(offset, size); -} + unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); + mb(); + return ret; +} -static inline void iounmap(void *addr) +extern inline unsigned int ioread16(void __iomem *addr) { - __iounmap(addr); + unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); + mb(); + return ret; } -static inline void * ioremap_nocache(unsigned long offset, unsigned long size) +extern inline void iowrite8(u8 b, void __iomem *addr) { - return ioremap(offset, size); -} + IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); + mb(); +} -/* Indirect back to the macros provided. 
*/ +extern inline void iowrite16(u16 b, void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); + mb(); +} -extern u8 ___raw_readb(unsigned long addr); -extern u16 ___raw_readw(unsigned long addr); -extern u32 ___raw_readl(unsigned long addr); -extern u64 ___raw_readq(unsigned long addr); -extern void ___raw_writeb(u8 b, unsigned long addr); -extern void ___raw_writew(u16 b, unsigned long addr); -extern void ___raw_writel(u32 b, unsigned long addr); -extern void ___raw_writeq(u64 b, unsigned long addr); +extern inline u8 inb(unsigned long port) +{ + return ioread8(ioport_map(port, 1)); +} -#ifdef __raw_readb -# define readb(a) ({ u8 r_ = __raw_readb(a); mb(); r_; }) -#endif -#ifdef __raw_readw -# define readw(a) ({ u16 r_ = __raw_readw(a); mb(); r_; }) -#endif -#ifdef __raw_readl -# define readl(a) ({ u32 r_ = __raw_readl(a); mb(); r_; }) -#endif -#ifdef __raw_readq -# define readq(a) ({ u64 r_ = __raw_readq(a); mb(); r_; }) -#endif +extern inline u16 inw(unsigned long port) +{ + return ioread16(ioport_map(port, 2)); +} -#ifdef __raw_writeb -# define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); }) -#endif -#ifdef __raw_writew -# define writew(v,a) ({ __raw_writew((v),(a)); mb(); }) -#endif -#ifdef __raw_writel -# define writel(v,a) ({ __raw_writel((v),(a)); mb(); }) -#endif -#ifdef __raw_writeq -# define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); }) -#endif +extern inline void outb(u8 b, unsigned long port) +{ + iowrite8(b, ioport_map(port, 1)); +} -#ifndef __raw_readb -# define __raw_readb(a) ___raw_readb((unsigned long)(a)) -#endif -#ifndef __raw_readw -# define __raw_readw(a) ___raw_readw((unsigned long)(a)) -#endif -#ifndef __raw_readl -# define __raw_readl(a) ___raw_readl((unsigned long)(a)) -#endif -#ifndef __raw_readq -# define __raw_readq(a) ___raw_readq((unsigned long)(a)) +extern inline void outw(u16 b, unsigned long port) +{ + iowrite16(b, ioport_map(port, 2)); +} #endif -#ifndef __raw_writeb -# define __raw_writeb(v,a) ___raw_writeb((v),(unsigned long)(a)) -#endif -#ifndef __raw_writew -# define __raw_writew(v,a) ___raw_writew((v),(unsigned long)(a)) -#endif -#ifndef __raw_writel -# define __raw_writel(v,a) ___raw_writel((v),(unsigned long)(a)) -#endif -#ifndef __raw_writeq -# define __raw_writeq(v,a) ___raw_writeq((v),(unsigned long)(a)) -#endif +#if IO_CONCAT(__IO_PREFIX,trivial_io_lq) +extern inline unsigned int ioread32(void __iomem *addr) +{ + unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); + mb(); + return ret; +} -#ifndef readb -# define readb(a) _readb((unsigned long)(a)) -#endif -#ifndef readw -# define readw(a) _readw((unsigned long)(a)) -#endif -#ifndef readl -# define readl(a) _readl((unsigned long)(a)) -#endif -#ifndef readq -# define readq(a) _readq((unsigned long)(a)) -#endif +extern inline void iowrite32(u32 b, void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); + mb(); +} -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) -#define readq_relaxed(addr) readq(addr) +extern inline u32 inl(unsigned long port) +{ + return ioread32(ioport_map(port, 4)); +} -#ifndef writeb -# define writeb(v,a) _writeb((v),(unsigned long)(a)) -#endif -#ifndef writew -# define writew(v,a) _writew((v),(unsigned long)(a)) +extern inline void outl(u32 b, unsigned long port) +{ + iowrite32(b, ioport_map(port, 4)); +} #endif -#ifndef writel -# define writel(v,a) _writel((v),(unsigned long)(a)) + +#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 +extern inline u8 __raw_readb(const 
volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readb)(addr); +} + +extern inline u16 __raw_readw(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readw)(addr); +} + +extern inline void __raw_writeb(u8 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writeb)(b, addr); +} + +extern inline void __raw_writew(u16 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writew)(b, addr); +} + +extern inline u8 readb(const volatile void __iomem *addr) +{ + u8 ret = __raw_readb(addr); + mb(); + return ret; +} + +extern inline u16 readw(const volatile void __iomem *addr) +{ + u16 ret = __raw_readw(addr); + mb(); + return ret; +} + +extern inline void writeb(u8 b, volatile void __iomem *addr) +{ + __raw_writeb(b, addr); + mb(); +} + +extern inline void writew(u16 b, volatile void __iomem *addr) +{ + __raw_writew(b, addr); + mb(); +} #endif -#ifndef writeq -# define writeq(v,a) _writeq((v),(unsigned long)(a)) + +#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1 +extern inline u32 __raw_readl(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readl)(addr); +} + +extern inline u64 __raw_readq(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readq)(addr); +} + +extern inline void __raw_writel(u32 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writel)(b, addr); +} + +extern inline void __raw_writeq(u64 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writeq)(b, addr); +} + +extern inline u32 readl(const volatile void __iomem *addr) +{ + u32 ret = __raw_readl(addr); + mb(); + return ret; +} + +extern inline u64 readq(const volatile void __iomem *addr) +{ + u64 ret = __raw_readq(addr); + mb(); + return ret; +} + +extern inline void writel(u32 b, volatile void __iomem *addr) +{ + __raw_writel(b, addr); + mb(); +} + +extern inline void writeq(u64 b, volatile void __iomem *addr) +{ + __raw_writeq(b, addr); + mb(); +} #endif +#define inb_p inb +#define inw_p inw +#define inl_p inl +#define outb_p outb +#define outw_p outw +#define outl_p outl +#define readb_relaxed(addr) __raw_readb(addr) +#define readw_relaxed(addr) __raw_readw(addr) +#define readl_relaxed(addr) __raw_readl(addr) +#define readq_relaxed(addr) __raw_readq(addr) + /* * String version of IO memory access ops: */ -extern void _memcpy_fromio(void *, unsigned long, long); -extern void _memcpy_toio(unsigned long, const void *, long); -extern void _memset_c_io(unsigned long, unsigned long, long); +extern void memcpy_fromio(void *, const volatile void __iomem *, long); +extern void memcpy_toio(volatile void __iomem *, const void *, long); +extern void _memset_c_io(volatile void __iomem *, unsigned long, long); -#define memcpy_fromio(to,from,len) \ - _memcpy_fromio((to),(unsigned long)(from),(len)) -#define memcpy_toio(to,from,len) \ - _memcpy_toio((unsigned long)(to),(from),(len)) -#define memset_io(addr,c,len) \ - _memset_c_io((unsigned long)(addr),0x0101010101010101UL*(u8)(c),(len)) +static inline void memset_io(volatile void __iomem *addr, u8 c, long len) +{ + _memset_c_io(addr, 0x0101010101010101UL * c, len); +} #define __HAVE_ARCH_MEMSETW_IO -#define memsetw_io(addr,c,len) \ - _memset_c_io((unsigned long)(addr),0x0001000100010001UL*(u16)(c),(len)) +static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len) +{ + _memset_c_io(addr, 0x0001000100010001UL * c, len); +} /* * String versions of in/out ops: @@ -465,26 +523,22 @@ extern void outsl (unsigned long port, const void *src, unsigned long count); */ #define 
eth_io_copy_and_sum(skb,src,len,unused) \ - memcpy_fromio((skb)->data,(src),(len)) + memcpy_fromio((skb)->data,src,len) #define isa_eth_io_copy_and_sum(skb,src,len,unused) \ - isa_memcpy_fromio((skb)->data,(src),(len)) + isa_memcpy_fromio((skb)->data,src,len) static inline int -check_signature(unsigned long io_addr, const unsigned char *signature, - int length) +check_signature(const volatile void __iomem *io_addr, + const unsigned char *signature, int length) { - int retval = 0; do { if (readb(io_addr) != *signature) - goto out; + return 0; io_addr++; signature++; - length--; - } while (length); - retval = 1; -out: - return retval; + } while (--length); + return 1; } @@ -492,31 +546,89 @@ out: * ISA space is mapped to some machine-specific location on Alpha. * Call into the existing hooks to get the address translated. */ -#define isa_readb(a) readb(__ioremap((a),1)) -#define isa_readw(a) readw(__ioremap((a),2)) -#define isa_readl(a) readl(__ioremap((a),4)) -#define isa_writeb(b,a) writeb((b),__ioremap((a),1)) -#define isa_writew(w,a) writew((w),__ioremap((a),2)) -#define isa_writel(l,a) writel((l),__ioremap((a),4)) -#define isa_memset_io(a,b,c) memset_io(__ioremap((a),(c)),(b),(c)) -#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ioremap((b),(c)),(c)) -#define isa_memcpy_toio(a,b,c) memcpy_toio(__ioremap((a),(c)),(b),(c)) + +static inline u8 +isa_readb(unsigned long offset) +{ + void __iomem *addr = ioremap(offset, 1); + u8 ret = readb(addr); + iounmap(addr); + return ret; +} + +static inline u16 +isa_readw(unsigned long offset) +{ + void __iomem *addr = ioremap(offset, 2); + u16 ret = readw(addr); + iounmap(addr); + return ret; +} + +static inline u32 +isa_readl(unsigned long offset) +{ + void __iomem *addr = ioremap(offset, 2); + u32 ret = readl(addr); + iounmap(addr); + return ret; +} + +static inline void +isa_writeb(u8 b, unsigned long offset) +{ + void __iomem *addr = ioremap(offset, 2); + writeb(b, addr); + iounmap(addr); +} + +static inline void +isa_writew(u16 w, unsigned long offset) +{ + void __iomem *addr = ioremap(offset, 2); + writew(w, addr); + iounmap(addr); +} + +static inline void +isa_writel(u32 l, unsigned long offset) +{ + void __iomem *addr = ioremap(offset, 2); + writel(l, addr); + iounmap(addr); +} + +static inline void +isa_memset_io(unsigned long offset, u8 val, long n) +{ + void __iomem *addr = ioremap(offset, n); + memset_io(addr, val, n); + iounmap(addr); +} + +static inline void +isa_memcpy_fromio(void *dest, unsigned long offset, long n) +{ + void __iomem *addr = ioremap(offset, n); + memcpy_fromio(dest, addr, n); + iounmap(addr); +} + +static inline void +isa_memcpy_toio(unsigned long offset, const void *src, long n) +{ + void __iomem *addr = ioremap(offset, n); + memcpy_toio(addr, src, n); + iounmap(addr); +} static inline int -isa_check_signature(unsigned long io_addr, const unsigned char *signature, - int length) +isa_check_signature(unsigned long offset, const unsigned char *sig, long len) { - int retval = 0; - do { - if (isa_readb(io_addr) != *signature) - goto out; - io_addr++; - signature++; - length--; - } while (length); - retval = 1; -out: - return retval; + void __iomem *addr = ioremap(offset, len); + int ret = check_signature(addr, sig, len); + iounmap(addr); + return ret; } diff --git a/include/asm-alpha/io_trivial.h b/include/asm-alpha/io_trivial.h new file mode 100644 index 000000000000..cfe1f86c33db --- /dev/null +++ b/include/asm-alpha/io_trivial.h @@ -0,0 +1,127 @@ +/* Trivial implementations of basic i/o routines. 
Assumes that all + of the hard work has been done by ioremap and ioportmap, and that + access to i/o space is linear. */ + +/* This file may be included multiple times. */ + +#if IO_CONCAT(__IO_PREFIX,trivial_io_bw) +__EXTERN_INLINE unsigned int +IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a) +{ + return __kernel_ldbu(*(volatile u8 __force *)a); +} + +__EXTERN_INLINE unsigned int +IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a) +{ + return __kernel_ldwu(*(volatile u16 __force *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,iowrite8)(u8 b, void __iomem *a) +{ + __kernel_stb(b, *(volatile u8 __force *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a) +{ + __kernel_stb(b, *(volatile u16 __force *)a); +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_io_lq) +__EXTERN_INLINE unsigned int +IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a) +{ + return *(volatile u32 __force *)a; +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a) +{ + *(volatile u32 __force *)a = b; +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 +__EXTERN_INLINE u8 +IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) +{ + return __kernel_ldbu(*(const volatile u8 __force *)a); +} + +__EXTERN_INLINE u16 +IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) +{ + return __kernel_ldwu(*(const volatile u16 __force *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a) +{ + __kernel_stb(b, *(volatile u8 __force *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) +{ + __kernel_stb(b, *(volatile u16 __force *)a); +} +#elif IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 2 +__EXTERN_INLINE u8 +IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) +{ + return IO_CONCAT(__IO_PREFIX,ioread8)((void __iomem *)a); +} + +__EXTERN_INLINE u16 +IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) +{ + return IO_CONCAT(__IO_PREFIX,ioread16)((void __iomem *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a) +{ + IO_CONCAT(__IO_PREFIX,iowrite8)(b, (void __iomem *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) +{ + IO_CONCAT(__IO_PREFIX,iowrite16)(b, (void __iomem *)a); +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1 +__EXTERN_INLINE u32 +IO_CONCAT(__IO_PREFIX,readl)(const volatile void __iomem *a) +{ + return *(const volatile u32 __force *)a; +} + +__EXTERN_INLINE u64 +IO_CONCAT(__IO_PREFIX,readq)(const volatile void __iomem *a) +{ + return *(const volatile u64 __force *)a; +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writel)(u32 b, volatile void __iomem *a) +{ + *(volatile u32 __force *)a = b; +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writeq)(u64 b, volatile void __iomem *a) +{ + *(volatile u64 __force *)a = b; +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_iounmap) +__EXTERN_INLINE void IO_CONCAT(__IO_PREFIX,iounmap)(volatile void __iomem *a) +{ +} +#endif diff --git a/include/asm-alpha/jensen.h b/include/asm-alpha/jensen.h index 7e6c35b146c3..964b06ead43b 100644 --- a/include/asm-alpha/jensen.h +++ b/include/asm-alpha/jensen.h @@ -200,8 +200,9 @@ __EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr) * Memory functions. 
*/ -__EXTERN_INLINE u8 jensen_readb(unsigned long addr) +__EXTERN_INLINE u8 jensen_readb(const volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; long result; jensen_set_hae(addr); @@ -211,8 +212,9 @@ __EXTERN_INLINE u8 jensen_readb(unsigned long addr) return 0xffUL & result; } -__EXTERN_INLINE u16 jensen_readw(unsigned long addr) +__EXTERN_INLINE u16 jensen_readw(const volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; long result; jensen_set_hae(addr); @@ -222,15 +224,17 @@ __EXTERN_INLINE u16 jensen_readw(unsigned long addr) return 0xffffUL & result; } -__EXTERN_INLINE u32 jensen_readl(unsigned long addr) +__EXTERN_INLINE u32 jensen_readl(const volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; jensen_set_hae(addr); addr &= JENSEN_HAE_MASK; return *(vuip) ((addr << 7) + EISA_MEM + 0x60); } -__EXTERN_INLINE u64 jensen_readq(unsigned long addr) +__EXTERN_INLINE u64 jensen_readq(const volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; unsigned long r0, r1; jensen_set_hae(addr); @@ -241,29 +245,33 @@ __EXTERN_INLINE u64 jensen_readq(unsigned long addr) return r1 << 32 | r0; } -__EXTERN_INLINE void jensen_writeb(u8 b, unsigned long addr) +__EXTERN_INLINE void jensen_writeb(u8 b, volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; jensen_set_hae(addr); addr &= JENSEN_HAE_MASK; *(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101; } -__EXTERN_INLINE void jensen_writew(u16 b, unsigned long addr) +__EXTERN_INLINE void jensen_writew(u16 b, volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; jensen_set_hae(addr); addr &= JENSEN_HAE_MASK; *(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001; } -__EXTERN_INLINE void jensen_writel(u32 b, unsigned long addr) +__EXTERN_INLINE void jensen_writel(u32 b, volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; jensen_set_hae(addr); addr &= JENSEN_HAE_MASK; *(vuip) ((addr << 7) + EISA_MEM + 0x60) = b; } -__EXTERN_INLINE void jensen_writeq(u64 b, unsigned long addr) +__EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr) { + unsigned long addr = (unsigned long) xaddr; jensen_set_hae(addr); addr &= JENSEN_HAE_MASK; addr = (addr << 7) + EISA_MEM + 0x60; @@ -271,15 +279,15 @@ __EXTERN_INLINE void jensen_writeq(u64 b, unsigned long addr) *(vuip) (addr + (4 << 7)) = b >> 32; } -__EXTERN_INLINE unsigned long jensen_ioremap(unsigned long addr, - unsigned long size) +__EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr) { - return addr; + return (void __iomem *)addr; } -__EXTERN_INLINE void jensen_iounmap(unsigned long addr) +__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr, + unsigned long size) { - return; + return (void __iomem *)(addr + 0x100000000ul); } __EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr) @@ -287,39 +295,46 @@ __EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr) return (long)addr >= 0; } -#undef vuip +__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr) +{ + return (unsigned long)addr >= 0x100000000ul; +} -#ifdef __WANT_IO_DEF - -#define __inb jensen_inb -#define __inw jensen_inw -#define __inl jensen_inl -#define __outb jensen_outb -#define __outw jensen_outw -#define __outl jensen_outl -#define __readb jensen_readb -#define __readw jensen_readw -#define __writeb jensen_writeb -#define __writew jensen_writew -#define __readl jensen_readl -#define __readq jensen_readq -#define __writel 
jensen_writel -#define __writeq jensen_writeq -#define __ioremap jensen_ioremap -#define __iounmap(a) jensen_iounmap((unsigned long)a) -#define __is_ioaddr jensen_is_ioaddr +/* New-style ioread interface. All the routines are so ugly for Jensen + that it doesn't make sense to merge them. */ + +#define IOPORT(OS, NS) \ +__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \ +{ \ + if (jensen_is_mmio(xaddr)) \ + return jensen_read##OS(xaddr - 0x100000000ul); \ + else \ + return jensen_in##OS((unsigned long)xaddr); \ +} \ +__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr) \ +{ \ + if (jensen_is_mmio(xaddr)) \ + jensen_write##OS(b, xaddr - 0x100000000ul); \ + else \ + jensen_out##OS(b, (unsigned long)xaddr); \ +} -/* - * The above have so much overhead that it probably doesn't make - * sense to have them inlined (better icache behaviour). - */ -#define inb(port) \ -(__builtin_constant_p((port))?__inb(port):_inb(port)) +IOPORT(b, 8) +IOPORT(w, 16) +IOPORT(l, 32) -#define outb(x, port) \ -(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port))) +#undef IOPORT + +#undef vuip -#endif /* __WANT_IO_DEF */ +#undef __IO_PREFIX +#define __IO_PREFIX jensen +#define jensen_trivial_rw_bw 0 +#define jensen_trivial_rw_lq 0 +#define jensen_trivial_io_bw 0 +#define jensen_trivial_io_lq 0 +#define jensen_trivial_iounmap 1 +#include <asm/io_trivial.h> #ifdef __IO_EXTERN_INLINE #undef __EXTERN_INLINE diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h index f09f71909aa6..ece166a203ec 100644 --- a/include/asm-alpha/machvec.h +++ b/include/asm-alpha/machvec.h @@ -45,27 +45,29 @@ struct alpha_machine_vector void (*mv_pci_tbi)(struct pci_controller *hose, dma_addr_t start, dma_addr_t end); - u8 (*mv_inb)(unsigned long); - u16 (*mv_inw)(unsigned long); - u32 (*mv_inl)(unsigned long); - - void (*mv_outb)(u8, unsigned long); - void (*mv_outw)(u16, unsigned long); - void (*mv_outl)(u32, unsigned long); - - u8 (*mv_readb)(unsigned long); - u16 (*mv_readw)(unsigned long); - u32 (*mv_readl)(unsigned long); - u64 (*mv_readq)(unsigned long); - - void (*mv_writeb)(u8, unsigned long); - void (*mv_writew)(u16, unsigned long); - void (*mv_writel)(u32, unsigned long); - void (*mv_writeq)(u64, unsigned long); - - unsigned long (*mv_ioremap)(unsigned long, unsigned long); - void (*mv_iounmap)(unsigned long); + unsigned int (*mv_ioread8)(void __iomem *); + unsigned int (*mv_ioread16)(void __iomem *); + unsigned int (*mv_ioread32)(void __iomem *); + + void (*mv_iowrite8)(u8, void __iomem *); + void (*mv_iowrite16)(u16, void __iomem *); + void (*mv_iowrite32)(u32, void __iomem *); + + u8 (*mv_readb)(const volatile void __iomem *); + u16 (*mv_readw)(const volatile void __iomem *); + u32 (*mv_readl)(const volatile void __iomem *); + u64 (*mv_readq)(const volatile void __iomem *); + + void (*mv_writeb)(u8, volatile void __iomem *); + void (*mv_writew)(u16, volatile void __iomem *); + void (*mv_writel)(u32, volatile void __iomem *); + void (*mv_writeq)(u64, volatile void __iomem *); + + void __iomem *(*mv_ioportmap)(unsigned long); + void __iomem *(*mv_ioremap)(unsigned long, unsigned long); + void (*mv_iounmap)(volatile void __iomem *); int (*mv_is_ioaddr)(unsigned long); + int (*mv_is_mmio)(const volatile void __iomem *); void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *, struct task_struct *); diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h index 3ae6408acaed..a714d0cdc204 100644 --- a/include/asm-alpha/mmu_context.h +++ 
b/include/asm-alpha/mmu_context.h @@ -10,6 +10,7 @@ #include <linux/config.h> #include <asm/system.h> #include <asm/machvec.h> +#include <asm/compiler.h> /* * Force a context reload. This is needed when we change the page diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h index 346ec5b5374a..b5f355444fc2 100644 --- a/include/asm-alpha/spinlock.h +++ b/include/asm-alpha/spinlock.h @@ -95,7 +95,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) /***********************************************************/ typedef struct { - volatile int write_lock:1, read_counter:31; + volatile unsigned int write_lock:1, read_counter:31; } /*__attribute__((aligned(32)))*/ rwlock_t; #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } @@ -124,7 +124,7 @@ static inline void _raw_write_lock(rwlock_t * lock) " br 1b\n" ".previous" : "=m" (*lock), "=&r" (regx) - : "0" (*lock) : "memory"); + : "m" (*lock) : "memory"); } static inline void _raw_read_lock(rwlock_t * lock) diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h index 1e5ac92d2073..bba276c50b15 100644 --- a/include/asm-alpha/system.h +++ b/include/asm-alpha/system.h @@ -55,9 +55,9 @@ */ struct el_common { unsigned int size; /* size in bytes of logout area */ - int sbz1 : 30; /* should be zero */ - int err2 : 1; /* second error */ - int retry : 1; /* retry flag */ + unsigned int sbz1 : 30; /* should be zero */ + unsigned int err2 : 1; /* second error */ + unsigned int retry : 1; /* retry flag */ unsigned int proc_offset; /* processor-specific offset */ unsigned int sys_offset; /* system-specific offset */ unsigned int code; /* machine check code */ diff --git a/include/asm-alpha/tlbflush.h b/include/asm-alpha/tlbflush.h index ba6beac99509..9d484c1fdc82 100644 --- a/include/asm-alpha/tlbflush.h +++ b/include/asm-alpha/tlbflush.h @@ -3,6 +3,7 @@ #include <linux/config.h> #include <linux/mm.h> +#include <asm/compiler.h> #ifndef __EXTERN_INLINE #define __EXTERN_INLINE extern inline diff --git a/include/asm-alpha/vga.h b/include/asm-alpha/vga.h index 0f3d10eaa903..8ca4f6b2da19 100644 --- a/include/asm-alpha/vga.h +++ b/include/asm-alpha/vga.h @@ -15,24 +15,24 @@ extern inline void scr_writew(u16 val, volatile u16 *addr) { - if (__is_ioaddr((unsigned long) addr)) - __raw_writew(val, (unsigned long) addr); + if (__is_ioaddr(addr)) + __raw_writew(val, (volatile u16 __iomem *) addr); else *addr = val; } extern inline u16 scr_readw(volatile const u16 *addr) { - if (__is_ioaddr((unsigned long) addr)) - return __raw_readw((unsigned long) addr); + if (__is_ioaddr(addr)) + return __raw_readw((volatile const u16 __iomem *) addr); else return *addr; } extern inline void scr_memsetw(u16 *s, u16 c, unsigned int count) { - if (__is_ioaddr((unsigned long) s)) - memsetw_io(s, c, count); + if (__is_ioaddr(s)) + memsetw_io((u16 __iomem *) s, c, count); else memsetw(s, c, count); } @@ -43,9 +43,9 @@ extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count); /* ??? These are currently only used for downloading character sets. As such, they don't need memory barriers. Is this all they are intended to be used for? */ -#define vga_readb readb -#define vga_writeb writeb +#define vga_readb(a) readb((u8 __iomem *)(a)) +#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a)) -#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0)) +#define VGA_MAP_MEM(x) ((unsigned long) ioremap(x, 0)) #endif |
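
The conversion follows one pattern per chipset: the __WANT_IO_DEF macro tables are dropped, __IO_PREFIX is redefined to the chipset name, a handful of <chipset>_trivial_* flags record which accessors are plain linear loads and stores, and <asm/io_trivial.h> is included to stamp out exactly those accessors by token pasting. Below is a minimal user-space sketch of that selection mechanism, assuming an invented chipset name "demo" and an invented wrapper my_readb; it illustrates the pattern only and is not taken from the kernel headers.

/* Sketch of the IO_CONCAT/__IO_PREFIX selection; "demo" and my_readb are
   made-up names for illustration. */
#include <stdio.h>
#include <stdint.h>

#define IO_CONCAT(a,b)   _IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)  a ## _ ## b

#define __IO_PREFIX         demo   /* which chipset's accessors to use */
#define demo_trivial_rw_bw  1      /* byte/word MMIO is a plain load/store */

/* Stand-in for io_trivial.h: emitted only when the flag says access is linear. */
#if IO_CONCAT(__IO_PREFIX, trivial_rw_bw)
static inline uint8_t IO_CONCAT(__IO_PREFIX, readb)(const volatile void *a)
{
	return *(const volatile uint8_t *)a;
}
#endif

/* Generic wrapper, analogous to readb() in io.h: expands to demo_readb(a). */
static inline uint8_t my_readb(const volatile void *a)
{
	return IO_CONCAT(__IO_PREFIX, readb)(a);
}

int main(void)
{
	uint8_t reg = 0x5a;	/* pretend device register */
	printf("%#x\n", (unsigned)my_readb(&reg));
	return 0;
}

Under CONFIG_ALPHA_GENERIC the same pasting resolves to the generic_* wrappers instead, which dispatch through alpha_mv at run time.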
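
For a sparse-space chipset such as T2, the converted readb/ioread8 paths keep the old address encoding: the bus offset is shifted left by 5 to select a 32-byte sparse window, a constant suffix (0x00 for byte, 0x08 for word, 0x18 for longword) sets the transfer size, and the low offset bits pick the byte lane that __kernel_extbl pulls out of the returned longword, all under t2_hae_lock when the HAE has to move. The arithmetic alone is easy to check in isolation; the sketch below uses an invented DEMO_SPARSE_IO base in place of the real T2_IO constant.

/* Sparse-space address arithmetic only; DEMO_SPARSE_IO is a made-up base. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SPARSE_IO  0x200000000ULL

static uint64_t sparse_byte_address(uint64_t offset)
{
	return (offset << 5) + DEMO_SPARSE_IO + 0x00;	/* 0x00 = byte-size suffix */
}

/* Equivalent of __kernel_extbl(result, offset & 3). */
static unsigned int extract_byte_lane(uint64_t longword, uint64_t offset)
{
	return (longword >> ((offset & 3) * 8)) & 0xff;
}

int main(void)
{
	uint64_t port = 0x3f8;	/* e.g. a legacy serial port */

	printf("sparse address for port %#llx: %#llx\n",
	       (unsigned long long)port,
	       (unsigned long long)sparse_byte_address(port));
	printf("byte lane 2 of 0x00aa0000: %#x\n",
	       extract_byte_lane(0x00aa0000, 2));	/* prints 0xaa */
	return 0;
}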
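
The new memset_io and memsetw_io inlines hand _memset_c_io a 64-bit fill pattern built by multiplying the byte (or 16-bit word) by a replication constant. That replication is ordinary integer arithmetic and can be checked on its own:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t  c = 0xab;
	uint16_t w = 0x1234;

	/* byte replicated into all 8 lanes, as memset_io does */
	uint64_t fill8  = 0x0101010101010101ULL * c;
	/* 16-bit value replicated into all 4 lanes, as memsetw_io does */
	uint64_t fill16 = 0x0001000100010001ULL * w;

	printf("%016llx\n", (unsigned long long)fill8);	/* abababababababab */
	printf("%016llx\n", (unsigned long long)fill16);	/* 1234123412341234 */
	return 0;
}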
