author	Richard Henderson <rth@kanga.twiddle.home>	2004-09-21 01:01:24 -0700
committer	Richard Henderson <rth@kanga.twiddle.home>	2004-09-21 01:01:24 -0700
commit	6b60f95b61002ffd03e35e8e4b88ff1b32eceb88 (patch)
tree	01dd9e5211dbc8718d177095f1012dc6dd55d740 /arch
parent	d34bd869e62a611ec4f4cf523adbbd1f72761532 (diff)
[ALPHA] Update readb and friends for __iomem.
Diffstat (limited to 'arch')
-rw-r--r--	arch/alpha/kernel/core_cia.c	4
-rw-r--r--	arch/alpha/kernel/core_irongate.c	20
-rw-r--r--	arch/alpha/lib/io.c	87
3 files changed, 59 insertions, 52 deletions
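
This commit converts the Alpha readb/writeb family (and the helpers built on it) from taking raw unsigned long addresses to taking void __iomem * cookies, so sparse can flag code that mixes I/O cookies with ordinary kernel pointers. The stand-alone sketch below illustrates that calling convention; the __iomem definition only mimics what the kernel provides under sparse, and demo_readl() plus the hard-coded cookie are invented for the sketch, not the real API.

/*
 * Minimal sketch (not kernel code) of the convention this patch
 * introduces.  Without __CHECKER__ the __iomem macro expands to
 * nothing, so this builds with a plain C compiler.
 */
#include <stdint.h>
#include <stdio.h>

#ifdef __CHECKER__
# define __iomem __attribute__((noderef, address_space(2)))
#else
# define __iomem
#endif

/* Accessors now take an __iomem cookie instead of an unsigned long. */
static uint32_t demo_readl(const volatile void __iomem *addr)
{
	(void)addr;		/* a real implementation would issue the MMIO load */
	return 0;
}

int main(void)
{
	/* In real code the cookie comes from something ioremap-like. */
	void __iomem *regs = (void __iomem *)(uintptr_t)0x10000000u;

	/* The cookie is passed around opaquely; under sparse, dereferencing
	 * it directly or handing it to a plain-pointer API gets flagged. */
	printf("%u\n", demo_readl(regs));
	return 0;
}
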
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index f5245ab2f7fe..fd563064363c 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -282,7 +282,7 @@ void
cia_pci_tbi_try2(struct pci_controller *hose,
dma_addr_t start, dma_addr_t end)
{
- unsigned long bus_addr;
+ void __iomem *bus_addr;
int ctrl;
/* Put the chip into PCI loopback mode. */
@@ -351,7 +351,7 @@ verify_tb_operation(void)
struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
int ctrl, addr0, tag0, pte0, data0;
int temp, use_tbia_try2 = 0;
- unsigned long bus_addr;
+ void __iomem *bus_addr;
/* pyxis -- tbia is broken */
if (pci_isa_hose->dense_io_base)
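
The core_cia.c hunks above only retype the local bus_addr, but that retyping is what lets the rest of the function hand the value to the accessors without conversions. A stand-alone before/after sketch of that shape follows; demo_readl() and DEMO_WINDOW are invented for the sketch and are not the CIA code.

#include <stdint.h>

#define __iomem			/* empty outside sparse */
#define DEMO_WINDOW	((void __iomem *)(uintptr_t)0x20000000u)

static uint32_t demo_readl(const volatile void __iomem *addr)
{
	(void)addr;
	return 0;		/* placeholder for the real MMIO load */
}

/* Old shape: the window is an integer, every use needs a conversion. */
static uint32_t probe_old(void)
{
	unsigned long bus_addr = (unsigned long)DEMO_WINDOW;

	return demo_readl((const volatile void __iomem *)bus_addr);
}

/* New shape: the window stays an __iomem cookie end to end. */
static uint32_t probe_new(void)
{
	void __iomem *bus_addr = DEMO_WINDOW;

	return demo_readl(bus_addr);
}

int main(void)
{
	return probe_old() == probe_new() ? 0 : 1;
}
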
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
index 50b7dd3d59c1..138d497d1cca 100644
--- a/arch/alpha/kernel/core_irongate.c
+++ b/arch/alpha/kernel/core_irongate.c
@@ -310,7 +310,7 @@ irongate_init_arch(void)
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])
-unsigned long
+void __iomem *
irongate_ioremap(unsigned long addr, unsigned long size)
{
struct vm_struct *area;
@@ -320,7 +320,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
unsigned long gart_bus_addr;
if (!alpha_agpgart_size)
- return addr + IRONGATE_MEM;
+ return (void __iomem *)(addr + IRONGATE_MEM);
gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
PCI_BASE_ADDRESS_MEM_MASK;
@@ -339,7 +339,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
/*
* Not found - assume legacy ioremap
*/
- return addr + IRONGATE_MEM;
+ return (void __iomem *)(addr + IRONGATE_MEM);
} while(0);
mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
@@ -353,7 +353,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
if (addr & ~PAGE_MASK) {
printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
addr);
- return addr + IRONGATE_MEM;
+ return (void __iomem *)(addr + IRONGATE_MEM);
}
last = addr + size - 1;
size = PAGE_ALIGN(last) - addr;
@@ -378,7 +378,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
* Map it
*/
area = get_vm_area(size, VM_IOREMAP);
- if (!area) return (unsigned long)NULL;
+ if (!area) return NULL;
for(baddr = addr, vaddr = (unsigned long)area->addr;
baddr <= last;
@@ -391,7 +391,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
pte, PAGE_SIZE, 0)) {
printk("AGP ioremap: FAILED to map...\n");
vfree(area->addr);
- return (unsigned long)NULL;
+ return NULL;
}
}
@@ -402,13 +402,15 @@ irongate_ioremap(unsigned long addr, unsigned long size)
printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
addr, size, vaddr);
#endif
- return vaddr;
+ return (void __iomem *)vaddr;
}
void
-irongate_iounmap(unsigned long addr)
+irongate_iounmap(volatile void __iomem *xaddr)
{
+ unsigned long addr = (unsigned long) xaddr;
if (((long)addr >> 41) == -2)
return; /* kseg map, nothing to do */
- if (addr) return vfree((void *)(PAGE_MASK & addr));
+ if (addr)
+ return vfree((void *)(PAGE_MASK & addr));
}
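
irongate_iounmap() above shows the cast-in side of the conversion: the __iomem cookie is turned back into an integer only where address arithmetic is genuinely needed, namely the kseg test ((long)addr >> 41 == -2) and the PAGE_MASK rounding before vfree(). A stand-alone sketch of that pattern follows; the page size is assumed and printf() stands in for vfree(), so none of the constants are taken from the Alpha headers.

#include <stdint.h>
#include <stdio.h>

#define __iomem			/* empty outside sparse */
#define DEMO_PAGE_MASK	(~((uintptr_t)0xfff))	/* assume 4K pages for the demo */

static void demo_iounmap(volatile void __iomem *xaddr)
{
	uintptr_t addr = (uintptr_t)xaddr;

	/* Direct-mapped ("kseg"-style) cookies need no unmapping; this
	 * top-bits test only mimics the shape of the real check. */
	if (((int64_t)addr >> 41) == -2) {
		printf("kseg cookie, nothing to do\n");
		return;
	}

	if (addr)
		printf("would vfree page at %#lx\n",
		       (unsigned long)(addr & DEMO_PAGE_MASK));
}

int main(void)
{
	demo_iounmap((volatile void __iomem *)(uintptr_t)0x12345678u);
	return 0;
}
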
diff --git a/arch/alpha/lib/io.c b/arch/alpha/lib/io.c
index dedc5186916f..9b77aec36eab 100644
--- a/arch/alpha/lib/io.c
+++ b/arch/alpha/lib/io.c
@@ -40,93 +40,93 @@ void _outl(u32 b, unsigned long addr)
__outl(b, addr);
}
-u8 ___raw_readb(unsigned long addr)
+u8 ___raw_readb(const volatile void __iomem *addr)
{
return __readb(addr);
}
-u16 ___raw_readw(unsigned long addr)
+u16 ___raw_readw(const volatile void __iomem *addr)
{
return __readw(addr);
}
-u32 ___raw_readl(unsigned long addr)
+u32 ___raw_readl(const volatile void __iomem *addr)
{
return __readl(addr);
}
-u64 ___raw_readq(unsigned long addr)
+u64 ___raw_readq(const volatile void __iomem *addr)
{
return __readq(addr);
}
-u8 _readb(unsigned long addr)
+u8 _readb(const volatile void __iomem *addr)
{
unsigned long r = __readb(addr);
mb();
return r;
}
-u16 _readw(unsigned long addr)
+u16 _readw(const volatile void __iomem *addr)
{
unsigned long r = __readw(addr);
mb();
return r;
}
-u32 _readl(unsigned long addr)
+u32 _readl(const volatile void __iomem *addr)
{
unsigned long r = __readl(addr);
mb();
return r;
}
-u64 _readq(unsigned long addr)
+u64 _readq(const volatile void __iomem *addr)
{
unsigned long r = __readq(addr);
mb();
return r;
}
-void ___raw_writeb(u8 b, unsigned long addr)
+void ___raw_writeb(u8 b, volatile void __iomem *addr)
{
__writeb(b, addr);
}
-void ___raw_writew(u16 b, unsigned long addr)
+void ___raw_writew(u16 b, volatile void __iomem *addr)
{
__writew(b, addr);
}
-void ___raw_writel(u32 b, unsigned long addr)
+void ___raw_writel(u32 b, volatile void __iomem *addr)
{
__writel(b, addr);
}
-void ___raw_writeq(u64 b, unsigned long addr)
+void ___raw_writeq(u64 b, volatile void __iomem *addr)
{
__writeq(b, addr);
}
-void _writeb(u8 b, unsigned long addr)
+void _writeb(u8 b, volatile void __iomem *addr)
{
__writeb(b, addr);
mb();
}
-void _writew(u16 b, unsigned long addr)
+void _writew(u16 b, volatile void __iomem *addr)
{
__writew(b, addr);
mb();
}
-void _writel(u32 b, unsigned long addr)
+void _writel(u32 b, volatile void __iomem *addr)
{
__writel(b, addr);
mb();
}
-void _writeq(u64 b, unsigned long addr)
+void _writeq(u64 b, volatile void __iomem *addr)
{
__writeq(b, addr);
mb();
@@ -411,12 +411,12 @@ void outsl (unsigned long port, const void *src, unsigned long count)
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
-void _memcpy_fromio(void * to, unsigned long from, long count)
+void _memcpy_fromio(void * to, const volatile void __iomem *from, long count)
{
/* Optimize co-aligned transfers. Everything else gets handled
a byte at a time. */
- if (count >= 8 && ((unsigned long)to & 7) == (from & 7)) {
+ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
count -= 8;
do {
*(u64 *)to = __raw_readq(from);
@@ -427,7 +427,7 @@ void _memcpy_fromio(void * to, unsigned long from, long count)
count += 8;
}
- if (count >= 4 && ((unsigned long)to & 3) == (from & 3)) {
+ if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
count -= 4;
do {
*(u32 *)to = __raw_readl(from);
@@ -438,7 +438,7 @@ void _memcpy_fromio(void * to, unsigned long from, long count)
count += 4;
}
- if (count >= 2 && ((unsigned long)to & 1) == (from & 1)) {
+ if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
count -= 2;
do {
*(u16 *)to = __raw_readw(from);
@@ -455,19 +455,20 @@ void _memcpy_fromio(void * to, unsigned long from, long count)
to++;
from++;
}
+ mb();
}
/*
* Copy data from "real" memory space to IO memory space.
* This needs to be optimized.
*/
-void _memcpy_toio(unsigned long to, const void * from, long count)
+void _memcpy_toio(volatile void __iomem *to, const void * from, long count)
{
/* Optimize co-aligned transfers. Everything else gets handled
a byte at a time. */
/* FIXME -- align FROM. */
- if (count >= 8 && (to & 7) == ((unsigned long)from & 7)) {
+ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
count -= 8;
do {
__raw_writeq(*(const u64 *)from, to);
@@ -478,7 +479,7 @@ void _memcpy_toio(unsigned long to, const void * from, long count)
count += 8;
}
- if (count >= 4 && (to & 3) == ((unsigned long)from & 3)) {
+ if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
count -= 4;
do {
__raw_writel(*(const u32 *)from, to);
@@ -489,7 +490,7 @@ void _memcpy_toio(unsigned long to, const void * from, long count)
count += 4;
}
- if (count >= 2 && (to & 1) == ((unsigned long)from & 1)) {
+ if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
count -= 2;
do {
__raw_writew(*(const u16 *)from, to);
@@ -512,24 +513,24 @@ void _memcpy_toio(unsigned long to, const void * from, long count)
/*
* "memset" on IO memory space.
*/
-void _memset_c_io(unsigned long to, unsigned long c, long count)
+void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
{
/* Handle any initial odd byte */
- if (count > 0 && (to & 1)) {
+ if (count > 0 && ((u64)to & 1)) {
__raw_writeb(c, to);
to++;
count--;
}
/* Handle any initial odd halfword */
- if (count >= 2 && (to & 2)) {
+ if (count >= 2 && ((u64)to & 2)) {
__raw_writew(c, to);
to += 2;
count -= 2;
}
/* Handle any initial odd word */
- if (count >= 4 && (to & 4)) {
+ if (count >= 4 && ((u64)to & 4)) {
__raw_writel(c, to);
to += 4;
count -= 4;
@@ -571,24 +572,28 @@ void _memset_c_io(unsigned long to, unsigned long c, long count)
void
scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
{
- if (! __is_ioaddr((unsigned long) s)) {
- /* Source is memory. */
- if (! __is_ioaddr((unsigned long) d))
- memcpy(d, s, count);
- else
- memcpy_toio(d, s, count);
- } else {
- /* Source is screen. */
- if (! __is_ioaddr((unsigned long) d))
- memcpy_fromio(d, s, count);
- else {
+ const u16 __iomem *ios = (const u16 __iomem *) s;
+ u16 __iomem *iod = (u16 __iomem *) d;
+ int s_isio = __is_ioaddr(s);
+ int d_isio = __is_ioaddr(d);
+
+ if (s_isio) {
+ if (d_isio) {
/* FIXME: Should handle unaligned ops and
operation widening. */
+
count /= 2;
while (count--) {
- u16 tmp = __raw_readw((unsigned long)(s++));
- __raw_writew(tmp, (unsigned long)(d++));
+ u16 tmp = __raw_readw(ios++);
+ __raw_writew(tmp, iod++);
}
}
+ else
+ memcpy_fromio(d, ios, count);
+ } else {
+ if (d_isio)
+ memcpy_toio(iod, s, count);
+ else
+ memcpy(d, s, count);
}
}
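
The io.c hunks keep the co-aligned fast path of _memcpy_fromio()/_memcpy_toio() but now express the alignment checks as casts of the pointer arguments ((u64)to & 7 and so on). The user-space sketch below mirrors just that test, using uintptr_t in place of the kernel's u64 casts; demo_memcpy_fromio(), demo_readq() and demo_readb() are invented names, the device side is faked with ordinary loads, and the wide store goes through memcpy() rather than the kernel's direct u64 store to stay portable. The trailing mb() the patch adds to _memcpy_fromio() is omitted here, since there is no real device ordering to enforce.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define __iomem			/* empty outside sparse */

static uint64_t demo_readq(const volatile void __iomem *addr)
{
	uint64_t v;

	memcpy(&v, (const void *)addr, sizeof(v));	/* fake MMIO load */
	return v;
}

static uint8_t demo_readb(const volatile void __iomem *addr)
{
	return *(const volatile uint8_t *)addr;		/* fake MMIO load */
}

static void demo_memcpy_fromio(void *to, const volatile void __iomem *from,
			       long count)
{
	/* Same co-alignment test the patch rewrites as
	   ((u64)to & 7) == ((u64)from & 7), expressed with uintptr_t. */
	if (count >= 8 && ((uintptr_t)to & 7) == ((uintptr_t)from & 7)) {
		/* Step up to an 8-byte boundary one byte at a time. */
		while (count > 0 && ((uintptr_t)to & 7)) {
			*(uint8_t *)to = demo_readb(from);
			to = (uint8_t *)to + 1;
			from = (const volatile uint8_t __iomem *)from + 1;
			count--;
		}
		/* Bulk of the transfer in 64-bit chunks. */
		while (count >= 8) {
			uint64_t v = demo_readq(from);

			memcpy(to, &v, sizeof(v));
			to = (uint8_t *)to + 8;
			from = (const volatile uint8_t __iomem *)from + 8;
			count -= 8;
		}
	}
	/* Tail (or an entirely misaligned transfer) a byte at a time. */
	while (count-- > 0) {
		*(uint8_t *)to = demo_readb(from);
		to = (uint8_t *)to + 1;
		from = (const volatile uint8_t __iomem *)from + 1;
	}
}

int main(void)
{
	char src[32] = "alpha __iomem conversion demo";
	char dst[32] = { 0 };

	demo_memcpy_fromio(dst, (const volatile void __iomem *)src, sizeof(src));
	printf("%s\n", dst);
	return 0;
}
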