-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c      |  1
-rw-r--r--  arch/sparc/mm/generic.c              | 34
-rw-r--r--  arch/sparc64/kernel/pci.c            | 15
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c  |  6
-rw-r--r--  arch/sparc64/mm/generic.c            | 50
-rw-r--r--  include/asm-alpha/pgtable.h          |  7
-rw-r--r--  include/asm-arm/pgtable.h            |  7
-rw-r--r--  include/asm-arm26/pgtable.h          |  7
-rw-r--r--  include/asm-frv/pgtable.h            |  7
-rw-r--r--  include/asm-h8300/pgtable.h          |  7
-rw-r--r--  include/asm-i386/pgtable.h           |  7
-rw-r--r--  include/asm-ia64/pgtable.h           |  7
-rw-r--r--  include/asm-m32r/pgtable.h           |  7
-rw-r--r--  include/asm-m68k/pgtable.h           |  7
-rw-r--r--  include/asm-m68knommu/pgtable.h      |  7
-rw-r--r--  include/asm-mips/pgtable.h           | 18
-rw-r--r--  include/asm-parisc/pgtable.h         |  7
-rw-r--r--  include/asm-ppc/pgtable.h            | 16
-rw-r--r--  include/asm-ppc64/pgtable.h          |  7
-rw-r--r--  include/asm-sh/pgtable.h             |  7
-rw-r--r--  include/asm-sh64/pgtable.h           |  8
-rw-r--r--  include/asm-sparc/pgtable.h          | 14
-rw-r--r--  include/asm-sparc64/pgtable.h        | 12
-rw-r--r--  include/asm-x86_64/pgtable.h         |  7
24 files changed, 251 insertions, 21 deletions
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index a7df400e9d48..f91b0e8d0dc8 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -164,6 +164,7 @@ EXPORT_SYMBOL(get_auxio);
#endif
EXPORT_SYMBOL(request_fast_irq);
EXPORT_SYMBOL(io_remap_page_range);
+EXPORT_SYMBOL(io_remap_pfn_range);
/* P3: iounit_xxx may be needed, sun4d users */
/* EXPORT_SYMBOL(iounit_map_dma_init); */
/* EXPORT_SYMBOL(iounit_map_dma_page); */
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 70d2acb3d1bc..db27eee3bda1 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -118,3 +118,37 @@ int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned
flush_tlb_range(vma, beg, end);
return error;
}
+
+int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+ struct mm_struct *mm = vma->vm_mm;
+ int space = GET_IOSPACE(pfn);
+ unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+
+ prot = __pgprot(pg_iobits);
+ offset -= from;
+ dir = pgd_offset(mm, from);
+ flush_cache_range(vma, beg, end);
+
+ spin_lock(&mm->page_table_lock);
+ while (from < end) {
+ pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ flush_tlb_range(vma, beg, end);
+ return error;
+}
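
Illustrative sketch (not from the patch; foo_mmap, foo_iospace and foo_phys are hypothetical placeholders) of how a sparc driver calls the new interface: the caller packs the bus space number and the page frame into a single pfn with MK_IOSPACE_PFN(), and io_remap_pfn_range() above splits them apart again with GET_IOSPACE() and GET_PFN().

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>

static int foo_iospace;            /* hypothetical: bus space number from the probe */
static unsigned long foo_phys;     /* hypothetical: physical base of the registers  */

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn = MK_IOSPACE_PFN(foo_iospace, foo_phys >> PAGE_SHIFT);

        return io_remap_pfn_range(vma, vma->vm_start, pfn,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
}
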
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 3f524d52d531..bba140d98b1b 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -18,6 +18,7 @@
#include <asm/uaccess.h>
#include <asm/pbm.h>
+#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/ebus.h>
#include <asm/isa.h>
@@ -734,12 +735,10 @@ static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
- /* Our io_remap_page_range takes care of this, do nothing. */
+ /* Our io_remap_page_range/io_remap_pfn_range takes care of this,
+ do nothing. */
}
-extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset,
- unsigned long size, pgprot_t prot, int space);
-
/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
* for this architecture. The region in the process to map is described by vm_start
* and vm_end members of VMA, the base physical address is found in vm_pgoff.
@@ -761,10 +760,10 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
__pci_mmap_set_flags(dev, vma, mmap_state);
__pci_mmap_set_pgprot(dev, vma, mmap_state);
- ret = io_remap_page_range(vma, vma->vm_start,
- (vma->vm_pgoff << PAGE_SHIFT |
- (write_combine ? 0x1UL : 0x0UL)),
- vma->vm_end - vma->vm_start, vma->vm_page_prot, 0);
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
if (ret)
return ret;
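
Illustrative breakdown (not from the patch; the helper below is hypothetical) of why vma->vm_pgoff can be handed over unchanged: __pci_mmap_make_offset() leaves a plain pfn in vm_pgoff, and a PCI BAR's physical address sits well below the iospace bits on sparc64, so the new helper recovers the same (offset, space) pair the old call spelled out explicitly, while the write-combine low-bit hack is simply dropped.

#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/* Assumes GET_IOSPACE(vm_pgoff) == 0 for PCI mappings, matching the old call. */
static void pci_mmap_old_args(struct vm_area_struct *vma)
{
        unsigned long pfn    = vma->vm_pgoff;
        int           space  = GET_IOSPACE(pfn);            /* the old literal 0 argument     */
        unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;   /* the old vm_pgoff << PAGE_SHIFT */

        (void)space;
        (void)offset;
}
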
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 686e45899bfc..3cec1ebb083b 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -87,7 +87,10 @@ extern int svr4_setcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
extern long sparc32_open(const char __user * filename, int flags, int mode);
-extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space);
+extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long offset, unsigned long size, pgprot_t prot, int space);
+extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn, unsigned long size, pgprot_t prot);
extern void (*prom_palette)(int);
extern int __ashrdi3(int, int);
@@ -254,6 +257,7 @@ EXPORT_SYMBOL(pci_dma_supported);
/* I/O device mmaping on Sparc64. */
EXPORT_SYMBOL(io_remap_page_range);
+EXPORT_SYMBOL(io_remap_pfn_range);
/* Solaris/SunOS binary compatibility */
EXPORT_SYMBOL(_sigpause_common);
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index e379f744be0a..6b31f6117a95 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -20,10 +20,6 @@
*
* They use a pgprot that sets PAGE_IO and does not check the
* mem_map table as this is independent of normal memory.
- *
- * As a special hack if the lowest bit of offset is set the
- * side-effect bit will be turned off. This is used as a
- * performance improvement on FFB/AFB. -DaveM
*/
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
unsigned long address,
@@ -33,6 +29,8 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
{
unsigned long end;
+ /* clear hack bit that was used as a write_combine side-effect flag */
+ offset &= ~0x1UL;
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
@@ -41,22 +39,22 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
pte_t entry;
unsigned long curend = address + PAGE_SIZE;
- entry = mk_pte_io((offset & ~(0x1UL)), prot, space);
+ entry = mk_pte_io(offset, prot, space);
if (!(address & 0xffff)) {
if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
- entry = mk_pte_io((offset & ~(0x1UL)),
+ entry = mk_pte_io(offset,
__pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
space);
curend = address + 0x400000;
offset += 0x400000;
} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
- entry = mk_pte_io((offset & ~(0x1UL)),
+ entry = mk_pte_io(offset,
__pgprot(pgprot_val (prot) | _PAGE_SZ512K),
space);
curend = address + 0x80000;
offset += 0x80000;
} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
- entry = mk_pte_io((offset & ~(0x1UL)),
+ entry = mk_pte_io(offset,
__pgprot(pgprot_val (prot) | _PAGE_SZ64K),
space);
curend = address + 0x10000;
@@ -66,8 +64,6 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
} else
offset += PAGE_SIZE;
- if (offset & 0x1UL)
- pte_val(entry) &= ~(_PAGE_E);
do {
BUG_ON(!pte_none(*pte));
set_pte_at(mm, address, pte, entry);
@@ -150,3 +146,37 @@ int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned
return error;
}
+
+int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+ struct mm_struct *mm = vma->vm_mm;
+ int space = GET_IOSPACE(pfn);
+ unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+
+ prot = __pgprot(pg_iobits);
+ offset -= from;
+ dir = pgd_offset(mm, from);
+ flush_cache_range(vma, beg, end);
+
+ spin_lock(&mm->page_table_lock);
+ while (from < end) {
+ pud_t *pud = pud_alloc(current->mm, dir, from);
+ error = -ENOMEM;
+ if (!pud)
+ break;
+ error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ flush_tlb_range(vma, beg, end);
+ spin_unlock(&mm->page_table_lock);
+
+ return error;
+}
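
A small host-side sanity sketch (not from the patch, hypothetical values) of the offset bookkeeping used above: after "offset -= from", the value passed down to io_remap_pud_range() for any address addr in [beg, end) is offset + addr, i.e. the original physical offset plus the distance already mapped.

#include <assert.h>

int main(void)
{
        unsigned long beg    = 0x20000000UL;  /* hypothetical vma->vm_start              */
        unsigned long phys   = 0x70000000UL;  /* hypothetical GET_PFN(pfn) << PAGE_SHIFT */
        unsigned long offset = phys;

        offset -= beg;                        /* as in io_remap_pfn_range() above        */
        for (unsigned long addr = beg; addr < beg + 0x10000UL; addr += 0x2000UL)
                assert(offset + addr == phys + (addr - beg));
        return 0;
}
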
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index d27970940f03..faae196d8377 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -340,6 +340,13 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
remap_pfn_range(vma, start, pfn, size, prot); \
})
+#define io_remap_pfn_range(vma, start, pfn, size, prot) \
+ remap_pfn_range(vma, start, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index c2455a7a3287..91ffb1f4cd10 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -419,6 +419,13 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#define io_remap_page_range(vma,from,phys,size,prot) \
remap_pfn_range(vma, from, (phys) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
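
On architectures with no separate I/O space (alpha, arm and the rest below), io_remap_pfn_range() is just remap_pfn_range() in disguise, so a portable driver converts by shifting its physical address down to a page frame number. A hedged before/after sketch, with foo_mmap and foo_mmio_base as hypothetical placeholders:

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>

static unsigned long foo_mmio_base;     /* hypothetical MMIO base address */

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long len = vma->vm_end - vma->vm_start;

        /* old: io_remap_page_range(vma, vma->vm_start, foo_mmio_base, len,
         *                          vma->vm_page_prot);                      */
        return io_remap_pfn_range(vma, vma->vm_start,
                                  foo_mmio_base >> PAGE_SHIFT,
                                  len, vma->vm_page_prot);
}
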
diff --git a/include/asm-arm26/pgtable.h b/include/asm-arm26/pgtable.h
index 95d16abeb189..af0b8907dc14 100644
--- a/include/asm-arm26/pgtable.h
+++ b/include/asm-arm26/pgtable.h
@@ -293,6 +293,13 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
#define io_remap_page_range(vma,from,phys,size,prot) \
remap_pfn_range(vma, from, (phys) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASMARM_PGTABLE_H */
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 9d44b7e4a210..cc1373c4b790 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -503,6 +503,13 @@ static inline int pte_file(pte_t pte)
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/include/asm-h8300/pgtable.h b/include/asm-h8300/pgtable.h
index 1fd6e7191d00..69076eb31476 100644
--- a/include/asm-h8300/pgtable.h
+++ b/include/asm-h8300/pgtable.h
@@ -55,6 +55,13 @@ extern int is_in_rom(unsigned long);
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index da4f252b7b84..488c2b4befa5 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -405,6 +405,13 @@ extern void noexec_setup(const char *str);
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 385b5d4da925..880d1e7de50e 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -447,6 +447,13 @@ extern void paging_init (void);
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
index 1710d91d4e56..70a0eb68fdf6 100644
--- a/include/asm-m32r/pgtable.h
+++ b/include/asm-m32r/pgtable.h
@@ -381,6 +381,13 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h
index 186585d45d8c..0eef32778df2 100644
--- a/include/asm-m68k/pgtable.h
+++ b/include/asm-m68k/pgtable.h
@@ -144,6 +144,13 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
/* MMU-specific headers */
#ifdef CONFIG_SUN3
diff --git a/include/asm-m68knommu/pgtable.h b/include/asm-m68knommu/pgtable.h
index 48980c21b67b..e2a69fffa370 100644
--- a/include/asm-m68knommu/pgtable.h
+++ b/include/asm-m68knommu/pgtable.h
@@ -59,6 +59,13 @@ extern int is_in_rom(unsigned long);
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 61983c3fc9c3..878843203d67 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -367,11 +367,27 @@ static inline int io_remap_page_range(struct vm_area_struct *vma,
phys_t phys_addr_high = fixup_bigphys_addr(paddr, size);
return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long vaddr,
+ unsigned long pfn,
+ unsigned long size,
+ pgprot_t prot)
+{
+ phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
+ return remap_pfn_range(vma, vaddr, pfn, size, prot);
+}
#else
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
- remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+ remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
#include <asm-generic/pgtable.h>
/*
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 149c17aea2fe..af353a9bce21 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -501,6 +501,13 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
/* We provide our own get_unmapped_area to provide cache coherency */
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index f83f0a1b2f57..19dfb7abaa21 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -740,11 +740,27 @@ static inline int io_remap_page_range(struct vm_area_struct *vma,
phys_addr_t paddr64 = fixup_bigphys_addr(paddr, size);
return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
}
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long vaddr,
+ unsigned long pfn,
+ unsigned long size,
+ pgprot_t prot)
+{
+ phys_addr_t paddr64 = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
+ return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
+}
#else
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
/*
* No page table caches to initialise
*/
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 9d0c8e98642b..8a79ece01aa5 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -532,6 +532,13 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
void pgtable_cache_init(void);
extern void hpte_init_native(void);
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index e3592103f4d1..8a9689d5366f 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -279,6 +279,13 @@ typedef pte_t *pte_addr_t;
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
/*
* No page table caches to initialise
*/
diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h
index 8157a2a727cc..45f70c0f4a5e 100644
--- a/include/asm-sh64/pgtable.h
+++ b/include/asm-sh64/pgtable.h
@@ -482,6 +482,14 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
#endif /* !__ASSEMBLY__ */
/*
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index f45b1801be98..3d2418c28ff5 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -454,8 +454,20 @@ extern unsigned long *sparc_valid_addr_bitmap;
#define kern_addr_valid(addr) \
(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
-extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long to,
+extern int io_remap_page_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long to,
unsigned long size, pgprot_t prot, int space);
+extern int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot);
+
+/*
+ * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
+ * its high 4 bits. These macros/functions put it there or get it from there.
+ */
+#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
+#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
+#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
#include <asm-generic/pgtable.h>
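
A user-space sanity sketch (not from the patch; assumes a 32-bit sparc build, so BITS_PER_LONG is 32) of the round trip the comment above describes: MK_IOSPACE_PFN() places the 4-bit iospace above the 28-bit pfn, and GET_IOSPACE()/GET_PFN() take it apart again.

#include <assert.h>

#define BITS_PER_LONG 32                       /* assumed sparc32 build          */
#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)

int main(void)
{
        unsigned long pfn    = 0x0123456UL;    /* hypothetical page frame number */
        unsigned long space  = 0x2UL;          /* hypothetical iospace code      */
        unsigned long packed = MK_IOSPACE_PFN(space, pfn);

        assert(GET_IOSPACE(packed) == space);
        assert(GET_PFN(packed) == pfn);
        return 0;
}
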
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 886dd0c7c454..dfb8a8886318 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -16,6 +16,7 @@
#include <linux/config.h>
#include <linux/compiler.h>
+#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/system.h>
@@ -431,6 +432,17 @@ extern unsigned long *sparc64_valid_addr_bitmap;
extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
unsigned long offset,
unsigned long size, pgprot_t prot, int space);
+extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn,
+ unsigned long size, pgprot_t prot);
+
+/*
+ * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
+ * its high 4 bits. These macros/functions put it there or get it from there.
+ */
+#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
+#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
+#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
/* Override for {pgd,pmd}_addr_end() to deal with the virtual address
* space hole. We simply sign extend bit 43.
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index c0af27a62352..dc6b6f2604e8 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -407,6 +407,13 @@ extern int kern_addr_valid(unsigned long addr);
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
#define HAVE_ARCH_UNMAPPED_AREA
#define pgtable_cache_init() do { } while (0)