Diffstat (limited to 'arch/sparc/kernel/iommu.c'):

 arch/sparc/kernel/iommu.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index da0363692528..46ef88bc9c26 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -260,26 +260,35 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 	free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
-				  unsigned long offset, size_t sz,
-				  enum dma_data_direction direction,
+static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
+				  size_t sz, enum dma_data_direction direction,
 				  unsigned long attrs)
 {
 	struct iommu *iommu;
 	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, npages, oaddr;
-	unsigned long i, base_paddr, ctx;
+	unsigned long i, ctx;
 	u32 bus_addr, ret;
 	unsigned long iopte_protection;
 
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		/*
+		 * This check is included because older versions of the code
+		 * lacked MMIO path support, and my ability to test this path
+		 * is limited. However, from a software technical standpoint,
+		 * there is no restriction, as the following code operates
+		 * solely on physical addresses.
+		 */
+		goto bad_no_ctx;
+
 	iommu = dev->archdata.iommu;
 	strbuf = dev->archdata.stc;
 
 	if (unlikely(direction == DMA_NONE))
 		goto bad_no_ctx;
 
-	oaddr = (unsigned long)(page_address(page) + offset);
+	oaddr = (unsigned long)(phys_to_virt(phys));
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
@@ -296,7 +305,6 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
 	bus_addr = (iommu->tbl.table_map_base +
 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
-	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -304,8 +312,8 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
 	if (direction != DMA_TO_DEVICE)
 		iopte_protection |= IOPTE_WRITE;
 
-	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
-		iopte_val(*base) = iopte_protection | base_paddr;
+	for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE)
+		iopte_val(*base) = iopte_protection | phys;
 
 	return ret;
 
@@ -383,7 +391,7 @@ do_flush_sync:
 			  vaddr, ctx, npages);
 }
 
-static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+static void dma_4u_unmap_phys(struct device *dev, dma_addr_t bus_addr,
 			      size_t sz, enum dma_data_direction direction,
 			      unsigned long attrs)
 {
@@ -753,8 +761,8 @@ static int dma_4u_supported(struct device *dev, u64 device_mask)
 static const struct dma_map_ops sun4u_dma_ops = {
 	.alloc			= dma_4u_alloc_coherent,
 	.free			= dma_4u_free_coherent,
-	.map_page		= dma_4u_map_page,
-	.unmap_page		= dma_4u_unmap_page,
+	.map_phys		= dma_4u_map_phys,
+	.unmap_phys		= dma_4u_unmap_phys,
 	.map_sg			= dma_4u_map_sg,
 	.unmap_sg		= dma_4u_unmap_sg,
 	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
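
For readers following the conversion: the retired .map_page callback took a struct page plus a byte offset, and that pair collapses into the single phys_addr_t that .map_phys consumes. The sketch below illustrates the relationship under that reading of the diff; example_map_page_via_phys() is a hypothetical wrapper invented for demonstration (it is not part of this commit or of the kernel API), and it assumes it lives in the same file as the static dma_4u_map_phys() shown above.

/*
 * Illustrative sketch, not from this commit: how a map_page-style
 * request reduces to the new map_phys form.  page_to_phys() and
 * phys_to_virt() are real kernel helpers; example_map_page_via_phys()
 * is a hypothetical wrapper used only to show the equivalence.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static dma_addr_t example_map_page_via_phys(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t sz,
					    enum dma_data_direction direction,
					    unsigned long attrs)
{
	/* A (page, offset) pair is just a physical address in disguise. */
	phys_addr_t phys = page_to_phys(page) + offset;

	/*
	 * dma_4u_map_phys() recovers the kernel virtual address on its
	 * own via phys_to_virt(phys), so no struct page is needed; that
	 * is why the diff can drop base_paddr and the __pa() conversion.
	 */
	return dma_4u_map_phys(dev, phys, sz, direction, attrs);
}

The design point: because the new callback operates purely on physical addresses, it no longer depends on a struct page existing for the memory being mapped. The DMA_ATTR_MMIO rejection at the top of dma_4u_map_phys() is, per the patch's own comment, a testing limitation rather than a software restriction.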
