Diffstat (limited to 'arch/arm')
| -rw-r--r-- | arch/arm/Kconfig                       |   2 |
| -rw-r--r-- | arch/arm/common/sa1111.c               |   2 |
| -rw-r--r-- | arch/arm/configs/aspeed_g4_defconfig   |   1 |
| -rw-r--r-- | arch/arm/configs/aspeed_g5_defconfig   |   3 |
| -rw-r--r-- | arch/arm/configs/hisi_defconfig        |   1 |
| -rw-r--r-- | arch/arm/configs/lpc18xx_defconfig     |   1 |
| -rw-r--r-- | arch/arm/configs/shmobile_defconfig    |   1 |
| -rw-r--r-- | arch/arm/include/asm/hardware/sa1111.h |   2 |
| -rw-r--r-- | arch/arm/mm/dma-mapping.c              | 180 |
9 files changed, 56 insertions, 137 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4fb985b76e97..ff61891abe53 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1161,8 +1161,6 @@ config AEABI
 	  disambiguate both ABIs and allow for backward compatibility support
 	  (selected with CONFIG_OABI_COMPAT).
 
-	  To use this you need GCC version 4.0.0 or later.
-
 config OABI_COMPAT
 	bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
 	depends on AEABI && !THUMB2_KERNEL
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 3389a70e4d49..04ff75dcc20e 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -1371,7 +1371,7 @@ static void sa1111_bus_remove(struct device *dev)
 		drv->remove(sadev);
 }
 
-struct bus_type sa1111_bus_type = {
+const struct bus_type sa1111_bus_type = {
 	.name		= "sa1111-rab",
 	.match		= sa1111_match,
 	.probe		= sa1111_bus_probe,
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index 28b724d59e7e..45d8738abb75 100644
--- a/arch/arm/configs/aspeed_g4_defconfig
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -117,7 +117,6 @@ CONFIG_KEYBOARD_GPIO_POLLED=y
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=6
 CONFIG_SERIAL_8250_RUNTIME_UARTS=6
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index 61cee1e7ebea..2e6ea13c1e9b 100644
--- a/arch/arm/configs/aspeed_g5_defconfig
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -138,7 +138,6 @@ CONFIG_SERIO_RAW=y
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=6
 CONFIG_SERIAL_8250_RUNTIME_UARTS=6
@@ -308,7 +307,7 @@ CONFIG_PANIC_ON_OOPS=y
 CONFIG_PANIC_TIMEOUT=-1
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=1
 CONFIG_WQ_WATCHDOG=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_FUNCTION_TRACER=y
diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig
index e19c1039fb93..384aade1a48b 100644
--- a/arch/arm/configs/hisi_defconfig
+++ b/arch/arm/configs/hisi_defconfig
@@ -35,7 +35,6 @@ CONFIG_NETDEVICES=y
 CONFIG_HIX5HD2_GMAC=y
 CONFIG_HIP04_ETH=y
 CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_8250_RUNTIME_UARTS=2
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index 2d489186e945..f142a6637ede 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -90,7 +90,6 @@ CONFIG_KEYBOARD_GPIO_POLLED=y
 # CONFIG_UNIX98_PTYS is not set
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_NONSTANDARD=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index c1fd469e2071..0085921833c3 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -75,7 +75,6 @@ CONFIG_INPUT_DA9063_ONKEY=y
 CONFIG_INPUT_ADXL34X=y
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 # CONFIG_SERIAL_8250_16550A_VARIANTS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
 # CONFIG_SERIAL_8250_PCI is not set
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index a815f39b4243..90b6a832108d 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -368,7 +368,7 @@
-extern struct bus_type sa1111_bus_type;
+extern const struct bus_type sa1111_bus_type;
 
 #define SA1111_DEVID_SBI	(1 << 0)
 #define SA1111_DEVID_SK		(1 << 1)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 08641a936394..a4c765d24692 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -624,16 +624,14 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	kfree(buf);
 }
 
-static void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, enum dma_data_direction dir,
+static void dma_cache_maint_page(phys_addr_t phys, size_t size,
+	enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
-	unsigned long pfn;
+	unsigned long offset = offset_in_page(phys);
+	unsigned long pfn = __phys_to_pfn(phys);
 	size_t left = size;
 
-	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
-
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages. But we still need to process highmem pages individually.
@@ -644,17 +642,18 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 		size_t len = left;
 		void *vaddr;
 
-		page = pfn_to_page(pfn);
-
-		if (PageHighMem(page)) {
+		phys = __pfn_to_phys(pfn);
+		if (PhysHighMem(phys)) {
 			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
 
 			if (cache_is_vipt_nonaliasing()) {
-				vaddr = kmap_atomic(page);
+				vaddr = kmap_atomic_pfn(pfn);
 				op(vaddr + offset, len, dir);
 				kunmap_atomic(vaddr);
 			} else {
+				struct page *page = phys_to_page(phys);
+
 				vaddr = kmap_high_get(page);
 				if (vaddr) {
 					op(vaddr + offset, len, dir);
@@ -662,7 +661,8 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				}
 			}
 		} else {
-			vaddr = page_address(page) + offset;
+			phys += offset;
+			vaddr = phys_to_virt(phys);
 			op(vaddr, len, dir);
 		}
 		offset = 0;
@@ -676,14 +676,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
  * Note: Drivers should NOT use this function directly.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+	enum dma_data_direction dir)
 {
-	phys_addr_t paddr;
+	dma_cache_maint_page(paddr, size, dir, dmac_map_area);
 
-	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
-
-	paddr = page_to_phys(page) + off;
 	if (dir == DMA_FROM_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 	} else {
@@ -692,17 +689,15 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 
-static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+	enum dma_data_direction dir)
 {
-	phys_addr_t paddr = page_to_phys(page) + off;
-
 	/* FIXME: non-speculating: not required */
 	/* in any case, don't bother invalidating if DMA to device */
 	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 
-		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+		dma_cache_maint_page(paddr, size, dir, dmac_unmap_area);
 	}
 
 	/*
@@ -737,6 +732,9 @@ static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
 	if (attrs & DMA_ATTR_PRIVILEGED)
 		prot |= IOMMU_PRIV;
 
+	if (attrs & DMA_ATTR_MMIO)
+		prot |= IOMMU_MMIO;
+
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 		return prot | IOMMU_READ | IOMMU_WRITE;
@@ -1205,7 +1203,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_device(sg_phys(s), s->length, dir);
 
 	prot = __dma_info_to_prot(dir, attrs);
 
@@ -1307,8 +1305,7 @@ static void arm_iommu_unmap_sg(struct device *dev,
 			__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			__dma_page_dev_to_cpu(sg_page(s), s->offset,
-					      s->length, dir);
+			arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
 	}
 }
 
@@ -1330,7 +1327,7 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
 		return;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
 }
 
@@ -1352,29 +1349,31 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 		return;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_device(sg_phys(s), s->length, dir);
 }
 
 /**
- * arm_iommu_map_page
+ * arm_iommu_map_phys
  * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
+ * @phys: physical address that buffer resides in
  * @size: size of buffer to map
  * @dir: DMA transfer direction
+ * @attrs: DMA mapping attributes
  *
 * IOMMU aware version of arm_dma_map_page()
 */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction dir,
-	unsigned long attrs)
+static dma_addr_t arm_iommu_map_phys(struct device *dev, phys_addr_t phys,
+	size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	int len = PAGE_ALIGN(size + offset_in_page(phys));
+	phys_addr_t addr = phys & PAGE_MASK;
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+	int ret, prot;
+
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+		arch_sync_dma_for_device(phys, size, dir);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
@@ -1382,12 +1381,11 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 
 	prot = __dma_info_to_prot(dir, attrs);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-			prot, GFP_KERNEL);
+	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + offset_in_page(phys);
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_MAPPING_ERROR;
 }
@@ -1399,82 +1397,27 @@ fail:
  * @handle: DMA address of buffer
  * @size: size of buffer (same as passed to dma_map_page)
  * @dir: DMA transfer direction (same as passed to dma_map_page)
+ * @attrs: DMA mapping attributes
  *
- * IOMMU aware version of arm_dma_unmap_page()
+ * IOMMU aware version of arm_dma_unmap_phys()
  */
-static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_iommu_unmap_phys(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
 	if (!iova)
 		return;
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-		__dma_page_dev_to_cpu(page, offset, size, dir);
-	}
-
-	iommu_unmap(mapping->domain, iova, len);
-	__free_iova(mapping, iova, len);
-}
-
-/**
- * arm_iommu_map_resource - map a device resource for DMA
- * @dev: valid struct device pointer
- * @phys_addr: physical address of resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static dma_addr_t arm_iommu_map_resource(struct device *dev,
-		phys_addr_t phys_addr, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t dma_addr;
-	int ret, prot;
-	phys_addr_t addr = phys_addr & PAGE_MASK;
-	unsigned int offset = phys_addr & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_MAPPING_ERROR)
-		return dma_addr;
-
-	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
-
-	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
-	if (ret < 0)
-		goto fail;
-
-	return dma_addr + offset;
-fail:
-	__free_iova(mapping, dma_addr, len);
-	return DMA_MAPPING_ERROR;
-}
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
+		phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
 
-/**
- * arm_iommu_unmap_resource - unmap a device DMA resource
- * @dev: valid struct device pointer
- * @dma_handle: DMA address to resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova = dma_handle & PAGE_MASK;
-	unsigned int offset = dma_handle & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	if (!iova)
-		return;
+		arch_sync_dma_for_cpu(phys + offset, size, dir);
+	}
 
 	iommu_unmap(mapping->domain, iova, len);
 	__free_iova(mapping, iova, len);
@@ -1485,14 +1428,14 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;
 
 	if (dev->dma_coherent || !iova)
 		return;
 
-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, iova);
+	arch_sync_dma_for_cpu(phys + offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1500,14 +1443,14 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;
 
 	if (dev->dma_coherent || !iova)
 		return;
 
-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, iova);
+	arch_sync_dma_for_device(phys + offset, size, dir);
 }
 
 static const struct dma_map_ops iommu_ops = {
@@ -1516,8 +1459,8 @@ static const struct dma_map_ops iommu_ops = {
 	.mmap		= arm_iommu_mmap_attrs,
 	.get_sgtable	= arm_iommu_get_sgtable,
 
-	.map_page		= arm_iommu_map_page,
-	.unmap_page		= arm_iommu_unmap_page,
+	.map_phys		= arm_iommu_map_phys,
+	.unmap_phys		= arm_iommu_unmap_phys,
 
 	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
 	.sync_single_for_device	= arm_iommu_sync_single_for_device,
@@ -1525,9 +1468,6 @@ static const struct dma_map_ops iommu_ops = {
 	.unmap_sg		= arm_iommu_unmap_sg,
 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
-
-	.map_resource		= arm_iommu_map_resource,
-	.unmap_resource		= arm_iommu_unmap_resource,
 };
 
 /**
@@ -1794,20 +1734,6 @@ void arch_teardown_dma_ops(struct device *dev)
 	set_dma_ops(dev, NULL);
 }
 
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			size, dir);
-}
-
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			size, dir);
-}
-
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		     gfp_t gfp, unsigned long attrs)
 {
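
The core of the dma-mapping.c change above is that dma_cache_maint_page() and the IOMMU sync paths now carry a single phys_addr_t instead of a (struct page *, offset) pair, and the dedicated map_resource/unmap_resource callbacks collapse into the phys path via DMA_ATTR_MMIO/IOMMU_MMIO. The sketch below is illustrative only, not part of the commit: sketch_cache_maint() and its do_cache_op callback are made-up names, and only the lowmem branch is shown; the highmem branches in the real function still go through kmap_atomic_pfn()/kmap_high_get(). It spells out the equivalence the patch relies on: both the page frame number and the intra-page offset are recoverable from the physical address alone.

/* Illustrative sketch only -- hypothetical names, not from the patch. */
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/dma-direction.h>

static void sketch_cache_maint(phys_addr_t phys, size_t size,
			       enum dma_data_direction dir,
			       void (*do_cache_op)(const void *, size_t, int))
{
	/* What the old (struct page *page, unsigned long offset) pair encoded: */
	unsigned long offset = offset_in_page(phys);	/* phys & ~PAGE_MASK  */
	unsigned long pfn = __phys_to_pfn(phys);	/* phys >> PAGE_SHIFT */
	void *vaddr;

	/*
	 * Lowmem case only: the page has a permanent kernel mapping, so the
	 * virtual address follows directly from the physical address -- the
	 * same "phys += offset; vaddr = phys_to_virt(phys);" step seen in
	 * the patched dma_cache_maint_page().
	 */
	vaddr = phys_to_virt(__pfn_to_phys(pfn) + offset);

	do_cache_op(vaddr, size, dir);
}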
