Diffstat (limited to 'drivers/gpu/drm/drm_pagemap.c')
 -rw-r--r--  drivers/gpu/drm/drm_pagemap.c | 138
 1 file changed, 91 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 1da55322af12..22c44807e3fe 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -202,7 +202,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
/**
* drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
* @dev: The device for which the pages are being mapped
- * @dma_addr: Array to store DMA addresses corresponding to mapped pages
+ * @pagemap_addr: Array to store DMA information corresponding to mapped pages
* @migrate_pfn: Array of migrate page frame numbers to map
* @npages: Number of pages to map
* @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
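
For reference, the new parameter type bundles a DMA address with the
interconnect protocol, mapping order and DMA direction. A minimal sketch of
the structure as implied by its use in this patch (the authoritative
definition lives in include/drm/drm_pagemap.h; the field widths here are
assumptions):

/* Sketch only: layout inferred from the call sites below. */
struct drm_pagemap_addr {
	dma_addr_t addr;	/* device DMA address of the mapping */
	u64 proto : 54;		/* e.g. DRM_INTERCONNECT_SYSTEM */
	u64 order : 8;		/* page order covered by this mapping */
	u64 dir : 2;		/* enum dma_data_direction it was mapped with */
};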
@@ -215,25 +215,39 @@ static void drm_pagemap_get_devmem_page(struct page *page,
* Returns: 0 on success, -EFAULT if an error occurs during mapping.
*/
static int drm_pagemap_migrate_map_pages(struct device *dev,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long *migrate_pfn,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+ dma_addr_t dma_addr;
+ struct folio *folio;
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next;
if (WARN_ON_ONCE(is_zone_device_page(page)))
return -EFAULT;
- dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(dev, dma_addr[i]))
+ folio = page_folio(page);
+ order = folio_order(folio);
+
+ dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
+ if (dma_mapping_error(dev, dma_addr))
return -EFAULT;
+
+ pagemap_addr[i] =
+ drm_pagemap_addr_encode(dma_addr,
+ DRM_INTERCONNECT_SYSTEM,
+ order, dir);
+
+next:
+ i += NR_PAGES(order);
}
return 0;
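
The map loop now advances by the number of base pages each mapping covers.
Plausible definitions of the NR_PAGES() helper and the
drm_pagemap_addr_encode() constructor, consistent with how they are called
above (both are assumptions; the real definitions live elsewhere in the
tree):

/* Assumed helper: base pages covered by a mapping of the given order. */
#define NR_PAGES(order)	(1U << (order))

/* Assumed constructor, mirroring the call site above. */
static inline struct drm_pagemap_addr
drm_pagemap_addr_encode(dma_addr_t addr,
			enum drm_interconnect_protocol proto,
			unsigned int order,
			enum dma_data_direction dir)
{
	return (struct drm_pagemap_addr) {
		.addr = addr,
		.proto = proto,
		.order = order,
		.dir = dir,
	};
}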
@@ -242,7 +256,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
/**
* drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
* @dev: The device for which the pages were mapped
- * @dma_addr: Array of DMA addresses corresponding to mapped pages
+ * @pagemap_addr: Array of DMA information corresponding to mapped pages
* @npages: Number of pages to unmap
* @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
*
@@ -251,17 +265,20 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
* if it's valid and not already unmapped, and unmaps the corresponding page.
*/
static void drm_pagemap_migrate_unmap_pages(struct device *dev,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
- for (i = 0; i < npages; ++i) {
- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
- continue;
+ for (i = 0; i < npages;) {
+ if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
+ goto next;
- dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+ dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
+
+next:
+ i += NR_PAGES(pagemap_addr[i].order);
}
}
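
Both helpers now walk the array with a variable stride, so only the first
slot of each high-order mapping carries valid data. A standalone
illustration of the pattern (not driver code): counting distinct mappings
in a mixed-order array visits indices 0, 16, 17, ... when entry 0 is an
order-4 mapping, since NR_PAGES(4) == 16.

/* Illustration only: same stride pattern as the helpers above. */
static unsigned long count_mappings(const struct drm_pagemap_addr *pagemap_addr,
				    unsigned long npages)
{
	unsigned long i, n = 0;

	for (i = 0; i < npages; i += NR_PAGES(pagemap_addr[i].order))
		n++;

	return n;
}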
@@ -314,7 +331,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct vm_area_struct *vas;
struct drm_pagemap_zdd *zdd = NULL;
struct page **pages;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
void *buf;
int err;
@@ -340,14 +357,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
goto err_out;
}
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
zdd = drm_pagemap_zdd_alloc(pgmap_owner);
if (!zdd) {
@@ -377,8 +394,9 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
if (err)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
migrate.src, npages, DMA_TO_DEVICE);
+
if (err)
goto err_finalize;
@@ -390,7 +408,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
drm_pagemap_get_devmem_page(page, zdd);
}
- err = ops->copy_to_devmem(pages, dma_addr, npages);
+ err = ops->copy_to_devmem(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
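
With this change the copy callbacks receive per-mapping DMA information
rather than a flat dma_addr_t array. The expected shape of the affected
drm_pagemap_devmem_ops methods, inferred from the call sites in this patch
(a sketch, not the authoritative header):

/* Sketch: callbacks as invoked here; implementations can read
 * pagemap_addr[i].order to size each copy chunk.
 */
struct drm_pagemap_devmem_ops {
	/* ... */
	int (*copy_to_devmem)(struct page **pages,
			      struct drm_pagemap_addr *pagemap_addr,
			      unsigned long npages);
	int (*copy_to_ram)(struct page **pages,
			   struct drm_pagemap_addr *pagemap_addr,
			   unsigned long npages);
};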
@@ -404,7 +422,7 @@ err_finalize:
drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
- drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_TO_DEVICE);
err_free:
if (zdd)
@@ -442,54 +460,80 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
{
unsigned long i;
- for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
- struct page *page, *src_page;
+ for (i = 0; i < npages;) {
+ struct page *page = NULL, *src_page;
+ struct folio *folio;
+ unsigned int order = 0;
if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
- continue;
+ goto next;
src_page = migrate_pfn_to_page(src_mpfn[i]);
if (!src_page)
- continue;
+ goto next;
if (fault_page) {
if (src_page->zone_device_data !=
fault_page->zone_device_data)
- continue;
+ goto next;
}
+ order = folio_order(page_folio(src_page));
+
+ /* TODO: Support fallback to single pages if THP allocation fails */
if (vas)
- page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+ folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
else
- page = alloc_page(GFP_HIGHUSER);
+ folio = folio_alloc(GFP_HIGHUSER, order);
- if (!page)
+ if (!folio)
goto free_pages;
+ page = folio_page(folio, 0);
mpfn[i] = migrate_pfn(page_to_pfn(page));
+
+next:
+ if (page)
+ addr += page_size(page);
+ else
+ addr += PAGE_SIZE;
+
+ i += NR_PAGES(order);
}
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next_lock;
- WARN_ON_ONCE(!trylock_page(page));
- ++*mpages;
+ WARN_ON_ONCE(!folio_trylock(page_folio(page)));
+
+ order = folio_order(page_folio(page));
+ *mpages += NR_PAGES(order);
+
+next_lock:
+ i += NR_PAGES(order);
}
return 0;
free_pages:
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next_put;
+
+ order = folio_order(page_folio(page));
put_page(page);
mpfn[i] = 0;
+
+next_put:
+ i += NR_PAGES(order);
}
return -ENOMEM;
}
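
The TODO above notes that a failed high-order allocation currently aborts
the whole migration. A hedged sketch of what an order-0 fallback at that
allocation site could look like (hypothetical, not part of this patch; the
source folio would also need per-page handling, which is why it remains a
TODO):

/* Hypothetical fallback: retry with a single page before giving up. */
if (!folio && order) {
	order = 0;
	folio = vas ? vma_alloc_folio(GFP_HIGHUSER, 0, vas, addr)
		    : folio_alloc(GFP_HIGHUSER, 0);
}
if (!folio)
	goto free_pages;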
@@ -509,7 +553,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
unsigned long npages, mpages = 0;
struct page **pages;
unsigned long *src, *dst;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
void *buf;
int i, err = 0;
unsigned int retry_count = 2;
@@ -520,7 +564,7 @@ retry:
if (!mmget_not_zero(devmem_allocation->mm))
return -EFAULT;
- buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
@@ -528,8 +572,8 @@ retry:
}
src = buf;
dst = buf + (sizeof(*src) * npages);
- dma_addr = buf + (2 * sizeof(*src) * npages);
- pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
if (err)
@@ -544,7 +588,7 @@ retry:
if (err || !mpages)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
dst, npages, DMA_FROM_DEVICE);
if (err)
goto err_finalize;
@@ -552,7 +596,7 @@ retry:
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
- err = ops->copy_to_ram(pages, dma_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -561,7 +605,7 @@ err_finalize:
drm_pagemap_migration_unlock_put_pages(npages, dst);
migrate_device_pages(src, dst, npages);
migrate_device_finalize(src, dst, npages);
- drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
@@ -612,7 +656,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
struct device *dev = NULL;
unsigned long npages, mpages = 0;
struct page **pages;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long start, end;
void *buf;
int i, err = 0;
@@ -637,14 +681,14 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
migrate.end = end;
npages = npages_in_range(start, end);
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
migrate.vma = vas;
migrate.src = buf;
@@ -680,7 +724,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
if (err)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
+ err = drm_pagemap_migrate_map_pages(dev, pagemap_addr, migrate.dst, npages,
DMA_FROM_DEVICE);
if (err)
goto err_finalize;
@@ -688,7 +732,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
- err = ops->copy_to_ram(pages, dma_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -698,7 +742,7 @@ err_finalize:
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
if (dev)
- drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
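
On the driver side, a copy callback can now issue one copy per DMA mapping
instead of one per page. A minimal consumer sketch under the assumptions
above; copy_chunk() is a hypothetical stand-in for a driver-specific
copy-engine submission, not a real API:

/* Hypothetical consumer: one copy per mapping, sized by the order. */
static int example_copy_to_devmem(struct page **pages,
				  struct drm_pagemap_addr *pagemap_addr,
				  unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i += NR_PAGES(pagemap_addr[i].order)) {
		size_t len = PAGE_SIZE << pagemap_addr[i].order;

		if (!pagemap_addr[i].addr)	/* hole: nothing mapped here */
			continue;

		copy_chunk(pages[i], pagemap_addr[i].addr, len);
	}

	return 0;
}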