| author | Boris Brezillon <boris.brezillon@collabora.com> | 2026-01-08 13:33:25 +0100 |
|---|---|---|
| committer | Adrián Larumbe <adrian.larumbe@collabora.com> | 2026-01-09 16:29:30 +0000 |
| commit | b49c35ba0ce1ce1779a2ad563c85fbf87d03689f (patch) | |
| tree | c34efbedd798731d14fcf686b0d5c9924f3eb624 | |
| parent | 29f4e7301910d7d1ece249cf34dfbe4dd7c2c6c4 (diff) | |
drm/panfrost: Fix a page leak in panfrost_mmu_map_fault_addr() when THP is on
drm_gem_put_pages(), which we rely on for returning BO pages to shmem,
assumes per-folio refcounting, not per-page. If we call
shmem_read_mapping_page() per-page, we break this assumption and leak
pages every time a huge page is allocated.
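
For context, a minimal sketch of the leak under THP (illustrative loop and names, not the driver code):

```c
/*
 * Minimal sketch of the leak, not the driver code: "mapping", "pages"
 * and "npages" stand in for the BO's shmem mapping and page array.
 * With THP enabled, one 2MiB shmem folio backs 512 consecutive pages.
 */
for (pgoff_t i = 0; i < npages; i++) {
	/* Each per-page lookup takes a reference on the backing
	 * *folio*, so a huge folio collects one ref per covered page. */
	pages[i] = shmem_read_mapping_page(mapping, i);
}

/*
 * Teardown assumes folio-granular refcounting and drops only one
 * reference per folio found in the array, leaving the extra refs
 * taken above behind: the huge page is never freed.
 */
drm_gem_put_pages(obj, pages, true, false);
```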
v2:
- Rework the for() loop logic to better match the folio-granular
  allocation scheme
Cc: Loïc Molinari <loic.molinari@collabora.com>
Fixes: c12e9fcb5a5a ("drm/panfrost: Introduce huge tmpfs mountpoint option")
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patch.msgid.link/20260108123325.1088195-1-boris.brezillon@collabora.com
Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
| -rw-r--r-- | drivers/gpu/drm/panfrost/panfrost_mmu.c | 40 |
1 file changed, 25 insertions, 15 deletions
```diff
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 8f3b7a7b6ad0..50ff30849361 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -587,12 +587,12 @@ out:
 static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 				       u64 addr)
 {
-	int ret, i;
+	int ret;
 	struct panfrost_gem_mapping *bomapping;
 	struct panfrost_gem_object *bo;
 	struct address_space *mapping;
 	struct drm_gem_object *obj;
-	pgoff_t page_offset;
+	pgoff_t page_offset, nr_pages;
 	struct sg_table *sgt;
 	struct page **pages;
 
@@ -613,6 +613,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	addr &= ~((u64)SZ_2M - 1);
 	page_offset = addr >> PAGE_SHIFT;
 	page_offset -= bomapping->mmnode.start;
+	nr_pages = bo->base.base.size >> PAGE_SHIFT;
 
 	obj = &bo->base.base;
 
@@ -626,8 +627,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 			goto err_unlock;
 		}
 
-		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
-				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
+		pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
 		if (!pages) {
 			kvfree(bo->sgts);
 			bo->sgts = NULL;
@@ -649,20 +649,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	mapping = bo->base.base.filp->f_mapping;
 	mapping_set_unevictable(mapping);
 
-	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
-		/* Can happen if the last fault only partially filled this
-		 * section of the pages array before failing. In that case
-		 * we skip already filled pages.
-		 */
-		if (pages[i])
-			continue;
+	for (pgoff_t pg = page_offset; pg < page_offset + NUM_FAULT_PAGES;) {
+		bool already_owned = false;
+		struct folio *folio;
 
-		pages[i] = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(pages[i])) {
-			ret = PTR_ERR(pages[i]);
-			pages[i] = NULL;
+		folio = shmem_read_folio(mapping, pg);
+		if (IS_ERR(folio)) {
+			ret = PTR_ERR(folio);
 			goto err_unlock;
 		}
+
+		pg &= ~(folio_nr_pages(folio) - 1);
+		for (u32 i = 0; i < folio_nr_pages(folio) && pg < nr_pages; i++) {
+			if (pages[pg])
+				already_owned = true;
+
+			pages[pg++] = folio_page(folio, i);
+		}
+
+		/* We always fill the page array at a folio granularity so
+		 * there's no valid reason for a folio range to be partially
+		 * populated.
+		 */
+		if (drm_WARN_ON(&pfdev->base, already_owned))
+			folio_put(folio);
 	}
 
 	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
```
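
One detail of the reworked loop worth spelling out: `pg &= ~(folio_nr_pages(folio) - 1)` rounds the faulting page index down to the folio's first page, which is valid because folio sizes are powers of two and folios are naturally aligned in the file. A standalone sketch of that arithmetic, with hypothetical values:

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: a fault on page index 1234 of a mapping
	 * backed by a 2MiB folio, i.e. folio_nr_pages() == 512. */
	unsigned long pg = 1234;
	unsigned long folio_pages = 512;	/* power of two */

	/* Clearing the low bits rounds pg down to the index of the
	 * folio's first page, so the fill loop always starts on a
	 * folio boundary. */
	pg &= ~(folio_pages - 1);

	printf("%lu\n", pg);	/* prints 1024 */
	return 0;
}
```

Filling the array a whole folio at a time leaves exactly one reference held per folio, matching what drm_gem_put_pages() later drops; the already_owned path releases the duplicate reference a repeated fault on the same range would otherwise pin.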
