From 6c08cc64d194dc5cc3dfc785517098d3b161c05f Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Fri, 9 Jan 2026 17:31:33 +0800
Subject: mm: cma: kill cma_pages_valid()

Kill cma_pages_valid(), which is only used in cma_release(), and clean
up the code duplication between validating CMA pages and finding the
CMA memrange.

Link: https://lkml.kernel.org/r/20260109093136.1491549-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang
Reviewed-by: Jane Chu
Reviewed-by: Zi Yan
Reviewed-by: Muchun Song
Acked-by: David Hildenbrand
Cc: Brendan Jackman
Cc: Johannes Weiner
Cc: Matthew Wilcox (Oracle)
Cc: Oscar Salvador
Cc: Sidhartha Kumar
Cc: Vlastimil Babka
Cc: Claudiu Beznea
Cc: Mark Brown
Signed-off-by: Andrew Morton
---
 include/linux/cma.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/linux/cma.h')

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..e5745d2aec55 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -49,7 +49,6 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
 			      bool no_warn);
-extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
--
cgit v1.2.3
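For illustration, a minimal caller-side sketch in C (the my_driver_free_buffer() wrapper is hypothetical, not part of the patch): with cma_pages_valid() gone, the validation happens inside cma_release() itself, so a caller only needs its return value.

    #include <linux/cma.h>
    #include <linux/printk.h>

    /*
     * Hypothetical caller: previously one could call cma_pages_valid()
     * before cma_release(); the same check now runs inside cma_release(),
     * which returns false when the range is not part of this CMA area.
     */
    static void my_driver_free_buffer(struct cma *cma, struct page *pages,
                                      unsigned long count)
    {
            if (!cma_release(cma, pages, count))
                    pr_warn("pages %p not from this CMA area\n", pages);
    }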
From 9bda131c6093e9c4a8739e2eeb65ba4d5fbefc2f Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Fri, 9 Jan 2026 17:31:35 +0800
Subject: mm: cma: add cma_alloc_frozen{_compound}()

Introduce the cma_alloc_frozen{_compound}() helpers to allocate pages
without incrementing their refcount, then convert hugetlb CMA to use
cma_alloc_frozen_compound() and cma_release_frozen(), and remove the
now-unused cma_{alloc,free}_folio(). Also move cma_validate_zones()
into mm/internal.h, since it has no users outside mm.

After the above changes, set_pages_refcounted() is only called on
non-compound pages, so remove its PageHead handling.

Link: https://lkml.kernel.org/r/20260109093136.1491549-6-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang
Reviewed-by: Zi Yan
Cc: Brendan Jackman
Cc: David Hildenbrand
Cc: Jane Chu
Cc: Johannes Weiner
Cc: Matthew Wilcox (Oracle)
Cc: Muchun Song
Cc: Oscar Salvador
Cc: Sidhartha Kumar
Cc: Vlastimil Babka
Cc: Claudiu Beznea
Cc: Mark Brown
Signed-off-by: Andrew Morton
---
 include/linux/cma.h |  26 +++----------
 mm/cma.c            | 107 +++++++++++++++++++++++++++++++++++-----------------
 mm/hugetlb_cma.c    |  24 +++++++-----
 mm/internal.h       |  10 ++---
 4 files changed, 97 insertions(+), 70 deletions(-)

(limited to 'include/linux/cma.h')

diff --git a/include/linux/cma.h b/include/linux/cma.h
index e5745d2aec55..e2a690f7e77e 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -51,29 +51,15 @@ extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int
 			      bool no_warn);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
+struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
+		unsigned int align, bool no_warn);
+struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order);
+bool cma_release_frozen(struct cma *cma, const struct page *pages,
+		unsigned long count);
+
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
 extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
 extern void cma_reserve_pages_on_error(struct cma *cma);
 
-#ifdef CONFIG_CMA
-struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
-bool cma_free_folio(struct cma *cma, const struct folio *folio);
-bool cma_validate_zones(struct cma *cma);
-#else
-static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
-{
-	return NULL;
-}
-
-static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
-{
-	return false;
-}
-static inline bool cma_validate_zones(struct cma *cma)
-{
-	return false;
-}
-#endif
-
 #endif
diff --git a/mm/cma.c b/mm/cma.c
index 0e8c146424fb..b80b60ed4927 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -856,8 +856,8 @@ out:
 	return ret;
 }
 
-static struct page *__cma_alloc(struct cma *cma, unsigned long count,
-				unsigned int align, gfp_t gfp)
+static struct page *__cma_alloc_frozen(struct cma *cma,
+		unsigned long count, unsigned int align, gfp_t gfp)
 {
 	struct page *page = NULL;
 	int ret = -ENOMEM, r;
@@ -904,7 +904,6 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
 	trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0,
 			       page, count, align, ret);
 	if (page) {
-		set_pages_refcounted(page, count);
 		count_vm_event(CMA_ALLOC_SUCCESS);
 		cma_sysfs_account_success_pages(cma, count);
 	} else {
@@ -915,6 +914,21 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
 	return page;
 }
 
+struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
+		unsigned int align, bool no_warn)
+{
+	gfp_t gfp = GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0);
+
+	return __cma_alloc_frozen(cma, count, align, gfp);
+}
+
+struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order)
+{
+	gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN;
+
+	return __cma_alloc_frozen(cma, 1 << order, order, gfp);
+}
+
 /**
  * cma_alloc() - allocate pages from contiguous area
  * @cma: Contiguous memory region for which the allocation is performed.
@@ -927,43 +941,27 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
  */
 struct page *cma_alloc(struct cma *cma, unsigned long count,
 		       unsigned int align, bool no_warn)
-{
-	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-}
-
-struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
 {
 	struct page *page;
 
-	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
-		return NULL;
-
-	page = __cma_alloc(cma, 1 << order, order, gfp);
+	page = cma_alloc_frozen(cma, count, align, no_warn);
+	if (page)
+		set_pages_refcounted(page, count);
 
-	return page ? page_folio(page) : NULL;
+	return page;
 }
 
-/**
- * cma_release() - release allocated pages
- * @cma: Contiguous memory region for which the allocation is performed.
- * @pages: Allocated pages.
- * @count: Number of allocated pages.
- *
- * This function releases memory allocated by cma_alloc().
- * It returns false when provided pages do not belong to contiguous area and
- * true otherwise.
- */
-bool cma_release(struct cma *cma, const struct page *pages,
-		 unsigned long count)
+static struct cma_memrange *find_cma_memrange(struct cma *cma,
+		const struct page *pages, unsigned long count)
 {
-	struct cma_memrange *cmr;
+	struct cma_memrange *cmr = NULL;
 	unsigned long pfn, end_pfn;
 	int r;
 
 	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
 
 	if (!cma || !pages || count > cma->count)
-		return false;
+		return NULL;
 
 	pfn = page_to_pfn(pages);
 
@@ -981,27 +979,66 @@ bool cma_release(struct cma *cma, const struct page *pages,
 	if (r == cma->nranges) {
 		pr_debug("%s(page %p, count %lu, no cma range matches the page range)\n",
			 __func__, (void *)pages, count);
-		return false;
+		return NULL;
 	}
 
-	if (PageHead(pages))
-		__free_pages((struct page *)pages, compound_order(pages));
-	else
-		free_contig_range(pfn, count);
+	return cmr;
+}
+
+static void __cma_release_frozen(struct cma *cma, struct cma_memrange *cmr,
+		const struct page *pages, unsigned long count)
+{
+	unsigned long pfn = page_to_pfn(pages);
+
+	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
+
+	free_contig_frozen_range(pfn, count);
 	cma_clear_bitmap(cma, cmr, pfn, count);
 	cma_sysfs_account_release_pages(cma, count);
 	trace_cma_release(cma->name, pfn, pages, count);
+}
+
+/**
+ * cma_release() - release allocated pages
+ * @cma: Contiguous memory region for which the allocation is performed.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by cma_alloc().
+ * It returns false when provided pages do not belong to contiguous area and
+ * true otherwise.
+ */
+bool cma_release(struct cma *cma, const struct page *pages,
+		unsigned long count)
+{
+	struct cma_memrange *cmr;
+	unsigned long i, pfn;
+
+	cmr = find_cma_memrange(cma, pages, count);
+	if (!cmr)
+		return false;
+
+	pfn = page_to_pfn(pages);
+	for (i = 0; i < count; i++, pfn++)
+		VM_WARN_ON(!put_page_testzero(pfn_to_page(pfn)));
+
+	__cma_release_frozen(cma, cmr, pages, count);
 
 	return true;
 }
 
-bool cma_free_folio(struct cma *cma, const struct folio *folio)
+bool cma_release_frozen(struct cma *cma, const struct page *pages,
+		unsigned long count)
 {
-	if (WARN_ON(!folio_test_large(folio)))
+	struct cma_memrange *cmr;
+
+	cmr = find_cma_memrange(cma, pages, count);
+	if (!cmr)
 		return false;
 
-	return cma_release(cma, &folio->page, folio_nr_pages(folio));
+	__cma_release_frozen(cma, cmr, pages, count);
+
+	return true;
 }
 
 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
index f5e79103e110..58ceb6c9e410 100644
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c
@@ -20,35 +20,39 @@ static unsigned long hugetlb_cma_size __initdata;
 
 void hugetlb_cma_free_folio(struct folio *folio)
 {
-	int nid = folio_nid(folio);
+	folio_ref_dec(folio);
 
-	WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
+	WARN_ON_ONCE(!cma_release_frozen(hugetlb_cma[folio_nid(folio)],
+			&folio->page, folio_nr_pages(folio)));
 }
 
-
 struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
 				      int nid, nodemask_t *nodemask)
 {
 	int node;
-	struct folio *folio = NULL;
+	struct folio *folio;
+	struct page *page = NULL;
 
 	if (hugetlb_cma[nid])
-		folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
+		page = cma_alloc_frozen_compound(hugetlb_cma[nid], order);
 
-	if (!folio && !(gfp_mask & __GFP_THISNODE)) {
+	if (!page && !(gfp_mask & __GFP_THISNODE)) {
 		for_each_node_mask(node, *nodemask) {
 			if (node == nid || !hugetlb_cma[node])
 				continue;
 
-			folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
-			if (folio)
+			page = cma_alloc_frozen_compound(hugetlb_cma[node], order);
+			if (page)
 				break;
 		}
 	}
 
-	if (folio)
-		folio_set_hugetlb_cma(folio);
+	if (!page)
+		return NULL;
 
+	set_page_refcounted(page);
+	folio = page_folio(page);
+	folio_set_hugetlb_cma(folio);
 	return folio;
 }
diff --git a/mm/internal.h b/mm/internal.h
index 0623b865ad1a..27509a909915 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -584,11 +584,6 @@ static inline void set_pages_refcounted(struct page *page, unsigned long nr_page
 {
 	unsigned long pfn = page_to_pfn(page);
 
-	if (PageHead(page)) {
-		set_page_refcounted(page);
-		return;
-	}
-
 	for (; nr_pages--; pfn++)
 		set_page_refcounted(pfn_to_page(pfn));
 }
@@ -1014,9 +1009,14 @@ void init_cma_reserved_pageblock(struct page *page);
 struct cma;
 
 #ifdef CONFIG_CMA
+bool cma_validate_zones(struct cma *cma);
 void *cma_reserve_early(struct cma *cma, unsigned long size);
 void init_cma_pageblock(struct page *page);
 #else
+static inline bool cma_validate_zones(struct cma *cma)
+{
+	return false;
+}
 static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
 {
 	return NULL;
--
cgit v1.2.3
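To make the frozen API concrete, here is a usage sketch modeled on the mm/hugetlb_cma.c conversion above (the my_* wrappers are hypothetical; set_page_refcounted() comes from mm/internal.h, so this pattern is limited to code inside mm/). cma_alloc_frozen_compound() hands back pages with a refcount of zero, and the caller decides when to make them refcounted.

    /* Sketch assumes it is built inside mm/, like mm/hugetlb_cma.c. */
    #include <linux/cma.h>
    #include <linux/mm.h>
    #include "internal.h"   /* set_page_refcounted() */

    /* Allocate a frozen (refcount == 0) compound page, then publish it. */
    static struct folio *my_alloc_cma_folio(struct cma *cma, unsigned int order)
    {
            struct page *page = cma_alloc_frozen_compound(cma, order);

            if (!page)
                    return NULL;

            set_page_refcounted(page);      /* refcount 0 -> 1 */
            return page_folio(page);
    }

    /* Drop the last reference and hand the range back frozen. */
    static void my_free_cma_folio(struct cma *cma, struct folio *folio)
    {
            folio_ref_dec(folio);           /* refcount 1 -> 0 */
            WARN_ON_ONCE(!cma_release_frozen(cma, &folio->page,
                                             folio_nr_pages(folio)));
    }

The point of the frozen variants is visible in the hugetlb diff: the allocator no longer hands out pages with elevated refcounts that the caller must immediately adjust, which is also what lets set_pages_refcounted() drop its compound-page special case.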
From 0fd17e5983337231dc655e9ca0095d2ca3f47405 Mon Sep 17 00:00:00 2001
From: Oreoluwa Babatunde
Date: Mon, 26 Jan 2026 18:13:27 +0100
Subject: of: reserved_mem: Allow the reserved_mem framework to detect the "cma=" kernel param

When initializing the default CMA region, the "cma=" kernel parameter
takes priority over a DT-defined linux,cma-default region.

Hence, give the reserved_mem framework the ability to detect this so
that initialization of the DT-defined CMA region can be skipped
accordingly.

Signed-off-by: Oreoluwa Babatunde
Tested-by: Joy Zou
Acked-by: Rob Herring (Arm)
Fixes: 8a6e02d0c00e ("of: reserved_mem: Restructure how the reserved memory regions are processed")
Fixes: 2c223f7239f3 ("of: reserved_mem: Restructure call site for dma_contiguous_early_fixup()")
Link: https://lore.kernel.org/r/20251210002027.1171519-1-oreoluwa.babatunde@oss.qualcomm.com
[mszyprow: rebased onto v6.19-rc1, added fixes tags, added a stub for
 cma_skip_dt_default_reserved_mem() if no CONFIG_DMA_CMA is set]
Signed-off-by: Marek Szyprowski
---
 drivers/of/of_reserved_mem.c | 19 +++++++++++++++++--
 include/linux/cma.h          |  9 +++++++++
 kernel/dma/contiguous.c      | 16 ++++++++++------
 3 files changed, 36 insertions(+), 8 deletions(-)

(limited to 'include/linux/cma.h')

diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 5619ec917858..a2a13617c6f4 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -157,13 +157,19 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
 	phys_addr_t base, size;
 	int i, len;
 	const __be32 *prop;
-	bool nomap;
+	bool nomap, default_cma;
 
 	prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
 	if (!prop)
 		return -ENOENT;
 
 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+	default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
+
+	if (default_cma && cma_skip_dt_default_reserved_mem()) {
+		pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
+		return -EINVAL;
+	}
 
 	for (i = 0; i < len; i++) {
 		u64 b, s;
@@ -248,10 +254,13 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
 
 	fdt_for_each_subnode(child, fdt, node) {
 		const char *uname;
+		bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL);
 		u64 b, s;
 
 		if (!of_fdt_device_is_available(fdt, child))
 			continue;
+		if (default_cma && cma_skip_dt_default_reserved_mem())
+			continue;
 
 		if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
 			continue;
@@ -389,7 +398,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
 	phys_addr_t base = 0, align = 0, size;
 	int i, len;
 	const __be32 *prop;
-	bool nomap;
+	bool nomap, default_cma;
 	int ret;
 
 	prop = of_get_flat_dt_prop(node, "size", &len);
@@ -413,6 +422,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
 	}
 
 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+	default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
+
+	if (default_cma && cma_skip_dt_default_reserved_mem()) {
+		pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
+		return -EINVAL;
+	}
 
 	/* Need adjust the alignment to satisfy the CMA requirement */
 	if (IS_ENABLED(CONFIG_CMA)
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..2e6931735880 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -57,6 +57,15 @@ extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long e
 
 extern void cma_reserve_pages_on_error(struct cma *cma);
 
+#ifdef CONFIG_DMA_CMA
+extern bool cma_skip_dt_default_reserved_mem(void);
+#else
+static inline bool cma_skip_dt_default_reserved_mem(void)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_CMA
 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
 bool cma_free_folio(struct cma *cma, const struct folio *folio);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index d8fd6f779f79..0e266979728b 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -91,6 +91,16 @@ static int __init early_cma(char *p)
 }
 early_param("cma", early_cma);
 
+/*
+ * cma_skip_dt_default_reserved_mem - This is called from the
+ * reserved_mem framework to detect if the default cma region is being
+ * set by the "cma=" kernel parameter.
+ */
+bool __init cma_skip_dt_default_reserved_mem(void)
+{
+	return size_cmdline != -1;
+}
+
 #ifdef CONFIG_DMA_NUMA_CMA
 
 static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
@@ -470,12 +480,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
 	struct cma *cma;
 	int err;
 
-	if (size_cmdline != -1 && default_cma) {
-		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
-			rmem->name);
-		return -EBUSY;
-	}
-
 	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
 	    of_get_flat_dt_prop(node, "no-map", NULL))
 		return -EINVAL;
--
cgit v1.2.3
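Taken together, the three hunks move the "cma=" override from a late -EBUSY bypass in rmem_cma_setup() to an early skip during the FDT scan. A condensed C sketch of the resulting check (maybe_skip_default_cma() is a hypothetical summary helper, not added by the patch; the real checks live in __reserved_mem_reserve_reg() and __reserved_mem_alloc_size() above):

    #include <linux/cma.h>
    #include <linux/of_fdt.h>

    /* Hypothetical condensation of the check added to the reserved_mem scan. */
    static int __init maybe_skip_default_cma(unsigned long node)
    {
            bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);

            /*
             * cma_skip_dt_default_reserved_mem() is true once "cma=<size>" has
             * been parsed (size_cmdline != -1), so the DT-defined default
             * region is rejected before any memory is reserved for it.
             */
            if (default_cma && cma_skip_dt_default_reserved_mem())
                    return -EINVAL; /* DT default region skipped */

            return 0;               /* DT default region proceeds */
    }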