author		Linus Torvalds <torvalds@linux-foundation.org>	2025-12-29 11:40:38 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-12-29 11:40:38 -0800
commit		0b34fd0feac6202602591dc15c58e25ffde41bd5
tree		b1775084d6a4683a9ca6ced5c262b9b5488275b4
parent		7839932417dd53bb09eb5a585a7a92781dfd7cb2
parent		d6b5a8d6f142ad0a8e45181f06e70b4746c4abc3
Merge tag 'mm-hotfixes-stable-2025-12-28-21-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "27 hotfixes. 12 are cc:stable, 18 are MM.

  There's a patch series from Jiayuan Chen which fixes some issues with
  KASAN and vmalloc. Apart from that it's the usual shower of singletons
  - please see the respective changelogs for details"

* tag 'mm-hotfixes-stable-2025-12-28-21-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (27 commits)
  mm/ksm: fix pte_unmap_unlock of wrong address in break_ksm_pmd_entry
  mm/page_owner: fix memory leak in page_owner_stack_fops->release()
  mm/memremap: fix spurious large folio warning for FS-DAX
  MAINTAINERS: notify the "Device Memory" community of memory hotplug changes
  sparse: update MAINTAINERS info
  mm/page_alloc: report 1 as zone_batchsize for !CONFIG_MMU
  mm: consider non-anon swap cache folios in folio_expected_ref_count()
  rust: maple_tree: rcu_read_lock() in destructor to silence lockdep
  mm: memcg: fix unit conversion for K() macro in OOM log
  mm: fixup pfnmap memory failure handling to use pgoff
  tools/mm/page_owner_sort: fix timestamp comparison for stable sorting
  selftests/mm: fix thread state check in uffd-unit-tests
  kernel/kexec: fix IMA when allocation happens in CMA area
  kernel/kexec: change the prototype of kimage_map_segment()
  MAINTAINERS: add ABI headers to KHO and LIVE UPDATE
  .mailmap: remove one of the entries for WangYuli
  mm/damon/vaddr: fix missing pte_unmap_unlock in damos_va_migrate_pmd_entry()
  MAINTAINERS: update one straggling entry for Bartosz Golaszewski
  mm/page_alloc: change all pageblocks migrate type on coalescing
  mm: leafops.h: correct kernel-doc function param. names
  ...
 .mailmap                                     |  4
 MAINTAINERS                                  |  8
 include/linux/genalloc.h                     |  1
 include/linux/kasan.h                        | 16
 include/linux/kexec.h                        |  4
 include/linux/leafops.h                      |  4
 include/linux/memory-failure.h               |  2
 include/linux/mm.h                           |  8
 kernel/kexec_core.c                          | 16
 lib/idr.c                                    |  2
 mm/damon/vaddr.c                             |  2
 mm/kasan/common.c                            | 32
 mm/kasan/hw_tags.c                           |  2
 mm/kasan/shadow.c                            |  4
 mm/ksm.c                                     |  2
 mm/memcontrol.c                              |  4
 mm/memory-failure.c                          | 29
 mm/memremap.c                                |  2
 mm/page_alloc.c                              | 26
 mm/page_owner.c                              |  2
 mm/vmalloc.c                                 |  8
 rust/kernel/maple_tree.rs                    | 11
 security/integrity/ima/ima_kexec.c           |  4
 tools/mm/page_owner_sort.c                   |  6
 tools/testing/radix-tree/idr-test.c          | 21
 tools/testing/selftests/mm/uffd-unit-tests.c |  2
 26 files changed, 163 insertions(+), 59 deletions(-)
diff --git a/.mailmap b/.mailmap
index 84309a39d329..7a6110d0e46d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -127,7 +127,8 @@ Barry Song <baohua@kernel.org> <Baohua.Song@csr.com>
Barry Song <baohua@kernel.org> <barry.song@analog.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
-Bartosz Golaszewski <brgl@bgdev.pl> <bgolaszewski@baylibre.com>
+Bartosz Golaszewski <brgl@kernel.org> <bartosz.golaszewski@linaro.org>
+Bartosz Golaszewski <brgl@kernel.org> <bgolaszewski@baylibre.com>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@simtec.co.uk>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@sifive.com>
Ben Gardner <bgardner@wabtec.com>
@@ -857,7 +858,6 @@ Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
WangYuli <wangyuli@aosc.io> <wangyl5933@chinaunicom.cn>
WangYuli <wangyuli@aosc.io> <wangyuli@deepin.org>
-WangYuli <wangyuli@aosc.io> <wangyuli@uniontech.com>
Weiwen Hu <huweiwen@linux.alibaba.com> <sehuww@mail.scut.edu.cn>
WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com>
Wen Gong <quic_wgong@quicinc.com> <wgong@codeaurora.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 12f49de7fe03..765ad2daa218 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13959,6 +13959,7 @@ S: Maintained
F: Documentation/admin-guide/mm/kho.rst
F: Documentation/core-api/kho/*
F: include/linux/kexec_handover.h
+F: include/linux/kho/
F: kernel/liveupdate/kexec_handover*
F: lib/test_kho.c
F: tools/testing/selftests/kho/
@@ -14637,6 +14638,7 @@ S: Maintained
F: Documentation/core-api/liveupdate.rst
F: Documentation/mm/memfd_preservation.rst
F: Documentation/userspace-api/liveupdate.rst
+F: include/linux/kho/abi/
F: include/linux/liveupdate.h
F: include/linux/liveupdate/
F: include/uapi/linux/liveupdate.h
@@ -16426,6 +16428,7 @@ MEMORY HOT(UN)PLUG
M: David Hildenbrand <david@kernel.org>
M: Oscar Salvador <osalvador@suse.de>
L: linux-mm@kvack.org
+L: linux-cxl@vger.kernel.org
S: Maintained
F: Documentation/admin-guide/mm/memory-hotplug.rst
F: Documentation/core-api/memory-hotplug.rst
@@ -16751,6 +16754,7 @@ F: tools/testing/selftests/mm/transhuge-stress.c
MEMORY MANAGEMENT - USERFAULTFD
M: Andrew Morton <akpm@linux-foundation.org>
+M: Mike Rapoport <rppt@kernel.org>
R: Peter Xu <peterx@redhat.com>
L: linux-mm@kvack.org
S: Maintained
@@ -21345,7 +21349,7 @@ F: Documentation/devicetree/bindings/net/qcom,bam-dmux.yaml
F: drivers/net/wwan/qcom_bam_dmux.c
QUALCOMM BLUETOOTH DRIVER
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: drivers/bluetooth/btqca.[ch]
@@ -24571,7 +24575,7 @@ F: drivers/tty/vcc.c
F: include/linux/sunserialcore.h
SPARSE CHECKER
-M: "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
+M: Chris Li <sparse@chrisli.org>
L: linux-sparse@vger.kernel.org
S: Maintained
W: https://sparse.docs.kernel.org/
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 0bd581003cd5..60de63e46b33 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -44,6 +44,7 @@ struct gen_pool;
* @nr: The number of zeroed bits we're looking for
* @data: optional additional data used by the callback
* @pool: the pool being allocated from
+ * @start_addr: start address of memory chunk
*/
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index f335c1d7b61d..9c6ac4b62eb9 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -28,6 +28,7 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t;
#define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u)
+#define KASAN_VMALLOC_KEEP_TAG ((__force kasan_vmalloc_flags_t)0x08u)
#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply exsiting page range */
#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */
@@ -630,6 +631,16 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
__kasan_poison_vmalloc(start, size);
}
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ kasan_vmalloc_flags_t flags);
+static __always_inline void
+kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ kasan_vmalloc_flags_t flags)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_vmap_areas(vms, nr_vms, flags);
+}
+
#else /* CONFIG_KASAN_VMALLOC */
static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -654,6 +665,11 @@ static inline void *kasan_unpoison_vmalloc(const void *start,
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
+static __always_inline void
+kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ kasan_vmalloc_flags_t flags)
+{ }
+
#endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index ff7e231b0485..8a22bc9b8c6c 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -530,7 +530,7 @@ extern bool kexec_file_dbg_print;
#define kexec_dprintk(fmt, arg...) \
do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
-extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void *kimage_map_segment(struct kimage *image, int idx);
extern void kimage_unmap_segment(void *buffer);
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
@@ -540,7 +540,7 @@ static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
static inline int kexec_crash_loaded(void) { return 0; }
-static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+static inline void *kimage_map_segment(struct kimage *image, int idx)
{ return NULL; }
static inline void kimage_unmap_segment(void *buffer) { }
#define kexec_in_progress false
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
index cfafe7a5e7b1..a9ff94b744f2 100644
--- a/include/linux/leafops.h
+++ b/include/linux/leafops.h
@@ -133,7 +133,7 @@ static inline bool softleaf_is_none(softleaf_t entry)
/**
* softleaf_type() - Identify the type of leaf entry.
- * @enntry: Leaf entry.
+ * @entry: Leaf entry.
*
* Returns: the leaf entry type associated with @entry.
*/
@@ -534,7 +534,7 @@ static inline bool pte_is_uffd_wp_marker(pte_t pte)
/**
* pte_is_uffd_marker() - Does this PTE entry encode a userfault-specific marker
* leaf entry?
- * @entry: Leaf entry.
+ * @pte: PTE entry.
*
* It's useful to be able to determine which leaf entries encode UFFD-specific
* markers so we can handle these correctly.
diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
index bc326503d2d2..7b5e11cf905f 100644
--- a/include/linux/memory-failure.h
+++ b/include/linux/memory-failure.h
@@ -9,6 +9,8 @@ struct pfn_address_space;
struct pfn_address_space {
struct interval_tree_node node;
struct address_space *mapping;
+ int (*pfn_to_vma_pgoff)(struct vm_area_struct *vma,
+ unsigned long pfn, pgoff_t *pgoff);
};
int register_pfn_address_space(struct pfn_address_space *pfn_space);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 15076261d0c2..6f959d8ca4b4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2459,10 +2459,10 @@ static inline int folio_expected_ref_count(const struct folio *folio)
if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
return 0;
- if (folio_test_anon(folio)) {
- /* One reference per page from the swapcache. */
- ref_count += folio_test_swapcache(folio) << order;
- } else {
+ /* One reference per page from the swapcache. */
+ ref_count += folio_test_swapcache(folio) << order;
+
+ if (!folio_test_anon(folio)) {
/* One reference per page from the pagecache. */
ref_count += !!folio->mapping << order;
/* One reference from PG_private. */
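The mm.h change moves the swapcache accounting out of the anon-only branch, so non-anonymous (shmem) folios sitting in the swap cache contribute their per-page references too. A minimal userspace sketch of the resulting arithmetic, modeling the folio flags as plain booleans (the helper names here are illustrative, not the kernel API):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative model of folio_expected_ref_count() after the fix:
 * every page of a folio in the swap cache holds one reference,
 * whether the folio is anonymous or shmem-backed. */
static int expected_refs(bool anon, bool swapcache, bool has_mapping,
			 bool has_private, unsigned int order)
{
	int ref_count = 0;

	/* One reference per page from the swapcache. */
	ref_count += (swapcache ? 1 : 0) << order;

	if (!anon) {
		/* One reference per page from the pagecache. */
		ref_count += (has_mapping ? 1 : 0) << order;
		/* One reference from PG_private. */
		ref_count += has_private ? 1 : 0;
	}
	return ref_count;
}

int main(void)
{
	/* An order-2 (4-page) shmem folio in the swap cache: the old
	 * code counted 0 swapcache references for it, now it counts 4. */
	printf("%d\n", expected_refs(false, true, false, false, 2));
	return 0;
}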
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 0f92acdd354d..95c585c6ddc3 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -953,17 +953,24 @@ int kimage_load_segment(struct kimage *image, int idx)
return result;
}
-void *kimage_map_segment(struct kimage *image,
- unsigned long addr, unsigned long size)
+void *kimage_map_segment(struct kimage *image, int idx)
{
+ unsigned long addr, size, eaddr;
unsigned long src_page_addr, dest_page_addr = 0;
- unsigned long eaddr = addr + size;
kimage_entry_t *ptr, entry;
struct page **src_pages;
unsigned int npages;
+ struct page *cma;
void *vaddr = NULL;
int i;
+ cma = image->segment_cma[idx];
+ if (cma)
+ return page_address(cma);
+
+ addr = image->segment[idx].mem;
+ size = image->segment[idx].memsz;
+ eaddr = addr + size;
/*
* Collect the source pages and map them in a contiguous VA range.
*/
@@ -1004,7 +1011,8 @@ void *kimage_map_segment(struct kimage *image,
void kimage_unmap_segment(void *segment_buffer)
{
- vunmap(segment_buffer);
+ if (is_vmalloc_addr(segment_buffer))
+ vunmap(segment_buffer);
}
struct kexec_load_limit {
diff --git a/lib/idr.c b/lib/idr.c
index e2adc457abb4..457430cff8c5 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -40,6 +40,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
idr->idr_rt.xa_flags |= IDR_RT_MARKER;
+ if (max < base)
+ return -ENOSPC;
id = (id < base) ? 0 : id - base;
radix_tree_iter_init(&iter, id);
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 2750c88e7225..23ed738a0bd6 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -743,7 +743,7 @@ huge_out:
if (!folio)
continue;
if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
- return 0;
+ continue;
damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
migration_lists);
nr = folio_nr_pages(folio);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 1d27f1bd260b..ed489a14dddf 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -28,6 +28,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
+#include <linux/vmalloc.h>
#include "kasan.h"
#include "../slab.h"
@@ -575,3 +576,34 @@ bool __kasan_check_byte(const void *address, unsigned long ip)
}
return true;
}
+
+#ifdef CONFIG_KASAN_VMALLOC
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ kasan_vmalloc_flags_t flags)
+{
+ unsigned long size;
+ void *addr;
+ int area;
+ u8 tag;
+
+ /*
+ * If KASAN_VMALLOC_KEEP_TAG was set at this point, all vms[] pointers
+ * would be unpoisoned with the KASAN_TAG_KERNEL which would disable
+ * KASAN checks down the line.
+ */
+ if (WARN_ON_ONCE(flags & KASAN_VMALLOC_KEEP_TAG))
+ return;
+
+ size = vms[0]->size;
+ addr = vms[0]->addr;
+ vms[0]->addr = __kasan_unpoison_vmalloc(addr, size, flags);
+ tag = get_tag(vms[0]->addr);
+
+ for (area = 1 ; area < nr_vms ; area++) {
+ size = vms[area]->size;
+ addr = set_tag(vms[area]->addr, tag);
+ vms[area]->addr =
+ __kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
+ }
+}
+#endif
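The new __kasan_unpoison_vmap_areas() gives the first area a fresh random tag and propagates that same tag to the remaining areas via KASAN_VMALLOC_KEEP_TAG, so a pcpu_get_vm_areas() batch shares one tag. A rough userspace model of the top-byte pointer tagging this relies on, assuming the arm64 TBI-style layout used by the software tag mode (get_tag/set_tag here are simplified stand-ins for KASAN's helpers):

#include <stdio.h>
#include <stdint.h>

/* Illustrative model only: SW_TAGS KASAN keeps an 8-bit tag in the
 * top byte of the pointer, which arm64 top-byte-ignore lets the CPU
 * skip on dereference. */
#define TAG_SHIFT	56
#define TAG_MASK	(0xffULL << TAG_SHIFT)

static uint8_t get_tag(uint64_t addr)
{
	return addr >> TAG_SHIFT;
}

static uint64_t set_tag(uint64_t addr, uint8_t tag)
{
	return (addr & ~TAG_MASK) | ((uint64_t)tag << TAG_SHIFT);
}

int main(void)
{
	uint64_t vms0 = 0xffff800010000000ULL;
	uint64_t vms1 = 0xffff800020000000ULL;

	/* First area: assign a fresh tag (random in the kernel). */
	vms0 = set_tag(vms0, 0xab);

	/* Remaining areas: reuse the first area's tag, as the
	 * KASAN_VMALLOC_KEEP_TAG path does. */
	vms1 = set_tag(vms1, get_tag(vms0));

	printf("tags: %#x %#x\n", get_tag(vms0), get_tag(vms1));
	return 0;
}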
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 1c373cc4b3fa..cbef5e450954 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -361,7 +361,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
return (void *)start;
}
- tag = kasan_random_tag();
+ tag = (flags & KASAN_VMALLOC_KEEP_TAG) ? get_tag(start) : kasan_random_tag();
start = set_tag(start, tag);
/* Unpoison and initialize memory up to size. */
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 29a751a8a08d..32fbdf759ea2 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -631,7 +631,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
!(flags & KASAN_VMALLOC_PROT_NORMAL))
return (void *)start;
- start = set_tag(start, kasan_random_tag());
+ if (unlikely(!(flags & KASAN_VMALLOC_KEEP_TAG)))
+ start = set_tag(start, kasan_random_tag());
+
kasan_unpoison(start, size, false);
return (void *)start;
}
diff --git a/mm/ksm.c b/mm/ksm.c
index cfc182255c7b..2d89a7c8b4eb 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -650,7 +650,7 @@ static int break_ksm_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long en
}
}
out_unlock:
- pte_unmap_unlock(ptep, ptl);
+ pte_unmap_unlock(start_ptep, ptl);
return found;
}
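The ksm.c fix restores the standard page-table-walk discipline: pte_offset_map_lock() maps and locks once, and if the loop advances the returned cursor, the unlock must still receive the original pointer. A runnable userspace analogue of the bug's shape, where map_table()/unmap_table() are illustrative stand-ins for pte_offset_map_lock()/pte_unmap_unlock():

#include <stdlib.h>

/* The resource handed out by map_table() must be released with the
 * exact pointer it returned, just like the kernel's PTE mapping. */
static int *map_table(size_t n)
{
	return calloc(n, sizeof(int));
}

static void unmap_table(int *table)
{
	free(table);	/* passing an advanced cursor here is the bug */
}

int main(void)
{
	size_t n = 512;
	int *start_ptep = map_table(n);
	int *ptep = start_ptep;

	/* The walk advances ptep, as break_ksm_pmd_entry() does. */
	for (size_t i = 0; i < n; i++, ptep++)
		*ptep = 0;

	/* Buggy form: unmap_table(ptep) would release an address one
	 * past the table, like pte_unmap_unlock(ptep, ptl) did. */
	unmap_table(start_ptep);
	return 0;
}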
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index be810c1fbfc3..86f43b7e5f71 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5638,6 +5638,6 @@ void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
memcg = root_mem_cgroup;
pr_warn("Memory cgroup min protection %lukB -- low protection %lukB",
- K(atomic_long_read(&memcg->memory.children_min_usage)*PAGE_SIZE),
- K(atomic_long_read(&memcg->memory.children_low_usage)*PAGE_SIZE));
+ K(atomic_long_read(&memcg->memory.children_min_usage)),
+ K(atomic_long_read(&memcg->memory.children_low_usage)));
}
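The memcontrol.c fix removes a double conversion: the kernel's K() macro already turns a page count into kilobytes, so multiplying by PAGE_SIZE first inflated the OOM log values by a factor of PAGE_SIZE. A quick userspace check of the arithmetic, assuming 4 KiB pages and the usual K() definition:

#include <stdio.h>

#define PAGE_SHIFT	12				/* 4 KiB pages assumed */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define K(x)		((x) << (PAGE_SHIFT - 10))	/* pages -> KiB */

int main(void)
{
	unsigned long pages = 10;

	printf("fixed: %lukB\n", K(pages));		/* 40kB */
	printf("buggy: %lukB\n", K(pages * PAGE_SIZE));	/* 163840kB */
	return 0;
}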
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index fbc5a01260c8..c80c2907da33 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2161,6 +2161,9 @@ int register_pfn_address_space(struct pfn_address_space *pfn_space)
{
guard(mutex)(&pfn_space_lock);
+ if (!pfn_space->pfn_to_vma_pgoff)
+ return -EINVAL;
+
if (interval_tree_iter_first(&pfn_space_itree,
pfn_space->node.start,
pfn_space->node.last))
@@ -2183,10 +2186,10 @@ void unregister_pfn_address_space(struct pfn_address_space *pfn_space)
}
EXPORT_SYMBOL_GPL(unregister_pfn_address_space);
-static void add_to_kill_pfn(struct task_struct *tsk,
- struct vm_area_struct *vma,
- struct list_head *to_kill,
- unsigned long pfn)
+static void add_to_kill_pgoff(struct task_struct *tsk,
+ struct vm_area_struct *vma,
+ struct list_head *to_kill,
+ pgoff_t pgoff)
{
struct to_kill *tk;
@@ -2197,12 +2200,12 @@ static void add_to_kill_pfn(struct task_struct *tsk,
}
/* Check for pgoff not backed by struct page */
- tk->addr = vma_address(vma, pfn, 1);
+ tk->addr = vma_address(vma, pgoff, 1);
tk->size_shift = PAGE_SHIFT;
if (tk->addr == -EFAULT)
pr_info("Unable to find address %lx in %s\n",
- pfn, tsk->comm);
+ pgoff, tsk->comm);
get_task_struct(tsk);
tk->tsk = tsk;
@@ -2212,11 +2215,12 @@ static void add_to_kill_pfn(struct task_struct *tsk,
/*
* Collect processes when the error hit a PFN not backed by struct page.
*/
-static void collect_procs_pfn(struct address_space *mapping,
+static void collect_procs_pfn(struct pfn_address_space *pfn_space,
unsigned long pfn, struct list_head *to_kill)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
+ struct address_space *mapping = pfn_space->mapping;
i_mmap_lock_read(mapping);
rcu_read_lock();
@@ -2226,9 +2230,12 @@ static void collect_procs_pfn(struct address_space *mapping,
t = task_early_kill(tsk, true);
if (!t)
continue;
- vma_interval_tree_foreach(vma, &mapping->i_mmap, pfn, pfn) {
- if (vma->vm_mm == t->mm)
- add_to_kill_pfn(t, vma, to_kill, pfn);
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) {
+ pgoff_t pgoff;
+
+ if (vma->vm_mm == t->mm &&
+ !pfn_space->pfn_to_vma_pgoff(vma, pfn, &pgoff))
+ add_to_kill_pgoff(t, vma, to_kill, pgoff);
}
}
rcu_read_unlock();
@@ -2264,7 +2271,7 @@ static int memory_failure_pfn(unsigned long pfn, int flags)
struct pfn_address_space *pfn_space =
container_of(node, struct pfn_address_space, node);
- collect_procs_pfn(pfn_space->mapping, pfn, &tokill);
+ collect_procs_pfn(pfn_space, pfn, &tokill);
mf_handled = true;
}
diff --git a/mm/memremap.c b/mm/memremap.c
index 4c2e0d68eb27..63c6ab4fdf08 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -427,8 +427,6 @@ void free_zone_device_folio(struct folio *folio)
if (folio_test_anon(folio)) {
for (i = 0; i < nr; i++)
__ClearPageAnonExclusive(folio_page(folio, i));
- } else {
- VM_WARN_ON_ONCE(folio_test_large(folio));
}
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 822e05f1a964..c380f063e8b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -914,6 +914,17 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
NULL) != NULL;
}
+static void change_pageblock_range(struct page *pageblock_page,
+ int start_order, int migratetype)
+{
+ int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+ while (nr_pageblocks--) {
+ set_pageblock_migratetype(pageblock_page, migratetype);
+ pageblock_page += pageblock_nr_pages;
+ }
+}
+
/*
* Freeing function for a buddy system allocator.
*
@@ -1000,7 +1011,7 @@ static inline void __free_one_page(struct page *page,
* expand() down the line puts the sub-blocks
* on the right freelists.
*/
- set_pageblock_migratetype(buddy, migratetype);
+ change_pageblock_range(buddy, order, migratetype);
}
combined_pfn = buddy_pfn & pfn;
@@ -2147,17 +2158,6 @@ bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *pag
#endif /* CONFIG_MEMORY_ISOLATION */
-static void change_pageblock_range(struct page *pageblock_page,
- int start_order, int migratetype)
-{
- int nr_pageblocks = 1 << (start_order - pageblock_order);
-
- while (nr_pageblocks--) {
- set_pageblock_migratetype(pageblock_page, migratetype);
- pageblock_page += pageblock_nr_pages;
- }
-}
-
static inline bool boost_watermark(struct zone *zone)
{
unsigned long max_boost;
@@ -5924,7 +5924,7 @@ static int zone_batchsize(struct zone *zone)
* recycled, this leads to the once large chunks of space being
* fragmented and becoming unavailable for high-order allocations.
*/
- return 0;
+ return 1;
#endif
}
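Hoisting change_pageblock_range() above __free_one_page() lets the coalescing path retag every pageblock a merged buddy covers, not just the first. The span arithmetic, assuming the common pageblock_order of 9:

#include <stdio.h>

#define pageblock_order		9	/* common configuration assumed */
#define pageblock_nr_pages	(1UL << pageblock_order)

int main(void)
{
	/* An order-11 buddy covers 1 << (11 - 9) = 4 pageblocks; setting
	 * the migratetype on only the first would leave three stale. */
	int start_order = 11;
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	printf("order-%d buddy spans %d pageblocks of %lu pages\n",
	       start_order, nr_pageblocks, pageblock_nr_pages);
	return 0;
}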
diff --git a/mm/page_owner.c b/mm/page_owner.c
index a70245684206..b3260f0c17ba 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -952,7 +952,7 @@ static const struct file_operations page_owner_stack_fops = {
.open = page_owner_stack_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_private,
};
static int page_owner_threshold_get(void *data, u64 *val)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ecbac900c35f..41dd01e8430c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4331,7 +4331,9 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
*/
if (size <= alloced_size) {
kasan_unpoison_vmalloc(p + old_size, size - old_size,
- KASAN_VMALLOC_PROT_NORMAL);
+ KASAN_VMALLOC_PROT_NORMAL |
+ KASAN_VMALLOC_VM_ALLOC |
+ KASAN_VMALLOC_KEEP_TAG);
/*
* No need to zero memory here, as unused memory will have
* already been zeroed at initial allocation time or during
@@ -5025,9 +5027,7 @@ retry:
* With hardware tag-based KASAN, marking is skipped for
* non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
*/
- for (area = 0; area < nr_vms; area++)
- vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
- vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
+ kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL);
kfree(vas);
return vms;
diff --git a/rust/kernel/maple_tree.rs b/rust/kernel/maple_tree.rs
index e72eec56bf57..265d6396a78a 100644
--- a/rust/kernel/maple_tree.rs
+++ b/rust/kernel/maple_tree.rs
@@ -265,7 +265,16 @@ impl<T: ForeignOwnable> MapleTree<T> {
loop {
// This uses the raw accessor because we're destroying pointers without removing them
// from the maple tree, which is only valid because this is the destructor.
- let ptr = ma_state.mas_find_raw(usize::MAX);
+ //
+ // Take the rcu lock because mas_find_raw() requires that you hold either the spinlock
+ // or the rcu read lock. This is only really required if memory reclaim might
+ // reallocate entries in the tree, as we otherwise have exclusive access. That feature
+ // doesn't exist yet, so for now, taking the rcu lock only serves the purpose of
+ // silencing lockdep.
+ let ptr = {
+ let _rcu = kernel::sync::rcu::Guard::new();
+ ma_state.mas_find_raw(usize::MAX)
+ };
if ptr.is_null() {
break;
}
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index 7362f68f2d8b..5beb69edd12f 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -250,9 +250,7 @@ void ima_kexec_post_load(struct kimage *image)
if (!image->ima_buffer_addr)
return;
- ima_kexec_buffer = kimage_map_segment(image,
- image->ima_buffer_addr,
- image->ima_buffer_size);
+ ima_kexec_buffer = kimage_map_segment(image, image->ima_segment_index);
if (!ima_kexec_buffer) {
pr_err("Could not map measurements buffer.\n");
return;
diff --git a/tools/mm/page_owner_sort.c b/tools/mm/page_owner_sort.c
index 14c67e9e84c4..e6954909401c 100644
--- a/tools/mm/page_owner_sort.c
+++ b/tools/mm/page_owner_sort.c
@@ -181,7 +181,11 @@ static int compare_ts(const void *p1, const void *p2)
{
const struct block_list *l1 = p1, *l2 = p2;
- return l1->ts_nsec < l2->ts_nsec ? -1 : 1;
+ if (l1->ts_nsec < l2->ts_nsec)
+ return -1;
+ if (l1->ts_nsec > l2->ts_nsec)
+ return 1;
+ return 0;
}
static int compare_cull_condition(const void *p1, const void *p2)
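The comparator fix matters for two reasons: returning only -1 or 1 makes equal keys compare inconsistently in both directions, which qsort() may answer with an arbitrary ordering, and the tempting l1->ts_nsec - l2->ts_nsec shortcut would truncate a 64-bit difference into the int return value. A standalone demonstration of the corrected three-way form:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct block { uint64_t ts_nsec; };

/* Three-way comparison: returns 0 for equal keys so the ordering is
 * well defined; no subtraction, since a u64 difference cannot be
 * represented in the int return value. */
static int compare_ts(const void *p1, const void *p2)
{
	const struct block *l1 = p1, *l2 = p2;

	if (l1->ts_nsec < l2->ts_nsec)
		return -1;
	if (l1->ts_nsec > l2->ts_nsec)
		return 1;
	return 0;
}

int main(void)
{
	struct block blocks[] = { {30}, {10}, {10}, {20} };

	qsort(blocks, 4, sizeof(blocks[0]), compare_ts);
	for (int i = 0; i < 4; i++)
		printf("%llu ", (unsigned long long)blocks[i].ts_nsec);
	printf("\n");	/* 10 10 20 30 */
	return 0;
}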
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 2f830ff8396c..945144e98507 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -57,6 +57,26 @@ void idr_alloc_test(void)
idr_destroy(&idr);
}
+void idr_alloc2_test(void)
+{
+ int id;
+ struct idr idr = IDR_INIT_BASE(idr, 1);
+
+ id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
+ assert(id == -ENOSPC);
+
+ id = idr_alloc(&idr, idr_alloc2_test, 1, 2, GFP_KERNEL);
+ assert(id == 1);
+
+ id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
+ assert(id == -ENOSPC);
+
+ id = idr_alloc(&idr, idr_alloc2_test, 0, 2, GFP_KERNEL);
+ assert(id == -ENOSPC);
+
+ idr_destroy(&idr);
+}
+
void idr_replace_test(void)
{
DEFINE_IDR(idr);
@@ -409,6 +429,7 @@ void idr_checks(void)
idr_replace_test();
idr_alloc_test();
+ idr_alloc2_test();
idr_null_test();
idr_nowait_test();
idr_get_next_test(0);
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index f4807242c5b2..6f5e404a446c 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -1317,7 +1317,7 @@ static thread_state thread_state_get(pid_t tid)
p = strstr(tmp, header);
if (p) {
/* For example, "State:\tD (disk sleep)" */
- c = *(p + sizeof(header) - 1);
+ c = *(p + strlen(header));
return c == 'D' ?
THR_STATE_UNINTERRUPTIBLE : THR_STATE_UNKNOWN;
}
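The selftest fix is the classic sizeof-on-a-pointer trap: sizeof applied to a string pointer yields the pointer width, not the string length, so the offset into "State:\tD (disk sleep)" was architecture-dependent, while strlen() is correct everywhere. The declaration of header is not visible in this hunk; the sketch below assumes a plain string pointer:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumed shape of the selftest's declaration. */
	const char *header = "State:\t";
	const char *line = "State:\tD (disk sleep)";

	/* sizeof a pointer is the pointer width (8 on 64-bit, 4 on
	 * 32-bit); strlen is the string length (7 here) on every arch. */
	printf("sizeof=%zu strlen=%zu\n", sizeof(header), strlen(header));

	/* Offsetting by strlen(header) lands on the state character. */
	printf("state = %c\n", line[strlen(header)]);	/* D */
	return 0;
}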