Diffstat (limited to 'mm')

 mm/hugetlb.c     | 131
 mm/init-mm.c     |   5
 mm/internal.h    |   8
 mm/kfence/core.c |  17
 mm/memory.c      |  11
 mm/migrate.c     |  12
 mm/mmu_gather.c  |  33
 mm/page_alloc.c  |   8
 mm/rmap.c        |  45
 mm/slub.c        |   8
 mm/vma.c         |  11
 mm/vmscan.c      |  13

 12 files changed, 183 insertions, 119 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e0ab14020513..a1832da0f623 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5112,7 +5112,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 	unsigned long last_addr_mask;
 	pte_t *src_pte, *dst_pte;
 	struct mmu_notifier_range range;
-	bool shared_pmd = false;
+	struct mmu_gather tlb;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
 				old_end);
@@ -5122,6 +5122,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 	 * range.
 	 */
 	flush_cache_range(vma, range.start, range.end);
+	tlb_gather_mmu_vma(&tlb, vma);
 
 	mmu_notifier_invalidate_range_start(&range);
 	last_addr_mask = hugetlb_mask_last_page(h);
@@ -5138,8 +5139,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 		if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
 			continue;
 
-		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
-			shared_pmd = true;
+		if (huge_pmd_unshare(&tlb, vma, old_addr, src_pte)) {
 			old_addr |= last_addr_mask;
 			new_addr |= last_addr_mask;
 			continue;
@@ -5150,15 +5150,16 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 			break;
 
 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
+		tlb_remove_huge_tlb_entry(h, &tlb, src_pte, old_addr);
 	}
 
-	if (shared_pmd)
-		flush_hugetlb_tlb_range(vma, range.start, range.end);
-	else
-		flush_hugetlb_tlb_range(vma, old_end - len, old_end);
+	tlb_flush_mmu_tlbonly(&tlb);
+	huge_pmd_unshare_flush(&tlb, vma);
+
 	mmu_notifier_invalidate_range_end(&range);
 	i_mmap_unlock_write(mapping);
 	hugetlb_vma_unlock_write(vma);
+	tlb_finish_mmu(&tlb);
 
 	return len + old_addr - old_end;
 }
@@ -5177,7 +5178,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	unsigned long sz = huge_page_size(h);
 	bool adjust_reservation;
 	unsigned long last_addr_mask;
-	bool force_flush = false;
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~huge_page_mask(h));
@@ -5200,10 +5200,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		}
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, address, ptep)) {
+		if (huge_pmd_unshare(tlb, vma, address, ptep)) {
 			spin_unlock(ptl);
-			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
-			force_flush = true;
 			address |= last_addr_mask;
 			continue;
 		}
@@ -5319,21 +5317,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	}
 	tlb_end_vma(tlb, vma);
 
-	/*
-	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
-	 * could defer the flush until now, since by holding i_mmap_rwsem we
-	 * guaranteed that the last reference would not be dropped. But we must
-	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
-	 * dropped and the last reference to the shared PMDs page might be
-	 * dropped as well.
-	 *
-	 * In theory we could defer the freeing of the PMD pages as well, but
-	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
-	 * detect sharing, so we cannot defer the release of the page either.
-	 * Instead, do flush now.
-	 */
-	if (force_flush)
-		tlb_flush_mmu_tlbonly(tlb);
+	huge_pmd_unshare_flush(tlb, vma);
 }
 
 void __hugetlb_zap_begin(struct vm_area_struct *vma,
@@ -6432,11 +6416,11 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 	pte_t pte;
 	struct hstate *h = hstate_vma(vma);
 	long pages = 0, psize = huge_page_size(h);
-	bool shared_pmd = false;
 	struct mmu_notifier_range range;
 	unsigned long last_addr_mask;
 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+	struct mmu_gather tlb;
 
 	/*
 	 * In the case of shared PMDs, the area to flush could be beyond
@@ -6449,6 +6433,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 
 	BUG_ON(address >= end);
 	flush_cache_range(vma, range.start, range.end);
+	tlb_gather_mmu_vma(&tlb, vma);
 
 	mmu_notifier_invalidate_range_start(&range);
 	hugetlb_vma_lock_write(vma);
@@ -6475,7 +6460,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 			}
 		}
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, address, ptep)) {
+		if (huge_pmd_unshare(&tlb, vma, address, ptep)) {
 			/*
 			 * When uffd-wp is enabled on the vma, unshare
 			 * shouldn't happen at all.  Warn about it if it
@@ -6484,7 +6469,6 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
 			pages++;
 			spin_unlock(ptl);
-			shared_pmd = true;
 			address |= last_addr_mask;
 			continue;
 		}
@@ -6545,23 +6529,16 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 			pte = huge_pte_clear_uffd_wp(pte);
 		huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
 		pages++;
+		tlb_remove_huge_tlb_entry(h, &tlb, ptep, address);
 next:
 		spin_unlock(ptl);
 		cond_resched();
 	}
-	/*
-	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
-	 * may have cleared our pud entry and done put_page on the page table:
-	 * once we release i_mmap_rwsem, another task can do the final put_page
-	 * and that page table be reused and filled with junk.  If we actually
-	 * did unshare a page of pmds, flush the range corresponding to the pud.
-	 */
-	if (shared_pmd)
-		flush_hugetlb_tlb_range(vma, range.start, range.end);
-	else
-		flush_hugetlb_tlb_range(vma, start, end);
+
+	tlb_flush_mmu_tlbonly(&tlb);
+	huge_pmd_unshare_flush(&tlb, vma);
 	/*
 	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are
 	 * downgrading page table protection not changing it to point to a new
@@ -6572,6 +6549,7 @@ next:
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	hugetlb_vma_unlock_write(vma);
 	mmu_notifier_invalidate_range_end(&range);
+	tlb_finish_mmu(&tlb);
 
 	return pages > 0 ? (pages << h->order) : pages;
 }
@@ -6928,18 +6906,27 @@ out:
 	return pte;
 }
 
-/*
- * unmap huge page backed by shared pte.
+/**
+ * huge_pmd_unshare - Unmap a pmd table if it is shared by multiple users
+ * @tlb: the current mmu_gather.
+ * @vma: the vma covering the pmd table.
+ * @addr: the address we are trying to unshare.
+ * @ptep: pointer into the (pmd) page table.
  *
- * Called with page table lock held.
+ * Called with the page table lock held, the i_mmap_rwsem held in write mode
+ * and the hugetlb vma lock held in write mode.
  *
- * returns: 1 successfully unmapped a shared pte page
- *	    0 the underlying pte page is not shared, or it is the last user
+ * Note: The caller must call huge_pmd_unshare_flush() before dropping the
+ * i_mmap_rwsem.
+ *
+ * Returns: 1 if it was a shared PMD table and it got unmapped, or 0 if it
+ * was not a shared PMD table.
  */
-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-		     unsigned long addr, pte_t *ptep)
+int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		     unsigned long addr, pte_t *ptep)
 {
 	unsigned long sz = huge_page_size(hstate_vma(vma));
+	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd = pgd_offset(mm, addr);
 	p4d_t *p4d = p4d_offset(pgd, addr);
 	pud_t *pud = pud_offset(p4d, addr);
@@ -6951,18 +6938,36 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
 	hugetlb_vma_assert_locked(vma);
 	pud_clear(pud);
-	/*
-	 * Once our caller drops the rmap lock, some other process might be
-	 * using this page table as a normal, non-hugetlb page table.
-	 * Wait for pending gup_fast() in other threads to finish before letting
-	 * that happen.
-	 */
-	tlb_remove_table_sync_one();
-	ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
+
+	tlb_unshare_pmd_ptdesc(tlb, virt_to_ptdesc(ptep), addr);
+
 	mm_dec_nr_pmds(mm);
 	return 1;
 }
 
+/*
+ * huge_pmd_unshare_flush - Complete a sequence of huge_pmd_unshare() calls
+ * @tlb: the current mmu_gather.
+ * @vma: the vma covering the pmd table.
+ *
+ * Perform necessary TLB flushes or IPI broadcasts to synchronize PMD table
+ * unsharing with concurrent page table walkers.
+ *
+ * This function must be called after a sequence of huge_pmd_unshare()
+ * calls while still holding the i_mmap_rwsem.
+ */
+void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	/*
+	 * We must synchronize page table unsharing such that nobody will
+	 * try reusing a previously-shared page table while it might still
+	 * be in use by previous sharers (TLB, GUP_fast).
+	 */
+	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+
+	tlb_flush_unshared_tables(tlb);
+}
+
 #else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
 
 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -6971,12 +6976,16 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 	return NULL;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-		     unsigned long addr, pte_t *ptep)
+int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		     unsigned long addr, pte_t *ptep)
 {
 	return 0;
 }
 
+void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+}
+
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 				unsigned long *start, unsigned long *end)
 {
@@ -7243,6 +7252,7 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 	unsigned long sz = huge_page_size(h);
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_notifier_range range;
+	struct mmu_gather tlb;
 	unsigned long address;
 	spinlock_t *ptl;
 	pte_t *ptep;
@@ -7254,6 +7264,8 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 		return;
 
 	flush_cache_range(vma, start, end);
+	tlb_gather_mmu_vma(&tlb, vma);
+
 	/*
 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
 	 * we have already done the PUD_SIZE alignment.
@@ -7272,10 +7284,10 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 		if (!ptep)
 			continue;
 		ptl = huge_pte_lock(h, mm, ptep);
-		huge_pmd_unshare(mm, vma, address, ptep);
+		huge_pmd_unshare(&tlb, vma, address, ptep);
 		spin_unlock(ptl);
 	}
-	flush_hugetlb_tlb_range(vma, start, end);
+	huge_pmd_unshare_flush(&tlb, vma);
 	if (take_locks) {
 		i_mmap_unlock_write(vma->vm_file->f_mapping);
 		hugetlb_vma_unlock_write(vma);
@@ -7285,6 +7297,7 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 	 * Documentation/mm/mmu_notifier.rst.
 	 */
 	mmu_notifier_invalidate_range_end(&range);
+	tlb_finish_mmu(&tlb);
 }
 
 /*
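All of the hugetlb.c hunks above converge on one calling convention: unshared PMD tables are recorded in a per-VMA mmu_gather and must be flushed before i_mmap_rwsem can be dropped. A minimal sketch of that lifecycle, with illustrative locals (h, mm, addr, ptep, ptl) standing in for the real per-function state:

	struct mmu_gather tlb;

	flush_cache_range(vma, start, end);
	tlb_gather_mmu_vma(&tlb, vma);	/* per-VMA gather; sets hugetlb page size */

	i_mmap_lock_write(vma->vm_file->f_mapping);
	hugetlb_vma_lock_write(vma);

	ptl = huge_pte_lock(h, mm, ptep);
	if (!huge_pmd_unshare(&tlb, vma, addr, ptep)) {
		/* Not a shared PMD table: modify/unmap the entry itself. */
	}
	spin_unlock(ptl);

	/* Mandatory before the rmap lock is dropped. */
	huge_pmd_unshare_flush(&tlb, vma);

	hugetlb_vma_unlock_write(vma);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	tlb_finish_mmu(&tlb);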
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 4600e7605cab..c5556bb9d5f0 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -44,7 +44,10 @@ struct mm_struct init_mm = {
 	.mm_lock_seq	= SEQCNT_ZERO(init_mm.mm_lock_seq),
 #endif
 	.user_ns	= &init_user_ns,
-	.cpu_bitmap	= CPU_BITS_NONE,
+#ifdef CONFIG_SCHED_MM_CID
+	.mm_cid.lock	= __RAW_SPIN_LOCK_UNLOCKED(init_mm.mm_cid.lock),
+#endif
+	.flexible_array	= MM_STRUCT_FLEXIBLE_ARRAY_INIT,
 	INIT_MM_CONTEXT(init_mm)
 };
 
diff --git a/mm/internal.h b/mm/internal.h
index e430da900430..f35dbcf99a86 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -538,16 +538,8 @@ extern unsigned long highest_memmap_pfn;
 bool folio_isolate_lru(struct folio *folio);
 void folio_putback_lru(struct folio *folio);
 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
-#ifdef CONFIG_NUMA
 int user_proactive_reclaim(char *buf, struct mem_cgroup *memcg, pg_data_t *pgdat);
-#else
-static inline int user_proactive_reclaim(char *buf,
-		struct mem_cgroup *memcg, pg_data_t *pgdat)
-{
-	return 0;
-}
-#endif
 
 /*
  * in mm/rmap.c:
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 577a1699c553..da0f5b6f5744 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -823,6 +823,9 @@ static struct notifier_block kfence_check_canary_notifier = {
 static struct delayed_work kfence_timer;
 
 #ifdef CONFIG_KFENCE_STATIC_KEYS
+/* Wait queue to wake up allocation-gate timer task. */
+static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
+
 static int kfence_reboot_callback(struct notifier_block *nb,
 				  unsigned long action, void *data)
 {
@@ -832,7 +835,12 @@ static int kfence_reboot_callback(struct notifier_block *nb,
 	 */
 	WRITE_ONCE(kfence_enabled, false);
 	/* Cancel any pending timer work */
-	cancel_delayed_work_sync(&kfence_timer);
+	cancel_delayed_work(&kfence_timer);
+	/*
+	 * Wake up any blocked toggle_allocation_gate() so it can complete
+	 * early while the system is still able to handle IPIs.
+	 */
+	wake_up(&allocation_wait);
 	return NOTIFY_OK;
 }
 
@@ -842,9 +850,6 @@ static struct notifier_block kfence_reboot_notifier = {
 	.priority = INT_MAX, /* Run early to stop timers ASAP */
 };
 
-/* Wait queue to wake up allocation-gate timer task. */
-static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
-
 static void wake_up_kfence_timer(struct irq_work *work)
 {
 	wake_up(&allocation_wait);
@@ -873,7 +878,9 @@ static void toggle_allocation_gate(struct work_struct *work)
 
 	/* Enable static key, and await allocation to happen. */
 	static_branch_enable(&kfence_allocation_key);
-	wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate) > 0);
+	wait_event_idle(allocation_wait,
+			atomic_read(&kfence_allocation_gate) > 0 ||
+			!READ_ONCE(kfence_enabled));
 
 	/* Disable static key and reset timer. */
 	static_branch_disable(&kfence_allocation_key);
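The kfence fix is a shutdown-safe wait pattern worth calling out: cancel_delayed_work_sync() would block on a worker that is itself parked in wait_event_idle(), so the reboot path switches to a non-blocking cancel plus an explicit wake-up, and the wait condition gains the shutdown flag. Reduced to its general shape (a sketch with illustrative names, not kfence's):

static struct delayed_work gate_timer;
static DECLARE_WAIT_QUEUE_HEAD(gate_wait);
static atomic_t gate = ATOMIC_INIT(0);
static bool running = true;

static void gate_worker(struct work_struct *work)
{
	/* Wake on progress *or* shutdown, never on progress alone. */
	wait_event_idle(gate_wait,
			atomic_read(&gate) > 0 || !READ_ONCE(running));
}

static void gate_shutdown(void)
{
	WRITE_ONCE(running, false);
	/* Don't wait for the worker; it may be blocked in the wait above. */
	cancel_delayed_work(&gate_timer);
	/* Let a parked worker observe !running and return. */
	wake_up(&gate_wait);
}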
diff --git a/mm/memory.c b/mm/memory.c
index 2a55edc48a65..da360a6eb8a4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1465,7 +1465,11 @@ copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 static bool
 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 {
-	if (src_vma->vm_flags & VM_COPY_ON_FORK)
+	/*
+	 * We check against dst_vma as while sane VMA flags will have been
+	 * copied, VM_UFFD_WP may be set only on dst_vma.
+	 */
+	if (dst_vma->vm_flags & VM_COPY_ON_FORK)
 		return true;
 
 	/*
 	 * The presence of an anon_vma indicates an anonymous VMA has page
@@ -1963,10 +1967,9 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_trans_huge(*pud)) {
-			if (next - addr != HPAGE_PUD_SIZE) {
-				mmap_assert_locked(tlb->mm);
+			if (next - addr != HPAGE_PUD_SIZE)
 				split_huge_pud(vma, pud, addr);
-			} else if (zap_huge_pud(tlb, vma, pud, addr))
+			else if (zap_huge_pud(tlb, vma, pud, addr))
 				goto next;
 			/* fall through */
 		}
diff --git a/mm/migrate.c b/mm/migrate.c
index 5169f9717f60..4688b9e38cd2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1458,6 +1458,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 	int page_was_mapped = 0;
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
+	enum ttu_flags ttu = 0;
 
 	if (folio_ref_count(src) == 1) {
 		/* page was freed from under us. So we are done. */
@@ -1498,8 +1499,6 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 		goto put_anon;
 
 	if (folio_mapped(src)) {
-		enum ttu_flags ttu = 0;
-
 		if (!folio_test_anon(src)) {
 			/*
 			 * In shared mappings, try_to_unmap could potentially
@@ -1516,16 +1515,17 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 
 		try_to_migrate(src, ttu);
 		page_was_mapped = 1;
-
-		if (ttu & TTU_RMAP_LOCKED)
-			i_mmap_unlock_write(mapping);
 	}
 
 	if (!folio_mapped(src))
 		rc = move_to_new_folio(dst, src, mode);
 
 	if (page_was_mapped)
-		remove_migration_ptes(src, !rc ? dst : src, 0);
+		remove_migration_ptes(src, !rc ? dst : src,
+				      ttu ? RMP_LOCKED : 0);
+
+	if (ttu & TTU_RMAP_LOCKED)
+		i_mmap_unlock_write(mapping);
 
 unlock_put_anon:
 	folio_unlock(dst);
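Note the ordering the migrate.c hunks establish: hoisting ttu out of the folio_mapped() block lets the i_mmap rmap unlock move below remove_migration_ptes(), which is now told about the held lock via RMP_LOCKED. The resulting locking skeleton (a restatement of the final state above, assuming the function's existing locals src, dst, rc, mapping and page_was_mapped):

	enum ttu_flags ttu = 0;

	if (folio_mapped(src)) {
		if (!folio_test_anon(src))
			ttu = TTU_RMAP_LOCKED;	/* i_mmap_rwsem taken here */
		try_to_migrate(src, ttu);
		page_was_mapped = 1;
	}

	if (page_was_mapped)
		remove_migration_ptes(src, !rc ? dst : src,
				      ttu ? RMP_LOCKED : 0);

	/* Only now is it safe to drop the rmap lock. */
	if (ttu & TTU_RMAP_LOCKED)
		i_mmap_unlock_write(mapping);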
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 247e3f9db6c7..7468ec388455 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -10,6 +10,7 @@
 #include <linux/swap.h>
 #include <linux/rmap.h>
 #include <linux/pgalloc.h>
+#include <linux/hugetlb.h>
 
 #include <asm/tlb.h>
 
@@ -426,6 +427,7 @@ static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 #endif
 
 	tlb->vma_pfn = 0;
+	tlb->fully_unshared_tables = 0;
 	__tlb_reset_range(tlb);
 	inc_tlb_flush_pending(tlb->mm);
 }
@@ -460,6 +462,31 @@ void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
 }
 
 /**
+ * tlb_gather_mmu_vma - initialize an mmu_gather structure for operating on a
+ *			single VMA
+ * @tlb: the mmu_gather structure to initialize
+ * @vma: the vm_area_struct
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for operating on
+ * a single VMA. In contrast to tlb_gather_mmu(), calling this function will
+ * not require another call to tlb_start_vma(). In contrast to tlb_start_vma(),
+ * this function will *not* call flush_cache_range().
+ *
+ * For hugetlb VMAs, this function will also initialize the mmu_gather
+ * page_size accordingly, not requiring a separate call to
+ * tlb_change_page_size().
+ *
+ */
+void tlb_gather_mmu_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	tlb_gather_mmu(tlb, vma->vm_mm);
+	tlb_update_vma_flags(tlb, vma);
+	if (is_vm_hugetlb_page(vma))
+		/* All entries have the same size. */
+		tlb_change_page_size(tlb, huge_page_size(hstate_vma(vma)));
+}
+
+/**
  * tlb_finish_mmu - finish an mmu_gather structure
  * @tlb: the mmu_gather structure to finish
  *
@@ -469,6 +496,12 @@ void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
 void tlb_finish_mmu(struct mmu_gather *tlb)
 {
 	/*
+	 * We expect an earlier huge_pmd_unshare_flush() call to sort this out,
+	 * due to complicated locking requirements with page table unsharing.
+	 */
+	VM_WARN_ON_ONCE(tlb->fully_unshared_tables);
+
+	/*
 	 * If there are parallel threads are doing PTE changes on same range
 	 * under non-exclusive lock (e.g., mmap_lock read-side) but defer TLB
 	 * flush by batching, one thread may end up seeing inconsistent PTEs
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f65c4edf199d..cbf758e27aa2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7457,20 +7457,16 @@ bool put_page_back_buddy(struct page *page)
 }
 #endif
 
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void)
+bool has_managed_zone(enum zone_type zone)
 {
 	struct pglist_data *pgdat;
 
 	for_each_online_pgdat(pgdat) {
-		struct zone *zone = &pgdat->node_zones[ZONE_DMA];
-
-		if (managed_zone(zone))
+		if (managed_zone(&pgdat->node_zones[zone]))
 			return true;
 	}
 	return false;
 }
-#endif /* CONFIG_ZONE_DMA */
 
 #ifdef CONFIG_UNACCEPTED_MEMORY
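Per its kerneldoc, tlb_gather_mmu_vma() folds tlb_start_vma() into gather setup while leaving the cache flush to the caller, which is why every hugetlb caller above pairs it with flush_cache_range(). Side by side, a sketch assuming a single-VMA unmap over [start, end):

	struct mmu_gather tlb;

	/* Classic per-VMA sequence: */
	tlb_gather_mmu(&tlb, vma->vm_mm);
	tlb_start_vma(&tlb, vma);	/* calls flush_cache_range() itself */
	/* ... clear entries, tlb_remove_*() ... */
	tlb_end_vma(&tlb, vma);
	tlb_finish_mmu(&tlb);

	/* New helper: no tlb_start_vma(), cache flush stays explicit: */
	flush_cache_range(vma, start, end);
	tlb_gather_mmu_vma(&tlb, vma);
	/* ... clear entries, tlb_remove_*() ... */
	tlb_finish_mmu(&tlb);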
diff --git a/mm/rmap.c b/mm/rmap.c
index f955f02d570e..7b9879ef442d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -76,7 +76,7 @@
 #include <linux/mm_inline.h>
 #include <linux/oom.h>
 
-#include <asm/tlbflush.h>
+#include <asm/tlb.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/migrate.h>
@@ -2008,26 +2008,25 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * if unsuccessful.
 			 */
 			if (!anon) {
+				struct mmu_gather tlb;
+
 				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
 				if (!hugetlb_vma_trylock_write(vma))
 					goto walk_abort;
-				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
+
+				tlb_gather_mmu_vma(&tlb, vma);
+				if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
 					hugetlb_vma_unlock_write(vma);
-					flush_tlb_range(vma,
-						range.start, range.end);
+					huge_pmd_unshare_flush(&tlb, vma);
+					tlb_finish_mmu(&tlb);
 					/*
-					 * The ref count of the PMD page was
-					 * dropped which is part of the way map
-					 * counting is done for shared PMDs.
-					 * Return 'true' here.  When there is
-					 * no other sharing, huge_pmd_unshare
-					 * returns false and we will unmap the
-					 * actual page and drop map count
-					 * to zero.
+					 * The PMD table was unmapped,
+					 * consequently unmapping the folio.
 					 */
 					goto walk_done;
 				}
 				hugetlb_vma_unlock_write(vma);
+				tlb_finish_mmu(&tlb);
 			}
 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
 			if (pte_dirty(pteval))
@@ -2404,31 +2403,29 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * fail if unsuccessful.
 			 */
 			if (!anon) {
+				struct mmu_gather tlb;
+
 				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
 				if (!hugetlb_vma_trylock_write(vma)) {
 					page_vma_mapped_walk_done(&pvmw);
 					ret = false;
 					break;
 				}
-				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
-					hugetlb_vma_unlock_write(vma);
-					flush_tlb_range(vma,
-						range.start, range.end);
+
+				tlb_gather_mmu_vma(&tlb, vma);
+				if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
+					hugetlb_vma_unlock_write(vma);
+					huge_pmd_unshare_flush(&tlb, vma);
+					tlb_finish_mmu(&tlb);
 					/*
-					 * The ref count of the PMD page was
-					 * dropped which is part of the way map
-					 * counting is done for shared PMDs.
-					 * Return 'true' here.  When there is
-					 * no other sharing, huge_pmd_unshare
-					 * returns false and we will unmap the
-					 * actual page and drop map count
-					 * to zero.
+					 * The PMD table was unmapped,
+					 * consequently unmapping the folio.
 					 */
 					page_vma_mapped_walk_done(&pvmw);
 					break;
 				}
 				hugetlb_vma_unlock_write(vma);
+				tlb_finish_mmu(&tlb);
 			}
 
 			/* Nuke the hugetlb page table entry */
 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
diff --git a/mm/slub.c b/mm/slub.c
index 861592ac5425..f77b7407c51b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5694,8 +5694,12 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
 	if (unlikely(!size))
 		return ZERO_SIZE_PTR;
 
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) && (in_nmi() || in_hardirq()))
-		/* kmalloc_nolock() in PREEMPT_RT is not supported from irq */
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
+		/*
+		 * kmalloc_nolock() in PREEMPT_RT is not supported from
+		 * non-preemptible context because local_lock becomes a
+		 * sleeping lock on RT.
+		 */
 		return NULL;
 retry:
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
diff --git a/mm/vma.c b/mm/vma.c
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -37,6 +37,8 @@ struct mmap_state {
 	bool check_ksm_early :1;
 	/* If we map new, hold the file rmap lock on mapping. */
 	bool hold_file_rmap_lock :1;
+	/* If .mmap_prepare changed the file, we don't need to pin. */
+	bool file_doesnt_need_get :1;
 };
 
 #define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_)	\
@@ -2450,7 +2452,9 @@ static int __mmap_new_file_vma(struct mmap_state *map,
 	struct vma_iterator *vmi = map->vmi;
 	int error;
 
-	vma->vm_file = get_file(map->file);
+	vma->vm_file = map->file;
+	if (!map->file_doesnt_need_get)
+		get_file(map->file);
 
 	if (!map->file->f_op->mmap)
 		return 0;
@@ -2638,7 +2642,10 @@ static int call_mmap_prepare(struct mmap_state *map,
 
 	/* Update fields permitted to be changed. */
 	map->pgoff = desc->pgoff;
-	map->file = desc->vm_file;
+	if (desc->vm_file != map->file) {
+		map->file_doesnt_need_get = true;
+		map->file = desc->vm_file;
+	}
 	map->vm_flags = desc->vm_flags;
 	map->page_prot = desc->page_prot;
 
 	/* User-defined fields. */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 670fe9fae5ba..614ccf39fe3f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7707,6 +7707,17 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 	return ret;
 }
 
+#else
+
+static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
+				    unsigned long nr_pages,
+				    struct scan_control *sc)
+{
+	return 0;
+}
+
+#endif
+
 enum {
 	MEMORY_RECLAIM_SWAPPINESS = 0,
 	MEMORY_RECLAIM_SWAPPINESS_MAX,
@@ -7814,8 +7825,6 @@ int user_proactive_reclaim(char *buf,
 	return 0;
 }
 
-#endif
-
 /**
  * check_move_unevictable_folios - Move evictable folios to appropriate zone
  * lru list
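Finally, the internal.h and vmscan.c hunks are two halves of one cleanup: rather than stubbing the user-facing user_proactive_reclaim() under !CONFIG_NUMA, the #ifdef moves inward so only __node_reclaim() keeps a stub and the proactive-reclaim entry point builds on every config. Schematically (a sketch abbreviated from the hunks above):

#ifdef CONFIG_NUMA
/* ... the real __node_reclaim() and node_reclaim() ... */
#else
static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
				    unsigned long nr_pages,
				    struct scan_control *sc)
{
	return 0;	/* nothing reclaimable without node reclaim */
}
#endif

/* Built unconditionally now; compiles down to the stub on !NUMA. */
int user_proactive_reclaim(char *buf, struct mem_cgroup *memcg,
			   pg_data_t *pgdat);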
