Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 71 +++++++++++++++++++++++++++++++++++++----------------------------------
 1 file changed, 37 insertions(+), 34 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7de11524a936..b2fc8b626d3d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -398,14 +398,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
- /*
- * If there are no reclaimable file-backed or anonymous pages,
- * ensure zones with sufficient free pages are not skipped.
- * This prevents zones like DMA32 from being ignored in reclaim
- * scenarios where they can still help alleviate memory pressure.
- */
- if (nr == 0)
- nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
return nr;
}
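With the fallback gone, zone_reclaimable_pages() once again reports only reclaimable pages; callers that need to account for free pages must do so explicitly (see the allow_direct_reclaim() hunk further down). A sketch of the resulting function, with the file-backed counters assumed from the lines just above this hunk's context:

    unsigned long zone_reclaimable_pages(struct zone *zone)
    {
            unsigned long nr;

            /* file-backed pages are always candidates for reclaim */
            nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
                 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
            /* anon pages count only when swap (or demotion) is usable */
            if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
                    nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
                          zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

            return nr;
    }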
@@ -525,7 +518,7 @@ static bool skip_throttle_noprogress(pg_data_t *pgdat)
* If kswapd is disabled, reschedule if necessary but do not
* throttle as the system is likely near OOM.
*/
- if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
+ if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES)
return true;
/*
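This is the read side of converting pgdat->kswapd_failures from a plain int to an atomic_t; the writers are converted in later hunks of this patch. The counter's full lifecycle under the new type, condensed from those hunks:

    /* kswapd made no progress this round */
    atomic_inc(&pgdat->kswapd_failures);

    /* any reclaim progress revives a dormant kswapd */
    atomic_set(&pgdat->kswapd_failures, 0);

    /* too many consecutive failures: hopeless node */
    if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES)
            return true;

With a plain int, increments and resets racing between kswapd and direct reclaim could be lost; atomic_t turns each step into a single load or read-modify-write.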
@@ -737,13 +730,18 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
{
int refcount;
void *shadow = NULL;
+ struct swap_cluster_info *ci;
BUG_ON(!folio_test_locked(folio));
BUG_ON(mapping != folio_mapping(folio));
- if (!folio_test_swapcache(folio))
+ if (folio_test_swapcache(folio)) {
+ ci = swap_cluster_get_and_lock_irq(folio);
+ } else {
spin_lock(&mapping->host->i_lock);
- xa_lock_irq(&mapping->i_pages);
+ xa_lock_irq(&mapping->i_pages);
+ }
+
/*
* The non racy check for a busy folio.
*
@@ -783,9 +781,9 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
if (reclaimed && !mapping_exiting(mapping))
shadow = workingset_eviction(folio, target_memcg);
- __delete_from_swap_cache(folio, swap, shadow);
+ __swap_cache_del_folio(ci, folio, swap, shadow);
memcg1_swapout(folio, swap);
- xa_unlock_irq(&mapping->i_pages);
+ swap_cluster_unlock_irq(ci);
put_swap_folio(folio, swap);
} else {
void (*free_folio)(struct folio *);
@@ -823,9 +821,12 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
return 1;
cannot_free:
- xa_unlock_irq(&mapping->i_pages);
- if (!folio_test_swapcache(folio))
+ if (folio_test_swapcache(folio)) {
+ swap_cluster_unlock_irq(ci);
+ } else {
+ xa_unlock_irq(&mapping->i_pages);
spin_unlock(&mapping->host->i_lock);
+ }
return 0;
}
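Taken together, the three __remove_mapping() hunks move swap-cache folios from the address_space xarray lock to a per-swap-cluster lock. A condensed sketch of the new lock pairing, using the helper names introduced by this patch:

    struct swap_cluster_info *ci = NULL;

    if (folio_test_swapcache(folio)) {
            ci = swap_cluster_get_and_lock_irq(folio);  /* cluster lock, IRQs off */
    } else {
            spin_lock(&mapping->host->i_lock);
            xa_lock_irq(&mapping->i_pages);             /* page-cache path unchanged */
    }

    /* ... refcount and dirty checks, then removal ... */

    if (ci)
            swap_cluster_unlock_irq(ci);
    else
            xa_unlock_irq(&mapping->i_pages);

The success path unlocks via swap_cluster_unlock_irq() immediately after __swap_cache_del_folio(), and the cannot_free path mirrors the same pairing.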
@@ -888,11 +889,11 @@ static bool lru_gen_set_refs(struct folio *folio)
{
/* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
- set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+ set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
return false;
}
- set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset));
+ set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_workingset));
return true;
}
#else
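The flags to flags.f churn here and in the hunks below reflects folio->flags becoming a small wrapper struct rather than a bare unsigned long, so generic bit helpers now take the embedded word. A minimal sketch of the assumed shape:

    typedef struct {
            unsigned long f;        /* the actual flags word */
    } memdesc_flags_t;              /* name assumed from the flags rework */

    /* callers hand the embedded word to generic bit helpers */
    set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));

The conversion is mechanical; bit layout and helper semantics are unchanged.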
@@ -3257,13 +3258,13 @@ static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
/* promote pages accessed through page tables */
static int folio_update_gen(struct folio *folio, int gen)
{
- unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+ unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
/* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
- set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
+ set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
return -1;
}
@@ -3274,7 +3275,7 @@ static int folio_update_gen(struct folio *folio, int gen)
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
new_flags |= ((gen + 1UL) << LRU_GEN_PGOFF) | BIT(PG_workingset);
- } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+ } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
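folio_update_gen() relies on the standard try_cmpxchg() retry loop: snapshot the flags word, compute the new value, and retry if another CPU changed the word in between; on failure try_cmpxchg() reloads the snapshot. The same pattern in isolation, with flags standing in for &folio->flags.f:

    unsigned long new_flags, old_flags = READ_ONCE(*flags);

    do {
            /* recompute from the latest snapshot on every attempt */
            new_flags = old_flags | BIT(PG_workingset);
    } while (!try_cmpxchg(flags, &old_flags, new_flags));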
@@ -3285,7 +3286,7 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
int type = folio_is_file_lru(folio);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
- unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+ unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
@@ -3302,7 +3303,7 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
/* for folio_end_writeback() */
if (reclaiming)
new_flags |= BIT(PG_reclaim);
- } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+ } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
lru_gen_update_size(lruvec, folio, old_gen, new_gen);
@@ -4507,7 +4508,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
}
/* ineligible */
- if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
+ if (zone > sc->reclaim_idx) {
gen = folio_inc_gen(lruvec, folio, false);
list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
@@ -4553,7 +4554,7 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
/* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio))
- set_mask_bits(&folio->flags, LRU_REFS_MASK, 0);
+ set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
/* for shrink_folio_list() */
folio_clear_reclaim(folio);
@@ -4766,7 +4767,7 @@ retry:
/* don't add rejected folios to the oldest generation */
if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type])
- set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_active));
+ set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_active));
}
spin_lock_irq(&lruvec->lru_lock);
@@ -5100,7 +5101,7 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *
blk_finish_plug(&plug);
done:
if (sc->nr_reclaimed > reclaimed)
- pgdat->kswapd_failures = 0;
+ atomic_set(&pgdat->kswapd_failures, 0);
}
/******************************************************************************
@@ -5561,6 +5562,7 @@ static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
if (memcg_id != mem_cgroup_id(memcg))
goto done;
+ sc->target_mem_cgroup = memcg;
lruvec = get_lruvec(memcg, nid);
if (swappiness < MIN_SWAPPINESS)
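run_cmd() previously left sc->target_mem_cgroup NULL even when a specific memcg was being reclaimed through debugfs; setting it lets the rest of reclaim treat the run as cgroup-targeted. For reference, the helper that keys off this field looks roughly like:

    static bool cgroup_reclaim(struct scan_control *sc)
    {
            /* a non-NULL target marks memcg-targeted reclaim */
            return sc->target_mem_cgroup;
    }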
@@ -5597,6 +5599,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
.may_swap = true,
.reclaim_idx = MAX_NR_ZONES - 1,
.gfp_mask = GFP_KERNEL,
+ .proactive = true,
};
buf = kvmalloc(len + 1, GFP_KERNEL);
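Marking the debugfs-driven run as proactive keeps it from being accounted as real memory pressure. One likely consumer of the flag, sketched from shrink_node_memcgs() in this file:

    if (!sc->proactive)
            vmpressure(sc->gfp_mask, memcg, false,
                       sc->nr_scanned - scanned,
                       sc->nr_reclaimed - reclaimed);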
@@ -5772,9 +5775,9 @@ static int __init init_lru_gen(void)
if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
pr_err("lru_gen: failed to create sysfs group\n");
- debugfs_create_file_aux_num("lru_gen", 0644, NULL, NULL, 1,
+ debugfs_create_file_aux_num("lru_gen", 0644, NULL, NULL, false,
&lru_gen_rw_fops);
- debugfs_create_file_aux_num("lru_gen_full", 0444, NULL, NULL, 0,
+ debugfs_create_file_aux_num("lru_gen_full", 0444, NULL, NULL, true,
&lru_gen_ro_fops);
return 0;
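The aux number attached to each debugfs file carries the "full output" flag into the shared show routine, and switching the literals to false/true also corrects the swapped values, since lru_gen_full is the verbose file. The reader side presumably amounts to:

    bool full = debugfs_get_aux_num(m->file);  /* false: lru_gen, true: lru_gen_full */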
@@ -6177,7 +6180,7 @@ again:
* successful direct reclaim run will revive a dormant kswapd.
*/
if (reclaimable)
- pgdat->kswapd_failures = 0;
+ atomic_set(&pgdat->kswapd_failures, 0);
else if (sc->cache_trim_mode)
sc->cache_trim_mode_failed = 1;
}
@@ -6489,11 +6492,11 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
int i;
bool wmark_ok;
- if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
+ if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES)
return true;
for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
- if (!zone_reclaimable_pages(zone))
+ if (!zone_reclaimable_pages(zone) && !zone_page_state_snapshot(zone, NR_FREE_PAGES))
continue;
pfmemalloc_reserve += min_wmark_pages(zone);
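With the NR_FREE_PAGES fallback removed from zone_reclaimable_pages() in the first hunk, the free-page test moves here: a zone is skipped only when it has neither reclaimable nor free pages, so a free-but-unreclaimable zone (the DMA32 case from the deleted comment) still contributes to the watermark math. A sketch of the resulting loop, with the accumulation lines assumed from context:

    for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
            /* skip zones that can contribute nothing at all */
            if (!zone_reclaimable_pages(zone) &&
                !zone_page_state_snapshot(zone, NR_FREE_PAGES))
                    continue;

            pfmemalloc_reserve += min_wmark_pages(zone);
            free_pages += zone_page_state(zone, NR_FREE_PAGES);
    }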
@@ -6899,7 +6902,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
wake_up_all(&pgdat->pfmemalloc_wait);
/* Hopeless node, leave it to direct reclaim */
- if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
+ if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES)
return true;
if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
@@ -7167,7 +7170,7 @@ restart:
}
if (!sc.nr_reclaimed)
- pgdat->kswapd_failures++;
+ atomic_inc(&pgdat->kswapd_failures);
out:
clear_reclaim_active(pgdat, highest_zoneidx);
@@ -7426,7 +7429,7 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
return;
/* Hopeless node, leave it to direct reclaim if possible */
- if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
+ if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES ||
(pgdat_balanced(pgdat, order, highest_zoneidx) &&
!pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
/*