Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c | 151
1 file changed, 88 insertions(+), 63 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 425401b2d4e1..aee61a980374 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -43,8 +43,6 @@
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/pagewalk.h>
-#include <linux/balloon_compaction.h>
-#include <linux/zsmalloc.h>
#include <asm/tlbflush.h>
@@ -53,6 +51,33 @@
#include "internal.h"
#include "swap.h"
+static const struct movable_operations *offline_movable_ops;
+static const struct movable_operations *zsmalloc_movable_ops;
+
+int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
+{
+ /*
+ * We only allow for selected types and don't handle concurrent
+ * registration attempts yet.
+ */
+ switch (type) {
+ case PGTY_offline:
+ if (offline_movable_ops && ops)
+ return -EBUSY;
+ offline_movable_ops = ops;
+ break;
+ case PGTY_zsmalloc:
+ if (zsmalloc_movable_ops && ops)
+ return -EBUSY;
+ zsmalloc_movable_ops = ops;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(set_movable_ops);
+
static const struct movable_operations *page_movable_ops(struct page *page)
{
VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
@@ -62,15 +87,12 @@ static const struct movable_operations *page_movable_ops(struct page *page)
* it as movable, the page type must be sticky until the page gets freed
* back to the buddy.
*/
-#ifdef CONFIG_BALLOON_COMPACTION
if (PageOffline(page))
/* Only balloon compaction sets PageOffline pages movable. */
- return &balloon_mops;
-#endif /* CONFIG_BALLOON_COMPACTION */
-#if defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION)
+ return offline_movable_ops;
if (PageZsmalloc(page))
- return &zsmalloc_mops;
-#endif /* defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION) */
+ return zsmalloc_movable_ops;
+
return NULL;
}
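The #ifdef-based dispatch above is replaced by pointers filled in at runtime through set_movable_ops(). For illustration, a minimal sketch of a provider hooking into the new scheme; the driver names are hypothetical, and the real registration sites would live in the corresponding balloon_compaction and zsmalloc changes of this series:

#include <linux/migrate.h>
#include <linux/module.h>

/* Hypothetical movable_operations provider; names are illustrative. */
static bool my_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* take internal locks, mark the page busy for migration */
	return true;
}

static int my_migrate_page(struct page *dst, struct page *src,
			   enum migrate_mode mode)
{
	/* copy the payload and repoint internal references to dst */
	return 0;	/* 0 on success, negative errno on failure */
}

static void my_putback_page(struct page *page)
{
	/* undo isolation after an aborted or failed migration */
}

static const struct movable_operations my_mops = {
	.isolate_page	= my_isolate_page,
	.migrate_page	= my_migrate_page,
	.putback_page	= my_putback_page,
};

static int __init my_init(void)
{
	/* fails with -EBUSY if another provider already registered */
	return set_movable_ops(&my_mops, PGTY_zsmalloc);
}

static void __exit my_exit(void)
{
	/* passing NULL unregisters; lookups then return NULL again */
	set_movable_ops(NULL, PGTY_zsmalloc);
}
module_init(my_init);
module_exit(my_exit);

Note that the guard in set_movable_ops() only rejects registering over an existing non-NULL pointer, so the NULL unregister path always succeeds.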
@@ -209,18 +231,17 @@ static void putback_movable_ops_page(struct page *page)
* src and dst are also released by migration core. These pages will not be
* folios in the future, so that must be reworked.
*
- * Returns MIGRATEPAGE_SUCCESS on success, otherwise a negative error
- * code.
+ * Returns 0 on success, otherwise a negative error code.
*/
static int migrate_movable_ops_page(struct page *dst, struct page *src,
enum migrate_mode mode)
{
- int rc = MIGRATEPAGE_SUCCESS;
+ int rc;
VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);
rc = page_movable_ops(src)->migrate_page(dst, src, mode);
- if (rc == MIGRATEPAGE_SUCCESS)
+ if (!rc)
ClearPageMovableOpsIsolated(src);
return rc;
}
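MIGRATEPAGE_SUCCESS has been defined as 0, so the conversions throughout this patch are behavior-preserving renames: success becomes plain 0 and callers can use ordinary errno-style truth tests. A caller-side sketch (hypothetical wrapper inside migrate.c, not part of this diff):

static int demo_migrate_one(struct page *dst, struct page *src,
			    enum migrate_mode mode)
{
	int rc = migrate_movable_ops_page(dst, src, mode);

	if (rc)			/* negative errno, e.g. -EAGAIN */
		return rc;
	/* success: PageMovableOpsIsolated was cleared on src */
	return 0;
}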
@@ -542,10 +563,10 @@ static int __folio_migrate_mapping(struct address_space *mapping,
struct folio *newfolio, struct folio *folio, int expected_count)
{
XA_STATE(xas, &mapping->i_pages, folio_index(folio));
+ struct swap_cluster_info *ci = NULL;
struct zone *oldzone, *newzone;
int dirty;
long nr = folio_nr_pages(folio);
- long entries, i;
if (!mapping) {
/* Take off deferred split queue while frozen and memcg set */
@@ -565,15 +586,22 @@ static int __folio_migrate_mapping(struct address_space *mapping,
if (folio_test_swapbacked(folio))
__folio_set_swapbacked(newfolio);
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
oldzone = folio_zone(folio);
newzone = folio_zone(newfolio);
- xas_lock_irq(&xas);
+ if (folio_test_swapcache(folio))
+ ci = swap_cluster_get_and_lock_irq(folio);
+ else
+ xas_lock_irq(&xas);
+
if (!folio_ref_freeze(folio, expected_count)) {
- xas_unlock_irq(&xas);
+ if (ci)
+ swap_cluster_unlock_irq(ci);
+ else
+ xas_unlock_irq(&xas);
return -EAGAIN;
}
@@ -594,9 +622,6 @@ static int __folio_migrate_mapping(struct address_space *mapping,
if (folio_test_swapcache(folio)) {
folio_set_swapcache(newfolio);
newfolio->private = folio_get_private(folio);
- entries = nr;
- } else {
- entries = 1;
}
/* Move dirty while folio refs frozen and newfolio not yet exposed */
@@ -606,11 +631,10 @@ static int __folio_migrate_mapping(struct address_space *mapping,
folio_set_dirty(newfolio);
}
- /* Swap cache still stores N entries instead of a high-order entry */
- for (i = 0; i < entries; i++) {
+ if (folio_test_swapcache(folio))
+ __swap_cache_replace_folio(ci, folio, newfolio);
+ else
xas_store(&xas, newfolio);
- xas_next(&xas);
- }
/*
* Drop cache reference from old folio by unfreezing
@@ -619,8 +643,11 @@ static int __folio_migrate_mapping(struct address_space *mapping,
*/
folio_ref_unfreeze(folio, expected_count - nr);
- xas_unlock(&xas);
/* Leave irq disabled to prevent preemption while updating stats */
+ if (ci)
+ swap_cluster_unlock(ci);
+ else
+ xas_unlock(&xas);
/*
* If moved to a different zone then also account
@@ -666,7 +693,7 @@ static int __folio_migrate_mapping(struct address_space *mapping,
}
local_irq_enable();
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
int folio_migrate_mapping(struct address_space *mapping,
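Swap-cache folios are now indexed through swap clusters, so migration serializes against swap via the cluster lock while everything else keeps the mapping's XArray lock, and a single __swap_cache_replace_folio() call replaces the old per-entry xas_store() loop. The pattern repeated in the hunks above, condensed into hypothetical helpers (not part of this diff; the swap_cluster_* helpers come from this series' swap table work):

static struct swap_cluster_info *
migrate_lock_mapping(struct folio *folio, struct xa_state *xas)
{
	/* swap cache: pin and lock the folio's swap cluster */
	if (folio_test_swapcache(folio))
		return swap_cluster_get_and_lock_irq(folio);
	/* file or anon pagecache: the XArray lock still rules */
	xas_lock_irq(xas);
	return NULL;
}

static void migrate_unlock_mapping(struct swap_cluster_info *ci,
				   struct xa_state *xas)
{
	if (ci)
		swap_cluster_unlock_irq(ci);
	else
		xas_unlock_irq(xas);
}

The tail of __folio_migrate_mapping() deliberately uses the non-irq unlock variants so interrupts stay disabled while the zone statistics are updated.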
@@ -715,7 +742,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
xas_unlock_irq(&xas);
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
/*
@@ -831,14 +858,14 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
return rc;
rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
- if (rc != MIGRATEPAGE_SUCCESS)
+ if (rc)
return rc;
if (src_private)
folio_attach_private(dst, folio_detach_private(src));
folio_migrate_flags(dst, src);
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
/**
@@ -945,7 +972,7 @@ recheck_buffers:
}
rc = filemap_migrate_folio(mapping, dst, src, mode);
- if (rc != MIGRATEPAGE_SUCCESS)
+ if (rc)
goto unlock_buffers;
bh = head;
@@ -1049,7 +1076,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
*
* Return value:
* < 0 - error code
- * MIGRATEPAGE_SUCCESS - success
+ * 0 - success
*/
static int move_to_new_folio(struct folio *dst, struct folio *src,
enum migrate_mode mode)
@@ -1077,7 +1104,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
else
rc = fallback_migrate_folio(mapping, dst, src, mode);
- if (rc == MIGRATEPAGE_SUCCESS) {
+ if (!rc) {
/*
* For pagecache folios, src->mapping must be cleared before src
* is freed. Anonymous folios must stay anonymous until freed.
@@ -1167,7 +1194,7 @@ static void migrate_folio_done(struct folio *src,
static int migrate_folio_unmap(new_folio_t get_new_folio,
free_folio_t put_new_folio, unsigned long private,
struct folio *src, struct folio **dstp, enum migrate_mode mode,
- enum migrate_reason reason, struct list_head *ret)
+ struct list_head *ret)
{
struct folio *dst;
int rc = -EAGAIN;
@@ -1176,16 +1203,6 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
bool locked = false;
bool dst_locked = false;
- if (folio_ref_count(src) == 1) {
- /* Folio was freed from under us. So we are done. */
- folio_clear_active(src);
- folio_clear_unevictable(src);
- /* free_pages_prepare() will clear PG_isolated. */
- list_del(&src->lru);
- migrate_folio_done(src, reason);
- return MIGRATEPAGE_SUCCESS;
- }
-
dst = get_new_folio(src, private);
if (!dst)
return -ENOMEM;
@@ -1275,7 +1292,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
if (unlikely(page_has_movable_ops(&src->page))) {
__migrate_folio_record(dst, old_page_state, anon_vma);
- return MIGRATEPAGE_UNMAP;
+ return 0;
}
/*
@@ -1305,7 +1322,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
if (!folio_mapped(src)) {
__migrate_folio_record(dst, old_page_state, anon_vma);
- return MIGRATEPAGE_UNMAP;
+ return 0;
}
out:
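With the freed-folio fast path hoisted out of migrate_folio_unmap() (see the final hunks below), the function can no longer complete a migration by itself, so the MIGRATEPAGE_UNMAP/MIGRATEPAGE_SUCCESS distinction collapses: 0 now uniformly means "src unmapped, state recorded for the move phase". The resulting contract, summarized as a descriptive comment (not from the diff):

/*
 * migrate_folio_unmap() return values after this change:
 *   0        - src is unmapped; src/dst state recorded via
 *              __migrate_folio_record() for the later move phase
 *   -EAGAIN  - transient failure; folio stays on the from-list
 *   -ENOMEM  - no dst folio could be allocated; stop batching
 *   other    - hard failure; folio is moved to ret_folios
 */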
@@ -1437,7 +1454,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
if (folio_ref_count(src) == 1) {
/* page was freed from under us. So we are done. */
folio_putback_hugetlb(src);
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
dst = get_new_folio(src, private);
@@ -1500,8 +1517,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
rc = move_to_new_folio(dst, src, mode);
if (page_was_mapped)
- remove_migration_ptes(src,
- rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
+ remove_migration_ptes(src, !rc ? dst : src, 0);
unlock_put_anon:
folio_unlock(dst);
@@ -1510,7 +1526,7 @@ put_anon:
if (anon_vma)
put_anon_vma(anon_vma);
- if (rc == MIGRATEPAGE_SUCCESS) {
+ if (!rc) {
move_hugetlb_state(src, dst, reason);
put_new_folio = NULL;
}
@@ -1518,7 +1534,7 @@ put_anon:
out_unlock:
folio_unlock(src);
out:
- if (rc == MIGRATEPAGE_SUCCESS)
+ if (!rc)
folio_putback_hugetlb(src);
else if (rc != -EAGAIN)
list_move_tail(&src->lru, ret);
@@ -1628,7 +1644,7 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
reason, ret_folios);
/*
* The rules are:
- * Success: hugetlb folio will be put back
+ * 0: hugetlb folio will be put back
* -EAGAIN: stay on the from list
* -ENOMEM: stay on the from list
* Other errno: put on ret_folios list
@@ -1645,7 +1661,7 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
retry++;
nr_retry_pages += nr_pages;
break;
- case MIGRATEPAGE_SUCCESS:
+ case 0:
stats->nr_succeeded += nr_pages;
break;
default:
@@ -1699,7 +1715,7 @@ static void migrate_folios_move(struct list_head *src_folios,
reason, ret_folios);
/*
* The rules are:
- * Success: folio will be freed
+ * 0: folio will be freed
* -EAGAIN: stay on the unmap_folios list
* Other errno: put on ret_folios list
*/
@@ -1709,7 +1725,7 @@ static void migrate_folios_move(struct list_head *src_folios,
*thp_retry += is_thp;
*nr_retry_pages += nr_pages;
break;
- case MIGRATEPAGE_SUCCESS:
+ case 0:
stats->nr_succeeded += nr_pages;
stats->nr_thp_succeeded += is_thp;
break;
@@ -1848,14 +1864,27 @@ static int migrate_pages_batch(struct list_head *from,
continue;
}
+ /*
+ * If we are holding the last folio reference, the folio
+ * was freed from under us, so just drop our reference.
+ */
+ if (likely(!page_has_movable_ops(&folio->page)) &&
+ folio_ref_count(folio) == 1) {
+ folio_clear_active(folio);
+ folio_clear_unevictable(folio);
+ list_del(&folio->lru);
+ migrate_folio_done(folio, reason);
+ stats->nr_succeeded += nr_pages;
+ stats->nr_thp_succeeded += is_thp;
+ continue;
+ }
+
rc = migrate_folio_unmap(get_new_folio, put_new_folio,
- private, folio, &dst, mode, reason,
- ret_folios);
+ private, folio, &dst, mode, ret_folios);
/*
* The rules are:
- * Success: folio will be freed
- * Unmap: folio will be put on unmap_folios list,
- * dst folio put on dst_folios list
+ * 0: folio will be put on unmap_folios list,
+ * dst folio put on dst_folios list
* -EAGAIN: stay on the from list
* -ENOMEM: stay on the from list
* Other errno: put on ret_folios list
@@ -1905,11 +1934,7 @@ static int migrate_pages_batch(struct list_head *from,
thp_retry += is_thp;
nr_retry_pages += nr_pages;
break;
- case MIGRATEPAGE_SUCCESS:
- stats->nr_succeeded += nr_pages;
- stats->nr_thp_succeeded += is_thp;
- break;
- case MIGRATEPAGE_UNMAP:
+ case 0:
list_move_tail(&folio->lru, &unmap_folios);
list_add_tail(&dst->lru, &dst_folios);
break;
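The fast path added in migrate_pages_batch() above relies on a refcount invariant: after isolation the migration core holds exactly one reference, so folio_ref_count() == 1 means every other user has already dropped the folio and there is nothing left to migrate. Movable-ops pages are excluded, presumably because their references follow different rules and a count of 1 does not imply "freed" there. A hypothetical predicate capturing the condition:

static bool folio_was_freed_under_us(struct folio *folio)
{
	/* movable-ops pages manage references differently: skip them */
	if (page_has_movable_ops(&folio->page))
		return false;
	/* ours is the only remaining reference, taken at isolation */
	return folio_ref_count(folio) == 1;
}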