Diffstat (limited to 'mm/damon')
-rw-r--r--  mm/damon/core.c                129
-rw-r--r--  mm/damon/lru_sort.c              3
-rw-r--r--  mm/damon/ops-common.c           40
-rw-r--r--  mm/damon/reclaim.c               3
-rw-r--r--  mm/damon/stat.c                 12
-rw-r--r--  mm/damon/sysfs-schemes.c        59
-rw-r--r--  mm/damon/sysfs.c                63
-rw-r--r--  mm/damon/tests/core-kunit.h    708
-rw-r--r--  mm/damon/tests/sysfs-kunit.h    25
-rw-r--r--  mm/damon/tests/vaddr-kunit.h    26
-rw-r--r--  mm/damon/vaddr.c               143
11 files changed, 1000 insertions, 211 deletions
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 109b050c795a..f9fc0375890a 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -10,6 +10,7 @@
#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/psi.h>
#include <linux/slab.h>
@@ -19,11 +20,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>
-#ifdef CONFIG_DAMON_KUNIT_TEST
-#undef DAMON_MIN_REGION
-#define DAMON_MIN_REGION 1
-#endif
-
static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;
@@ -305,7 +301,7 @@ void damos_add_filter(struct damos *s, struct damos_filter *f)
if (damos_filter_for_ops(f->type))
list_add_tail(&f->list, &s->ops_filters);
else
- list_add_tail(&f->list, &s->filters);
+ list_add_tail(&f->list, &s->core_filters);
}
static void damos_del_filter(struct damos_filter *f)
@@ -396,7 +392,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
*/
scheme->next_apply_sis = 0;
scheme->walk_completed = false;
- INIT_LIST_HEAD(&scheme->filters);
+ INIT_LIST_HEAD(&scheme->core_filters);
INIT_LIST_HEAD(&scheme->ops_filters);
scheme->stat = (struct damos_stat){};
INIT_LIST_HEAD(&scheme->list);
@@ -449,7 +445,7 @@ void damon_destroy_scheme(struct damos *s)
damos_for_each_quota_goal_safe(g, g_next, &s->quota)
damos_destroy_quota_goal(g);
- damos_for_each_filter_safe(f, next, s)
+ damos_for_each_core_filter_safe(f, next, s)
damos_destroy_filter(f);
damos_for_each_ops_filter_safe(f, next, s)
@@ -478,6 +474,7 @@ struct damon_target *damon_new_target(void)
t->nr_regions = 0;
INIT_LIST_HEAD(&t->regions_list);
INIT_LIST_HEAD(&t->list);
+ t->obsolete = false;
return t;
}
@@ -788,6 +785,11 @@ static void damos_commit_quota_goal_union(
case DAMOS_QUOTA_NODE_MEM_FREE_BP:
dst->nid = src->nid;
break;
+ case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
+ case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
+ dst->nid = src->nid;
+ dst->memcg_id = src->memcg_id;
+ break;
default:
break;
}
@@ -857,12 +859,12 @@ static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
return 0;
}
-static struct damos_filter *damos_nth_filter(int n, struct damos *s)
+static struct damos_filter *damos_nth_core_filter(int n, struct damos *s)
{
struct damos_filter *filter;
int i = 0;
- damos_for_each_filter(filter, s) {
+ damos_for_each_core_filter(filter, s) {
if (i++ == n)
return filter;
}
@@ -916,15 +918,15 @@ static int damos_commit_core_filters(struct damos *dst, struct damos *src)
struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
int i = 0, j = 0;
- damos_for_each_filter_safe(dst_filter, next, dst) {
- src_filter = damos_nth_filter(i++, src);
+ damos_for_each_core_filter_safe(dst_filter, next, dst) {
+ src_filter = damos_nth_core_filter(i++, src);
if (src_filter)
damos_commit_filter(dst_filter, src_filter);
else
damos_destroy_filter(dst_filter);
}
- damos_for_each_filter_safe(src_filter, next, src) {
+ damos_for_each_core_filter_safe(src_filter, next, src) {
if (j++ < i)
continue;
@@ -988,41 +990,37 @@ static void damos_set_filters_default_reject(struct damos *s)
s->core_filters_default_reject = false;
else
s->core_filters_default_reject =
- damos_filters_default_reject(&s->filters);
+ damos_filters_default_reject(&s->core_filters);
s->ops_filters_default_reject =
damos_filters_default_reject(&s->ops_filters);
}
-static int damos_commit_dests(struct damos *dst, struct damos *src)
+static int damos_commit_dests(struct damos_migrate_dests *dst,
+ struct damos_migrate_dests *src)
{
- struct damos_migrate_dests *dst_dests, *src_dests;
-
- dst_dests = &dst->migrate_dests;
- src_dests = &src->migrate_dests;
-
- if (dst_dests->nr_dests != src_dests->nr_dests) {
- kfree(dst_dests->node_id_arr);
- kfree(dst_dests->weight_arr);
+ if (dst->nr_dests != src->nr_dests) {
+ kfree(dst->node_id_arr);
+ kfree(dst->weight_arr);
- dst_dests->node_id_arr = kmalloc_array(src_dests->nr_dests,
- sizeof(*dst_dests->node_id_arr), GFP_KERNEL);
- if (!dst_dests->node_id_arr) {
- dst_dests->weight_arr = NULL;
+ dst->node_id_arr = kmalloc_array(src->nr_dests,
+ sizeof(*dst->node_id_arr), GFP_KERNEL);
+ if (!dst->node_id_arr) {
+ dst->weight_arr = NULL;
return -ENOMEM;
}
- dst_dests->weight_arr = kmalloc_array(src_dests->nr_dests,
- sizeof(*dst_dests->weight_arr), GFP_KERNEL);
- if (!dst_dests->weight_arr) {
+ dst->weight_arr = kmalloc_array(src->nr_dests,
+ sizeof(*dst->weight_arr), GFP_KERNEL);
+ if (!dst->weight_arr) {
/* ->node_id_arr will be freed by scheme destruction */
return -ENOMEM;
}
}
- dst_dests->nr_dests = src_dests->nr_dests;
- for (int i = 0; i < src_dests->nr_dests; i++) {
- dst_dests->node_id_arr[i] = src_dests->node_id_arr[i];
- dst_dests->weight_arr[i] = src_dests->weight_arr[i];
+ dst->nr_dests = src->nr_dests;
+ for (int i = 0; i < src->nr_dests; i++) {
+ dst->node_id_arr[i] = src->node_id_arr[i];
+ dst->weight_arr[i] = src->weight_arr[i];
}
return 0;
@@ -1069,7 +1067,7 @@ static int damos_commit(struct damos *dst, struct damos *src)
dst->wmarks = src->wmarks;
dst->target_nid = src->target_nid;
- err = damos_commit_dests(dst, src);
+ err = damos_commit_dests(&dst->migrate_dests, &src->migrate_dests);
if (err)
return err;
@@ -1181,7 +1179,11 @@ static int damon_commit_targets(
damon_for_each_target_safe(dst_target, next, dst) {
src_target = damon_nth_target(i++, src);
- if (src_target) {
+ /*
+ * If src target is obsolete, do not commit the parameters to
+ * the dst target, and further remove the dst target.
+ */
+ if (src_target && !src_target->obsolete) {
err = damon_commit_target(
dst_target, damon_target_has_pid(dst),
src_target, damon_target_has_pid(src),
@@ -1204,6 +1206,9 @@ static int damon_commit_targets(
damon_for_each_target_safe(src_target, next, src) {
if (j++ < i)
continue;
+ /* target to remove has no matching dst */
+ if (src_target->obsolete)
+ return -EINVAL;
new_target = damon_new_target();
if (!new_target)
return -ENOMEM;
@@ -1434,7 +1439,7 @@ bool damon_is_running(struct damon_ctx *ctx)
* Ask DAMON worker thread (kdamond) of @ctx to call a function with an
* argument data that respectively passed via &damon_call_control->fn and
* &damon_call_control->data of @control. If &damon_call_control->repeat of
- * @control is set, further wait until the kdamond finishes handling of the
+ * @control is unset, further wait until the kdamond finishes handling of the
* request. Otherwise, return as soon as the request is made.
*
* The kdamond executes the function with the argument in the main loop, just
@@ -1757,7 +1762,7 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
struct damos_filter *filter;
s->core_filters_allowed = false;
- damos_for_each_filter(filter, s) {
+ damos_for_each_core_filter(filter, s) {
if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
if (filter->allow)
s->core_filters_allowed = true;
@@ -2035,12 +2040,50 @@ static __kernel_ulong_t damos_get_node_mem_bp(
numerator = i.freeram;
return numerator * 10000 / i.totalram;
}
+
+static unsigned long damos_get_node_memcg_used_bp(
+ struct damos_quota_goal *goal)
+{
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+ unsigned long used_pages, numerator;
+ struct sysinfo i;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_id(goal->memcg_id);
+ rcu_read_unlock();
+ if (!memcg) {
+ if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
+ return 0;
+ else /* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
+ return 10000;
+ }
+ mem_cgroup_flush_stats(memcg);
+ lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
+ used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
+ used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
+ used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
+ used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);
+
+ si_meminfo_node(&i, goal->nid);
+ if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
+ numerator = used_pages;
+ else /* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
+ numerator = i.totalram - used_pages;
+ return numerator * 10000 / i.totalram;
+}
#else
static __kernel_ulong_t damos_get_node_mem_bp(
struct damos_quota_goal *goal)
{
return 0;
}
+
+static unsigned long damos_get_node_memcg_used_bp(
+ struct damos_quota_goal *goal)
+{
+ return 0;
+}
#endif
@@ -2061,6 +2104,10 @@ static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
case DAMOS_QUOTA_NODE_MEM_FREE_BP:
goal->current_value = damos_get_node_mem_bp(goal);
break;
+ case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
+ case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
+ goal->current_value = damos_get_node_memcg_used_bp(goal);
+ break;
default:
break;
}
@@ -2770,6 +2817,7 @@ static bool damon_find_biggest_system_ram(unsigned long *start,
* @t: The monitoring target to set the region.
* @start: The pointer to the start address of the region.
* @end: The pointer to the end address of the region.
+ * @min_sz_region: Minimum region size.
*
* This function sets the region of @t as requested by @start and @end. If the
* values of @start and @end are zero, however, this function finds the biggest
@@ -2780,7 +2828,8 @@ static bool damon_find_biggest_system_ram(unsigned long *start,
* Return: 0 on success, negative error code otherwise.
*/
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
- unsigned long *start, unsigned long *end)
+ unsigned long *start, unsigned long *end,
+ unsigned long min_sz_region)
{
struct damon_addr_range addr_range;
@@ -2793,7 +2842,7 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t,
addr_range.start = *start;
addr_range.end = *end;
- return damon_set_regions(t, &addr_range, 1, DAMON_MIN_REGION);
+ return damon_set_regions(t, &addr_range, 1, min_sz_region);
}
/*
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 42b9a656f9de..49b4bc294f4e 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -242,7 +242,8 @@ static int damon_lru_sort_apply_parameters(void)
err = damon_set_region_biggest_system_ram_default(param_target,
&monitor_region_start,
- &monitor_region_end);
+ &monitor_region_end,
+ param_ctx->min_sz_region);
if (err)
goto out;
err = damon_commit_ctx(ctx, param_ctx);
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 998c5180a603..a218d9922234 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -11,7 +11,7 @@
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include "../internal.h"
#include "ops-common.h"
@@ -51,7 +51,7 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
if (likely(pte_present(pteval)))
pfn = pte_pfn(pteval);
else
- pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+ pfn = softleaf_to_pfn(softleaf_from_pte(pteval));
folio = damon_get_folio(pfn);
if (!folio)
@@ -75,12 +75,24 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
+ pmd_t pmdval = pmdp_get(pmd);
+ struct folio *folio;
+ bool young = false;
+ unsigned long pfn;
+ if (likely(pmd_present(pmdval)))
+ pfn = pmd_pfn(pmdval);
+ else
+ pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));
+
+ folio = damon_get_folio(pfn);
if (!folio)
return;
- if (pmdp_clear_young_notify(vma, addr, pmd))
+ if (likely(pmd_present(pmdval)))
+ young |= pmdp_clear_young_notify(vma, addr, pmd);
+ young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + HPAGE_PMD_SIZE);
+ if (young)
folio_set_young(folio);
folio_set_idle(folio);
@@ -162,21 +174,17 @@ void damon_folio_mkold(struct folio *folio)
.rmap_one = damon_folio_mkold_one,
.anon_lock = folio_lock_anon_vma_read,
};
- bool need_lock;
if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
folio_set_idle(folio);
return;
}
- need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
- if (need_lock && !folio_trylock(folio))
+ if (!folio_trylock(folio))
return;
rmap_walk(folio, &rwc);
-
- if (need_lock)
- folio_unlock(folio);
+ folio_unlock(folio);
}
@@ -203,7 +211,9 @@ static bool damon_folio_young_one(struct folio *folio,
mmu_notifier_test_young(vma->vm_mm, addr);
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
+ pmd_t pmd = pmdp_get(pvmw.pmd);
+
+ *accessed = (pmd_present(pmd) && pmd_young(pmd)) ||
!folio_test_idle(folio) ||
mmu_notifier_test_young(vma->vm_mm, addr);
#else
@@ -228,7 +238,6 @@ bool damon_folio_young(struct folio *folio)
.rmap_one = damon_folio_young_one,
.anon_lock = folio_lock_anon_vma_read,
};
- bool need_lock;
if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
if (folio_test_idle(folio))
@@ -237,14 +246,11 @@ bool damon_folio_young(struct folio *folio)
return true;
}
- need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
- if (need_lock && !folio_trylock(folio))
+ if (!folio_trylock(folio))
return false;
rmap_walk(folio, &rwc);
-
- if (need_lock)
- folio_unlock(folio);
+ folio_unlock(folio);
return accessed;
}
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 7ba3d0f9a19a..36a582e09eae 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -250,7 +250,8 @@ static int damon_reclaim_apply_parameters(void)
err = damon_set_region_biggest_system_ram_default(param_target,
&monitor_region_start,
- &monitor_region_end);
+ &monitor_region_end,
+ param_ctx->min_sz_region);
if (err)
goto out;
err = damon_commit_ctx(ctx, param_ctx);
diff --git a/mm/damon/stat.c b/mm/damon/stat.c
index d8010968bbed..ed8e3629d31a 100644
--- a/mm/damon/stat.c
+++ b/mm/damon/stat.c
@@ -46,6 +46,8 @@ MODULE_PARM_DESC(aggr_interval_us,
static struct damon_ctx *damon_stat_context;
+static unsigned long damon_stat_last_refresh_jiffies;
+
static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
{
struct damon_target *t;
@@ -130,13 +132,12 @@ static void damon_stat_set_idletime_percentiles(struct damon_ctx *c)
static int damon_stat_damon_call_fn(void *data)
{
struct damon_ctx *c = data;
- static unsigned long last_refresh_jiffies;
/* avoid unnecessarily frequent stat update */
- if (time_before_eq(jiffies, last_refresh_jiffies +
+ if (time_before_eq(jiffies, damon_stat_last_refresh_jiffies +
msecs_to_jiffies(5 * MSEC_PER_SEC)))
return 0;
- last_refresh_jiffies = jiffies;
+ damon_stat_last_refresh_jiffies = jiffies;
aggr_interval_us = c->attrs.aggr_interval;
damon_stat_set_estimated_memory_bandwidth(c);
@@ -187,7 +188,8 @@ static struct damon_ctx *damon_stat_build_ctx(void)
if (!target)
goto free_out;
damon_add_target(ctx, target);
- if (damon_set_region_biggest_system_ram_default(target, &start, &end))
+ if (damon_set_region_biggest_system_ram_default(target, &start, &end,
+ ctx->min_sz_region))
goto free_out;
return ctx;
free_out:
@@ -210,6 +212,8 @@ static int damon_stat_start(void)
err = damon_start(&damon_stat_context, 1, true);
if (err)
return err;
+
+ damon_stat_last_refresh_jiffies = jiffies;
call_control.data = damon_stat_context;
return damon_call(damon_stat_context, &call_control);
}
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 6536f16006c9..30d20f5b3192 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -999,6 +999,7 @@ struct damos_sysfs_quota_goal {
unsigned long target_value;
unsigned long current_value;
int nid;
+ char *path;
};
static struct damos_sysfs_quota_goal *damos_sysfs_quota_goal_alloc(void)
@@ -1029,6 +1030,14 @@ struct damos_sysfs_qgoal_metric_name damos_sysfs_qgoal_metric_names[] = {
.metric = DAMOS_QUOTA_NODE_MEM_FREE_BP,
.name = "node_mem_free_bp",
},
+ {
+ .metric = DAMOS_QUOTA_NODE_MEMCG_USED_BP,
+ .name = "node_memcg_used_bp",
+ },
+ {
+ .metric = DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
+ .name = "node_memcg_free_bp",
+ },
};
static ssize_t target_metric_show(struct kobject *kobj,
@@ -1112,7 +1121,6 @@ static ssize_t nid_show(struct kobject *kobj,
struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
damos_sysfs_quota_goal, kobj);
- /* todo: return error if the goal is not using nid */
return sysfs_emit(buf, "%d\n", goal->nid);
}
@@ -1128,10 +1136,39 @@ static ssize_t nid_store(struct kobject *kobj,
return err ? err : count;
}
+static ssize_t path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj,
+ struct damos_sysfs_quota_goal, kobj);
+
+ return sysfs_emit(buf, "%s\n", goal->path ? goal->path : "");
+}
+
+static ssize_t path_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj,
+ struct damos_sysfs_quota_goal, kobj);
+ char *path = kmalloc_array(size_add(count, 1), sizeof(*path),
+ GFP_KERNEL);
+
+ if (!path)
+ return -ENOMEM;
+
+ strscpy(path, buf, count + 1);
+ kfree(goal->path);
+ goal->path = path;
+ return count;
+}
+
static void damos_sysfs_quota_goal_release(struct kobject *kobj)
{
- /* or, notify this release to the feed callback */
- kfree(container_of(kobj, struct damos_sysfs_quota_goal, kobj));
+ struct damos_sysfs_quota_goal *goal = container_of(kobj,
+ struct damos_sysfs_quota_goal, kobj);
+
+ kfree(goal->path);
+ kfree(goal);
}
static struct kobj_attribute damos_sysfs_quota_goal_target_metric_attr =
@@ -1146,11 +1183,15 @@ static struct kobj_attribute damos_sysfs_quota_goal_current_value_attr =
static struct kobj_attribute damos_sysfs_quota_goal_nid_attr =
__ATTR_RW_MODE(nid, 0600);
+static struct kobj_attribute damos_sysfs_quota_goal_path_attr =
+ __ATTR_RW_MODE(path, 0600);
+
static struct attribute *damos_sysfs_quota_goal_attrs[] = {
&damos_sysfs_quota_goal_target_metric_attr.attr,
&damos_sysfs_quota_goal_target_value_attr.attr,
&damos_sysfs_quota_goal_current_value_attr.attr,
&damos_sysfs_quota_goal_nid_attr.attr,
+ &damos_sysfs_quota_goal_path_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(damos_sysfs_quota_goal);
@@ -2492,7 +2533,7 @@ static int damos_sysfs_add_quota_score(
struct damos_quota *quota)
{
struct damos_quota_goal *goal;
- int i;
+ int i, err;
for (i = 0; i < sysfs_goals->nr; i++) {
struct damos_sysfs_quota_goal *sysfs_goal =
@@ -2513,6 +2554,16 @@ static int damos_sysfs_add_quota_score(
case DAMOS_QUOTA_NODE_MEM_FREE_BP:
goal->nid = sysfs_goal->nid;
break;
+ case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
+ case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
+ err = damon_sysfs_memcg_path_to_id(
+ sysfs_goal->path, &goal->memcg_id);
+ if (err) {
+ damos_destroy_quota_goal(goal);
+ return err;
+ }
+ goal->nid = sysfs_goal->nid;
+ break;
default:
break;
}
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index cd6815ecc04e..e2bd2d7becdd 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -212,6 +212,7 @@ struct damon_sysfs_target {
struct kobject kobj;
struct damon_sysfs_regions *regions;
int pid;
+ bool obsolete;
};
static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
@@ -263,6 +264,29 @@ static ssize_t pid_target_store(struct kobject *kobj,
return count;
}
+static ssize_t obsolete_target_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_target *target = container_of(kobj,
+ struct damon_sysfs_target, kobj);
+
+ return sysfs_emit(buf, "%c\n", target->obsolete ? 'Y' : 'N');
+}
+
+static ssize_t obsolete_target_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_target *target = container_of(kobj,
+ struct damon_sysfs_target, kobj);
+ bool obsolete;
+ int err = kstrtobool(buf, &obsolete);
+
+ if (err)
+ return err;
+ target->obsolete = obsolete;
+ return count;
+}
+
static void damon_sysfs_target_release(struct kobject *kobj)
{
kfree(container_of(kobj, struct damon_sysfs_target, kobj));
@@ -271,8 +295,12 @@ static void damon_sysfs_target_release(struct kobject *kobj)
static struct kobj_attribute damon_sysfs_target_pid_attr =
__ATTR_RW_MODE(pid_target, 0600);
+static struct kobj_attribute damon_sysfs_target_obsolete_attr =
+ __ATTR_RW_MODE(obsolete_target, 0600);
+
static struct attribute *damon_sysfs_target_attrs[] = {
&damon_sysfs_target_pid_attr.attr,
+ &damon_sysfs_target_obsolete_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_target);
@@ -1264,7 +1292,7 @@ enum damon_sysfs_cmd {
DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
/*
* @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring
- * intevals.
+ * intervals.
*/
DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS,
/*
@@ -1377,6 +1405,7 @@ static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
/* caller will destroy targets */
return -EINVAL;
}
+ t->obsolete = sys_target->obsolete;
return damon_sysfs_set_regions(t, sys_target->regions, ctx->min_sz_region);
}
@@ -1452,6 +1481,26 @@ static struct damon_ctx *damon_sysfs_build_ctx(
struct damon_sysfs_context *sys_ctx);
/*
+ * Return a new damon_ctx for testing new parameters to commit.
+ */
+static struct damon_ctx *damon_sysfs_new_test_ctx(
+ struct damon_ctx *running_ctx)
+{
+ struct damon_ctx *test_ctx;
+ int err;
+
+ test_ctx = damon_new_ctx();
+ if (!test_ctx)
+ return NULL;
+ err = damon_commit_ctx(test_ctx, running_ctx);
+ if (err) {
+ damon_destroy_ctx(test_ctx);
+ return NULL;
+ }
+ return test_ctx;
+}
+
+/*
* damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
* @kdamond: The kobject wrapper for the associated kdamond.
*
@@ -1472,7 +1521,7 @@ static int damon_sysfs_commit_input(void *data)
param_ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
if (IS_ERR(param_ctx))
return PTR_ERR(param_ctx);
- test_ctx = damon_new_ctx();
+ test_ctx = damon_sysfs_new_test_ctx(kdamond->damon_ctx);
if (!test_ctx)
return -ENOMEM;
err = damon_commit_ctx(test_ctx, param_ctx);
@@ -1552,16 +1601,17 @@ static struct damon_ctx *damon_sysfs_build_ctx(
return ctx;
}
+static unsigned long damon_sysfs_next_update_jiffies;
+
static int damon_sysfs_repeat_call_fn(void *data)
{
struct damon_sysfs_kdamond *sysfs_kdamond = data;
- static unsigned long next_update_jiffies;
if (!sysfs_kdamond->refresh_ms)
return 0;
- if (time_before(jiffies, next_update_jiffies))
+ if (time_before(jiffies, damon_sysfs_next_update_jiffies))
return 0;
- next_update_jiffies = jiffies +
+ damon_sysfs_next_update_jiffies = jiffies +
msecs_to_jiffies(sysfs_kdamond->refresh_ms);
if (!mutex_trylock(&damon_sysfs_lock))
@@ -1607,6 +1657,9 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
}
kdamond->damon_ctx = ctx;
+ damon_sysfs_next_update_jiffies =
+ jiffies + msecs_to_jiffies(kdamond->refresh_ms);
+
repeat_call_control->fn = damon_sysfs_repeat_call_fn;
repeat_call_control->data = kdamond;
repeat_call_control->repeat = true;
diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h
index 51369e35298b..a1eff023e928 100644
--- a/mm/damon/tests/core-kunit.h
+++ b/mm/damon/tests/core-kunit.h
@@ -20,11 +20,17 @@ static void damon_test_regions(struct kunit *test)
struct damon_target *t;
r = damon_new_region(1, 2);
+ if (!r)
+ kunit_skip(test, "region alloc fail");
KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
t = damon_new_target();
+ if (!t) {
+ damon_free_region(r);
+ kunit_skip(test, "target alloc fail");
+ }
KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
damon_add_region(r, t);
@@ -52,7 +58,14 @@ static void damon_test_target(struct kunit *test)
struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
+ if (!c)
+ kunit_skip(test, "ctx alloc fail");
+
t = damon_new_target();
+ if (!t) {
+ damon_destroy_ctx(c);
+ kunit_skip(test, "target alloc fail");
+ }
KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
damon_add_target(c, t);
@@ -84,8 +97,15 @@ static void damon_test_aggregate(struct kunit *test)
struct damon_region *r;
int it, ir;
+ if (!ctx)
+ kunit_skip(test, "ctx alloc fail");
+
for (it = 0; it < 3; it++) {
t = damon_new_target();
+ if (!t) {
+ damon_destroy_ctx(ctx);
+ kunit_skip(test, "target alloc fail");
+ }
damon_add_target(ctx, t);
}
@@ -93,6 +113,10 @@ static void damon_test_aggregate(struct kunit *test)
damon_for_each_target(t, ctx) {
for (ir = 0; ir < 3; ir++) {
r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
+ if (!r) {
+ damon_destroy_ctx(ctx);
+ kunit_skip(test, "region alloc fail");
+ }
r->nr_accesses = accesses[it][ir];
r->nr_accesses_bp = accesses[it][ir] * 10000;
damon_add_region(r, t);
@@ -120,12 +144,17 @@ static void damon_test_aggregate(struct kunit *test)
static void damon_test_split_at(struct kunit *test)
{
- struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
struct damon_region *r, *r_new;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
r = damon_new_region(0, 100);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
r->nr_accesses_bp = 420000;
r->nr_accesses = 42;
r->last_nr_accesses = 15;
@@ -143,7 +172,6 @@ static void damon_test_split_at(struct kunit *test)
KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);
damon_free_target(t);
- damon_destroy_ctx(c);
}
static void damon_test_merge_two(struct kunit *test)
@@ -153,11 +181,21 @@ static void damon_test_merge_two(struct kunit *test)
int i;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
r = damon_new_region(0, 100);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
r->nr_accesses = 10;
r->nr_accesses_bp = 100000;
damon_add_region(r, t);
r2 = damon_new_region(100, 300);
+ if (!r2) {
+ damon_free_target(t);
+ kunit_skip(test, "second region alloc fail");
+ }
r2->nr_accesses = 20;
r2->nr_accesses_bp = 200000;
damon_add_region(r2, t);
@@ -203,8 +241,14 @@ static void damon_test_merge_regions_of(struct kunit *test)
int i;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
for (i = 0; i < ARRAY_SIZE(sa); i++) {
r = damon_new_region(sa[i], ea[i]);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
r->nr_accesses = nrs[i];
r->nr_accesses_bp = nrs[i] * 10000;
damon_add_region(r, t);
@@ -223,24 +267,34 @@ static void damon_test_merge_regions_of(struct kunit *test)
static void damon_test_split_regions_of(struct kunit *test)
{
- struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
struct damon_region *r;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
r = damon_new_region(0, 22);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
- damon_split_regions_of(t, 2, DAMON_MIN_REGION);
+ damon_split_regions_of(t, 2, 1);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
damon_free_target(t);
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "second target alloc fail");
r = damon_new_region(0, 220);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "second region alloc fail");
+ }
damon_add_region(r, t);
- damon_split_regions_of(t, 4, DAMON_MIN_REGION);
+ damon_split_regions_of(t, 4, 1);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
damon_free_target(t);
- damon_destroy_ctx(c);
}
static void damon_test_ops_registration(struct kunit *test)
@@ -249,6 +303,9 @@ static void damon_test_ops_registration(struct kunit *test)
struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
bool need_cleanup = false;
+ if (!c)
+ kunit_skip(test, "ctx alloc fail");
+
/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
bak.id = DAMON_OPS_VADDR;
@@ -294,16 +351,29 @@ static void damon_test_ops_registration(struct kunit *test)
static void damon_test_set_regions(struct kunit *test)
{
struct damon_target *t = damon_new_target();
- struct damon_region *r1 = damon_new_region(4, 16);
- struct damon_region *r2 = damon_new_region(24, 32);
+ struct damon_region *r1, *r2;
struct damon_addr_range range = {.start = 8, .end = 28};
unsigned long expects[] = {8, 16, 16, 24, 24, 28};
int expect_idx = 0;
struct damon_region *r;
+ if (!t)
+ kunit_skip(test, "target alloc fail");
+ r1 = damon_new_region(4, 16);
+ if (!r1) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
+ r2 = damon_new_region(24, 32);
+ if (!r2) {
+ damon_free_target(t);
+ damon_free_region(r1);
+ kunit_skip(test, "second region alloc fail");
+ }
+
damon_add_region(r1, t);
damon_add_region(r2, t);
- damon_set_regions(t, &range, 1, DAMON_MIN_REGION);
+ damon_set_regions(t, &range, 1, 1);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
damon_for_each_region(r, t) {
@@ -342,6 +412,9 @@ static void damon_test_update_monitoring_result(struct kunit *test)
struct damon_attrs new_attrs;
struct damon_region *r = damon_new_region(3, 7);
+ if (!r)
+ kunit_skip(test, "region alloc fail");
+
r->nr_accesses = 15;
r->nr_accesses_bp = 150000;
r->age = 20;
@@ -375,6 +448,9 @@ static void damon_test_set_attrs(struct kunit *test)
.sample_interval = 5000, .aggr_interval = 100000,};
struct damon_attrs invalid_attrs;
+ if (!c)
+ kunit_skip(test, "ctx alloc fail");
+
KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);
invalid_attrs = valid_attrs;
@@ -412,6 +488,8 @@ static void damos_test_new_filter(struct kunit *test)
struct damos_filter *filter;
filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
+ if (!filter)
+ kunit_skip(test, "filter alloc fail");
KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
KUNIT_EXPECT_EQ(test, filter->matching, true);
KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
@@ -419,20 +497,535 @@ static void damos_test_new_filter(struct kunit *test)
damos_destroy_filter(filter);
}
+static void damos_test_commit_quota_goal_for(struct kunit *test,
+ struct damos_quota_goal *dst,
+ struct damos_quota_goal *src)
+{
+ u64 dst_last_psi_total = 0;
+
+ if (dst->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
+ dst_last_psi_total = dst->last_psi_total;
+ damos_commit_quota_goal(dst, src);
+
+ KUNIT_EXPECT_EQ(test, dst->metric, src->metric);
+ KUNIT_EXPECT_EQ(test, dst->target_value, src->target_value);
+ if (src->metric == DAMOS_QUOTA_USER_INPUT)
+ KUNIT_EXPECT_EQ(test, dst->current_value, src->current_value);
+ if (dst_last_psi_total && src->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
+ KUNIT_EXPECT_EQ(test, dst->last_psi_total, dst_last_psi_total);
+ switch (dst->metric) {
+ case DAMOS_QUOTA_NODE_MEM_USED_BP:
+ case DAMOS_QUOTA_NODE_MEM_FREE_BP:
+ KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
+ break;
+ case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
+ case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
+ KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
+ KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
+ break;
+ default:
+ break;
+ }
+}
+
+static void damos_test_commit_quota_goal(struct kunit *test)
+{
+ struct damos_quota_goal dst = {
+ .metric = DAMOS_QUOTA_SOME_MEM_PSI_US,
+ .target_value = 1000,
+ .current_value = 123,
+ .last_psi_total = 456,
+ };
+
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 789,
+ .current_value = 12});
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_NODE_MEM_FREE_BP,
+ .target_value = 345,
+ .current_value = 678,
+ .nid = 9,
+ });
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_NODE_MEM_USED_BP,
+ .target_value = 12,
+ .current_value = 345,
+ .nid = 6,
+ });
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_NODE_MEMCG_USED_BP,
+ .target_value = 456,
+ .current_value = 567,
+ .nid = 6,
+ .memcg_id = 7,
+ });
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal){
+ .metric = DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
+ .target_value = 890,
+ .current_value = 901,
+ .nid = 10,
+ .memcg_id = 1,
+ });
+ damos_test_commit_quota_goal_for(test, &dst,
+ &(struct damos_quota_goal) {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 789,
+ .current_value = 12,
+ });
+}
+
+static void damos_test_commit_quota_goals_for(struct kunit *test,
+ struct damos_quota_goal *dst_goals, int nr_dst_goals,
+ struct damos_quota_goal *src_goals, int nr_src_goals)
+{
+ struct damos_quota dst, src;
+ struct damos_quota_goal *goal, *next;
+ bool skip = true;
+ int i;
+
+ INIT_LIST_HEAD(&dst.goals);
+ INIT_LIST_HEAD(&src.goals);
+
+ for (i = 0; i < nr_dst_goals; i++) {
+ /*
+ * When nr_src_goals is smaller than dst_goals,
+ * damos_commit_quota_goals() will kfree() the dst goals.
+ * Make it kfree()-able.
+ */
+ goal = damos_new_quota_goal(dst_goals[i].metric,
+ dst_goals[i].target_value);
+ if (!goal)
+ goto out;
+ damos_add_quota_goal(&dst, goal);
+ }
+ skip = false;
+ for (i = 0; i < nr_src_goals; i++)
+ damos_add_quota_goal(&src, &src_goals[i]);
+
+ damos_commit_quota_goals(&dst, &src);
+
+ i = 0;
+ damos_for_each_quota_goal(goal, (&dst)) {
+ KUNIT_EXPECT_EQ(test, goal->metric, src_goals[i].metric);
+ KUNIT_EXPECT_EQ(test, goal->target_value,
+ src_goals[i++].target_value);
+ }
+ KUNIT_EXPECT_EQ(test, i, nr_src_goals);
+
+out:
+ damos_for_each_quota_goal_safe(goal, next, (&dst))
+ damos_destroy_quota_goal(goal);
+ if (skip)
+ kunit_skip(test, "goal alloc fail");
+}
+
+static void damos_test_commit_quota_goals(struct kunit *test)
+{
+ damos_test_commit_quota_goals_for(test,
+ (struct damos_quota_goal[]){}, 0,
+ (struct damos_quota_goal[]){
+ {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 123,
+ },
+ }, 1);
+ damos_test_commit_quota_goals_for(test,
+ (struct damos_quota_goal[]){
+ {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 234,
+ },
+
+ }, 1,
+ (struct damos_quota_goal[]){
+ {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 345,
+ },
+ }, 1);
+ damos_test_commit_quota_goals_for(test,
+ (struct damos_quota_goal[]){
+ {
+ .metric = DAMOS_QUOTA_USER_INPUT,
+ .target_value = 456,
+ },
+
+ }, 1,
+ (struct damos_quota_goal[]){}, 0);
+}
+
+static void damos_test_commit_quota(struct kunit *test)
+{
+ struct damos_quota dst = {
+ .reset_interval = 1,
+ .ms = 2,
+ .sz = 3,
+ .weight_sz = 4,
+ .weight_nr_accesses = 5,
+ .weight_age = 6,
+ };
+ struct damos_quota src = {
+ .reset_interval = 7,
+ .ms = 8,
+ .sz = 9,
+ .weight_sz = 10,
+ .weight_nr_accesses = 11,
+ .weight_age = 12,
+ };
+
+ INIT_LIST_HEAD(&dst.goals);
+ INIT_LIST_HEAD(&src.goals);
+
+ damos_commit_quota(&dst, &src);
+
+ KUNIT_EXPECT_EQ(test, dst.reset_interval, src.reset_interval);
+ KUNIT_EXPECT_EQ(test, dst.ms, src.ms);
+ KUNIT_EXPECT_EQ(test, dst.sz, src.sz);
+ KUNIT_EXPECT_EQ(test, dst.weight_sz, src.weight_sz);
+ KUNIT_EXPECT_EQ(test, dst.weight_nr_accesses, src.weight_nr_accesses);
+ KUNIT_EXPECT_EQ(test, dst.weight_age, src.weight_age);
+}
+
+static int damos_test_help_dests_setup(struct damos_migrate_dests *dests,
+ unsigned int *node_id_arr, unsigned int *weight_arr,
+ size_t nr_dests)
+{
+ size_t i;
+
+ dests->node_id_arr = kmalloc_array(nr_dests,
+ sizeof(*dests->node_id_arr), GFP_KERNEL);
+ if (!dests->node_id_arr)
+ return -ENOMEM;
+ dests->weight_arr = kmalloc_array(nr_dests,
+ sizeof(*dests->weight_arr), GFP_KERNEL);
+ if (!dests->weight_arr) {
+ kfree(dests->node_id_arr);
+ dests->node_id_arr = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_dests; i++) {
+ dests->node_id_arr[i] = node_id_arr[i];
+ dests->weight_arr[i] = weight_arr[i];
+ }
+ dests->nr_dests = nr_dests;
+ return 0;
+}
+
+static void damos_test_help_dests_free(struct damos_migrate_dests *dests)
+{
+ kfree(dests->node_id_arr);
+ kfree(dests->weight_arr);
+}
+
+static void damos_test_commit_dests_for(struct kunit *test,
+ unsigned int *dst_node_id_arr, unsigned int *dst_weight_arr,
+ size_t dst_nr_dests,
+ unsigned int *src_node_id_arr, unsigned int *src_weight_arr,
+ size_t src_nr_dests)
+{
+ struct damos_migrate_dests dst = {}, src = {};
+ int i, err;
+ bool skip = true;
+
+ err = damos_test_help_dests_setup(&dst, dst_node_id_arr,
+ dst_weight_arr, dst_nr_dests);
+ if (err)
+ kunit_skip(test, "dests setup fail");
+ err = damos_test_help_dests_setup(&src, src_node_id_arr,
+ src_weight_arr, src_nr_dests);
+ if (err) {
+ damos_test_help_dests_free(&dst);
+ kunit_skip(test, "src setup fail");
+ }
+ err = damos_commit_dests(&dst, &src);
+ if (err)
+ goto out;
+ skip = false;
+
+ KUNIT_EXPECT_EQ(test, dst.nr_dests, src_nr_dests);
+ for (i = 0; i < dst.nr_dests; i++) {
+ KUNIT_EXPECT_EQ(test, dst.node_id_arr[i], src_node_id_arr[i]);
+ KUNIT_EXPECT_EQ(test, dst.weight_arr[i], src_weight_arr[i]);
+ }
+
+out:
+ damos_test_help_dests_free(&dst);
+ damos_test_help_dests_free(&src);
+ if (skip)
+ kunit_skip(test, "skip");
+}
+
+static void damos_test_commit_dests(struct kunit *test)
+{
+ damos_test_commit_dests_for(test,
+ (unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
+ 3,
+ (unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
+ 3);
+ damos_test_commit_dests_for(test,
+ (unsigned int[]){1, 2}, (unsigned int[]){2, 3},
+ 2,
+ (unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
+ 3);
+ damos_test_commit_dests_for(test,
+ NULL, NULL, 0,
+ (unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
+ 3);
+ damos_test_commit_dests_for(test,
+ (unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
+ 3,
+ (unsigned int[]){4, 5}, (unsigned int[]){5, 6}, 2);
+ damos_test_commit_dests_for(test,
+ (unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
+ 3,
+ NULL, NULL, 0);
+}
+
+static void damos_test_commit_filter_for(struct kunit *test,
+ struct damos_filter *dst, struct damos_filter *src)
+{
+ damos_commit_filter(dst, src);
+ KUNIT_EXPECT_EQ(test, dst->type, src->type);
+ KUNIT_EXPECT_EQ(test, dst->matching, src->matching);
+ KUNIT_EXPECT_EQ(test, dst->allow, src->allow);
+ switch (src->type) {
+ case DAMOS_FILTER_TYPE_MEMCG:
+ KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
+ break;
+ case DAMOS_FILTER_TYPE_ADDR:
+ KUNIT_EXPECT_EQ(test, dst->addr_range.start,
+ src->addr_range.start);
+ KUNIT_EXPECT_EQ(test, dst->addr_range.end,
+ src->addr_range.end);
+ break;
+ case DAMOS_FILTER_TYPE_TARGET:
+ KUNIT_EXPECT_EQ(test, dst->target_idx, src->target_idx);
+ break;
+ case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
+ KUNIT_EXPECT_EQ(test, dst->sz_range.min, src->sz_range.min);
+ KUNIT_EXPECT_EQ(test, dst->sz_range.max, src->sz_range.max);
+ break;
+ default:
+ break;
+ }
+}
+
static void damos_test_commit_filter(struct kunit *test)
{
- struct damos_filter *src_filter = damos_new_filter(
- DAMOS_FILTER_TYPE_ANON, true, true);
- struct damos_filter *dst_filter = damos_new_filter(
- DAMOS_FILTER_TYPE_ACTIVE, false, false);
+ struct damos_filter dst = {
+ .type = DAMOS_FILTER_TYPE_ACTIVE,
+ .matching = false,
+ .allow = false,
+ };
+
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_ANON,
+ .matching = true,
+ .allow = true,
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_MEMCG,
+ .matching = false,
+ .allow = false,
+ .memcg_id = 123,
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_YOUNG,
+ .matching = true,
+ .allow = true,
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
+ .matching = false,
+ .allow = false,
+ .sz_range = {.min = 234, .max = 345},
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_UNMAPPED,
+ .matching = true,
+ .allow = true,
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_ADDR,
+ .matching = false,
+ .allow = false,
+ .addr_range = {.start = 456, .end = 567},
+ });
+ damos_test_commit_filter_for(test, &dst,
+ &(struct damos_filter){
+ .type = DAMOS_FILTER_TYPE_TARGET,
+ .matching = true,
+ .allow = true,
+ .target_idx = 6,
+ });
+}
+
+static void damos_test_help_initailize_scheme(struct damos *scheme)
+{
+ INIT_LIST_HEAD(&scheme->quota.goals);
+ INIT_LIST_HEAD(&scheme->core_filters);
+ INIT_LIST_HEAD(&scheme->ops_filters);
+}
- damos_commit_filter(dst_filter, src_filter);
- KUNIT_EXPECT_EQ(test, dst_filter->type, src_filter->type);
- KUNIT_EXPECT_EQ(test, dst_filter->matching, src_filter->matching);
- KUNIT_EXPECT_EQ(test, dst_filter->allow, src_filter->allow);
+static void damos_test_commit_for(struct kunit *test, struct damos *dst,
+ struct damos *src)
+{
+ int err;
+
+ damos_test_help_initailize_scheme(dst);
+ damos_test_help_initailize_scheme(src);
+
+ err = damos_commit(dst, src);
+ if (err)
+ kunit_skip(test, "damos_commit fail");
+
+ KUNIT_EXPECT_EQ(test, dst->pattern.min_sz_region,
+ src->pattern.min_sz_region);
+ KUNIT_EXPECT_EQ(test, dst->pattern.max_sz_region,
+ src->pattern.max_sz_region);
+ KUNIT_EXPECT_EQ(test, dst->pattern.min_nr_accesses,
+ src->pattern.min_nr_accesses);
+ KUNIT_EXPECT_EQ(test, dst->pattern.max_nr_accesses,
+ src->pattern.max_nr_accesses);
+ KUNIT_EXPECT_EQ(test, dst->pattern.min_age_region,
+ src->pattern.min_age_region);
+ KUNIT_EXPECT_EQ(test, dst->pattern.max_age_region,
+ src->pattern.max_age_region);
+
+ KUNIT_EXPECT_EQ(test, dst->action, src->action);
+ KUNIT_EXPECT_EQ(test, dst->apply_interval_us, src->apply_interval_us);
+
+ KUNIT_EXPECT_EQ(test, dst->wmarks.metric, src->wmarks.metric);
+ KUNIT_EXPECT_EQ(test, dst->wmarks.interval, src->wmarks.interval);
+ KUNIT_EXPECT_EQ(test, dst->wmarks.high, src->wmarks.high);
+ KUNIT_EXPECT_EQ(test, dst->wmarks.mid, src->wmarks.mid);
+ KUNIT_EXPECT_EQ(test, dst->wmarks.low, src->wmarks.low);
+
+ switch (src->action) {
+ case DAMOS_MIGRATE_COLD:
+ case DAMOS_MIGRATE_HOT:
+ KUNIT_EXPECT_EQ(test, dst->target_nid, src->target_nid);
+ break;
+ default:
+ break;
+ }
+}
- damos_destroy_filter(src_filter);
- damos_destroy_filter(dst_filter);
+static void damos_test_commit(struct kunit *test)
+{
+ damos_test_commit_for(test,
+ &(struct damos){
+ .pattern = (struct damos_access_pattern){
+ 1, 2, 3, 4, 5, 6},
+ .action = DAMOS_PAGEOUT,
+ .apply_interval_us = 1000000,
+ .wmarks = (struct damos_watermarks){
+ DAMOS_WMARK_FREE_MEM_RATE,
+ 900, 100, 50},
+ },
+ &(struct damos){
+ .pattern = (struct damos_access_pattern){
+ 2, 3, 4, 5, 6, 7},
+ .action = DAMOS_PAGEOUT,
+ .apply_interval_us = 2000000,
+ .wmarks = (struct damos_watermarks){
+ DAMOS_WMARK_FREE_MEM_RATE,
+ 800, 50, 30},
+ });
+ damos_test_commit_for(test,
+ &(struct damos){
+ .pattern = (struct damos_access_pattern){
+ 1, 2, 3, 4, 5, 6},
+ .action = DAMOS_PAGEOUT,
+ .apply_interval_us = 1000000,
+ .wmarks = (struct damos_watermarks){
+ DAMOS_WMARK_FREE_MEM_RATE,
+ 900, 100, 50},
+ },
+ &(struct damos){
+ .pattern = (struct damos_access_pattern){
+ 2, 3, 4, 5, 6, 7},
+ .action = DAMOS_MIGRATE_HOT,
+ .apply_interval_us = 2000000,
+ .target_nid = 5,
+ });
+}
+
+static struct damon_target *damon_test_help_setup_target(
+ unsigned long region_start_end[][2], int nr_regions)
+{
+ struct damon_target *t;
+ struct damon_region *r;
+ int i;
+
+ t = damon_new_target();
+ if (!t)
+ return NULL;
+ for (i = 0; i < nr_regions; i++) {
+ r = damon_new_region(region_start_end[i][0],
+ region_start_end[i][1]);
+ if (!r) {
+ damon_free_target(t);
+ return NULL;
+ }
+ damon_add_region(r, t);
+ }
+ return t;
+}
+
+static void damon_test_commit_target_regions_for(struct kunit *test,
+ unsigned long dst_start_end[][2], int nr_dst_regions,
+ unsigned long src_start_end[][2], int nr_src_regions,
+ unsigned long expect_start_end[][2], int nr_expect_regions)
+{
+ struct damon_target *dst_target, *src_target;
+ struct damon_region *r;
+ int i;
+
+ dst_target = damon_test_help_setup_target(dst_start_end, nr_dst_regions);
+ if (!dst_target)
+ kunit_skip(test, "dst target setup fail");
+ src_target = damon_test_help_setup_target(src_start_end, nr_src_regions);
+ if (!src_target) {
+ damon_free_target(dst_target);
+ kunit_skip(test, "src target setup fail");
+ }
+ damon_commit_target_regions(dst_target, src_target, 1);
+ i = 0;
+ damon_for_each_region(r, dst_target) {
+ KUNIT_EXPECT_EQ(test, r->ar.start, expect_start_end[i][0]);
+ KUNIT_EXPECT_EQ(test, r->ar.end, expect_start_end[i][1]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(dst_target), nr_expect_regions);
+ KUNIT_EXPECT_EQ(test, i, nr_expect_regions);
+ damon_free_target(dst_target);
+ damon_free_target(src_target);
+}
+
+static void damon_test_commit_target_regions(struct kunit *test)
+{
+ damon_test_commit_target_regions_for(test,
+ (unsigned long[][2]) {{3, 8}, {8, 10}}, 2,
+ (unsigned long[][2]) {{4, 6}}, 1,
+ (unsigned long[][2]) {{4, 6}}, 1);
+ damon_test_commit_target_regions_for(test,
+ (unsigned long[][2]) {{3, 8}, {8, 10}}, 2,
+ (unsigned long[][2]) {}, 0,
+ (unsigned long[][2]) {{3, 8}, {8, 10}}, 2);
}
static void damos_test_filter_out(struct kunit *test)
@@ -442,58 +1035,66 @@ static void damos_test_filter_out(struct kunit *test)
struct damos_filter *f;
f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
- f->addr_range = (struct damon_addr_range){
- .start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};
+ if (!f)
+ kunit_skip(test, "filter alloc fail");
+ f->addr_range = (struct damon_addr_range){.start = 2, .end = 6};
t = damon_new_target();
- r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
+ if (!t) {
+ damos_destroy_filter(f);
+ kunit_skip(test, "target alloc fail");
+ }
+ r = damon_new_region(3, 5);
+ if (!r) {
+ damos_destroy_filter(f);
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
/* region in the range */
- KUNIT_EXPECT_TRUE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f, 1));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region before the range */
- r->ar.start = DAMON_MIN_REGION * 1;
- r->ar.end = DAMON_MIN_REGION * 2;
+ r->ar.start = 1;
+ r->ar.end = 2;
KUNIT_EXPECT_FALSE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ damos_filter_match(NULL, t, r, f, 1));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region after the range */
- r->ar.start = DAMON_MIN_REGION * 6;
- r->ar.end = DAMON_MIN_REGION * 8;
+ r->ar.start = 6;
+ r->ar.end = 8;
KUNIT_EXPECT_FALSE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ damos_filter_match(NULL, t, r, f, 1));
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
/* region started before the range */
- r->ar.start = DAMON_MIN_REGION * 1;
- r->ar.end = DAMON_MIN_REGION * 4;
- KUNIT_EXPECT_FALSE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ r->ar.start = 1;
+ r->ar.end = 4;
+ KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f, 1));
/* filter should have split the region */
- KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
- KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
+ KUNIT_EXPECT_EQ(test, r->ar.start, 1);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 2);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
r2 = damon_next_region(r);
- KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
- KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
+ KUNIT_EXPECT_EQ(test, r2->ar.start, 2);
+ KUNIT_EXPECT_EQ(test, r2->ar.end, 4);
damon_destroy_region(r2, t);
/* region started in the range */
- r->ar.start = DAMON_MIN_REGION * 2;
- r->ar.end = DAMON_MIN_REGION * 8;
+ r->ar.start = 2;
+ r->ar.end = 8;
KUNIT_EXPECT_TRUE(test,
- damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
+ damos_filter_match(NULL, t, r, f, 1));
/* filter should have split the region */
- KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
- KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
+ KUNIT_EXPECT_EQ(test, r->ar.start, 2);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 6);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
r2 = damon_next_region(r);
- KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
- KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
+ KUNIT_EXPECT_EQ(test, r2->ar.start, 6);
+ KUNIT_EXPECT_EQ(test, r2->ar.end, 8);
damon_destroy_region(r2, t);
damon_free_target(t);
@@ -536,7 +1137,7 @@ static void damon_test_set_filters_default_reject(struct kunit *test)
struct damos scheme;
struct damos_filter *target_filter, *anon_filter;
- INIT_LIST_HEAD(&scheme.filters);
+ INIT_LIST_HEAD(&scheme.core_filters);
INIT_LIST_HEAD(&scheme.ops_filters);
damos_set_filters_default_reject(&scheme);
@@ -548,6 +1149,8 @@ static void damon_test_set_filters_default_reject(struct kunit *test)
KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);
target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true);
+ if (!target_filter)
+ kunit_skip(test, "filter alloc fail");
damos_add_filter(&scheme, target_filter);
damos_set_filters_default_reject(&scheme);
/*
@@ -573,6 +1176,10 @@ static void damon_test_set_filters_default_reject(struct kunit *test)
KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);
anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
+ if (!anon_filter) {
+ damos_free_filter(target_filter);
+ kunit_skip(test, "anon_filter alloc fail");
+ }
damos_add_filter(&scheme, anon_filter);
damos_set_filters_default_reject(&scheme);
@@ -598,6 +1205,9 @@ static void damon_test_set_filters_default_reject(struct kunit *test)
*/
KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);
+
+ damos_free_filter(anon_filter);
+ damos_free_filter(target_filter);
}
static struct kunit_case damon_test_cases[] = {
@@ -615,7 +1225,13 @@ static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_set_attrs),
KUNIT_CASE(damon_test_moving_sum),
KUNIT_CASE(damos_test_new_filter),
+ KUNIT_CASE(damos_test_commit_quota_goal),
+ KUNIT_CASE(damos_test_commit_quota_goals),
+ KUNIT_CASE(damos_test_commit_quota),
+ KUNIT_CASE(damos_test_commit_dests),
KUNIT_CASE(damos_test_commit_filter),
+ KUNIT_CASE(damos_test_commit),
+ KUNIT_CASE(damon_test_commit_target_regions),
KUNIT_CASE(damos_test_filter_out),
KUNIT_CASE(damon_test_feed_loop_next_input),
KUNIT_CASE(damon_test_set_filters_default_reject),
diff --git a/mm/damon/tests/sysfs-kunit.h b/mm/damon/tests/sysfs-kunit.h
index 7b5c7b307da9..0c665ed255a3 100644
--- a/mm/damon/tests/sysfs-kunit.h
+++ b/mm/damon/tests/sysfs-kunit.h
@@ -45,16 +45,41 @@ static void damon_sysfs_test_add_targets(struct kunit *test)
struct damon_ctx *ctx;
sysfs_targets = damon_sysfs_targets_alloc();
+ if (!sysfs_targets)
+ kunit_skip(test, "sysfs_targets alloc fail");
sysfs_targets->nr = 1;
sysfs_targets->targets_arr = kmalloc_array(1,
sizeof(*sysfs_targets->targets_arr), GFP_KERNEL);
+ if (!sysfs_targets->targets_arr) {
+ kfree(sysfs_targets);
+ kunit_skip(test, "targets_arr alloc fail");
+ }
sysfs_target = damon_sysfs_target_alloc();
+ if (!sysfs_target) {
+ kfree(sysfs_targets->targets_arr);
+ kfree(sysfs_targets);
+ kunit_skip(test, "sysfs_target alloc fail");
+ }
sysfs_target->pid = __damon_sysfs_test_get_any_pid(12, 100);
sysfs_target->regions = damon_sysfs_regions_alloc();
+ if (!sysfs_target->regions) {
+ kfree(sysfs_targets->targets_arr);
+ kfree(sysfs_targets);
+ kfree(sysfs_target);
+ kunit_skip(test, "sysfs_regions alloc fail");
+ }
+
sysfs_targets->targets_arr[0] = sysfs_target;
ctx = damon_new_ctx();
+ if (!ctx) {
+ kfree(sysfs_targets->targets_arr);
+ kfree(sysfs_targets);
+ kfree(sysfs_target->regions);
+ kfree(sysfs_target);
+ kunit_skip(test, "ctx alloc fail");
+ }
damon_sysfs_add_targets(ctx, sysfs_targets);
KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx));
diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h
index fce38dd53cf8..30dc5459f1d2 100644
--- a/mm/damon/tests/vaddr-kunit.h
+++ b/mm/damon/tests/vaddr-kunit.h
@@ -136,8 +136,14 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
int i;
t = damon_new_target();
+ if (!t)
+ kunit_skip(test, "target alloc fail");
for (i = 0; i < nr_regions / 2; i++) {
r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
+ if (!r) {
+ damon_destroy_target(t, NULL);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
}
@@ -250,7 +256,16 @@ static void damon_test_split_evenly_fail(struct kunit *test,
unsigned long start, unsigned long end, unsigned int nr_pieces)
{
struct damon_target *t = damon_new_target();
- struct damon_region *r = damon_new_region(start, end);
+ struct damon_region *r;
+
+ if (!t)
+ kunit_skip(test, "target alloc fail");
+
+ r = damon_new_region(start, end);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
KUNIT_EXPECT_EQ(test,
@@ -269,10 +284,17 @@ static void damon_test_split_evenly_succ(struct kunit *test,
unsigned long start, unsigned long end, unsigned int nr_pieces)
{
struct damon_target *t = damon_new_target();
- struct damon_region *r = damon_new_region(start, end);
+ struct damon_region *r;
unsigned long expected_width = (end - start) / nr_pieces;
unsigned long i = 0;
+ if (!t)
+ kunit_skip(test, "target alloc fail");
+ r = damon_new_region(start, end);
+ if (!r) {
+ damon_free_target(t);
+ kunit_skip(test, "region alloc fail");
+ }
damon_add_region(r, t);
KUNIT_EXPECT_EQ(test,
damon_va_evenly_split_region(t, r, nr_pieces), 0);
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 7e834467b2d8..2750c88e7225 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -307,24 +307,16 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pte_t *pte;
- pmd_t pmde;
spinlock_t *ptl;
- if (pmd_trans_huge(pmdp_get(pmd))) {
- ptl = pmd_lock(walk->mm, pmd);
- pmde = pmdp_get(pmd);
+ ptl = pmd_trans_huge_lock(pmd, walk->vma);
+ if (ptl) {
+ pmd_t pmde = pmdp_get(pmd);
- if (!pmd_present(pmde)) {
- spin_unlock(ptl);
- return 0;
- }
-
- if (pmd_trans_huge(pmde)) {
+ if (pmd_present(pmde))
damon_pmdp_mkold(pmd, walk->vma, addr);
- spin_unlock(ptl);
- return 0;
- }
spin_unlock(ptl);
+ return 0;
}
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
@@ -446,22 +438,13 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
struct damon_young_walk_private *priv = walk->private;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (pmd_trans_huge(pmdp_get(pmd))) {
- pmd_t pmde;
-
- ptl = pmd_lock(walk->mm, pmd);
- pmde = pmdp_get(pmd);
+ ptl = pmd_trans_huge_lock(pmd, walk->vma);
+ if (ptl) {
+ pmd_t pmde = pmdp_get(pmd);
- if (!pmd_present(pmde)) {
- spin_unlock(ptl);
- return 0;
- }
-
- if (!pmd_trans_huge(pmde)) {
- spin_unlock(ptl);
- goto regular_page;
- }
- folio = damon_get_folio(pmd_pfn(pmde));
+ if (!pmd_present(pmde))
+ goto huge_out;
+ folio = vm_normal_folio_pmd(walk->vma, addr, pmde);
if (!folio)
goto huge_out;
if (pmd_young(pmde) || !folio_test_idle(folio) ||
@@ -469,13 +452,10 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
addr))
priv->young = true;
*priv->folio_sz = HPAGE_PMD_SIZE;
- folio_put(folio);
huge_out:
spin_unlock(ptl);
return 0;
}
-
-regular_page:
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
@@ -484,14 +464,13 @@ regular_page:
ptent = ptep_get(pte);
if (!pte_present(ptent))
goto out;
- folio = damon_get_folio(pte_pfn(ptent));
+ folio = vm_normal_folio(walk->vma, addr, ptent);
if (!folio)
goto out;
if (pte_young(ptent) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm, addr))
priv->young = true;
*priv->folio_sz = folio_size(folio);
- folio_put(folio);
out:
pte_unmap_unlock(pte, ptl);
return 0;
@@ -718,7 +697,6 @@ isolate:
list_add(&folio->lru, &migration_lists[i]);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
@@ -728,63 +706,49 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
struct damos_migrate_dests *dests = &s->migrate_dests;
struct folio *folio;
spinlock_t *ptl;
- pmd_t pmde;
-
- ptl = pmd_lock(walk->mm, pmd);
- pmde = pmdp_get(pmd);
-
- if (!pmd_present(pmde) || !pmd_trans_huge(pmde))
- goto unlock;
-
- /* Tell page walk code to not split the PMD */
- walk->action = ACTION_CONTINUE;
-
- folio = damon_get_folio(pmd_pfn(pmde));
- if (!folio)
- goto unlock;
-
- if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
- goto put_folio;
-
- damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
- migration_lists);
-
-put_folio:
- folio_put(folio);
-unlock:
- spin_unlock(ptl);
- return 0;
-}
-#else
-#define damos_va_migrate_pmd_entry NULL
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ pte_t *start_pte, *pte, ptent;
+ int nr;
-static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
- unsigned long next, struct mm_walk *walk)
-{
- struct damos_va_migrate_private *priv = walk->private;
- struct list_head *migration_lists = priv->migration_lists;
- struct damos *s = priv->scheme;
- struct damos_migrate_dests *dests = &s->migrate_dests;
- struct folio *folio;
- pte_t ptent;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ ptl = pmd_trans_huge_lock(pmd, walk->vma);
+ if (ptl) {
+ pmd_t pmde = pmdp_get(pmd);
- ptent = ptep_get(pte);
- if (pte_none(ptent) || !pte_present(ptent))
+ if (!pmd_present(pmde))
+ goto huge_out;
+ folio = vm_normal_folio_pmd(walk->vma, addr, pmde);
+ if (!folio)
+ goto huge_out;
+ if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
+ goto huge_out;
+ damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+ migration_lists);
+huge_out:
+ spin_unlock(ptl);
return 0;
+ }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
- folio = damon_get_folio(pte_pfn(ptent));
- if (!folio)
+ start_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!pte)
return 0;
- if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
- goto put_folio;
-
- damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
- migration_lists);
+ for (; addr < next; pte += nr, addr += nr * PAGE_SIZE) {
+ nr = 1;
+ ptent = ptep_get(pte);
-put_folio:
- folio_put(folio);
+ if (pte_none(ptent) || !pte_present(ptent))
+ continue;
+ folio = vm_normal_folio(walk->vma, addr, ptent);
+ if (!folio)
+ continue;
+ if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
+ return 0;
+ damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+ migration_lists);
+ nr = folio_nr_pages(folio);
+ }
+ pte_unmap_unlock(start_pte, ptl);
return 0;
}
@@ -850,7 +814,7 @@ static unsigned long damos_va_migrate(struct damon_target *target,
struct damos_migrate_dests *dests = &s->migrate_dests;
struct mm_walk_ops walk_ops = {
.pmd_entry = damos_va_migrate_pmd_entry,
- .pte_entry = damos_va_migrate_pte_entry,
+ .pte_entry = NULL,
.walk_lock = PGWALK_RDLOCK,
};
@@ -910,13 +874,10 @@ static int damos_va_stat_pmd_entry(pmd_t *pmd, unsigned long addr,
int nr;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (pmd_trans_huge(*pmd)) {
- pmd_t pmde;
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
+ pmd_t pmde = pmdp_get(pmd);
- ptl = pmd_trans_huge_lock(pmd, vma);
- if (!ptl)
- return 0;
- pmde = pmdp_get(pmd);
if (!pmd_present(pmde))
goto huge_unlock;