Diffstat (limited to 'mm/damon/core.c')
-rw-r--r--	mm/damon/core.c	129
1 file changed, 89 insertions(+), 40 deletions(-)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 109b050c795a..f9fc0375890a 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -10,6 +10,7 @@
#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/psi.h>
#include <linux/slab.h>
@@ -19,11 +20,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>
-#ifdef CONFIG_DAMON_KUNIT_TEST
-#undef DAMON_MIN_REGION
-#define DAMON_MIN_REGION 1
-#endif
-
static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;
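The removed CONFIG_DAMON_KUNIT_TEST override of DAMON_MIN_REGION is obsolete because this patch makes the minimum region size a per-context value (ctx->min_sz_region, used in the filter-match and damon_set_regions() hunks below), so a test can simply set the field. A minimal sketch under that assumption; the test body is illustrative:

static void damon_test_tiny_regions(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
	ctx->min_sz_region = 1;	/* replaces the old compile-time override */
	/* ... exercise region split/merge at byte granularity ... */
	damon_destroy_ctx(ctx);
}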
@@ -305,7 +301,7 @@ void damos_add_filter(struct damos *s, struct damos_filter *f)
if (damos_filter_for_ops(f->type))
list_add_tail(&f->list, &s->ops_filters);
else
- list_add_tail(&f->list, &s->filters);
+ list_add_tail(&f->list, &s->core_filters);
}
static void damos_del_filter(struct damos_filter *f)
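The routing above depends on damos_filter_for_ops(), which this hunk does not touch; upstream it treats address- and target-type filters as core-handled and everything else as operations-set-handled, roughly:

bool damos_filter_for_ops(enum damos_filter_type type)
{
	switch (type) {
	case DAMOS_FILTER_TYPE_ADDR:
	case DAMOS_FILTER_TYPE_TARGET:
		return false;	/* core matches these against region metadata */
	default:
		return true;	/* needs the operations set, page by page */
	}
}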
@@ -396,7 +392,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
*/
scheme->next_apply_sis = 0;
scheme->walk_completed = false;
- INIT_LIST_HEAD(&scheme->filters);
+ INIT_LIST_HEAD(&scheme->core_filters);
INIT_LIST_HEAD(&scheme->ops_filters);
scheme->stat = (struct damos_stat){};
INIT_LIST_HEAD(&scheme->list);
@@ -449,7 +445,7 @@ void damon_destroy_scheme(struct damos *s)
damos_for_each_quota_goal_safe(g, g_next, &s->quota)
damos_destroy_quota_goal(g);
- damos_for_each_filter_safe(f, next, s)
+ damos_for_each_core_filter_safe(f, next, s)
damos_destroy_filter(f);
damos_for_each_ops_filter_safe(f, next, s)
@@ -478,6 +474,7 @@ struct damon_target *damon_new_target(void)
t->nr_regions = 0;
INIT_LIST_HEAD(&t->regions_list);
INIT_LIST_HEAD(&t->list);
+ t->obsolete = false;
return t;
}
@@ -788,6 +785,11 @@ static void damos_commit_quota_goal_union(
case DAMOS_QUOTA_NODE_MEM_FREE_BP:
dst->nid = src->nid;
break;
+ case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
+ case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
+ dst->nid = src->nid;
+ dst->memcg_id = src->memcg_id;
+ break;
default:
break;
}
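The two new memcg metrics carry both a node id and a memcg id, so both fields must be filled before the goal is committed. A minimal setup sketch, assuming the existing damos_new_quota_goal()/damos_add_quota_goal() helpers; the values are illustrative (3000 basis points is 30.00%):

struct damos_quota_goal *goal;

/* aim: keep this memcg's share of node 1 memory at or below 30% */
goal = damos_new_quota_goal(DAMOS_QUOTA_NODE_MEMCG_USED_BP, 3000);
if (!goal)
	return -ENOMEM;
goal->nid = 1;			/* NUMA node to account against */
goal->memcg_id = memcg_id;	/* e.g. from mem_cgroup_id() */
damos_add_quota_goal(&scheme->quota, goal);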
@@ -857,12 +859,12 @@ static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
return 0;
}
-static struct damos_filter *damos_nth_filter(int n, struct damos *s)
+static struct damos_filter *damos_nth_core_filter(int n, struct damos *s)
{
struct damos_filter *filter;
int i = 0;
- damos_for_each_filter(filter, s) {
+ damos_for_each_core_filter(filter, s) {
if (i++ == n)
return filter;
}
@@ -916,15 +918,15 @@ static int damos_commit_core_filters(struct damos *dst, struct damos *src)
struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
int i = 0, j = 0;
- damos_for_each_filter_safe(dst_filter, next, dst) {
- src_filter = damos_nth_filter(i++, src);
+ damos_for_each_core_filter_safe(dst_filter, next, dst) {
+ src_filter = damos_nth_core_filter(i++, src);
if (src_filter)
damos_commit_filter(dst_filter, src_filter);
else
damos_destroy_filter(dst_filter);
}
- damos_for_each_filter_safe(src_filter, next, src) {
+ damos_for_each_core_filter_safe(src_filter, next, src) {
if (j++ < i)
continue;
@@ -988,41 +990,37 @@ static void damos_set_filters_default_reject(struct damos *s)
s->core_filters_default_reject = false;
else
s->core_filters_default_reject =
- damos_filters_default_reject(&s->filters);
+ damos_filters_default_reject(&s->core_filters);
s->ops_filters_default_reject =
damos_filters_default_reject(&s->ops_filters);
}
-static int damos_commit_dests(struct damos *dst, struct damos *src)
+static int damos_commit_dests(struct damos_migrate_dests *dst,
+ struct damos_migrate_dests *src)
{
- struct damos_migrate_dests *dst_dests, *src_dests;
-
- dst_dests = &dst->migrate_dests;
- src_dests = &src->migrate_dests;
-
- if (dst_dests->nr_dests != src_dests->nr_dests) {
- kfree(dst_dests->node_id_arr);
- kfree(dst_dests->weight_arr);
+ if (dst->nr_dests != src->nr_dests) {
+ kfree(dst->node_id_arr);
+ kfree(dst->weight_arr);
- dst_dests->node_id_arr = kmalloc_array(src_dests->nr_dests,
- sizeof(*dst_dests->node_id_arr), GFP_KERNEL);
- if (!dst_dests->node_id_arr) {
- dst_dests->weight_arr = NULL;
+ dst->node_id_arr = kmalloc_array(src->nr_dests,
+ sizeof(*dst->node_id_arr), GFP_KERNEL);
+ if (!dst->node_id_arr) {
+ dst->weight_arr = NULL;
return -ENOMEM;
}
- dst_dests->weight_arr = kmalloc_array(src_dests->nr_dests,
- sizeof(*dst_dests->weight_arr), GFP_KERNEL);
- if (!dst_dests->weight_arr) {
+ dst->weight_arr = kmalloc_array(src->nr_dests,
+ sizeof(*dst->weight_arr), GFP_KERNEL);
+ if (!dst->weight_arr) {
/* ->node_id_arr will be freed by scheme destruction */
return -ENOMEM;
}
}
- dst_dests->nr_dests = src_dests->nr_dests;
- for (int i = 0; i < src_dests->nr_dests; i++) {
- dst_dests->node_id_arr[i] = src_dests->node_id_arr[i];
- dst_dests->weight_arr[i] = src_dests->weight_arr[i];
+ dst->nr_dests = src->nr_dests;
+ for (int i = 0; i < src->nr_dests; i++) {
+ dst->node_id_arr[i] = src->node_id_arr[i];
+ dst->weight_arr[i] = src->weight_arr[i];
}
return 0;
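For reference, damos_filters_default_reject(), called near the top of this hunk, is unchanged by the patch; upstream it derives the default verdict for unmatched regions from the last filter in the list, roughly:

static bool damos_filters_default_reject(struct list_head *filters)
{
	struct damos_filter *last_filter;

	if (list_empty(filters))
		return false;	/* no filters installed: allow everything */
	last_filter = list_last_entry(filters, struct damos_filter, list);
	/* unmatched regions get the opposite of the last filter's verdict */
	return last_filter->allow;
}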
@@ -1069,7 +1067,7 @@ static int damos_commit(struct damos *dst, struct damos *src)
dst->wmarks = src->wmarks;
dst->target_nid = src->target_nid;
- err = damos_commit_dests(dst, src);
+ err = damos_commit_dests(&dst->migrate_dests, &src->migrate_dests);
if (err)
return err;
@@ -1181,7 +1179,11 @@ static int damon_commit_targets(
damon_for_each_target_safe(dst_target, next, dst) {
src_target = damon_nth_target(i++, src);
- if (src_target) {
+ /*
+ * If src target is obsolete, do not commit the parameters to
+ * the dst target, and further remove the dst target.
+ */
+ if (src_target && !src_target->obsolete) {
err = damon_commit_target(
dst_target, damon_target_has_pid(dst),
src_target, damon_target_has_pid(src),
@@ -1204,6 +1206,9 @@ static int damon_commit_targets(
damon_for_each_target_safe(src_target, next, src) {
if (j++ < i)
continue;
+ /* target to remove has no matching dst */
+ if (src_target->obsolete)
+ return -EINVAL;
new_target = damon_new_target();
if (!new_target)
return -ENOMEM;
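With the new flag, removing a target from a running context via the commit interface amounts to marking the matching source target obsolete before committing; note that, per the second half of this hunk, an obsolete source target without a matching destination fails the commit with -EINVAL. A hedged sketch (damon_commit_ctx() and damon_for_each_target() are the upstream helpers, the index lookup is illustrative):

struct damon_target *t;
int i = 0, err;

damon_for_each_target(t, src_ctx) {
	if (i++ == idx_to_remove)
		t->obsolete = true;	/* new flag from this patch */
}
err = damon_commit_ctx(running_ctx, src_ctx);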
@@ -1434,7 +1439,7 @@ bool damon_is_running(struct damon_ctx *ctx)
* Ask DAMON worker thread (kdamond) of @ctx to call a function with an
* argument data that respectively passed via &damon_call_control->fn and
* &damon_call_control->data of @control. If &damon_call_control->repeat of
- * @control is set, further wait until the kdamond finishes handling of the
+ * @control is unset, further wait until the kdamond finishes handling of the
* request. Otherwise, return as soon as the request is made.
*
* The kdamond executes the function with the argument in the main loop, just
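That is, with ->repeat unset damon_call() behaves synchronously. A minimal sketch of a one-shot, blocking call; the callback and its payload are illustrative:

static int read_something(void *data)
{
	int *result = data;

	/* runs in kdamond context, serialized with the main loop */
	*result = 42;	/* placeholder; real users read ctx state here */
	return 0;
}

/* caller side */
int result;
struct damon_call_control control = {
	.fn = read_something,
	.data = &result,
	/* .repeat left false: damon_call() waits for the handling */
};
int err = damon_call(ctx, &control);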
@@ -1757,7 +1762,7 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
struct damos_filter *filter;
s->core_filters_allowed = false;
- damos_for_each_filter(filter, s) {
+ damos_for_each_core_filter(filter, s) {
if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
if (filter->allow)
s->core_filters_allowed = true;
@@ -2035,12 +2040,50 @@ static __kernel_ulong_t damos_get_node_mem_bp(
numerator = i.freeram;
return numerator * 10000 / i.totalram;
}
+
+static unsigned long damos_get_node_memcg_used_bp(
+ struct damos_quota_goal *goal)
+{
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+ unsigned long used_pages, numerator;
+ struct sysinfo i;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_id(goal->memcg_id);
+ rcu_read_unlock();
+ if (!memcg) {
+ if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
+ return 0;
+ else /* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
+ return 10000;
+ }
+ mem_cgroup_flush_stats(memcg);
+ lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
+ used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
+ used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
+ used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
+ used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);
+
+ si_meminfo_node(&i, goal->nid);
+ if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
+ numerator = used_pages;
+ else /* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
+ numerator = i.totalram - used_pages;
+ return numerator * 10000 / i.totalram;
+}
#else
static __kernel_ulong_t damos_get_node_mem_bp(
struct damos_quota_goal *goal)
{
return 0;
}
+
+static unsigned long damos_get_node_memcg_used_bp(
+ struct damos_quota_goal *goal)
+{
+ return 0;
+}
#endif
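The basis-point math in damos_get_node_memcg_used_bp() mirrors the existing node metrics: value = numerator * 10000 / node_totalram, and the early returns for a vanished memcg (0 used, 10000 free) make the goal read as already met. A worked example with made-up numbers:

/* hypothetical node: 1048576 total pages, memcg holds 262144 of them */
unsigned long totalram = 1048576;	/* si_meminfo_node() .totalram */
unsigned long used = 262144;		/* sum of the four LRU counters */

unsigned long used_bp = used * 10000 / totalram;	/* 2500, i.e. 25.00% */
unsigned long free_bp = (totalram - used) * 10000 / totalram;	/* 7500 */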
@@ -2061,6 +2104,10 @@ static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
case DAMOS_QUOTA_NODE_MEM_FREE_BP:
goal->current_value = damos_get_node_mem_bp(goal);
break;
+ case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
+ case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
+ goal->current_value = damos_get_node_memcg_used_bp(goal);
+ break;
default:
break;
}
@@ -2770,6 +2817,7 @@ static bool damon_find_biggest_system_ram(unsigned long *start,
* @t: The monitoring target to set the region.
* @start: The pointer to the start address of the region.
* @end: The pointer to the end address of the region.
+ * @min_sz_region: Minimum region size.
*
* This function sets the region of @t as requested by @start and @end. If the
* values of @start and @end are zero, however, this function finds the biggest
@@ -2780,7 +2828,8 @@ static bool damon_find_biggest_system_ram(unsigned long *start,
* Return: 0 on success, negative error code otherwise.
*/
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
- unsigned long *start, unsigned long *end)
+ unsigned long *start, unsigned long *end,
+ unsigned long min_sz_region)
{
struct damon_addr_range addr_range;
@@ -2793,7 +2842,7 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t,
addr_range.start = *start;
addr_range.end = *end;
- return damon_set_regions(t, &addr_range, 1, DAMON_MIN_REGION);
+ return damon_set_regions(t, &addr_range, 1, min_sz_region);
}
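Call sites gain the new parameter; a sketch of the matching update in a caller such as mm/damon/reclaim.c (variable names illustrative, ctx->min_sz_region follows the field used earlier in this patch):

-	err = damon_set_region_biggest_system_ram_default(target,
-			&monitor_region_start, &monitor_region_end);
+	err = damon_set_region_biggest_system_ram_default(target,
+			&monitor_region_start, &monitor_region_end,
+			ctx->min_sz_region);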
/*