Diffstat (limited to 'mm/damon/core.c')
-rw-r--r--  mm/damon/core.c | 111
1 file changed, 78 insertions(+), 33 deletions(-)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 08065b363972..93848b4c6944 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -201,6 +201,7 @@ static int damon_fill_regions_holes(struct damon_region *first,
* @t: the given target.
* @ranges: array of new monitoring target ranges.
* @nr_ranges: length of @ranges.
+ * @min_sz_region: minimum region size.
*
* This function adds new regions to, or modifies existing regions of, a
* monitoring target to fit in specific ranges.
@@ -208,7 +209,7 @@ static int damon_fill_regions_holes(struct damon_region *first,
* Return: 0 if success, or negative error code otherwise.
*/
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
- unsigned int nr_ranges)
+ unsigned int nr_ranges, unsigned long min_sz_region)
{
struct damon_region *r, *next;
unsigned int i;
@@ -245,16 +246,16 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
/* no region intersects with this range */
newr = damon_new_region(
ALIGN_DOWN(range->start,
- DAMON_MIN_REGION),
- ALIGN(range->end, DAMON_MIN_REGION));
+ min_sz_region),
+ ALIGN(range->end, min_sz_region));
if (!newr)
return -ENOMEM;
damon_insert_region(newr, damon_prev_region(r), r, t);
} else {
/* resize intersecting regions to fit in this range */
first->ar.start = ALIGN_DOWN(range->start,
- DAMON_MIN_REGION);
- last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
+ min_sz_region);
+ last->ar.end = ALIGN(range->end, min_sz_region);
/* fill possible holes in the range */
err = damon_fill_regions_holes(first, last, t);
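The ALIGN_DOWN()/ALIGN() pair above clamps a caller-supplied range outward to the region granularity, so the resulting region always covers the whole requested range. A minimal sketch with hypothetical values (not part of this patch):

	/* Hypothetical: 4 KiB minimum region size, unaligned range. */
	unsigned long min_sz_region = 4096;
	unsigned long start = 0x1234, end = 0x5678;

	/* start rounds down, end rounds up, covering [start, end) */
	unsigned long r_start = ALIGN_DOWN(start, min_sz_region); /* 0x1000 */
	unsigned long r_end = ALIGN(end, min_sz_region);          /* 0x6000 */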
@@ -544,6 +545,9 @@ struct damon_ctx *damon_new_ctx(void)
ctx->attrs.min_nr_regions = 10;
ctx->attrs.max_nr_regions = 1000;
+ ctx->addr_unit = 1;
+ ctx->min_sz_region = DAMON_MIN_REGION;
+
INIT_LIST_HEAD(&ctx->adaptive_targets);
INIT_LIST_HEAD(&ctx->schemes);
@@ -570,6 +574,23 @@ void damon_destroy_ctx(struct damon_ctx *ctx)
kfree(ctx);
}
+static bool damon_attrs_equals(const struct damon_attrs *attrs1,
+ const struct damon_attrs *attrs2)
+{
+ const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
+ const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;
+
+ return attrs1->sample_interval == attrs2->sample_interval &&
+ attrs1->aggr_interval == attrs2->aggr_interval &&
+ attrs1->ops_update_interval == attrs2->ops_update_interval &&
+ attrs1->min_nr_regions == attrs2->min_nr_regions &&
+ attrs1->max_nr_regions == attrs2->max_nr_regions &&
+ ig1->access_bp == ig2->access_bp &&
+ ig1->aggrs == ig2->aggrs &&
+ ig1->min_sample_us == ig2->min_sample_us &&
+ ig1->max_sample_us == ig2->max_sample_us;
+}
+
static unsigned int damon_age_for_new_attrs(unsigned int age,
struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
@@ -1108,8 +1129,8 @@ static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
*
* If @src has no regions, @dst keeps its current regions.
*/
-static int damon_commit_target_regions(
- struct damon_target *dst, struct damon_target *src)
+static int damon_commit_target_regions(struct damon_target *dst,
+ struct damon_target *src, unsigned long src_min_sz_region)
{
struct damon_region *src_region;
struct damon_addr_range *ranges;
@@ -1126,18 +1147,19 @@ static int damon_commit_target_regions(
i = 0;
damon_for_each_region(src_region, src)
ranges[i++] = src_region->ar;
- err = damon_set_regions(dst, ranges, i);
+ err = damon_set_regions(dst, ranges, i, src_min_sz_region);
kfree(ranges);
return err;
}
static int damon_commit_target(
struct damon_target *dst, bool dst_has_pid,
- struct damon_target *src, bool src_has_pid)
+ struct damon_target *src, bool src_has_pid,
+ unsigned long src_min_sz_region)
{
int err;
- err = damon_commit_target_regions(dst, src);
+ err = damon_commit_target_regions(dst, src, src_min_sz_region);
if (err)
return err;
if (dst_has_pid)
@@ -1159,7 +1181,8 @@ static int damon_commit_targets(
if (src_target) {
err = damon_commit_target(
dst_target, damon_target_has_pid(dst),
- src_target, damon_target_has_pid(src));
+ src_target, damon_target_has_pid(src),
+ src->min_sz_region);
if (err)
return err;
} else {
@@ -1182,7 +1205,8 @@ static int damon_commit_targets(
if (!new_target)
return -ENOMEM;
err = damon_commit_target(new_target, false,
- src_target, damon_target_has_pid(src));
+ src_target, damon_target_has_pid(src),
+ src->min_sz_region);
if (err) {
damon_destroy_target(new_target, NULL);
return err;
@@ -1222,10 +1246,14 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
* 2. ops update should be done after pid handling is done (target
* committing requires putting pids).
*/
- err = damon_set_attrs(dst, &src->attrs);
- if (err)
- return err;
+ if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
+ err = damon_set_attrs(dst, &src->attrs);
+ if (err)
+ return err;
+ }
dst->ops = src->ops;
+ dst->addr_unit = src->addr_unit;
+ dst->min_sz_region = src->min_sz_region;
return 0;
}
@@ -1258,8 +1286,8 @@ static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
if (ctx->attrs.min_nr_regions)
sz /= ctx->attrs.min_nr_regions;
- if (sz < DAMON_MIN_REGION)
- sz = DAMON_MIN_REGION;
+ if (sz < ctx->min_sz_region)
+ sz = ctx->min_sz_region;
return sz;
}
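The limit above is the monitored address space size divided by min_nr_regions, clamped from below by the context's minimum region size. A worked example with illustrative numbers (not from this patch):

	/* 1 GiB of monitored space, min_nr_regions = 10: the limit
	 * becomes ~102 MiB, well above the 4 KiB clamp. */
	unsigned long sz = 1UL << 30;
	sz /= 10;			/* 107374182 bytes */
	if (sz < 4096)
		sz = 4096;		/* clamp only for tiny targets */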
@@ -1603,6 +1631,7 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
* @t: The target of the region.
* @rp: The pointer to the region.
* @s: The scheme to be applied.
+ * @min_sz_region: minimum region size.
*
* If a quota of a scheme has been exceeded in a quota charge window, the scheme's
* action would be applied to only a part of the target access pattern fulfilling
@@ -1620,7 +1649,7 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
* Return: true if the region should be entirely skipped, false otherwise.
*/
static bool damos_skip_charged_region(struct damon_target *t,
- struct damon_region **rp, struct damos *s)
+ struct damon_region **rp, struct damos *s, unsigned long min_sz_region)
{
struct damon_region *r = *rp;
struct damos_quota *quota = &s->quota;
@@ -1642,11 +1671,11 @@ static bool damos_skip_charged_region(struct damon_target *t,
if (quota->charge_addr_from && r->ar.start <
quota->charge_addr_from) {
sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
- r->ar.start, DAMON_MIN_REGION);
+ r->ar.start, min_sz_region);
if (!sz_to_skip) {
- if (damon_sz_region(r) <= DAMON_MIN_REGION)
+ if (damon_sz_region(r) <= min_sz_region)
return true;
- sz_to_skip = DAMON_MIN_REGION;
+ sz_to_skip = min_sz_region;
}
damon_split_region_at(t, r, sz_to_skip);
r = damon_next_region(r);
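To illustrate the skip computation above: the already-charged prefix of the region is rounded down to the region granularity before being split off, and a zero result falls back to skipping one min_sz_region (or the whole region, if it is that small). Hypothetical numbers, not from this patch:

	/* Region starts at 0x1000, charging already proceeded to
	 * 0x3100, min_sz_region is 0x1000. */
	unsigned long sz_to_skip = ALIGN_DOWN(0x3100 - 0x1000,
					      0x1000);	/* 0x2000 */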
@@ -1671,7 +1700,8 @@ static void damos_update_stat(struct damos *s,
}
static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
- struct damon_region *r, struct damos_filter *filter)
+ struct damon_region *r, struct damos_filter *filter,
+ unsigned long min_sz_region)
{
bool matched = false;
struct damon_target *ti;
@@ -1688,8 +1718,8 @@ static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
matched = target_idx == filter->target_idx;
break;
case DAMOS_FILTER_TYPE_ADDR:
- start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
- end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
+ start = ALIGN_DOWN(filter->addr_range.start, min_sz_region);
+ end = ALIGN_DOWN(filter->addr_range.end, min_sz_region);
/* inside the range */
if (start <= r->ar.start && r->ar.end <= end) {
@@ -1725,7 +1755,7 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
s->core_filters_allowed = false;
damos_for_each_filter(filter, s) {
- if (damos_filter_match(ctx, t, r, filter)) {
+ if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
if (filter->allow)
s->core_filters_allowed = true;
return !filter->allow;
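For the DAMOS_FILTER_TYPE_ADDR case above, both filter bounds are aligned down to min_sz_region before the containment tests. A sketch with hypothetical numbers (not from this patch):

	/* min_sz_region = 0x1000; unaligned filter bounds. */
	unsigned long start = ALIGN_DOWN(0x2345, 0x1000);	/* 0x2000 */
	unsigned long end = ALIGN_DOWN(0x5fff, 0x1000);		/* 0x5000 */
	/* a region [0x2000, 0x5000) now tests as "inside the range" */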
@@ -1860,7 +1890,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
if (c->ops.apply_scheme) {
if (quota->esz && quota->charged_sz + sz > quota->esz) {
sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
- DAMON_MIN_REGION);
+ c->min_sz_region);
if (!sz)
goto update_stat;
damon_split_region_at(t, r, sz);
@@ -1908,7 +1938,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
if (quota->esz && quota->charged_sz >= quota->esz)
continue;
- if (damos_skip_charged_region(t, &r, s))
+ if (damos_skip_charged_region(t, &r, s, c->min_sz_region))
continue;
if (!damos_valid_target(c, t, r, s))
@@ -2112,8 +2142,10 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
return;
/* First charge window */
- if (!quota->total_charged_sz && !quota->charged_from)
+ if (!quota->total_charged_sz && !quota->charged_from) {
quota->charged_from = jiffies;
+ damos_set_effective_quota(quota);
+ }
/* New charge window starts */
if (time_after_eq(jiffies, quota->charged_from +
@@ -2231,6 +2263,8 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
damon_for_each_region_safe(r, next, t) {
if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
r->age = 0;
+ else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
+ r->age = 0;
else
r->age++;
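The new else-if above resets a region's age when its access state flips between idle and accessed, even if the raw counter delta is within the merge threshold. An illustrative case with hypothetical numbers:

	unsigned int thres = 5, nr_accesses = 0, last_nr_accesses = 3;
	/* abs(0 - 3) <= thres, but the region just turned idle, so
	 * its age is reset rather than incremented. */
	bool reset = (nr_accesses == 0) != (last_nr_accesses == 0);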
@@ -2306,7 +2340,8 @@ static void damon_split_region_at(struct damon_target *t,
}
/* Split every region in the given target into 'nr_subs' regions */
-static void damon_split_regions_of(struct damon_target *t, int nr_subs)
+static void damon_split_regions_of(struct damon_target *t, int nr_subs,
+ unsigned long min_sz_region)
{
struct damon_region *r, *next;
unsigned long sz_region, sz_sub = 0;
@@ -2316,13 +2351,13 @@ static void damon_split_regions_of(struct damon_target *t, int nr_subs)
sz_region = damon_sz_region(r);
for (i = 0; i < nr_subs - 1 &&
- sz_region > 2 * DAMON_MIN_REGION; i++) {
+ sz_region > 2 * min_sz_region; i++) {
/*
* Randomly select the size of the left sub-region to be at
* least 10% and at most 90% of the original region
*/
sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
- sz_region / 10, DAMON_MIN_REGION);
+ sz_region / 10, min_sz_region);
/* Do not allow blank region */
if (sz_sub == 0 || sz_sub >= sz_region)
continue;
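The split point above is a random 10%..90% fraction of the region, rounded down to the region granularity. With hypothetical numbers (not from this patch):

	/* sz_region = 1 MiB, damon_rand(1, 10) returned 7,
	 * min_sz_region = 4096. */
	unsigned long sz_sub = ALIGN_DOWN(7 * (1UL << 20) / 10,
					  4096);	/* 733184 bytes */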
@@ -2362,7 +2397,7 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
nr_subregions = 3;
damon_for_each_target(t, ctx)
- damon_split_regions_of(t, nr_subregions);
+ damon_split_regions_of(t, nr_subregions, ctx->min_sz_region);
last_nr_regions = nr_regions;
}
@@ -2755,7 +2790,7 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t,
addr_range.start = *start;
addr_range.end = *end;
- return damon_set_regions(t, &addr_range, 1);
+ return damon_set_regions(t, &addr_range, 1, DAMON_MIN_REGION);
}
/*
@@ -2828,6 +2863,16 @@ void damon_update_region_access_rate(struct damon_region *r, bool accessed,
r->nr_accesses++;
}
+/**
+ * damon_initialized() - Return whether DAMON is ready to be used.
+ *
+ * Return: true if DAMON is ready to be used, false otherwise.
+ */
+bool damon_initialized(void)
+{
+ return damon_region_cache != NULL;
+}
+
static int __init damon_init(void)
{
damon_region_cache = KMEM_CACHE(damon_region, 0);
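A sketch of how a DAMON module's init function might use the new helper; the caller below is hypothetical and not part of this patch:

	/* Hypothetical caller: refuse to load if DAMON core has not
	 * finished initializing (the region cache doubles as the
	 * readiness flag). */
	static int __init example_damon_mod_init(void)
	{
		if (!damon_initialized())
			return -ENOMEM;
		/* ... module-specific setup ... */
		return 0;
	}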