Diffstat (limited to 'fs/resctrl')
-rw-r--r--   fs/resctrl/ctrlmondata.c   309
-rw-r--r--   fs/resctrl/internal.h       17
-rw-r--r--   fs/resctrl/monitor.c        16
-rw-r--r--   fs/resctrl/rdtgroup.c       82
4 files changed, 394 insertions, 30 deletions
diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
index 0d0ef54fc4de..b2d178d3556e 100644
--- a/fs/resctrl/ctrlmondata.c
+++ b/fs/resctrl/ctrlmondata.c
@@ -24,7 +24,8 @@
 #include "internal.h"
 
 struct rdt_parse_data {
-        struct rdtgroup         *rdtgrp;
+        u32                     closid;
+        enum rdtgrp_mode        mode;
         char                    *buf;
 };
 
@@ -77,8 +78,8 @@ static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
                     struct rdt_ctrl_domain *d)
 {
         struct resctrl_staged_config *cfg;
-        u32 closid = data->rdtgrp->closid;
         struct rdt_resource *r = s->res;
+        u32 closid = data->closid;
         u32 bw_val;
 
         cfg = &d->staged_config[s->conf_type];
@@ -156,9 +157,10 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
 static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
                      struct rdt_ctrl_domain *d)
 {
-        struct rdtgroup *rdtgrp = data->rdtgrp;
+        enum rdtgrp_mode mode = data->mode;
         struct resctrl_staged_config *cfg;
         struct rdt_resource *r = s->res;
+        u32 closid = data->closid;
         u32 cbm_val;
 
         cfg = &d->staged_config[s->conf_type];
@@ -171,7 +173,7 @@ static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
          * Cannot set up more than one pseudo-locked region in a cache
          * hierarchy.
          */
-        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
+        if (mode == RDT_MODE_PSEUDO_LOCKSETUP &&
             rdtgroup_pseudo_locked_in_hierarchy(d)) {
                 rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
                 return -EINVAL;
@@ -180,8 +182,7 @@ static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
         if (!cbm_validate(data->buf, &cbm_val, r))
                 return -EINVAL;
 
-        if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
-             rdtgrp->mode == RDT_MODE_SHAREABLE) &&
+        if ((mode == RDT_MODE_EXCLUSIVE || mode == RDT_MODE_SHAREABLE) &&
             rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
                 rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
                 return -EINVAL;
@@ -191,14 +192,14 @@
          * The CBM may not overlap with the CBM of another closid if
          * either is exclusive.
          */
-        if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
+        if (rdtgroup_cbm_overlaps(s, d, cbm_val, closid, true)) {
                 rdt_last_cmd_puts("Overlaps with exclusive group\n");
                 return -EINVAL;
         }
 
-        if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
-                if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
-                    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+        if (rdtgroup_cbm_overlaps(s, d, cbm_val, closid, false)) {
+                if (mode == RDT_MODE_EXCLUSIVE ||
+                    mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                         rdt_last_cmd_puts("Overlaps with other group\n");
                         return -EINVAL;
                 }
@@ -262,7 +263,8 @@ next:
         list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
                 if (d->hdr.id == dom_id) {
                         data.buf = dom;
-                        data.rdtgrp = rdtgrp;
+                        data.closid = rdtgrp->closid;
+                        data.mode = rdtgrp->mode;
                         if (parse_ctrlval(&data, s, d))
                                 return -EINVAL;
                         if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
@@ -381,7 +383,8 @@ out:
         return ret ?: nbytes;
 }
 
-static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
+static void show_doms(struct seq_file *s, struct resctrl_schema *schema,
+                      char *resource_name, int closid)
 {
         struct rdt_resource *r = schema->res;
         struct rdt_ctrl_domain *dom;
@@ -391,7 +394,8 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo
         /* Walking r->domains, ensure it can't race with cpuhp */
         lockdep_assert_cpus_held();
 
-        seq_printf(s, "%*s:", max_name_width, schema->name);
+        if (resource_name)
+                seq_printf(s, "%*s:", max_name_width, resource_name);
         list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
                 if (sep)
                         seq_puts(s, ";");
@@ -437,7 +441,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
                 closid = rdtgrp->closid;
                 list_for_each_entry(schema, &resctrl_schema_all, list) {
                         if (closid < schema->num_closid)
-                                show_doms(s, schema, closid);
+                                show_doms(s, schema, schema->name, closid);
                         }
                 }
         } else {
@@ -676,3 +680,280 @@ out:
         rdtgroup_kn_unlock(of->kn);
         return ret;
 }
+
+int resctrl_io_alloc_show(struct kernfs_open_file *of, struct seq_file *seq, void *v)
+{
+        struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
+        struct rdt_resource *r = s->res;
+
+        mutex_lock(&rdtgroup_mutex);
+
+        if (r->cache.io_alloc_capable) {
+                if (resctrl_arch_get_io_alloc_enabled(r))
+                        seq_puts(seq, "enabled\n");
+                else
+                        seq_puts(seq, "disabled\n");
+        } else {
+                seq_puts(seq, "not supported\n");
+        }
+
+        mutex_unlock(&rdtgroup_mutex);
+
+        return 0;
+}
+
+/*
+ * resctrl_io_alloc_closid_supported() - io_alloc feature utilizes the
+ * highest CLOSID value to direct I/O traffic. Ensure that io_alloc_closid
+ * is in the supported range.
+ */
+static bool resctrl_io_alloc_closid_supported(u32 io_alloc_closid)
+{
+        return io_alloc_closid < closids_supported();
+}
+
+/*
+ * Initialize io_alloc CLOSID cache resource CBM with all usable (shared
+ * and unused) cache portions.
+ */
+static int resctrl_io_alloc_init_cbm(struct resctrl_schema *s, u32 closid)
+{
+        enum resctrl_conf_type peer_type;
+        struct rdt_resource *r = s->res;
+        struct rdt_ctrl_domain *d;
+        int ret;
+
+        rdt_staged_configs_clear();
+
+        ret = rdtgroup_init_cat(s, closid);
+        if (ret < 0)
+                goto out;
+
+        /* Keep CDP_CODE and CDP_DATA of io_alloc CLOSID's CBM in sync. */
+        if (resctrl_arch_get_cdp_enabled(r->rid)) {
+                peer_type = resctrl_peer_type(s->conf_type);
+                list_for_each_entry(d, &s->res->ctrl_domains, hdr.list)
+                        memcpy(&d->staged_config[peer_type],
+                               &d->staged_config[s->conf_type],
+                               sizeof(d->staged_config[0]));
+        }
+
+        ret = resctrl_arch_update_domains(r, closid);
+out:
+        rdt_staged_configs_clear();
+        return ret;
+}
+
+/*
+ * resctrl_io_alloc_closid() - io_alloc feature routes I/O traffic using
+ * the highest available CLOSID. Retrieve the maximum CLOSID supported by the
+ * resource. Note that if Code Data Prioritization (CDP) is enabled, the number
+ * of available CLOSIDs is reduced by half.
+ */
+u32 resctrl_io_alloc_closid(struct rdt_resource *r)
+{
+        if (resctrl_arch_get_cdp_enabled(r->rid))
+                return resctrl_arch_get_num_closid(r) / 2 - 1;
+        else
+                return resctrl_arch_get_num_closid(r) - 1;
+}
+
+ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
+                               size_t nbytes, loff_t off)
+{
+        struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
+        struct rdt_resource *r = s->res;
+        char const *grp_name;
+        u32 io_alloc_closid;
+        bool enable;
+        int ret;
+
+        ret = kstrtobool(buf, &enable);
+        if (ret)
+                return ret;
+
+        cpus_read_lock();
+        mutex_lock(&rdtgroup_mutex);
+
+        rdt_last_cmd_clear();
+
+        if (!r->cache.io_alloc_capable) {
+                rdt_last_cmd_printf("io_alloc is not supported on %s\n", s->name);
+                ret = -ENODEV;
+                goto out_unlock;
+        }
+
+        /* If the feature is already up to date, no action is needed. */
+        if (resctrl_arch_get_io_alloc_enabled(r) == enable)
+                goto out_unlock;
+
+        io_alloc_closid = resctrl_io_alloc_closid(r);
+        if (!resctrl_io_alloc_closid_supported(io_alloc_closid)) {
+                rdt_last_cmd_printf("io_alloc CLOSID (ctrl_hw_id) %u is not available\n",
+                                    io_alloc_closid);
+                ret = -EINVAL;
+                goto out_unlock;
+        }
+
+        if (enable) {
+                if (!closid_alloc_fixed(io_alloc_closid)) {
+                        grp_name = rdtgroup_name_by_closid(io_alloc_closid);
+                        WARN_ON_ONCE(!grp_name);
+                        rdt_last_cmd_printf("CLOSID (ctrl_hw_id) %u for io_alloc is used by %s group\n",
+                                            io_alloc_closid, grp_name ? grp_name : "another");
+                        ret = -ENOSPC;
+                        goto out_unlock;
+                }
+
+                ret = resctrl_io_alloc_init_cbm(s, io_alloc_closid);
+                if (ret) {
+                        rdt_last_cmd_puts("Failed to initialize io_alloc allocations\n");
+                        closid_free(io_alloc_closid);
+                        goto out_unlock;
+                }
+        } else {
+                closid_free(io_alloc_closid);
+        }
+
+        ret = resctrl_arch_io_alloc_enable(r, enable);
+        if (enable && ret) {
+                rdt_last_cmd_puts("Failed to enable io_alloc feature\n");
+                closid_free(io_alloc_closid);
+        }
+
+out_unlock:
+        mutex_unlock(&rdtgroup_mutex);
+        cpus_read_unlock();
+
+        return ret ?: nbytes;
+}
+
+int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of, struct seq_file *seq, void *v)
+{
+        struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
+        struct rdt_resource *r = s->res;
+        int ret = 0;
+
+        cpus_read_lock();
+        mutex_lock(&rdtgroup_mutex);
+
+        rdt_last_cmd_clear();
+
+        if (!r->cache.io_alloc_capable) {
+                rdt_last_cmd_printf("io_alloc is not supported on %s\n", s->name);
+                ret = -ENODEV;
+                goto out_unlock;
+        }
+
+        if (!resctrl_arch_get_io_alloc_enabled(r)) {
+                rdt_last_cmd_printf("io_alloc is not enabled on %s\n", s->name);
+                ret = -EINVAL;
+                goto out_unlock;
+        }
+
+        /*
+         * When CDP is enabled, the CBMs of the highest CLOSID of CDP_CODE and
+         * CDP_DATA are kept in sync. As a result, the io_alloc CBMs shown for
+         * either CDP resource are identical and accurately represent the CBMs
+         * used for I/O.
+         */
+        show_doms(seq, s, NULL, resctrl_io_alloc_closid(r));
+
+out_unlock:
+        mutex_unlock(&rdtgroup_mutex);
+        cpus_read_unlock();
+        return ret;
+}
+
+static int resctrl_io_alloc_parse_line(char *line, struct rdt_resource *r,
+                                       struct resctrl_schema *s, u32 closid)
+{
+        enum resctrl_conf_type peer_type;
+        struct rdt_parse_data data;
+        struct rdt_ctrl_domain *d;
+        char *dom = NULL, *id;
+        unsigned long dom_id;
+
+next:
+        if (!line || line[0] == '\0')
+                return 0;
+
+        dom = strsep(&line, ";");
+        id = strsep(&dom, "=");
+        if (!dom || kstrtoul(id, 10, &dom_id)) {
+                rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
+                return -EINVAL;
+        }
+
+        dom = strim(dom);
+        list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
+                if (d->hdr.id == dom_id) {
+                        data.buf = dom;
+                        data.mode = RDT_MODE_SHAREABLE;
+                        data.closid = closid;
+                        if (parse_cbm(&data, s, d))
+                                return -EINVAL;
+                        /*
+                         * Keep io_alloc CLOSID's CBM of CDP_CODE and CDP_DATA
+                         * in sync.
+                         */
+                        if (resctrl_arch_get_cdp_enabled(r->rid)) {
+                                peer_type = resctrl_peer_type(s->conf_type);
+                                memcpy(&d->staged_config[peer_type],
+                                       &d->staged_config[s->conf_type],
+                                       sizeof(d->staged_config[0]));
+                        }
+                        goto next;
+                }
+        }
+
+        return -EINVAL;
+}
+
+ssize_t resctrl_io_alloc_cbm_write(struct kernfs_open_file *of, char *buf,
+                                   size_t nbytes, loff_t off)
+{
+        struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
+        struct rdt_resource *r = s->res;
+        u32 io_alloc_closid;
+        int ret = 0;
+
+        /* Valid input requires a trailing newline */
+        if (nbytes == 0 || buf[nbytes - 1] != '\n')
+                return -EINVAL;
+
+        buf[nbytes - 1] = '\0';
+
+        cpus_read_lock();
+        mutex_lock(&rdtgroup_mutex);
+        rdt_last_cmd_clear();
+
+        if (!r->cache.io_alloc_capable) {
+                rdt_last_cmd_printf("io_alloc is not supported on %s\n", s->name);
+                ret = -ENODEV;
+                goto out_unlock;
+        }
+
+        if (!resctrl_arch_get_io_alloc_enabled(r)) {
+                rdt_last_cmd_printf("io_alloc is not enabled on %s\n", s->name);
+                ret = -EINVAL;
+                goto out_unlock;
+        }
+
+        io_alloc_closid = resctrl_io_alloc_closid(r);
+
+        rdt_staged_configs_clear();
+        ret = resctrl_io_alloc_parse_line(buf, r, s, io_alloc_closid);
+        if (ret)
+                goto out_clear_configs;
+
+        ret = resctrl_arch_update_domains(r, io_alloc_closid);
+
+out_clear_configs:
+        rdt_staged_configs_clear();
+out_unlock:
+        mutex_unlock(&rdtgroup_mutex);
+        cpus_read_unlock();
+
+        return ret ?: nbytes;
+}
diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
index cf1fd82dc5a9..bff4a54ae333 100644
--- a/fs/resctrl/internal.h
+++ b/fs/resctrl/internal.h
@@ -390,6 +390,8 @@ void rdt_staged_configs_clear(void);
 
 bool closid_allocated(unsigned int closid);
 
+bool closid_alloc_fixed(u32 closid);
+
 int resctrl_find_cleanest_closid(void);
 
 void *rdt_kn_parent_priv(struct kernfs_node *kn);
@@ -426,6 +428,21 @@ int mbm_L3_assignments_show(struct kernfs_open_file *of, struct seq_file *s, voi
 ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf,
                                  size_t nbytes, loff_t off);
 
+int resctrl_io_alloc_show(struct kernfs_open_file *of, struct seq_file *seq, void *v);
+
+int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid);
+
+enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type);
+
+ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
+                               size_t nbytes, loff_t off);
+
+const char *rdtgroup_name_by_closid(u32 closid);
+int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of, struct seq_file *seq,
+                              void *v);
+ssize_t resctrl_io_alloc_cbm_write(struct kernfs_open_file *of, char *buf,
+                                   size_t nbytes, loff_t off);
+u32 resctrl_io_alloc_closid(struct rdt_resource *r);
 
 #ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index 4076336fbba6..572a9925bd6c 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -1782,15 +1782,13 @@ int resctrl_mon_resource_init(void)
                 mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
 
         if (r->mon.mbm_cntr_assignable) {
-                if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
-                        resctrl_enable_mon_event(QOS_L3_MBM_TOTAL_EVENT_ID);
-                if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
-                        resctrl_enable_mon_event(QOS_L3_MBM_LOCAL_EVENT_ID);
-                mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
-                mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
-                                                                   (READS_TO_LOCAL_MEM |
-                                                                    READS_TO_LOCAL_S_MEM |
-                                                                    NON_TEMP_WRITE_TO_LOCAL_MEM);
+                if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
+                        mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
+                if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
+                        mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
+                                                                           (READS_TO_LOCAL_MEM |
+                                                                            READS_TO_LOCAL_S_MEM |
+                                                                            NON_TEMP_WRITE_TO_LOCAL_MEM);
                 r->mon.mbm_assign_on_mkdir = true;
                 resctrl_file_fflags_init("num_mbm_cntrs",
                                          RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index 0320360cd7a6..8e39dfda56bc 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -226,6 +226,11 @@ bool closid_allocated(unsigned int closid)
         return !test_bit(closid, closid_free_map);
 }
 
+bool closid_alloc_fixed(u32 closid)
+{
+        return __test_and_clear_bit(closid, closid_free_map);
+}
+
 /**
  * rdtgroup_mode_by_closid - Return mode of resource group with closid
  * @closid: closid if the resource group
@@ -1057,15 +1062,17 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
         cpus_read_lock();
         mutex_lock(&rdtgroup_mutex);
 
-        hw_shareable = r->cache.shareable_bits;
         list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
                 if (sep)
                         seq_putc(seq, ';');
+                hw_shareable = r->cache.shareable_bits;
                 sw_shareable = 0;
                 exclusive = 0;
                 seq_printf(seq, "%d=", dom->hdr.id);
                 for (i = 0; i < closids_supported(); i++) {
-                        if (!closid_allocated(i))
+                        if (!closid_allocated(i) ||
+                            (resctrl_arch_get_io_alloc_enabled(r) &&
+                             i == resctrl_io_alloc_closid(r)))
                                 continue;
                         ctrl_val = resctrl_arch_get_config(r, dom, i,
                                                            s->conf_type);
@@ -1093,6 +1100,21 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
                                 break;
                         }
                 }
+
+                /*
+                 * When the "io_alloc" feature is enabled, a portion of the cache
+                 * is configured for shared use between hardware and software.
+                 * Also, when CDP is enabled the CBMs of CDP_CODE and CDP_DATA
+                 * resources are kept in sync. So, the CBMs for "io_alloc" can
+                 * be accessed through either resource.
+                 */
+                if (resctrl_arch_get_io_alloc_enabled(r)) {
+                        ctrl_val = resctrl_arch_get_config(r, dom,
+                                                           resctrl_io_alloc_closid(r),
+                                                           s->conf_type);
+                        hw_shareable |= ctrl_val;
+                }
+
                 for (i = r->cache.cbm_len - 1; i >= 0; i--) {
                         pseudo_locked = dom->plr ? dom->plr->cbm : 0;
                         hwb = test_bit(i, &hw_shareable);
@@ -1247,7 +1269,7 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
         return 0;
 }
 
-static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
+enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
 {
         switch (my_type) {
         case CDP_CODE:
@@ -1838,6 +1860,18 @@ void resctrl_bmec_files_show(struct rdt_resource *r, struct kernfs_node *l3_mon_
         kernfs_put(mon_kn);
 }
 
+const char *rdtgroup_name_by_closid(u32 closid)
+{
+        struct rdtgroup *rdtgrp;
+
+        list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+                if (rdtgrp->closid == closid)
+                        return rdt_kn_name(rdtgrp->kn);
+        }
+
+        return NULL;
+}
+
 /* rdtgroup information files for one cache resource. */
 static struct rftype res_common_files[] = {
         {
@@ -1948,6 +1982,20 @@ static struct rftype res_common_files[] = {
                 .seq_show       = rdt_thread_throttle_mode_show,
         },
         {
+                .name           = "io_alloc",
+                .mode           = 0644,
+                .kf_ops         = &rdtgroup_kf_single_ops,
+                .seq_show       = resctrl_io_alloc_show,
+                .write          = resctrl_io_alloc_write,
+        },
+        {
+                .name           = "io_alloc_cbm",
+                .mode           = 0644,
+                .kf_ops         = &rdtgroup_kf_single_ops,
+                .seq_show       = resctrl_io_alloc_cbm_show,
+                .write          = resctrl_io_alloc_cbm_write,
+        },
+        {
                 .name           = "max_threshold_occupancy",
                 .mode           = 0644,
                 .kf_ops         = &rdtgroup_kf_single_ops,
@@ -2138,6 +2186,23 @@ static void thread_throttle_mode_init(void)
                                           RFTYPE_CTRL_INFO | RFTYPE_RES_MB);
 }
 
+/*
+ * The resctrl file "io_alloc" is added using L3 resource. However, it results
+ * in this file being visible for *all* cache resources (eg. L2 cache),
+ * whether it supports "io_alloc" or not.
+ */
+static void io_alloc_init(void)
+{
+        struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+
+        if (r->cache.io_alloc_capable) {
+                resctrl_file_fflags_init("io_alloc", RFTYPE_CTRL_INFO |
+                                                     RFTYPE_RES_CACHE);
+                resctrl_file_fflags_init("io_alloc_cbm",
+                                         RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
+        }
+}
+
 void resctrl_file_fflags_init(const char *config, unsigned long fflags)
 {
         struct rftype *rft;
@@ -3383,11 +3448,12 @@
 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
 {
         unsigned int cbm_len = r->cache.cbm_len;
         unsigned long first_bit, zero_bit;
-        unsigned long val = _val;
 
-        if (!val)
-                return 0;
+        if (!_val || r->cache.arch_has_sparse_bitmasks)
+                return _val;
 
+        val = _val;
         first_bit = find_first_bit(&val, cbm_len);
         zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
@@ -3480,7 +3546,7 @@ static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schem
  * If there are no more shareable bits available on any domain then
  * the entire allocation will fail.
  */
-static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
+int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
 {
         struct rdt_ctrl_domain *d;
         int ret;
@@ -4408,6 +4474,8 @@ int resctrl_init(void)
 
         thread_throttle_mode_init();
 
+        io_alloc_init();
+
         ret = resctrl_mon_resource_init();
         if (ret)
                 return ret;
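For orientation, here is a minimal userspace sketch of the interface the two write handlers above implement. It is illustrative only: it assumes resctrl is mounted at /sys/fs/resctrl and that the L3 resource is io_alloc capable (with CDP enabled the new files would sit under the L3CODE/L3DATA info directories instead), and the "0=3f" domain/mask pair is a made-up example.

/*
 * Illustrative sketch, not part of the patch: drive the new io_alloc
 * files from userspace. Paths and the domain/mask value are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);
        ssize_t n;

        if (fd < 0)
                return -1;
        n = write(fd, val, strlen(val));        /* single write(), as kernfs expects */
        close(fd);
        return n == (ssize_t)strlen(val) ? 0 : -1;
}

int main(void)
{
        char cbm[256];
        ssize_t n;
        int fd;

        /* resctrl_io_alloc_write() parses this with kstrtobool(): "1"/"0" work. */
        if (write_str("/sys/fs/resctrl/info/L3/io_alloc", "1\n"))
                perror("enable io_alloc");

        /* io_alloc_cbm shows one "domain_id=mask" pair per domain, ';'-separated. */
        fd = open("/sys/fs/resctrl/info/L3/io_alloc_cbm", O_RDONLY);
        if (fd >= 0 && (n = read(fd, cbm, sizeof(cbm) - 1)) > 0) {
                cbm[n] = '\0';
                printf("io_alloc_cbm: %s", cbm);
        }
        if (fd >= 0)
                close(fd);

        /* resctrl_io_alloc_cbm_write() insists on a trailing newline. */
        if (write_str("/sys/fs/resctrl/info/L3/io_alloc_cbm", "0=3f\n"))
                perror("write io_alloc_cbm");

        return 0;
}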
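The halving in resctrl_io_alloc_closid() deserves a worked example: CDP splits the hardware CLOSID space between code and data, so each schema sees only half the CLOSIDs, and io_alloc claims the highest one that remains. A stand-alone check of the same arithmetic (illustrative mirror, not kernel code):

#include <assert.h>

static unsigned int io_alloc_closid(unsigned int num_closid, int cdp_enabled)
{
        /* Mirrors resctrl_io_alloc_closid(): highest CLOSID of the visible space. */
        return (cdp_enabled ? num_closid / 2 : num_closid) - 1;
}

int main(void)
{
        /* 16 hardware CLOSIDs: io_alloc uses CLOSID 15 without CDP ... */
        assert(io_alloc_closid(16, 0) == 15);
        /* ... but CLOSID 7 with CDP, since each code/data half sees only 8. */
        assert(io_alloc_closid(16, 1) == 7);
        return 0;
}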
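The cbm_ensure_valid() change in rdtgroup.c now returns masks untouched on hardware with sparse-bitmask support; on other hardware the old behaviour still applies and the mask is collapsed to its lowest contiguous run of set bits. A small stand-alone model of that collapse (hypothetical helper, not the kernel implementation):

#include <assert.h>

static unsigned long first_contiguous_run(unsigned long val, unsigned int cbm_len)
{
        unsigned int first = 0, zero;

        while (first < cbm_len && !(val & (1UL << first)))
                first++;                        /* like find_first_bit() */
        zero = first;
        while (zero < cbm_len && (val & (1UL << zero)))
                zero++;                         /* like find_next_zero_bit() */
        return ((1UL << (zero - first)) - 1) << first;
}

int main(void)
{
        /* 0b1011: non-sparse hardware keeps only the low run, 0b0011. */
        assert(first_contiguous_run(0xb, 4) == 0x3);
        /* With arch_has_sparse_bitmasks set, the new code returns 0xb as-is. */
        return 0;
}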
