Diffstat (limited to 'fs/resctrl/monitor.c')
 fs/resctrl/monitor.c | 1014 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 948 insertions(+), 66 deletions(-)
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index f5637855c3ac..4076336fbba6 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -336,7 +336,7 @@ void free_rmid(u32 closid, u32 rmid)
entry = __rmid_entry(idx);
- if (resctrl_arch_is_llc_occupancy_enabled())
+ if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID))
add_rmid_to_limbo(entry);
else
list_add_tail(&entry->list, &rmid_free_lru);
@@ -346,28 +346,97 @@ static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid,
u32 rmid, enum resctrl_event_id evtid)
{
u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
+ struct mbm_state *state;
- switch (evtid) {
- case QOS_L3_MBM_TOTAL_EVENT_ID:
- return &d->mbm_total[idx];
- case QOS_L3_MBM_LOCAL_EVENT_ID:
- return &d->mbm_local[idx];
- default:
+ if (!resctrl_is_mbm_event(evtid))
return NULL;
+
+ state = d->mbm_states[MBM_STATE_IDX(evtid)];
+
+ return state ? &state[idx] : NULL;
+}
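The MBM_STATE_IDX() macro is introduced elsewhere in this series; a plausible definition consistent with its use here, assuming the total and local MBM event IDs are adjacent in enum resctrl_event_id:

	/* Hypothetical sketch: map an MBM event ID to a slot in d->mbm_states[]. */
	#define MBM_STATE_IDX(evt)	((evt) - QOS_L3_MBM_TOTAL_EVENT_ID)

With that layout, mbm_states[0] holds the per-RMID state for the total event and mbm_states[1] the state for the local event.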
+
+/*
+ * mbm_cntr_get() - Return the counter ID for the matching @evtid and @rdtgrp.
+ *
+ * Return:
+ * Valid counter ID on success, or -ENOENT on failure.
+ */
+static int mbm_cntr_get(struct rdt_resource *r, struct rdt_mon_domain *d,
+ struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
+{
+ int cntr_id;
+
+ if (!r->mon.mbm_cntr_assignable)
+ return -ENOENT;
+
+ if (!resctrl_is_mbm_event(evtid))
+ return -ENOENT;
+
+ for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
+ if (d->cntr_cfg[cntr_id].rdtgrp == rdtgrp &&
+ d->cntr_cfg[cntr_id].evtid == evtid)
+ return cntr_id;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * mbm_cntr_alloc() - Initialize and return a new counter ID in the domain @d.
+ * Caller must ensure that the specified event is not assigned already.
+ *
+ * Return:
+ * Valid counter ID on success, or -ENOSPC on failure.
+ */
+static int mbm_cntr_alloc(struct rdt_resource *r, struct rdt_mon_domain *d,
+ struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
+{
+ int cntr_id;
+
+ for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
+ if (!d->cntr_cfg[cntr_id].rdtgrp) {
+ d->cntr_cfg[cntr_id].rdtgrp = rdtgrp;
+ d->cntr_cfg[cntr_id].evtid = evtid;
+ return cntr_id;
+ }
}
+
+ return -ENOSPC;
}
-static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
+/*
+ * mbm_cntr_free() - Clear the counter ID configuration details in the domain @d.
+ */
+static void mbm_cntr_free(struct rdt_mon_domain *d, int cntr_id)
+{
+ memset(&d->cntr_cfg[cntr_id], 0, sizeof(*d->cntr_cfg));
+}
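These helpers manage d->cntr_cfg[], a per-domain table indexed by counter ID. Its entry type is defined elsewhere in the series; a minimal sketch inferred from the usage above (not necessarily the exact upstream definition):

	struct mbm_cntr_cfg {
		enum resctrl_event_id	evtid;	/* MBM event tracked by this counter */
		struct rdtgroup		*rdtgrp;	/* owning group, NULL if the counter is free */
	};

A NULL ->rdtgrp marks a free slot, which is why mbm_cntr_alloc() takes the first entry without an owner and mbm_cntr_free() simply zeroes the entry.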
+
+static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
{
int cpu = smp_processor_id();
+ u32 closid = rdtgrp->closid;
+ u32 rmid = rdtgrp->mon.rmid;
struct rdt_mon_domain *d;
- struct cacheinfo *ci;
+ int cntr_id = -ENOENT;
struct mbm_state *m;
int err, ret;
u64 tval = 0;
+ if (rr->is_mbm_cntr) {
+ cntr_id = mbm_cntr_get(rr->r, rr->d, rdtgrp, rr->evtid);
+ if (cntr_id < 0) {
+ rr->err = -ENOENT;
+ return -EINVAL;
+ }
+ }
+
if (rr->first) {
- resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
+ if (rr->is_mbm_cntr)
+ resctrl_arch_reset_cntr(rr->r, rr->d, closid, rmid, cntr_id, rr->evtid);
+ else
+ resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
if (m)
memset(m, 0, sizeof(struct mbm_state));
@@ -378,8 +447,12 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
/* Reading a single domain, must be on a CPU in that domain. */
if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))
return -EINVAL;
- rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
- rr->evtid, &tval, rr->arch_mon_ctx);
+ if (rr->is_mbm_cntr)
+ rr->err = resctrl_arch_cntr_read(rr->r, rr->d, closid, rmid, cntr_id,
+ rr->evtid, &tval);
+ else
+ rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
+ rr->evtid, &tval, rr->arch_mon_ctx);
if (rr->err)
return rr->err;
@@ -389,8 +462,7 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
}
/* Summing domains that share a cache, must be on a CPU for that cache. */
- ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
- if (!ci || ci->id != rr->ci_id)
+ if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
return -EINVAL;
/*
@@ -402,10 +474,14 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
*/
ret = -EINVAL;
list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
- if (d->ci_id != rr->ci_id)
+ if (d->ci_id != rr->ci->id)
continue;
- err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
- rr->evtid, &tval, rr->arch_mon_ctx);
+ if (rr->is_mbm_cntr)
+ err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
+ rr->evtid, &tval);
+ else
+ err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
+ rr->evtid, &tval, rr->arch_mon_ctx);
if (!err) {
rr->val += tval;
ret = 0;
@@ -421,8 +497,8 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
/*
* mbm_bw_count() - Update bw count from values previously read by
* __mon_event_count().
- * @closid: The closid used to identify the cached mbm_state.
- * @rmid: The rmid used to identify the cached mbm_state.
+ * @rdtgrp: resctrl group associated with the CLOSID and RMID to identify
+ * the cached mbm_state.
* @rr: The struct rmid_read populated by __mon_event_count().
*
* Supporting function to calculate the memory bandwidth
@@ -430,9 +506,11 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
* __mon_event_count() is compared with the chunks value from the previous
* invocation. This must be called once per second to maintain values in MBps.
*/
-static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr)
+static void mbm_bw_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
{
u64 cur_bw, bytes, cur_bytes;
+ u32 closid = rdtgrp->closid;
+ u32 rmid = rdtgrp->mon.rmid;
struct mbm_state *m;
m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
@@ -461,7 +539,7 @@ void mon_event_count(void *info)
rdtgrp = rr->rgrp;
- ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr);
+ ret = __mon_event_count(rdtgrp, rr);
/*
* For Ctrl groups read data from child monitor groups and
@@ -472,8 +550,7 @@ void mon_event_count(void *info)
if (rdtgrp->type == RDTCTRL_GROUP) {
list_for_each_entry(entry, head, mon.crdtgrp_list) {
- if (__mon_event_count(entry->closid, entry->mon.rmid,
- rr) == 0)
+ if (__mon_event_count(entry, rr) == 0)
ret = 0;
}
}
@@ -604,44 +681,49 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
}
static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *d,
- u32 closid, u32 rmid, enum resctrl_event_id evtid)
+ struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
{
struct rmid_read rr = {0};
rr.r = r;
rr.d = d;
rr.evtid = evtid;
- rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
- if (IS_ERR(rr.arch_mon_ctx)) {
- pr_warn_ratelimited("Failed to allocate monitor context: %ld",
- PTR_ERR(rr.arch_mon_ctx));
- return;
+ if (resctrl_arch_mbm_cntr_assign_enabled(r)) {
+ rr.is_mbm_cntr = true;
+ } else {
+ rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
+ if (IS_ERR(rr.arch_mon_ctx)) {
+ pr_warn_ratelimited("Failed to allocate monitor context: %ld",
+ PTR_ERR(rr.arch_mon_ctx));
+ return;
+ }
}
- __mon_event_count(closid, rmid, &rr);
+ __mon_event_count(rdtgrp, &rr);
/*
* If the software controller is enabled, compute the
* bandwidth for this event id.
*/
if (is_mba_sc(NULL))
- mbm_bw_count(closid, rmid, &rr);
+ mbm_bw_count(rdtgrp, &rr);
- resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
+ if (rr.arch_mon_ctx)
+ resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
}
static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
- u32 closid, u32 rmid)
+ struct rdtgroup *rdtgrp)
{
/*
* This is protected from concurrent reads from user as both
* the user and overflow handler hold the global mutex.
*/
- if (resctrl_arch_is_mbm_total_enabled())
- mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_TOTAL_EVENT_ID);
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
+ mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID);
- if (resctrl_arch_is_mbm_local_enabled())
- mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_LOCAL_EVENT_ID);
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
+ mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID);
}
/*
@@ -714,11 +796,11 @@ void mbm_handle_overflow(struct work_struct *work)
d = container_of(work, struct rdt_mon_domain, mbm_over.work);
list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
- mbm_update(r, d, prgrp->closid, prgrp->mon.rmid);
+ mbm_update(r, d, prgrp);
head = &prgrp->mon.crdtgrp_list;
list_for_each_entry(crgrp, head, mon.crdtgrp_list)
- mbm_update(r, d, crgrp->closid, crgrp->mon.rmid);
+ mbm_update(r, d, crgrp);
if (is_mba_sc(NULL))
update_mba_bw(prgrp, d);
@@ -844,38 +926,819 @@ out_unlock:
mutex_unlock(&rdtgroup_mutex);
}
-static struct mon_evt llc_occupancy_event = {
- .name = "llc_occupancy",
- .evtid = QOS_L3_OCCUP_EVENT_ID,
+/*
+ * All available events. Architecture code marks the ones that
+ * are supported by a system using resctrl_enable_mon_event()
+ * to set .enabled.
+ */
+struct mon_evt mon_event_all[QOS_NUM_EVENTS] = {
+ [QOS_L3_OCCUP_EVENT_ID] = {
+ .name = "llc_occupancy",
+ .evtid = QOS_L3_OCCUP_EVENT_ID,
+ .rid = RDT_RESOURCE_L3,
+ },
+ [QOS_L3_MBM_TOTAL_EVENT_ID] = {
+ .name = "mbm_total_bytes",
+ .evtid = QOS_L3_MBM_TOTAL_EVENT_ID,
+ .rid = RDT_RESOURCE_L3,
+ },
+ [QOS_L3_MBM_LOCAL_EVENT_ID] = {
+ .name = "mbm_local_bytes",
+ .evtid = QOS_L3_MBM_LOCAL_EVENT_ID,
+ .rid = RDT_RESOURCE_L3,
+ },
};
-static struct mon_evt mbm_total_event = {
- .name = "mbm_total_bytes",
- .evtid = QOS_L3_MBM_TOTAL_EVENT_ID,
+void resctrl_enable_mon_event(enum resctrl_event_id eventid)
+{
+ if (WARN_ON_ONCE(eventid < QOS_FIRST_EVENT || eventid >= QOS_NUM_EVENTS))
+ return;
+ if (mon_event_all[eventid].enabled) {
+ pr_warn("Duplicate enable for event %d\n", eventid);
+ return;
+ }
+
+ mon_event_all[eventid].enabled = true;
+}
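Architecture code is expected to call resctrl_enable_mon_event() once per supported event during monitoring setup; a hedged example of such a call site (the surrounding probe logic is hypothetical and varies by architecture):

	/* e.g. in arch monitoring init, after detecting LLC occupancy support */
	if (llc_occupancy_supported)	/* hypothetical feature check */
		resctrl_enable_mon_event(QOS_L3_OCCUP_EVENT_ID);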
+
+bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid)
+{
+ return eventid >= QOS_FIRST_EVENT && eventid < QOS_NUM_EVENTS &&
+ mon_event_all[eventid].enabled;
+}
+
+u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id evtid)
+{
+ return mon_event_all[evtid].evt_cfg;
+}
+
+/**
+ * struct mbm_transaction - Memory transaction an MBM event can be configured with.
+ * @name: Name of memory transaction (read, write ...).
+ * @val:	The bit (e.g. READS_TO_LOCAL_MEM or READS_TO_REMOTE_MEM) used to
+ * represent the memory transaction within an event's configuration.
+ */
+struct mbm_transaction {
+ char name[32];
+ u32 val;
};
-static struct mon_evt mbm_local_event = {
- .name = "mbm_local_bytes",
- .evtid = QOS_L3_MBM_LOCAL_EVENT_ID,
+/* Decoded values for each type of memory transaction. */
+static struct mbm_transaction mbm_transactions[NUM_MBM_TRANSACTIONS] = {
+ {"local_reads", READS_TO_LOCAL_MEM},
+ {"remote_reads", READS_TO_REMOTE_MEM},
+ {"local_non_temporal_writes", NON_TEMP_WRITE_TO_LOCAL_MEM},
+ {"remote_non_temporal_writes", NON_TEMP_WRITE_TO_REMOTE_MEM},
+ {"local_reads_slow_memory", READS_TO_LOCAL_S_MEM},
+ {"remote_reads_slow_memory", READS_TO_REMOTE_S_MEM},
+ {"dirty_victim_writes_all", DIRTY_VICTIMS_TO_ALL_MEM},
};
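An event's evt_cfg is simply the OR of the selected transaction bits; for example, a filter of "local_reads,remote_reads" decodes to:

	evt_cfg = READS_TO_LOCAL_MEM | READS_TO_REMOTE_MEM;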
+int event_filter_show(struct kernfs_open_file *of, struct seq_file *seq, void *v)
+{
+ struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
+ struct rdt_resource *r;
+ bool sep = false;
+ int ret = 0, i;
+
+ mutex_lock(&rdtgroup_mutex);
+ rdt_last_cmd_clear();
+
+ r = resctrl_arch_get_resource(mevt->rid);
+ if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
+ rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
+ if (mevt->evt_cfg & mbm_transactions[i].val) {
+ if (sep)
+ seq_putc(seq, ',');
+ seq_printf(seq, "%s", mbm_transactions[i].name);
+ sep = true;
+ }
+ }
+ seq_putc(seq, '\n');
+
+out_unlock:
+ mutex_unlock(&rdtgroup_mutex);
+
+ return ret;
+}
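With counter assignment enabled, reading an event's filter file prints the configured transactions as a comma-separated list in mbm_transactions[] order; e.g. for the default local-event filter set up below:

	local_reads,local_non_temporal_writes,local_reads_slow_memory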
+
+int resctrl_mbm_assign_on_mkdir_show(struct kernfs_open_file *of, struct seq_file *s,
+ void *v)
+{
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
+ int ret = 0;
+
+ mutex_lock(&rdtgroup_mutex);
+ rdt_last_cmd_clear();
+
+ if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
+ rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ seq_printf(s, "%u\n", r->mon.mbm_assign_on_mkdir);
+
+out_unlock:
+ mutex_unlock(&rdtgroup_mutex);
+
+ return ret;
+}
+
+ssize_t resctrl_mbm_assign_on_mkdir_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
+ bool value;
+ int ret;
+
+ ret = kstrtobool(buf, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rdtgroup_mutex);
+ rdt_last_cmd_clear();
+
+ if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
+ rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ r->mon.mbm_assign_on_mkdir = value;
+
+out_unlock:
+ mutex_unlock(&rdtgroup_mutex);
+
+ return ret ?: nbytes;
+}
+
+/*
+ * mbm_cntr_free_all() - Clear all the counter ID configuration details in the
+ * domain @d. Called when mbm_assign_mode is changed.
+ */
+static void mbm_cntr_free_all(struct rdt_resource *r, struct rdt_mon_domain *d)
+{
+ memset(d->cntr_cfg, 0, sizeof(*d->cntr_cfg) * r->mon.num_mbm_cntrs);
+}
+
+/*
+ * resctrl_reset_rmid_all() - Reset all non-architecture states for all the
+ * supported RMIDs.
+ */
+static void resctrl_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d)
+{
+ u32 idx_limit = resctrl_arch_system_num_rmid_idx();
+ enum resctrl_event_id evt;
+ int idx;
+
+ for_each_mbm_event_id(evt) {
+ if (!resctrl_is_mon_event_enabled(evt))
+ continue;
+ idx = MBM_STATE_IDX(evt);
+ memset(d->mbm_states[idx], 0, sizeof(*d->mbm_states[0]) * idx_limit);
+ }
+}
+
+/*
+ * rdtgroup_assign_cntr() - Assign/unassign the counter ID for the event, RMID
+ * pair in the domain.
+ *
+ * Assign the counter if @assign is true else unassign the counter. Reset the
+ * associated non-architectural state.
+ */
+static void rdtgroup_assign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ enum resctrl_event_id evtid, u32 rmid, u32 closid,
+ u32 cntr_id, bool assign)
+{
+ struct mbm_state *m;
+
+ resctrl_arch_config_cntr(r, d, evtid, rmid, closid, cntr_id, assign);
+
+ m = get_mbm_state(d, closid, rmid, evtid);
+ if (m)
+ memset(m, 0, sizeof(*m));
+}
+
+/*
+ * rdtgroup_alloc_assign_cntr() - Allocate a counter ID and assign it to the event
+ * pointed to by @mevt and the resctrl group @rdtgrp within the domain @d.
+ *
+ * Return:
+ * 0 on success, < 0 on failure.
+ */
+static int rdtgroup_alloc_assign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ struct rdtgroup *rdtgrp, struct mon_evt *mevt)
+{
+ int cntr_id;
+
+ /* No action required if the counter is assigned already. */
+ cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
+ if (cntr_id >= 0)
+ return 0;
+
+ cntr_id = mbm_cntr_alloc(r, d, rdtgrp, mevt->evtid);
+ if (cntr_id < 0) {
+ rdt_last_cmd_printf("Failed to allocate counter for %s in domain %d\n",
+ mevt->name, d->hdr.id);
+ return cntr_id;
+ }
+
+ rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, true);
+
+ return 0;
+}
+
/*
- * Initialize the event list for the resource.
+ * rdtgroup_assign_cntr_event() - Assign a hardware counter for the event in
+ * @mevt to the resctrl group @rdtgrp. Assign counters to all domains if @d is
+ * NULL; otherwise, assign the counter to the specified domain @d.
+ *
+ * If all counters in a domain are already in use, rdtgroup_alloc_assign_cntr()
+ * will fail. The assignment process will abort at the first failure encountered
+ * during domain traversal, which may result in the event being only partially
+ * assigned.
*
- * Note that MBM events are also part of RDT_RESOURCE_L3 resource
- * because as per the SDM the total and local memory bandwidth
- * are enumerated as part of L3 monitoring.
+ * Return:
+ * 0 on success, < 0 on failure.
+ */
+static int rdtgroup_assign_cntr_event(struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
+ struct mon_evt *mevt)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
+ int ret = 0;
+
+ if (!d) {
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
+ if (ret)
+ return ret;
+ }
+ } else {
+ ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
+ }
+
+ return ret;
+}
+
+/*
+ * rdtgroup_assign_cntrs() - Assign counters to MBM events. Called when
+ * a new group is created.
+ *
+ * Each group can accommodate two counters per domain: one for the total
+ * event and one for the local event. Assignments may fail due to the limited
+ * number of counters. However, it is not necessary to fail the group creation
+ * and thus no failure is returned. Users have the option to modify the
+ * counter assignments after the group has been created.
+ */
+void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+
+ if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r) ||
+ !r->mon.mbm_assign_on_mkdir)
+ return;
+
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
+ rdtgroup_assign_cntr_event(NULL, rdtgrp,
+ &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
+
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
+ rdtgroup_assign_cntr_event(NULL, rdtgrp,
+ &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
+}
+
+/*
+ * rdtgroup_free_unassign_cntr() - Unassign and reset the counter ID configuration
+ * for the event pointed to by @mevt within the domain @d and resctrl group @rdtgrp.
+ */
+static void rdtgroup_free_unassign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ struct rdtgroup *rdtgrp, struct mon_evt *mevt)
+{
+ int cntr_id;
+
+ cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
+
+ /* If there is no cntr_id assigned, nothing to do */
+ if (cntr_id < 0)
+ return;
+
+ rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, false);
+
+ mbm_cntr_free(d, cntr_id);
+}
+
+/*
+ * rdtgroup_unassign_cntr_event() - Unassign a hardware counter associated with
+ * the event structure @mevt from the domain @d and the group @rdtgrp. Unassign
+ * the counters from all the domains if @d is NULL else unassign from @d.
+ */
+static void rdtgroup_unassign_cntr_event(struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
+ struct mon_evt *mevt)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
+
+ if (!d) {
+ list_for_each_entry(d, &r->mon_domains, hdr.list)
+ rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
+ } else {
+ rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
+ }
+}
+
+/*
+ * rdtgroup_unassign_cntrs() - Unassign the counters associated with MBM events.
+ * Called when a group is deleted.
*/
-static void l3_mon_evt_init(struct rdt_resource *r)
+void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
{
- INIT_LIST_HEAD(&r->evt_list);
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
- if (resctrl_arch_is_llc_occupancy_enabled())
- list_add_tail(&llc_occupancy_event.list, &r->evt_list);
- if (resctrl_arch_is_mbm_total_enabled())
- list_add_tail(&mbm_total_event.list, &r->evt_list);
- if (resctrl_arch_is_mbm_local_enabled())
- list_add_tail(&mbm_local_event.list, &r->evt_list);
+ if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r))
+ return;
+
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
+ rdtgroup_unassign_cntr_event(NULL, rdtgrp,
+ &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
+
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
+ rdtgroup_unassign_cntr_event(NULL, rdtgrp,
+ &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
+}
+
+static int resctrl_parse_mem_transactions(char *tok, u32 *val)
+{
+ u32 temp_val = 0;
+ char *evt_str;
+ bool found;
+ int i;
+
+next_config:
+ if (!tok || tok[0] == '\0') {
+ *val = temp_val;
+ return 0;
+ }
+
+ /* Start processing the strings for each memory transaction type */
+ evt_str = strim(strsep(&tok, ","));
+ found = false;
+ for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
+ if (!strcmp(mbm_transactions[i].name, evt_str)) {
+ temp_val |= mbm_transactions[i].val;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ rdt_last_cmd_printf("Invalid memory transaction type %s\n", evt_str);
+ return -EINVAL;
+ }
+
+ goto next_config;
+}
+
+/*
+ * rdtgroup_update_cntr_event - Update the counter assignments for the event
+ * in a group.
+ * @r: Resource to which update needs to be done.
+ * @rdtgrp: Resctrl group.
+ * @evtid: MBM monitor event.
+ */
+static void rdtgroup_update_cntr_event(struct rdt_resource *r, struct rdtgroup *rdtgrp,
+ enum resctrl_event_id evtid)
+{
+ struct rdt_mon_domain *d;
+ int cntr_id;
+
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ cntr_id = mbm_cntr_get(r, d, rdtgrp, evtid);
+ if (cntr_id >= 0)
+ rdtgroup_assign_cntr(r, d, evtid, rdtgrp->mon.rmid,
+ rdtgrp->closid, cntr_id, true);
+ }
+}
+
+/*
+ * resctrl_update_cntr_allrdtgrp - Update the counter assignments for the event
+ * for all the groups.
+ * @mevt:	MBM monitor event.
+ */
+static void resctrl_update_cntr_allrdtgrp(struct mon_evt *mevt)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
+ struct rdtgroup *prgrp, *crgrp;
+
+ /*
+ * Find all the groups where the event is assigned and update the
+ * configuration of existing assignments.
+ */
+ list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
+ rdtgroup_update_cntr_event(r, prgrp, mevt->evtid);
+
+ list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
+ rdtgroup_update_cntr_event(r, crgrp, mevt->evtid);
+ }
+}
+
+ssize_t event_filter_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
+ loff_t off)
+{
+ struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
+ struct rdt_resource *r;
+ u32 evt_cfg = 0;
+ int ret = 0;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+
+ buf[nbytes - 1] = '\0';
+
+ cpus_read_lock();
+ mutex_lock(&rdtgroup_mutex);
+
+ rdt_last_cmd_clear();
+
+ r = resctrl_arch_get_resource(mevt->rid);
+ if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
+ rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = resctrl_parse_mem_transactions(buf, &evt_cfg);
+ if (!ret && mevt->evt_cfg != evt_cfg) {
+ mevt->evt_cfg = evt_cfg;
+ resctrl_update_cntr_allrdtgrp(mevt);
+ }
+
+out_unlock:
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+
+ return ret ?: nbytes;
+}
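Writing the file takes the same comma-separated syntax, newline-terminated; a minimal C sketch of a caller (the fd is assumed to be open on the event's filter file, whose path this patch does not spell out):

	/* Replace the filter with reads only; 25 == strlen of the string. */
	write(fd, "local_reads,remote_reads\n", 25);

If the parsed value differs from the current one, resctrl_update_cntr_allrdtgrp() reprograms every counter already assigned for this event.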
+
+int resctrl_mbm_assign_mode_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
+ bool enabled;
+
+ mutex_lock(&rdtgroup_mutex);
+ enabled = resctrl_arch_mbm_cntr_assign_enabled(r);
+
+ if (r->mon.mbm_cntr_assignable) {
+ if (enabled)
+ seq_puts(s, "[mbm_event]\n");
+ else
+ seq_puts(s, "[default]\n");
+
+ if (!IS_ENABLED(CONFIG_RESCTRL_ASSIGN_FIXED)) {
+ if (enabled)
+ seq_puts(s, "default\n");
+ else
+ seq_puts(s, "mbm_event\n");
+ }
+ } else {
+ seq_puts(s, "[default]\n");
+ }
+
+ mutex_unlock(&rdtgroup_mutex);
+
+ return 0;
+}
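The bracketed entry marks the active mode, with the alternative listed below it when the mode is switchable; on assignable-capable hardware the file typically reads:

	[default]
	mbm_event

The second line is omitted when CONFIG_RESCTRL_ASSIGN_FIXED pins the mode.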
+
+ssize_t resctrl_mbm_assign_mode_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
+ struct rdt_mon_domain *d;
+ int ret = 0;
+ bool enable;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+
+ buf[nbytes - 1] = '\0';
+
+ cpus_read_lock();
+ mutex_lock(&rdtgroup_mutex);
+
+ rdt_last_cmd_clear();
+
+ if (!strcmp(buf, "default")) {
+ enable = 0;
+ } else if (!strcmp(buf, "mbm_event")) {
+ if (r->mon.mbm_cntr_assignable) {
+ enable = 1;
+ } else {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("mbm_event mode is not supported\n");
+ goto out_unlock;
+ }
+ } else {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("Unsupported assign mode\n");
+ goto out_unlock;
+ }
+
+ if (enable != resctrl_arch_mbm_cntr_assign_enabled(r)) {
+ ret = resctrl_arch_mbm_cntr_assign_set(r, enable);
+ if (ret)
+ goto out_unlock;
+
+ /* Update the visibility of BMEC related files */
+ resctrl_bmec_files_show(r, NULL, !enable);
+
+ /*
+ * Initialize the default memory transaction values for
+ * total and local events.
+ */
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
+ mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
+ mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
+ (READS_TO_LOCAL_MEM |
+ READS_TO_LOCAL_S_MEM |
+ NON_TEMP_WRITE_TO_LOCAL_MEM);
+ /* Enable auto assignment when switching to "mbm_event" mode */
+ if (enable)
+ r->mon.mbm_assign_on_mkdir = true;
+ /*
+	 * Reset all the non-architectural RMID state and assignable counters.
+ */
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ mbm_cntr_free_all(r, d);
+ resctrl_reset_rmid_all(r, d);
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+
+ return ret ?: nbytes;
+}
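Only the exact keywords shown by the read side are accepted, newline-terminated:

	default
	mbm_event

On a successful switch the handler frees every domain's counter table and clears all non-architectural RMID state, so monitoring values effectively restart from zero.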
+
+int resctrl_num_mbm_cntrs_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
+ struct rdt_mon_domain *dom;
+ bool sep = false;
+
+ cpus_read_lock();
+ mutex_lock(&rdtgroup_mutex);
+
+ list_for_each_entry(dom, &r->mon_domains, hdr.list) {
+ if (sep)
+ seq_putc(s, ';');
+
+ seq_printf(s, "%d=%d", dom->hdr.id, r->mon.num_mbm_cntrs);
+ sep = true;
+ }
+ seq_putc(s, '\n');
+
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+ return 0;
+}
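The output is one <domain id>=<total counters> pair per L3 domain, semicolon-separated; e.g. on a two-domain system with 32 counters per domain:

	0=32;1=32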
+
+int resctrl_available_mbm_cntrs_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
+ struct rdt_mon_domain *dom;
+ bool sep = false;
+ u32 cntrs, i;
+ int ret = 0;
+
+ cpus_read_lock();
+ mutex_lock(&rdtgroup_mutex);
+
+ rdt_last_cmd_clear();
+
+ if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
+ rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ list_for_each_entry(dom, &r->mon_domains, hdr.list) {
+ if (sep)
+ seq_putc(s, ';');
+
+ cntrs = 0;
+ for (i = 0; i < r->mon.num_mbm_cntrs; i++) {
+ if (!dom->cntr_cfg[i].rdtgrp)
+ cntrs++;
+ }
+
+ seq_printf(s, "%d=%u", dom->hdr.id, cntrs);
+ sep = true;
+ }
+ seq_putc(s, '\n');
+
+out_unlock:
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+
+ return ret;
+}
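Same format as num_mbm_cntrs, but counting only free entries (->rdtgrp == NULL); e.g. after one group has been assigned a total and a local counter in domain 0:

	0=30;1=32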
+
+int mbm_L3_assignments_show(struct kernfs_open_file *of, struct seq_file *s, void *v)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+ struct rdt_mon_domain *d;
+ struct rdtgroup *rdtgrp;
+ struct mon_evt *mevt;
+ int ret = 0;
+ bool sep;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ rdt_last_cmd_clear();
+ if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
+ rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ for_each_mon_event(mevt) {
+ if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid))
+ continue;
+
+ sep = false;
+ seq_printf(s, "%s:", mevt->name);
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ if (sep)
+ seq_putc(s, ';');
+
+ if (mbm_cntr_get(r, d, rdtgrp, mevt->evtid) < 0)
+ seq_printf(s, "%d=_", d->hdr.id);
+ else
+ seq_printf(s, "%d=e", d->hdr.id);
+
+ sep = true;
+ }
+ seq_putc(s, '\n');
+ }
+
+out_unlock:
+ rdtgroup_kn_unlock(of->kn);
+
+ return ret;
+}
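Each enabled MBM event gets one line, with 'e' marking domains where this group holds an assigned counter and '_' marking domains where it does not, e.g.:

	mbm_total_bytes:0=e;1=e
	mbm_local_bytes:0=e;1=_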
+
+/*
+ * mbm_get_mon_event_by_name() - Return the mon_evt entry for the matching
+ * event name.
+ */
+static struct mon_evt *mbm_get_mon_event_by_name(struct rdt_resource *r, char *name)
+{
+ struct mon_evt *mevt;
+
+ for_each_mon_event(mevt) {
+ if (mevt->rid == r->rid && mevt->enabled &&
+ resctrl_is_mbm_event(mevt->evtid) &&
+ !strcmp(mevt->name, name))
+ return mevt;
+ }
+
+ return NULL;
+}
+
+static int rdtgroup_modify_assign_state(char *assign, struct rdt_mon_domain *d,
+ struct rdtgroup *rdtgrp, struct mon_evt *mevt)
+{
+ int ret = 0;
+
+ if (!assign || strlen(assign) != 1)
+ return -EINVAL;
+
+ switch (*assign) {
+ case 'e':
+ ret = rdtgroup_assign_cntr_event(d, rdtgrp, mevt);
+ break;
+ case '_':
+ rdtgroup_unassign_cntr_event(d, rdtgrp, mevt);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int resctrl_parse_mbm_assignment(struct rdt_resource *r, struct rdtgroup *rdtgrp,
+ char *event, char *tok)
+{
+ struct rdt_mon_domain *d;
+ unsigned long dom_id = 0;
+ char *dom_str, *id_str;
+ struct mon_evt *mevt;
+ int ret;
+
+ mevt = mbm_get_mon_event_by_name(r, event);
+ if (!mevt) {
+ rdt_last_cmd_printf("Invalid event %s\n", event);
+ return -ENOENT;
+ }
+
+next:
+ if (!tok || tok[0] == '\0')
+ return 0;
+
+ /* Start processing the strings for each domain */
+ dom_str = strim(strsep(&tok, ";"));
+
+ id_str = strsep(&dom_str, "=");
+
+ /* Check for domain id '*' which means all domains */
+ if (id_str && *id_str == '*') {
+ ret = rdtgroup_modify_assign_state(dom_str, NULL, rdtgrp, mevt);
+ if (ret)
+ rdt_last_cmd_printf("Assign operation '%s:*=%s' failed\n",
+ event, dom_str);
+ return ret;
+ } else if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
+ rdt_last_cmd_puts("Missing domain id\n");
+ return -EINVAL;
+ }
+
+	/* Verify that the dom_id is valid */
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ if (d->hdr.id == dom_id) {
+ ret = rdtgroup_modify_assign_state(dom_str, d, rdtgrp, mevt);
+ if (ret) {
+ rdt_last_cmd_printf("Assign operation '%s:%ld=%s' failed\n",
+ event, dom_id, dom_str);
+ return ret;
+ }
+ goto next;
+ }
+ }
+
+ rdt_last_cmd_printf("Invalid domain id %ld\n", dom_id);
+ return -EINVAL;
+}
+
+ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+ struct rdtgroup *rdtgrp;
+ char *token, *event;
+ int ret = 0;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+
+ buf[nbytes - 1] = '\0';
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+ rdt_last_cmd_clear();
+
+ if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
+ rdt_last_cmd_puts("mbm_event mode is not enabled\n");
+ rdtgroup_kn_unlock(of->kn);
+ return -EINVAL;
+ }
+
+ while ((token = strsep(&buf, "\n")) != NULL) {
+ /*
+ * The write command follows the following format:
+ * "<Event>:<Domain ID>=<Assignment state>"
+ * Extract the event name first.
+ */
+ event = strsep(&token, ":");
+
+ ret = resctrl_parse_mbm_assignment(r, rdtgrp, event, token);
+ if (ret)
+ break;
+ }
+
+ rdtgroup_kn_unlock(of->kn);
+
+ return ret ?: nbytes;
}
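Writes use the same syntax, one assignment per line, and '*' is accepted in place of a domain ID to target every domain, e.g.:

	mbm_total_bytes:*=e
	mbm_local_bytes:0=_;1=e

Parsing stops at the first failing assignment, so a multi-line write may be applied only partially.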
/**
@@ -902,24 +1765,43 @@ int resctrl_mon_resource_init(void)
if (ret)
return ret;
- l3_mon_evt_init(r);
-
if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
- mbm_total_event.configurable = true;
+ mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].configurable = true;
resctrl_file_fflags_init("mbm_total_bytes_config",
RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
}
if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
- mbm_local_event.configurable = true;
+ mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].configurable = true;
resctrl_file_fflags_init("mbm_local_bytes_config",
RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
}
- if (resctrl_arch_is_mbm_local_enabled())
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
mba_mbps_default_event = QOS_L3_MBM_LOCAL_EVENT_ID;
- else if (resctrl_arch_is_mbm_total_enabled())
+ else if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
+ if (r->mon.mbm_cntr_assignable) {
+ if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
+ resctrl_enable_mon_event(QOS_L3_MBM_TOTAL_EVENT_ID);
+ if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
+ resctrl_enable_mon_event(QOS_L3_MBM_LOCAL_EVENT_ID);
+ mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
+ mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
+ (READS_TO_LOCAL_MEM |
+ READS_TO_LOCAL_S_MEM |
+ NON_TEMP_WRITE_TO_LOCAL_MEM);
+ r->mon.mbm_assign_on_mkdir = true;
+ resctrl_file_fflags_init("num_mbm_cntrs",
+ RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
+ resctrl_file_fflags_init("available_mbm_cntrs",
+ RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
+ resctrl_file_fflags_init("event_filter", RFTYPE_ASSIGN_CONFIG);
+ resctrl_file_fflags_init("mbm_assign_on_mkdir", RFTYPE_MON_INFO |
+ RFTYPE_RES_CACHE);
+ resctrl_file_fflags_init("mbm_L3_assignments", RFTYPE_MON_BASE);
+ }
+
return 0;
}