Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c  |  120
1 file changed, 96 insertions(+), 24 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index e0bed16485c3..e26898128a7e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -427,11 +427,25 @@ void blk_mq_free_sched_tags(struct elevator_tags *et,
kfree(et);
}
-void blk_mq_free_sched_tags_batch(struct xarray *et_table,
+void blk_mq_free_sched_res(struct elevator_resources *res,
+ struct elevator_type *type,
+ struct blk_mq_tag_set *set)
+{
+ if (res->et) {
+ blk_mq_free_sched_tags(res->et, set);
+ res->et = NULL;
+ }
+ if (res->data) {
+ blk_mq_free_sched_data(type, res->data);
+ res->data = NULL;
+ }
+}
+
+void blk_mq_free_sched_res_batch(struct xarray *elv_tbl,
struct blk_mq_tag_set *set)
{
struct request_queue *q;
- struct elevator_tags *et;
+ struct elv_change_ctx *ctx;
lockdep_assert_held_write(&set->update_nr_hwq_lock);
@@ -444,13 +458,46 @@ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
* concurrently.
*/
if (q->elevator) {
- et = xa_load(et_table, q->id);
- if (unlikely(!et))
+ ctx = xa_load(elv_tbl, q->id);
+ if (!ctx) {
WARN_ON_ONCE(1);
- else
- blk_mq_free_sched_tags(et, set);
+ continue;
+ }
+ blk_mq_free_sched_res(&ctx->res, ctx->type, set);
+ }
+ }
+}
+
+void blk_mq_free_sched_ctx_batch(struct xarray *elv_tbl)
+{
+ unsigned long i;
+ struct elv_change_ctx *ctx;
+
+ xa_for_each(elv_tbl, i, ctx) {
+ xa_erase(elv_tbl, i);
+ kfree(ctx);
+ }
+}
+
+int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
+ struct blk_mq_tag_set *set)
+{
+ struct request_queue *q;
+ struct elv_change_ctx *ctx;
+
+ lockdep_assert_held_write(&set->update_nr_hwq_lock);
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ ctx = kzalloc(sizeof(struct elv_change_ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (xa_insert(elv_tbl, q->id, ctx, GFP_KERNEL)) {
+ kfree(ctx);
+ return -ENOMEM;
}
}
+ return 0;
}
struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
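
The hunks above introduce a per-queue change-context table: blk_mq_alloc_sched_ctx_batch() allocates one elv_change_ctx per queue in the tag_set and files it in the xarray under q->id, while blk_mq_free_sched_ctx_batch() erases and frees every entry in one pass. As a rough userspace sketch of that shape only (not the kernel code itself): a plain pointer array stands in for the xarray, all names and sizes below are hypothetical, and the caller is assumed to tear the table down with the batch free on error, as the bare -ENOMEM return above suggests.

#include <stdio.h>
#include <stdlib.h>

#define NR_QUEUES 4

struct change_ctx {
	unsigned int queue_id;
	/* per-queue elevator-switch state would live here */
};

/* One context per queue; on failure the caller runs the batch free below. */
static int alloc_ctx_batch(struct change_ctx *tbl[], unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++) {
		tbl[i] = calloc(1, sizeof(*tbl[i]));
		if (!tbl[i])
			return -1;
		tbl[i]->queue_id = i;
	}
	return 0;
}

/* Counterpart of the batch free: drop every entry and clear its slot. */
static void free_ctx_batch(struct change_ctx *tbl[], unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++) {
		free(tbl[i]);
		tbl[i] = NULL;
	}
}

int main(void)
{
	struct change_ctx *tbl[NR_QUEUES] = { NULL };
	int ret = alloc_ctx_batch(tbl, NR_QUEUES);

	/* the batch free is safe to run whether or not allocation finished */
	free_ctx_batch(tbl, NR_QUEUES);
	return ret ? 1 : 0;
}
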
@@ -466,8 +513,7 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
else
nr_tags = nr_hw_queues;
- et = kmalloc(sizeof(struct elevator_tags) +
- nr_tags * sizeof(struct blk_mq_tags *), gfp);
+ et = kmalloc(struct_size(et, tags, nr_tags), gfp);
if (!et)
return NULL;
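
The hunk above replaces the open-coded size computation with struct_size(), which sizes a structure ending in a flexible array member and saturates the result on overflow. A minimal userspace sketch of the same flexible-array allocation pattern, with illustrative types rather than the real struct elevator_tags and the size math written out by hand:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tags_array {
	unsigned int nr_tags;
	void *tags[];		/* flexible array member, like et->tags[] */
};

static struct tags_array *alloc_tags(unsigned int nr_tags)
{
	/*
	 * Hand-rolled equivalent of kmalloc(struct_size(et, tags, nr_tags), gfp):
	 * header plus nr_tags trailing pointers.  struct_size() additionally
	 * saturates on overflow, which this sketch does not attempt.
	 */
	size_t sz = sizeof(struct tags_array) + nr_tags * sizeof(void *);
	struct tags_array *et = malloc(sz);

	if (!et)
		return NULL;
	memset(et, 0, sz);
	et->nr_tags = nr_tags;
	return et;
}

int main(void)
{
	struct tags_array *et = alloc_tags(8);

	if (!et)
		return 1;
	printf("room for %u tag pointers\n", et->nr_tags);
	free(et);
	return 0;
}
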
@@ -498,12 +544,33 @@ out:
return NULL;
}
-int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
+int blk_mq_alloc_sched_res(struct request_queue *q,
+ struct elevator_type *type,
+ struct elevator_resources *res,
+ unsigned int nr_hw_queues)
+{
+ struct blk_mq_tag_set *set = q->tag_set;
+
+ res->et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
+ blk_mq_default_nr_requests(set));
+ if (!res->et)
+ return -ENOMEM;
+
+ res->data = blk_mq_alloc_sched_data(q, type);
+ if (IS_ERR(res->data)) {
+ blk_mq_free_sched_tags(res->et, set);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int blk_mq_alloc_sched_res_batch(struct xarray *elv_tbl,
struct blk_mq_tag_set *set, unsigned int nr_hw_queues)
{
+ struct elv_change_ctx *ctx;
struct request_queue *q;
- struct elevator_tags *et;
- gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
+ int ret = -ENOMEM;
lockdep_assert_held_write(&set->update_nr_hwq_lock);
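
blk_mq_alloc_sched_res() above pairs two allocations, the sched tags and the scheduler-private data, and releases the first if the second fails; blk_mq_free_sched_res() clears each pointer after freeing so a repeated call is harmless. A minimal userspace sketch of that shape, with made-up types and sizes standing in for the real resources:

#include <stdlib.h>
#include <string.h>

struct sched_res {
	void *tags;	/* stands in for res->et */
	void *data;	/* stands in for res->data */
};

static int alloc_sched_res(struct sched_res *res)
{
	res->tags = malloc(128);
	if (!res->tags)
		return -1;

	res->data = malloc(64);
	if (!res->data) {
		/* second allocation failed: undo the first */
		free(res->tags);
		res->tags = NULL;
		return -1;
	}
	return 0;
}

static void free_sched_res(struct sched_res *res)
{
	/* NULL checks plus re-NULLing make a repeated free harmless */
	if (res->tags) {
		free(res->tags);
		res->tags = NULL;
	}
	if (res->data) {
		free(res->data);
		res->data = NULL;
	}
}

int main(void)
{
	struct sched_res res;

	memset(&res, 0, sizeof(res));
	if (alloc_sched_res(&res))
		return 1;
	free_sched_res(&res);
	free_sched_res(&res);	/* second call is a no-op by design */
	return 0;
}
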
@@ -516,39 +583,44 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
* concurrently.
*/
if (q->elevator) {
- et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
- blk_mq_default_nr_requests(set));
- if (!et)
+ ctx = xa_load(elv_tbl, q->id);
+ if (WARN_ON_ONCE(!ctx)) {
+ ret = -ENOENT;
+ goto out_unwind;
+ }
+
+ ret = blk_mq_alloc_sched_res(q, q->elevator->type,
+ &ctx->res, nr_hw_queues);
+ if (ret)
goto out_unwind;
- if (xa_insert(et_table, q->id, et, gfp))
- goto out_free_tags;
}
}
return 0;
-out_free_tags:
- blk_mq_free_sched_tags(et, set);
+
out_unwind:
list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
if (q->elevator) {
- et = xa_load(et_table, q->id);
- if (et)
- blk_mq_free_sched_tags(et, set);
+ ctx = xa_load(elv_tbl, q->id);
+ if (ctx)
+ blk_mq_free_sched_res(&ctx->res,
+ ctx->type, set);
}
}
- return -ENOMEM;
+ return ret;
}
/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
- struct elevator_tags *et)
+ struct elevator_resources *res)
{
unsigned int flags = q->tag_set->flags;
+ struct elevator_tags *et = res->et;
struct blk_mq_hw_ctx *hctx;
struct elevator_queue *eq;
unsigned long i;
int ret;
- eq = elevator_alloc(q, e, et);
+ eq = elevator_alloc(q, e, res);
if (!eq)
return -ENOMEM;
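
The out_unwind path earlier in this patch walks back over the queues that were already handled, via list_for_each_entry_continue_reverse(), and frees their resources, skipping queues without an elevator just as the forward pass does. A rough userspace sketch of that unwind shape, with an array standing in for the tag_list and hypothetical names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define NR_QUEUES 4

struct fake_queue {
	bool has_elevator;
	void *res;		/* per-queue resource, NULL until allocated */
};

static int alloc_batch(struct fake_queue q[], int nr, int fail_at)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (!q[i].has_elevator)
			continue;
		/* simulate an allocation failure at index fail_at */
		q[i].res = (i == fail_at) ? NULL : malloc(32);
		if (!q[i].res)
			goto out_unwind;
	}
	return 0;

out_unwind:
	/* reverse walk from the entry before the one that failed */
	for (i--; i >= 0; i--) {
		if (q[i].has_elevator && q[i].res) {
			free(q[i].res);
			q[i].res = NULL;
		}
	}
	return -1;
}

int main(void)
{
	struct fake_queue q[NR_QUEUES] = {
		{ true, NULL }, { false, NULL }, { true, NULL }, { true, NULL },
	};

	if (alloc_batch(q, NR_QUEUES, 3))
		printf("allocation failed, earlier queues unwound\n");
	return 0;
}
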