| author | Yu Kuai <yukuai3@huawei.com> | 2025-09-10 16:04:42 +0800 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2025-09-10 05:25:56 -0600 |
| commit | 6293e336f6d7d3f3415346ce34993b3398846166 | |
| tree | b7459e1521b4ab70b9f0f40e9361213f66f92080 /block | |
| parent | blk-mq: split bitmap grow and resize case in blk_mq_update_nr_requests() | |
blk-mq-sched: add new parameter nr_requests in blk_mq_alloc_sched_tags()
This helper only supports allocating the default number of requests; add a new parameter so callers can pass a specific number of requests.
This prepares for fixing a potential deadlock in the case where nr_requests grows.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-mq-sched.c | 14 |
|---|---|---|
| -rw-r--r-- | block/blk-mq-sched.h | 2 |
| -rw-r--r-- | block/blk-mq.h | 11 |
| -rw-r--r-- | block/elevator.c | 3 |
4 files changed, 19 insertions, 11 deletions
```diff
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index e2ce4a28e6c9..d06bb137a743 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -454,7 +454,7 @@ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
 }
 
 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
-		unsigned int nr_hw_queues)
+		unsigned int nr_hw_queues, unsigned int nr_requests)
 {
 	unsigned int nr_tags;
 	int i;
@@ -470,13 +470,8 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
 			nr_tags * sizeof(struct blk_mq_tags *), gfp);
 	if (!et)
 		return NULL;
-	/*
-	 * Default to double of smaller one between hw queue_depth and
-	 * 128, since we don't split into sync/async like the old code
-	 * did. Additionally, this is a per-hw queue depth.
-	 */
-	et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
-				    BLKDEV_DEFAULT_RQ);
+
+	et->nr_requests = nr_requests;
 	et->nr_hw_queues = nr_hw_queues;
 
 	if (blk_mq_is_shared_tags(set->flags)) {
@@ -521,7 +516,8 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
 		 * concurrently.
 		 */
 		if (q->elevator) {
-			et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
+			et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
+					blk_mq_default_nr_requests(set));
 			if (!et)
 				goto out_unwind;
 			if (xa_insert(et_table, q->id, et, gfp))
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index fe83187f41db..8e21a6b1415d 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -24,7 +24,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 void blk_mq_sched_free_rqs(struct request_queue *q);
 
 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
-		unsigned int nr_hw_queues);
+		unsigned int nr_hw_queues, unsigned int nr_requests);
 int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
 		struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
 void blk_mq_free_sched_tags(struct elevator_tags *et,
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5d42c7d3a952..3a1d4c37d1bc 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -110,6 +110,17 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
 }
 
 /*
+ * Default to double of smaller one between hw queue_depth and
+ * 128, since we don't split into sync/async like the old code
+ * did. Additionally, this is a per-hw queue depth.
+ */
+static inline unsigned int blk_mq_default_nr_requests(
+		struct blk_mq_tag_set *set)
+{
+	return 2 * min_t(unsigned int, set->queue_depth, BLKDEV_DEFAULT_RQ);
+}
+
+/*
  * sysfs helpers
  */
 extern void blk_mq_sysfs_init(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index fe96c6f4753c..e2ebfbf107b3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -669,7 +669,8 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
 	lockdep_assert_held(&set->update_nr_hwq_lock);
 
 	if (strncmp(ctx->name, "none", 4)) {
-		ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
+		ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues,
+				blk_mq_default_nr_requests(set));
 		if (!ctx->et)
 			return -ENOMEM;
 	}
```
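For reference, a minimal user-space sketch (not kernel code) of the default per-hw-queue depth that the new blk_mq_default_nr_requests() helper computes, and which callers now pass to blk_mq_alloc_sched_tags() explicitly. BLKDEV_DEFAULT_RQ is 128 in the kernel; min_t() is replaced by a plain comparison here so the example builds outside the kernel tree:

```c
/*
 * Sketch of the default scheduler depth: 2 * min(queue_depth, 128),
 * i.e. double the smaller of the hw queue depth and BLKDEV_DEFAULT_RQ.
 */
#include <stdio.h>

#define BLKDEV_DEFAULT_RQ 128u

static unsigned int default_nr_requests(unsigned int queue_depth)
{
	unsigned int depth = queue_depth < BLKDEV_DEFAULT_RQ ?
			     queue_depth : BLKDEV_DEFAULT_RQ;

	/* Per-hw-queue depth, doubled since there is no sync/async split. */
	return 2 * depth;
}

int main(void)
{
	/* Example hardware queue depths: shallow, moderate, and deep. */
	unsigned int depths[] = { 32, 64, 1024 };

	for (unsigned int i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
		printf("queue_depth=%-5u -> default nr_requests=%u\n",
		       depths[i], default_nr_requests(depths[i]));

	return 0;
}
```

Making nr_requests an explicit argument, rather than always deriving the default inside the allocator, is what the "prepare to fix potential deadlock in the case nr_requests grows" note points at: a later change can allocate scheduler tags for a grown nr_requests up front and then swap them in.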
