
Commit 29dc5d0

Ming Lei authored and axboe committed
ublk: kill queuing request by task_work_add
task_work_add() has been used since the early ublk development stage for handling requests in batch. However, since commit 7d4a931 ("ublk_drv: don't forward io commands in reserve order"), we can get similar batch processing with io_uring_cmd_complete_in_task(), and similar performance data is observed for task_work_add() and io_uring_cmd_complete_in_task().

Meanwhile this lets us kill one fast code path which is actually seldom used, given that it is common to build the ublk driver as a module.

Signed-off-by: Ming Lei <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 9a67aa5 commit 29dc5d0
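
The effect on the dispatch logic in ublk_queue_cmd() is easiest to see as a condensed before/after sketch (simplified from the diff below, not verbatim driver code):

	/* Before: a built-in ublk driver could push the request to the daemon
	 * task directly via task_work_add(); modular builds fell back to
	 * io_uring task work.
	 */
	if (ublk_can_use_task_work(ubq)) {	/* true only if IS_BUILTIN() */
		if (task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI))
			ublk_abort_io_cmds(ubq);
	} else {
		pdu->ubq = ubq;
		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
	}

	/* After: one path regardless of how the driver is built. */
	pdu->ubq = ubq;
	io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);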

File tree

1 file changed: +2, -38 lines


drivers/block/ublk_drv.c

Lines changed: 2 additions & 38 deletions
@@ -62,7 +62,6 @@
 
 struct ublk_rq_data {
 	struct llist_node node;
-	struct callback_head work;
 };
 
 struct ublk_uring_cmd_pdu {
@@ -290,14 +289,6 @@ static int ublk_apply_params(struct ublk_device *ub)
 	return 0;
 }
 
-static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
-{
-	if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
-			!(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK))
-		return true;
-	return false;
-}
-
 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
 {
 	return ubq->flags & UBLK_F_NEED_GET_DATA;
@@ -852,17 +843,6 @@ static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
 	ublk_forward_io_cmds(ubq, issue_flags);
 }
 
-static void ublk_rq_task_work_fn(struct callback_head *work)
-{
-	struct ublk_rq_data *data = container_of(work,
-			struct ublk_rq_data, work);
-	struct request *req = blk_mq_rq_from_pdu(data);
-	struct ublk_queue *ubq = req->mq_hctx->driver_data;
-	unsigned issue_flags = IO_URING_F_UNLOCKED;
-
-	ublk_forward_io_cmds(ubq, issue_flags);
-}
-
 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 {
 	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
@@ -886,10 +866,6 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 	 */
 	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
 		ublk_abort_io_cmds(ubq);
-	} else if (ublk_can_use_task_work(ubq)) {
-		if (task_work_add(ubq->ubq_daemon, &data->work,
-					TWA_SIGNAL_NO_IPI))
-			ublk_abort_io_cmds(ubq);
 	} else {
 		struct io_uring_cmd *cmd = io->cmd;
 		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
@@ -961,19 +937,9 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
 	return 0;
 }
 
-static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
-		unsigned int hctx_idx, unsigned int numa_node)
-{
-	struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
-	init_task_work(&data->work, ublk_rq_task_work_fn);
-	return 0;
-}
-
 static const struct blk_mq_ops ublk_mq_ops = {
 	.queue_rq	= ublk_queue_rq,
 	.init_hctx	= ublk_init_hctx,
-	.init_request	= ublk_init_rq,
 	.timeout	= ublk_timeout,
 };
 
@@ -1813,10 +1779,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
 	 */
 	ub->dev_info.flags &= UBLK_F_ALL;
 
-	if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
-		ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
-
-	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE;
+	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
+			UBLK_F_URING_CMD_COMP_IN_TASK;
 
 	/* We are not ready to support zero copy */
 	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
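
For context, both the removed task_work callback (ublk_rq_task_work_fn) and the surviving io_uring callback (ublk_rq_task_work_cb) end in the same ublk_forward_io_cmds() drain, which is why batching behavior is unchanged. A rough sketch of that drain, reconstructed from the surrounding driver source rather than this diff (the io_cmds llist_head on struct ublk_queue and the __ublk_rq_task_work() per-request handler are assumed here):

	static void ublk_forward_io_cmds(struct ublk_queue *ubq,
					 unsigned issue_flags)
	{
		/* grab everything queued so far in one atomic operation */
		struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
		struct ublk_rq_data *data, *tmp;

		/* llist pops LIFO; restore submission (FIFO) order */
		io_cmds = llist_reverse_order(io_cmds);
		llist_for_each_entry_safe(data, tmp, io_cmds, node)
			/* per-request handler defined elsewhere in the driver */
			__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
	}

Since io_uring_cmd_complete_in_task() coalesces task work per task much as task_work_add() did, the producer side only needs to schedule the callback when it adds a request to an empty llist; requests queued in the meantime are picked up by the same drain.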
