From 67890d579402804b1d32b3280d9860073542528e Mon Sep 17 00:00:00 2001
From: Wesley Cheng
Date: Wed, 9 Apr 2025 12:47:40 -0700
Subject: ALSA: Add USB audio device jack type

Add a USB jack type in order to support notification of a valid USB
audio device.  Since USB audio devices can have a slew of different
configurations that reach beyond the basic headset and headphone use
cases, classify these devices differently.

Reviewed-by: Pierre-Louis Bossart
Signed-off-by: Wesley Cheng
Acked-by: Mark Brown
Link: https://lore.kernel.org/r/20250409194804.3773260-8-quic_wcheng@quicinc.com
Signed-off-by: Greg Kroah-Hartman
---
 include/uapi/linux/input-event-codes.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'include/uapi/linux')

diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 5a199f3d4a26..3b2524e4b667 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -925,7 +925,8 @@
 #define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
 #define SW_PEN_INSERTED		0x0f  /* set = pen inserted */
 #define SW_MACHINE_COVER	0x10  /* set = cover closed */
-#define SW_MAX			0x10
+#define SW_USB_INSERT		0x11  /* set = USB audio device connected */
+#define SW_MAX			0x11
 #define SW_CNT			(SW_MAX+1)
 
 /*
-- cgit v1.2.3


From 80fa7a03378588582eb40f89b6f418c0c256cf24 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 20 May 2025 13:16:43 -0400
Subject: vt: bracketed paste support

This consists of 3 aspects:

- Take note of when applications advertise bracketed paste support via
  "\e[?2004h" and "\e[?2004l".

- Insert bracketed paste markers ("\e[200~" and "\e[201~") around pasted
  content in paste_selection() when bracketed paste is active.

- Add TIOCL_GETBRACKETEDPASTE to return bracketed paste status so user
  space daemons implementing cut-and-paste functionality (e.g. gpm,
  BRLTTY) may know when to insert bracketed paste markers.

Link: https://en.wikipedia.org/wiki/Bracketed-paste
Signed-off-by: Nicolas Pitre
Reviewed-by: Jiri Slaby
Link: https://lore.kernel.org/r/20250520171851.1219676-2-nico@fluxnic.net
Signed-off-by: Greg Kroah-Hartman
---
 drivers/tty/vt/selection.c     | 31 +++++++++++++++++++++++++++----
 drivers/tty/vt/vt.c            | 15 +++++++++++++++
 include/linux/console_struct.h |  1 +
 include/uapi/linux/tiocl.h     |  1 +
 4 files changed, 44 insertions(+), 4 deletions(-)
(limited to 'include/uapi/linux')

diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 791e2f1f7c0b..24b0a53e5a79 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -403,6 +403,12 @@ int paste_selection(struct tty_struct *tty)
 	DECLARE_WAITQUEUE(wait, current);
 	int ret = 0;
 
+	bool bp = vc->vc_bracketed_paste;
+	static const char bracketed_paste_start[] = "\033[200~";
+	static const char bracketed_paste_end[] = "\033[201~";
+	const char *bps = bp ? bracketed_paste_start : NULL;
+	const char *bpe = bp ? bracketed_paste_end : NULL;
+
 	console_lock();
 	poke_blanked_console();
 	console_unlock();
@@ -414,7 +420,7 @@ int paste_selection(struct tty_struct *tty)
 	add_wait_queue(&vc->paste_wait, &wait);
 	mutex_lock(&vc_sel.lock);
 
-	while (vc_sel.buffer && vc_sel.buf_len > pasted) {
+	while (vc_sel.buffer && (vc_sel.buf_len > pasted || bpe)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (signal_pending(current)) {
 			ret = -EINTR;
@@ -427,10 +433,27 @@ int paste_selection(struct tty_struct *tty)
 			continue;
 		}
 		__set_current_state(TASK_RUNNING);
+
+		if (bps) {
+			bps += tty_ldisc_receive_buf(ld, bps, NULL, strlen(bps));
+			if (*bps != '\0')
+				continue;
+			bps = NULL;
+		}
+
 		count = vc_sel.buf_len - pasted;
-		count = tty_ldisc_receive_buf(ld, vc_sel.buffer + pasted, NULL,
-				count);
-		pasted += count;
+		if (count) {
+			pasted += tty_ldisc_receive_buf(ld, vc_sel.buffer + pasted,
+							NULL, count);
+			if (vc_sel.buf_len > pasted)
+				continue;
+		}
+
+		if (bpe) {
+			bpe += tty_ldisc_receive_buf(ld, bpe, NULL, strlen(bpe));
+			if (*bpe == '\0')
+				bpe = NULL;
+		}
 	}
 	mutex_unlock(&vc_sel.lock);
 	remove_wait_queue(&vc->paste_wait, &wait);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index efb761454166..ed39d9cb4432 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1870,6 +1870,14 @@ int mouse_reporting(void)
 	return vc_cons[fg_console].d->vc_report_mouse;
 }
 
+/* invoked via ioctl(TIOCLINUX) */
+static int get_bracketed_paste(struct tty_struct *tty)
+{
+	struct vc_data *vc = tty->driver_data;
+
+	return vc->vc_bracketed_paste;
+}
+
 enum {
 	CSI_DEC_hl_CURSOR_KEYS = 1,	/* CKM: cursor keys send ^[Ox/^[[x */
 	CSI_DEC_hl_132_COLUMNS = 3,	/* COLM: 80/132 mode switch */
@@ -1880,6 +1888,7 @@ enum {
 	CSI_DEC_hl_MOUSE_X10 = 9,
 	CSI_DEC_hl_SHOW_CURSOR = 25,	/* TCEM */
 	CSI_DEC_hl_MOUSE_VT200 = 1000,
+	CSI_DEC_hl_BRACKETED_PASTE = 2004,
 };
 
 /* console_lock is held */
@@ -1932,6 +1941,9 @@ static void csi_DEC_hl(struct vc_data *vc, bool on_off)
 	case CSI_DEC_hl_MOUSE_VT200:
 		vc->vc_report_mouse = on_off ? 2 : 0;
 		break;
+	case CSI_DEC_hl_BRACKETED_PASTE:
+		vc->vc_bracketed_paste = on_off;
+		break;
 	}
 }
 
@@ -2157,6 +2169,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
 	vc->state.charset	= 0;
 	vc->vc_need_wrap	= 0;
 	vc->vc_report_mouse	= 0;
+	vc->vc_bracketed_paste	= 0;
 	vc->vc_utf		= default_utf8;
 	vc->vc_utf_count	= 0;
 
@@ -3483,6 +3496,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
 		break;
 	case TIOCL_BLANKEDSCREEN:
 		return console_blanked;
+	case TIOCL_GETBRACKETEDPASTE:
+		return get_bracketed_paste(tty);
 	default:
 		return -EINVAL;
 	}
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 20f564e98552..59b4fec5f254 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -145,6 +145,7 @@ struct vc_data {
 	unsigned int	vc_need_wrap	: 1;
 	unsigned int	vc_can_do_color	: 1;
 	unsigned int	vc_report_mouse : 2;
+	unsigned int	vc_bracketed_paste : 1;
 	unsigned char	vc_utf		: 1;	/* Unicode UTF-8 encoding */
 	unsigned char	vc_utf_count;
 	int		vc_utf_char;
diff --git a/include/uapi/linux/tiocl.h b/include/uapi/linux/tiocl.h
index b32acc229024..88faba506c3d 100644
--- a/include/uapi/linux/tiocl.h
+++ b/include/uapi/linux/tiocl.h
@@ -36,5 +36,6 @@ struct tiocl_selection {
 #define TIOCL_BLANKSCREEN	14	/* keep screen blank even if a key is pressed */
 #define TIOCL_BLANKEDSCREEN	15	/* return which vt was blanked */
 #define TIOCL_GETKMSGREDIRECT	17	/* get the vt the kernel messages are restricted to */
+#define TIOCL_GETBRACKETEDPASTE	18	/* get whether paste may be bracketed */
 
 #endif /* _LINUX_TIOCL_H */
-- cgit v1.2.3


From 81cf4d7d2379df853a0cbb8486286783c7380ac3 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 20 May 2025 13:16:44 -0400
Subject: vt: add VT_GETCONSIZECSRPOS to retrieve console size and cursor position

The console dimension and cursor position are available through the
/dev/vcsa interface already. However, the /dev/vcsa header format uses
single-byte fields, so those values are clamped to 255.

As surprising as this may seem, some people do use 240-column, 67-row
screens (a 1920x1080 monitor with 8x16 pixel fonts), which is getting
close to the limit. Monitors with higher resolution are not uncommon
these days (3840x2160 producing a 480x135 character display) and it is
just a matter of time before someone with, say, a braille display using
the Linux VT console and BRLTTY on such a screen reports a bug about
missing and oddly misaligned screen content.

Let's add VT_GETCONSIZECSRPOS for the retrieval of console size and
cursor position without byte-sized limitations. The actual console size
limit as encoded in vt.c is 32767x32767 so using a short here is
appropriate. This can then be used to get the cursor position when
/dev/vcsa reports 255.

The screen dimension may already be obtained using TIOCGWINSZ, and
adding the same information to VT_GETCONSIZECSRPOS might be redundant.
However, applications that care about cursor position also care about
display size, and having 2 separate system calls to obtain them is
wasteful. Also, the cursor position can be queried by writing "\e[6n" to
a tty and reading back the result, but that may be done only by the
actual application using that tty and not a sideline observer.
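
For illustration (not part of the patch below), a minimal user space
sketch of how the new ioctl could be used; the /dev/tty0 device node,
the privileges needed to open it, and the error handling are assumptions
of this sketch rather than requirements of the patch:

  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/vt.h>	/* struct vt_consizecsrpos on patched kernels */

  int main(void)
  {
  	struct vt_consizecsrpos pos;
  	int fd = open("/dev/tty0", O_RDONLY);	/* current VT; assumed node */

  	if (fd < 0 || ioctl(fd, VT_GETCONSIZECSRPOS, &pos) < 0) {
  		perror("VT_GETCONSIZECSRPOS");
  		return 1;
  	}
  	/* unlike the /dev/vcsa header, values are not clamped to 255 */
  	printf("console: %hu cols x %hu rows, cursor at row %hu col %hu\n",
  	       pos.con_cols, pos.con_rows, pos.csr_row, pos.csr_col);
  	close(fd);
  	return 0;
  }
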
Signed-off-by: Nicolas Pitre
Link: https://lore.kernel.org/r/20250520171851.1219676-3-nico@fluxnic.net
Signed-off-by: Greg Kroah-Hartman
---
 drivers/tty/vt/vt_ioctl.c | 16 ++++++++++++++++
 include/uapi/linux/vt.h   | 11 +++++++++++
 2 files changed, 27 insertions(+)
(limited to 'include/uapi/linux')

diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 1f2bdd2e1cc5..61342e06970a 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -951,6 +951,22 @@ int vt_ioctl(struct tty_struct *tty,
 				(unsigned short __user *)arg);
 	case VT_WAITEVENT:
 		return vt_event_wait_ioctl((struct vt_event __user *)arg);
+
+	case VT_GETCONSIZECSRPOS:
+	{
+		struct vt_consizecsrpos concsr;
+
+		console_lock();
+		concsr.con_cols = vc->vc_cols;
+		concsr.con_rows = vc->vc_rows;
+		concsr.csr_col = vc->state.x;
+		concsr.csr_row = vc->state.y;
+		console_unlock();
+		if (copy_to_user(up, &concsr, sizeof(concsr)))
+			return -EFAULT;
+		return 0;
+	}
+
 	default:
 		return -ENOIOCTLCMD;
 	}
diff --git a/include/uapi/linux/vt.h b/include/uapi/linux/vt.h
index e9d39c48520a..e5b0c492aa18 100644
--- a/include/uapi/linux/vt.h
+++ b/include/uapi/linux/vt.h
@@ -2,6 +2,8 @@
 #ifndef _UAPI_LINUX_VT_H
 #define _UAPI_LINUX_VT_H
 
+#include <linux/types.h>
+#include <linux/ioctl.h>
 
 /*
  * These constants are also useful for user-level apps (e.g., VC
@@ -84,4 +86,13 @@ struct vt_setactivate {
 
 #define VT_SETACTIVATE	0x560F	/* Activate and set the mode of a console */
 
+/* get console size and cursor position */
+struct vt_consizecsrpos {
+	__u16 con_rows;	/* number of console rows */
+	__u16 con_cols;	/* number of console columns */
+	__u16 csr_row;	/* current cursor's row */
+	__u16 csr_col;	/* current cursor's column */
+};
+#define VT_GETCONSIZECSRPOS _IOR('V', 0x10, struct vt_consizecsrpos)
+
 #endif /* _UAPI_LINUX_VT_H */
-- cgit v1.2.3


From ab03a61c66149327e022bdafa5843c6f82be267e Mon Sep 17 00:00:00 2001
From: Uday Shankar
Date: Thu, 29 May 2025 17:47:10 -0600
Subject: ublk: have a per-io daemon instead of a per-queue daemon

Currently, ublk_drv associates with each hardware queue (hctx) a unique
task (called the queue's ubq_daemon) which is allowed to issue
COMMIT_AND_FETCH commands against the hctx. If any other task attempts
to do so, the command fails immediately with EINVAL. When considered
together with the block layer architecture, the result is that for each
CPU C on the system, there is a unique ublk server thread which is
allowed to handle I/O submitted on CPU C.

This can lead to suboptimal performance under imbalanced load
generation. For an extreme example, suppose all the load is generated on
CPUs mapping to a single ublk server thread. Then that thread may be
fully utilized and become the bottleneck in the system, while other ublk
server threads are totally idle.

This issue can also be addressed directly in the ublk server without
kernel support by having threads dequeue I/Os and pass them around to
ensure even load. But this solution requires inter-thread communication
at least twice for each I/O (submission and completion), which is
generally a bad pattern for performance. The problem gets even worse
with zero copy, as more inter-thread communication would be required to
have the buffer register/unregister calls come from the correct thread.

Therefore, address this issue in ublk_drv by allowing each I/O to have
its own daemon task. Two I/Os in the same queue are now allowed to be
serviced by different daemon tasks - this was not possible before.

Imbalanced load can then be balanced across all ublk server threads by
having the ublk server threads issue FETCH_REQs in a round-robin manner.
As a small toy example, consider a system with a single ublk device
having 2 queues, each of depth 4. A ublk server having 4 threads could
issue its FETCH_REQs against this device as follows (where each entry is
the qid,tag pair that the FETCH_REQ targets):

ublk server thread:	T0	T1	T2	T3
			0,0	0,1	0,2	0,3
			1,3	1,0	1,1	1,2

This setup allows for load that is concentrated on one hctx/ublk_queue
to be spread out across all ublk server threads, alleviating the issue
described above.

Add the new UBLK_F_PER_IO_DAEMON feature to ublk_drv, which ublk servers
can use to essentially test for the presence of this change and tailor
their behavior accordingly.

Signed-off-by: Uday Shankar
Reviewed-by: Caleb Sander Mateos
Reviewed-by: Ming Lei
Reviewed-by: Jens Axboe
Link: https://lore.kernel.org/r/20250529-ublk_task_per_io-v8-1-e9d3b119336a@purestorage.com
Signed-off-by: Jens Axboe
---
 drivers/block/ublk_drv.c      | 111 +++++++++++++++++++++---------------------
 include/uapi/linux/ublk_cmd.h |   9 ++++
 2 files changed, 65 insertions(+), 55 deletions(-)
(limited to 'include/uapi/linux')

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6f51072776f1..c637ea010d34 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -69,7 +69,8 @@
 	| UBLK_F_USER_RECOVERY_FAIL_IO \
 	| UBLK_F_UPDATE_SIZE \
 	| UBLK_F_AUTO_BUF_REG \
-	| UBLK_F_QUIESCE)
+	| UBLK_F_QUIESCE \
+	| UBLK_F_PER_IO_DAEMON)
 
 #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
 	| UBLK_F_USER_RECOVERY_REISSUE \
@@ -166,6 +167,8 @@ struct ublk_io {
 	/* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */
 	struct request *req;
 	};
+
+	struct task_struct *task;
 };
 
 struct ublk_queue {
@@ -173,11 +176,9 @@ struct ublk_queue {
 	int q_depth;
 
 	unsigned long flags;
-	struct task_struct	*ubq_daemon;
 	struct ublksrv_io_desc *io_cmd_buf;
 
 	bool force_abort;
-	bool timeout;
 	bool canceling;
 	bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
 	unsigned short nr_io_ready;	/* how many ios setup */
@@ -1099,11 +1100,6 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
 	return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
 }
 
-static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
-{
-	return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
-}
-
 /* todo: handle partial completion */
 static inline void __ublk_complete_rq(struct request *req)
 {
@@ -1275,13 +1271,13 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
 	/*
 	 * Task is exiting if either:
 	 *
-	 * (1) current != ubq_daemon.
+	 * (1) current != io->task.
 	 * io_uring_cmd_complete_in_task() tries to run task_work
-	 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
+	 * in a workqueue if cmd's task is PF_EXITING.
 	 *
 	 * (2) current->flags & PF_EXITING.
*/ - if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) { + if (unlikely(current != io->task || current->flags & PF_EXITING)) { __ublk_abort_rq(ubq, req); return; } @@ -1330,24 +1326,22 @@ static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd, { struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); struct request *rq = pdu->req_list; - struct ublk_queue *ubq = pdu->ubq; struct request *next; do { next = rq->rq_next; rq->rq_next = NULL; - ublk_dispatch_req(ubq, rq, issue_flags); + ublk_dispatch_req(rq->mq_hctx->driver_data, rq, issue_flags); rq = next; } while (rq); } -static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l) +static void ublk_queue_cmd_list(struct ublk_io *io, struct rq_list *l) { - struct request *rq = rq_list_peek(l); - struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd; + struct io_uring_cmd *cmd = io->cmd; struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); - pdu->req_list = rq; + pdu->req_list = rq_list_peek(l); rq_list_init(l); io_uring_cmd_complete_in_task(cmd, ublk_cmd_list_tw_cb); } @@ -1355,13 +1349,10 @@ static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l) static enum blk_eh_timer_return ublk_timeout(struct request *rq) { struct ublk_queue *ubq = rq->mq_hctx->driver_data; + struct ublk_io *io = &ubq->ios[rq->tag]; if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) { - if (!ubq->timeout) { - send_sig(SIGKILL, ubq->ubq_daemon, 0); - ubq->timeout = true; - } - + send_sig(SIGKILL, io->task, 0); return BLK_EH_DONE; } @@ -1429,24 +1420,25 @@ static void ublk_queue_rqs(struct rq_list *rqlist) { struct rq_list requeue_list = { }; struct rq_list submit_list = { }; - struct ublk_queue *ubq = NULL; + struct ublk_io *io = NULL; struct request *req; while ((req = rq_list_pop(rqlist))) { struct ublk_queue *this_q = req->mq_hctx->driver_data; + struct ublk_io *this_io = &this_q->ios[req->tag]; - if (ubq && ubq != this_q && !rq_list_empty(&submit_list)) - ublk_queue_cmd_list(ubq, &submit_list); - ubq = this_q; + if (io && io->task != this_io->task && !rq_list_empty(&submit_list)) + ublk_queue_cmd_list(io, &submit_list); + io = this_io; - if (ublk_prep_req(ubq, req, true) == BLK_STS_OK) + if (ublk_prep_req(this_q, req, true) == BLK_STS_OK) rq_list_add_tail(&submit_list, req); else rq_list_add_tail(&requeue_list, req); } - if (ubq && !rq_list_empty(&submit_list)) - ublk_queue_cmd_list(ubq, &submit_list); + if (!rq_list_empty(&submit_list)) + ublk_queue_cmd_list(io, &submit_list); *rqlist = requeue_list; } @@ -1474,17 +1466,6 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq) /* All old ioucmds have to be completed */ ubq->nr_io_ready = 0; - /* - * old daemon is PF_EXITING, put it now - * - * It could be NULL in case of closing one quisced device. - */ - if (ubq->ubq_daemon) - put_task_struct(ubq->ubq_daemon); - /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */ - ubq->ubq_daemon = NULL; - ubq->timeout = false; - for (i = 0; i < ubq->q_depth; i++) { struct ublk_io *io = &ubq->ios[i]; @@ -1495,6 +1476,17 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq) io->flags &= UBLK_IO_FLAG_CANCELED; io->cmd = NULL; io->addr = 0; + + /* + * old task is PF_EXITING, put it now + * + * It could be NULL in case of closing one quiesced + * device. 
+ */ + if (io->task) { + put_task_struct(io->task); + io->task = NULL; + } } } @@ -1516,7 +1508,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub) for (i = 0; i < ub->dev_info.nr_hw_queues; i++) ublk_queue_reinit(ub, ublk_get_queue(ub, i)); - /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */ + /* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */ ub->mm = NULL; ub->nr_queues_ready = 0; ub->nr_privileged_daemon = 0; @@ -1783,6 +1775,7 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd, struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); struct ublk_queue *ubq = pdu->ubq; struct task_struct *task; + struct ublk_io *io; if (WARN_ON_ONCE(!ubq)) return; @@ -1791,13 +1784,14 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd, return; task = io_uring_cmd_get_task(cmd); - if (WARN_ON_ONCE(task && task != ubq->ubq_daemon)) + io = &ubq->ios[pdu->tag]; + if (WARN_ON_ONCE(task && task != io->task)) return; if (!ubq->canceling) ublk_start_cancel(ubq); - WARN_ON_ONCE(ubq->ios[pdu->tag].cmd != cmd); + WARN_ON_ONCE(io->cmd != cmd); ublk_cancel_cmd(ubq, pdu->tag, issue_flags); } @@ -1930,8 +1924,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) { ubq->nr_io_ready++; if (ublk_queue_ready(ubq)) { - ubq->ubq_daemon = current; - get_task_struct(ubq->ubq_daemon); ub->nr_queues_ready++; if (capable(CAP_SYS_ADMIN)) @@ -2084,6 +2076,7 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq, } ublk_fill_io_cmd(io, cmd, buf_addr); + WRITE_ONCE(io->task, get_task_struct(current)); ublk_mark_io_ready(ub, ubq); out: mutex_unlock(&ub->mutex); @@ -2179,6 +2172,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, const struct ublksrv_io_cmd *ub_cmd) { struct ublk_device *ub = cmd->file->private_data; + struct task_struct *task; struct ublk_queue *ubq; struct ublk_io *io; u32 cmd_op = cmd->cmd_op; @@ -2193,13 +2187,14 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, goto out; ubq = ublk_get_queue(ub, ub_cmd->q_id); - if (ubq->ubq_daemon && ubq->ubq_daemon != current) - goto out; if (tag >= ubq->q_depth) goto out; io = &ubq->ios[tag]; + task = READ_ONCE(io->task); + if (task && task != current) + goto out; /* there is pending io cmd, something must be wrong */ if (io->flags & UBLK_IO_FLAG_ACTIVE) { @@ -2449,9 +2444,14 @@ static void ublk_deinit_queue(struct ublk_device *ub, int q_id) { int size = ublk_queue_cmd_buf_size(ub, q_id); struct ublk_queue *ubq = ublk_get_queue(ub, q_id); + int i; + + for (i = 0; i < ubq->q_depth; i++) { + struct ublk_io *io = &ubq->ios[i]; + if (io->task) + put_task_struct(io->task); + } - if (ubq->ubq_daemon) - put_task_struct(ubq->ubq_daemon); if (ubq->io_cmd_buf) free_pages((unsigned long)ubq->io_cmd_buf, get_order(size)); } @@ -2923,7 +2923,8 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header) ub->dev_info.flags &= UBLK_F_ALL; ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE | - UBLK_F_URING_CMD_COMP_IN_TASK; + UBLK_F_URING_CMD_COMP_IN_TASK | + UBLK_F_PER_IO_DAEMON; /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */ if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY | @@ -3188,14 +3189,14 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub, int ublksrv_pid = (int)header->data[0]; int ret = -EINVAL; - pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n", - __func__, ub->dev_info.nr_hw_queues, header->dev_id); - /* wait until new ubq_daemon sending all FETCH_REQ */ + 
pr_devel("%s: Waiting for all FETCH_REQs, dev id %d...\n", __func__, + header->dev_id); + if (wait_for_completion_interruptible(&ub->completion)) return -EINTR; - pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n", - __func__, ub->dev_info.nr_hw_queues, header->dev_id); + pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__, + header->dev_id); mutex_lock(&ub->mutex); if (ublk_nosrv_should_stop_dev(ub)) diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h index 56c7e3fc666f..77d9d6af46da 100644 --- a/include/uapi/linux/ublk_cmd.h +++ b/include/uapi/linux/ublk_cmd.h @@ -272,6 +272,15 @@ */ #define UBLK_F_QUIESCE (1ULL << 12) +/* + * If this feature is set, ublk_drv supports each (qid,tag) pair having + * its own independent daemon task that is responsible for handling it. + * If it is not set, daemons are per-queue instead, so for two pairs + * (qid1,tag1) and (qid2,tag2), if qid1 == qid2, then the same task must + * be responsible for handling (qid1,tag1) and (qid2,tag2). + */ +#define UBLK_F_PER_IO_DAEMON (1ULL << 13) + /* device state */ #define UBLK_S_DEV_DEAD 0 #define UBLK_S_DEV_LIVE 1 -- cgit v1.2.3