/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/blk-mq.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

struct io_uring_cmd_data {
	void		*op_data;
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}
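
/*
 * Example (illustrative only, not part of this header's API): a driver's
 * ->uring_cmd() handler typically starts by mapping the SQE's inline command
 * area onto its own command layout.  "foo_cmd" and "foo_uring_cmd" are
 * hypothetical names used purely for illustration.
 *
 *	struct foo_cmd {
 *		__u64	addr;
 *		__u32	len;
 *		__u32	flags;
 *	};
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 *	{
 *		const struct foo_cmd *fc = io_uring_sqe_cmd(ioucmd->sqe);
 *
 *		// fc->addr / fc->len / fc->flags now describe the operation
 *		return -EIOCBQUEUED;
 *	}
 */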

static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)
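
/*
 * Example (illustrative only): io_uring_cmd_to_pdu() is how a driver stashes
 * per-command state in the 32-byte pdu area; the BUILD_BUG_ON above rejects
 * any type that does not fit.  "foo_cmd_pdu" and "foo_setup" are hypothetical.
 *
 *	struct foo_cmd_pdu {
 *		u32	tag;
 *		int	status;
 *	};
 *
 *	static void foo_setup(struct io_uring_cmd *ioucmd)
 *	{
 *		struct foo_cmd_pdu *pdu = io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);
 *
 *		pdu->tag = 0;
 *		pdu->status = -EINPROGRESS;
 *	}
 */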

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags);
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags);

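/*
 * Example (illustrative only): importing a pre-registered fixed buffer into
 * an iov_iter before handing it to the driver's I/O path.  "ubuf", "len" and
 * "ddir" stand for values the driver has already parsed from its command;
 * they are placeholders, not part of this API.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	// ddir is READ or WRITE depending on the data direction
 *	ret = io_uring_cmd_import_fixed(ubuf, len, ddir, &iter, ioucmd,
 *					issue_flags);
 *	if (ret < 0)
 *		return ret;
 *	// iter now describes the registered buffer selected by the submission
 */
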
/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @cmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
		       unsigned issue_flags);
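
/*
 * Example (illustrative only): completing a command once the driver knows
 * its result.  @issue_flags must be whatever mask the core passed into the
 * current callback (->uring_cmd(), a task-work callback, etc.), never a
 * hand-rolled value.  "foo_complete" is hypothetical.
 *
 *	static void foo_complete(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags, int err)
 *	{
 *		io_uring_cmd_done(ioucmd, err, 0, issue_flags);
 *	}
 */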

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and must only pass
 * the mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags);
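
/*
 * Example (illustrative only): a driver that wants its in-flight command to
 * participate in io_uring cancellation marks it while issuing, using the
 * issue_flags handed to ->uring_cmd().  The core may later re-invoke
 * ->uring_cmd() with IO_URING_F_CANCEL set if the request needs cancelling.
 * "foo_uring_cmd" is hypothetical.
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 *	{
 *		io_uring_cmd_mark_cancelable(ioucmd, issue_flags);
 *		// ... queue the hardware operation, complete later ...
 *		return -EIOCBQUEUED;
 *	}
 */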

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			  struct iov_iter *iter, struct io_uring_cmd *ioucmd,
			  unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}
static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
						const struct iovec __user *uvec,
						size_t uvec_segs,
						int ddir, struct iov_iter *iter,
						unsigned issue_flags)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
				     u64 res2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
						unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
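
/*
 * Example (illustrative only): a driver whose completion arrives in hard-IRQ
 * or other atomic context cannot call io_uring_cmd_done() there; instead it
 * defers the completion to task context.  "foo_irq_done" and "foo_task_cb"
 * are hypothetical.
 *
 *	static void foo_task_cb(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 *	{
 *		io_uring_cmd_done(ioucmd, 0, 0, issue_flags);
 *	}
 *
 *	static void foo_irq_done(struct io_uring_cmd *ioucmd)
 *	{
 *		io_uring_cmd_complete_in_task(ioucmd, foo_task_cb);
 *	}
 */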

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->async_data;
}

/*
 * Return the uring_cmd's context reference as its context handle, for the
 * driver to track per-context resources such as registered kernel I/O
 * buffers.
 */
static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->ctx;
}
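
/*
 * Example (illustrative only): the handle is opaque; a driver can use it as a
 * key for state it keeps per io_uring context, e.g. in an xarray indexed by
 * the handle's pointer value.  "foo_dev" and "foo_ctx_state" are hypothetical.
 *
 *	static struct foo_ctx_state *foo_find_ctx(struct foo_dev *fd,
 *						  struct io_uring_cmd *ioucmd)
 *	{
 *		unsigned long key = (unsigned long)io_uring_cmd_ctx_handle(ioucmd);
 *
 *		return xa_load(&fd->ctx_states, key);
 *	}
 */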

int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags);
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags);
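
/*
 * Example (illustrative only): exposing a block request's bvecs as a fixed
 * buffer at @index, so another ring submission can do zero-copy I/O against
 * it.  The release callback runs once the registered buffer is no longer
 * referenced.  "foo_release" and "foo_register" are hypothetical.
 *
 *	static void foo_release(void *priv)
 *	{
 *		// the bvecs are no longer referenced; finish the request
 *	}
 *
 *	static int foo_register(struct io_uring_cmd *ioucmd, struct request *rq,
 *				unsigned int index, unsigned int issue_flags)
 *	{
 *		return io_buffer_register_bvec(ioucmd, rq, foo_release, index,
 *					       issue_flags);
 *	}
 */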

#endif /* _LINUX_IO_URING_CMD_H */