| 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
| 2 | #ifndef _LINUX_IO_URING_CMD_H |
| 3 | #define _LINUX_IO_URING_CMD_H |
| 4 | |
| 5 | #include <uapi/linux/io_uring.h> |
| 6 | #include <linux/io_uring_types.h> |
| 7 | #include <linux/blk-mq.h> |
| 8 | |
| 9 | /* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */ |
| 10 | #define IORING_URING_CMD_CANCELABLE (1U << 30) |
| 11 | /* io_uring_cmd is being issued again */ |
| 12 | #define IORING_URING_CMD_REISSUE (1U << 31) |
| 13 | |
/*
 * Driver-visible view of a uring_cmd request.  This struct overlays the
 * per-opcode command area of the core io_kiocb request; convert between
 * the two with cmd_to_io_kiocb() / io_kiocb_to_cmd().
 */
struct io_uring_cmd {
	struct file *file;
	/* submission queue entry this command was issued from */
	const struct io_uring_sqe *sqe;
	u32 cmd_op;	/* driver-specific command opcode (presumably sqe->cmd_op — confirm) */
	u32 flags;	/* IORING_URING_CMD_* bits; top 8 bits kernel-internal */
	u8 pdu[32]; /* available inline for free use */
	u8 unused[8];
};
| 22 | |
/* Return a pointer to the driver-private command payload inside @sqe. */
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}
| 27 | |
/*
 * Build-time guard used by io_uring_cmd_to_pdu(): fails the build if a
 * driver's pdu type does not fit in the inline io_uring_cmd->pdu area.
 */
static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
/*
 * Cast the inline pdu storage of @cmd to the driver's @pdu_type, with a
 * compile-time size check (comma expression keeps this usable as an rvalue).
 */
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)
| 36 | |
#if defined(CONFIG_IO_URING)
/*
 * Set up @iter over a pre-registered fixed buffer described by @ubuf/@len
 * for this command; @rw is the data direction.
 */
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags);
/*
 * Vectored variant: import @uvec_segs user iovecs as fixed buffers into
 * @iter; @ddir is the data direction.
 */
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
			 unsigned issue_flags, bool is_cqe32);

/* Schedule @task_work_cb to run for @ioucmd in task context. */
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       io_req_tw_func_t task_work_cb,
			       unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags);

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

/*
 * Select a buffer from the provided buffer group for multishot uring_cmd.
 * Returns the selected buffer address and size.
 */
struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd,
					    unsigned buf_group, size_t *len,
					    unsigned int issue_flags);

/*
 * Complete a multishot uring_cmd event. This will post a CQE to the completion
 * queue and update the provided buffer.
 */
bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
				 struct io_br_sel *sel, unsigned int issue_flags);

#else
/*
 * CONFIG_IO_URING=n: inert stubs so drivers can build unconditionally.
 * Fallible operations report -EOPNOTSUPP; notification hooks are no-ops.
 */
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			  struct iov_iter *iter, struct io_uring_cmd *ioucmd,
			  unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}
static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
						const struct iovec __user *uvec,
						size_t uvec_segs,
						int ddir, struct iov_iter *iter,
						unsigned issue_flags)
{
	return -EOPNOTSUPP;
}
static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
				       u64 ret2, unsigned issue_flags, bool is_cqe32)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
					     io_req_tw_func_t task_work_cb, unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
						unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
static inline struct io_br_sel
io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, unsigned buf_group,
			   size_t *len, unsigned int issue_flags)
{
	return (struct io_br_sel) { .val = -EOPNOTSUPP };
}
static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
					       struct io_br_sel *sel, unsigned int issue_flags)
{
	return true;
}
#endif
| 130 | |
/* Recover the io_uring_cmd from the request handed to a task_work callback. */
static inline struct io_uring_cmd *io_uring_cmd_from_tw(struct io_tw_req tw_req)
{
	return io_kiocb_to_cmd(tw_req.req, struct io_uring_cmd);
}
| 135 | |
| 136 | /* task_work executor checks the deferred list completion */ |
| 137 | #define IO_URING_CMD_TASK_WORK_ISSUE_FLAGS IO_URING_F_COMPLETE_DEFER |
| 138 | |
| 139 | /* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */ |
| 140 | static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd, |
| 141 | io_req_tw_func_t task_work_cb) |
| 142 | { |
| 143 | __io_uring_cmd_do_in_task(ioucmd, task_work_cb, flags: IOU_F_TWQ_LAZY_WAKE); |
| 144 | } |
| 145 | |
| 146 | static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, |
| 147 | io_req_tw_func_t task_work_cb) |
| 148 | { |
| 149 | __io_uring_cmd_do_in_task(ioucmd, task_work_cb, flags: 0); |
| 150 | } |
| 151 | |
| 152 | static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd) |
| 153 | { |
| 154 | return cmd_to_io_kiocb(ptr: cmd)->tctx->task; |
| 155 | } |
| 156 | |
| 157 | /* |
| 158 | * Return uring_cmd's context reference as its context handle for driver to |
| 159 | * track per-context resource, such as registered kernel IO buffer |
| 160 | */ |
| 161 | static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd) |
| 162 | { |
| 163 | return cmd_to_io_kiocb(ptr: cmd)->ctx; |
| 164 | } |
| 165 | |
| 166 | static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, |
| 167 | unsigned issue_flags) |
| 168 | { |
| 169 | return __io_uring_cmd_done(cmd: ioucmd, ret, res2: 0, issue_flags, is_cqe32: false); |
| 170 | } |
| 171 | |
| 172 | static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret, |
| 173 | u64 res2, unsigned issue_flags) |
| 174 | { |
| 175 | return __io_uring_cmd_done(cmd: ioucmd, ret, res2, issue_flags, is_cqe32: true); |
| 176 | } |
| 177 | |
/*
 * Register the bio_vec pages of @rq as a fixed buffer at table slot @index;
 * @release is presumably invoked when the buffer registration is dropped —
 * TODO(review): confirm against the implementation.
 */
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags);
/* Drop the fixed-buffer registration previously made at slot @index. */
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags);
| 183 | |
| 184 | #endif /* _LINUX_IO_URING_CMD_H */ |
| 185 | |