
#include "kublk.h"

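+/*
+ * Fallback definitions in case the installed io_uring uapi headers are too
+ * old to provide these NOP test flags (values mirror the uapi ones).
+ */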
+#ifndef IORING_NOP_INJECT_RESULT
+#define IORING_NOP_INJECT_RESULT	(1U << 0)
+#endif
+
+#ifndef IORING_NOP_FIXED_BUFFER
+#define IORING_NOP_FIXED_BUFFER	(1U << 3)
+#endif
+
static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	const struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
@@ -20,19 +28,79 @@ static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
		},
	};

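+	/*
+	 * Zero copy issues extra buffer register/unregister SQEs for each
+	 * ublk I/O, so give the io_uring rings more room.
+	 */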
+	if (info->flags & UBLK_F_SUPPORT_ZERO_COPY)
+		dev->tgt.sq_depth = dev->tgt.cq_depth = 2 * info->queue_depth;
	return 0;
}

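+/*
+ * Queue a hard-linked chain of three SQEs for one request: register the
+ * request buffer, run a NOP that reports the injected byte count as its
+ * result, then unregister the buffer.
+ */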
+static int null_queue_zc_io(struct ublk_queue *q, int tag)
+{
+	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
+	unsigned ublk_op = ublksrv_get_op(iod);
+	struct io_uring_sqe *sqe[3];
+
+	ublk_queue_alloc_sqes(q, sqe, 3);
+
+	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
+	sqe[0]->user_data = build_user_data(tag,
+			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
+	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
+
+	io_uring_prep_nop(sqe[1]);
+	sqe[1]->buf_index = tag;
+	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
+	sqe[1]->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
+	sqe[1]->len = iod->nr_sectors << 9; /* injected result */
+	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1);
+
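+	/* last SQE in the chain: drop the per-tag buffer registration again */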
+	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
+	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1);
+
+	// buf register is marked as IOSQE_CQE_SKIP_SUCCESS
+	return 2;
+}
+
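+/*
+ * Completion handler for the chained SQEs: record the first result or
+ * error, and complete the ublk request once every expected CQE arrived.
+ */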
+static void ublk_null_io_done(struct ublk_queue *q, int tag,
+			      const struct io_uring_cqe *cqe)
+{
+	unsigned op = user_data_to_op(cqe->user_data);
+	struct ublk_io *io = ublk_get_io(q, tag);
+
+	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
+		if (!io->result)
+			io->result = cqe->res;
+		if (cqe->res < 0)
+			ublk_err("%s: io failed op %x user_data %lx\n",
+					__func__, op, cqe->user_data);
+	}
+
+	/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
+	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
+		io->tgt_ios += 1;
+
+	if (ublk_completed_tgt_io(q, tag))
+		ublk_complete_io(q, tag, io->result);
+}
+
static int ublk_null_queue_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
+	int zc = ublk_queue_use_zc(q);
+	int queued;
+
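+	/* without zero copy the null target can complete the I/O in place */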
+	if (!zc) {
+		ublk_complete_io(q, tag, iod->nr_sectors << 9);
+		return 0;
+	}

-	ublk_complete_io(q, tag, iod->nr_sectors << 9);
+	queued = null_queue_zc_io(q, tag);
+	ublk_queued_tgt_io(q, tag, queued);
	return 0;
}

const struct ublk_tgt_ops null_tgt_ops = {
	.name = "null",
	.init_tgt = ublk_null_tgt_init,
	.queue_io = ublk_null_queue_io,
+	.tgt_io_done = ublk_null_io_done,
};