/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
 * Header file for the io_uring interface.
 *
 * Copyright (C) 2019 Jens Axboe
 * Copyright (C) 2019 Christoph Hellwig
 */
#ifndef LINUX_IO_URING_H
#define LINUX_IO_URING_H

#include <linux/fs.h>
#include <linux/types.h>
/*
 * this file is shared with liburing and that has to autodetect
 * if linux/time_types.h is available or not, it can
 * define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
 * if linux/time_types.h is not available
 */
#ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
#include <linux/time_types.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * IO submission data structure (Submission Queue Entry)
 */
struct io_uring_sqe {
	__u8	opcode;		/* type of operation for this sqe */
	__u8	flags;		/* IOSQE_ flags */
	__u16	ioprio;		/* ioprio for the request */
	__s32	fd;		/* file descriptor to do IO on */
	union {
		__u64	off;	/* offset into file */
		__u64	addr2;
		struct {
			__u32	cmd_op;
			__u32	__pad1;
		};
	};
	union {
		__u64	addr;	/* pointer to buffer or iovecs */
		__u64	splice_off_in;
		struct {
			__u32	level;
			__u32	optname;
		};
	};
	__u32	len;		/* buffer size or number of iovecs */
	/* opcode-specific flags; which member is live depends on sqe->opcode */
	union {
		__kernel_rwf_t	rw_flags;
		__u32		fsync_flags;
		__u16		poll_events;	/* compatibility */
		__u32		poll32_events;	/* word-reversed for BE */
		__u32		sync_range_flags;
		__u32		msg_flags;
		__u32		timeout_flags;
		__u32		accept_flags;
		__u32		cancel_flags;
		__u32		open_flags;
		__u32		statx_flags;
		__u32		fadvise_advice;
		__u32		splice_flags;
		__u32		rename_flags;
		__u32		unlink_flags;
		__u32		hardlink_flags;
		__u32		xattr_flags;
		__u32		msg_ring_flags;
		__u32		uring_cmd_flags;
		__u32		waitid_flags;
		__u32		futex_flags;
		__u32		install_fd_flags;
		__u32		nop_flags;
	};
	__u64	user_data;	/* data to be passed back at completion time */
	/* pack this to avoid bogus arm OABI complaints */
	union {
		/* index into fixed buffers, if used */
		__u16	buf_index;
		/* for grouped buffer selection */
		__u16	buf_group;
	} __attribute__((packed));
	/* personality to use, if used */
	__u16	personality;
	union {
		__s32	splice_fd_in;
		__u32	file_index;
		__u32	optlen;
		struct {
			__u16	addr_len;
			__u16	__pad3[1];
		};
	};
	union {
		struct {
			__u64	addr3;
			__u64	__pad2[1];
		};
		__u64	optval;
		/*
		 * If the ring is initialized with IORING_SETUP_SQE128, then
		 * this field is used for 80 bytes of arbitrary command data
		 */
		__u8	cmd[0];
	};
};

/*
 * If sqe->file_index is set to this for opcodes that instantiate a new
 * direct descriptor (like openat/openat2/accept), then io_uring will allocate
 * an available direct descriptor instead of having the application pass one
 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
 * if the space is full.
 */
#define IORING_FILE_INDEX_ALLOC		(~0U)

/* bit positions for sqe->flags; the IOSQE_* masks below are (1U << bit) */
enum io_uring_sqe_flags_bit {
	IOSQE_FIXED_FILE_BIT,
	IOSQE_IO_DRAIN_BIT,
	IOSQE_IO_LINK_BIT,
	IOSQE_IO_HARDLINK_BIT,
	IOSQE_ASYNC_BIT,
	IOSQE_BUFFER_SELECT_BIT,
	IOSQE_CQE_SKIP_SUCCESS_BIT,
};

/*
 * sqe->flags
 */
/* use fixed fileset */
#define IOSQE_FIXED_FILE	(1U << IOSQE_FIXED_FILE_BIT)
/* issue after inflight IO */
#define IOSQE_IO_DRAIN		(1U << IOSQE_IO_DRAIN_BIT)
/* links next sqe */
#define IOSQE_IO_LINK		(1U << IOSQE_IO_LINK_BIT)
/* like LINK, but stronger */
#define IOSQE_IO_HARDLINK	(1U << IOSQE_IO_HARDLINK_BIT)
/* always go async */
#define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT	(1U << IOSQE_BUFFER_SELECT_BIT)
/* don't post CQE if request succeeded */
#define IOSQE_CQE_SKIP_SUCCESS	(1U << IOSQE_CQE_SKIP_SUCCESS_BIT)

/*
 * io_uring_setup() flags
 */
#define IORING_SETUP_IOPOLL	(1U << 0)	/* io_context is polled */
#define IORING_SETUP_SQPOLL	(1U << 1)	/* SQ poll thread */
#define IORING_SETUP_SQ_AFF	(1U << 2)	/* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
#define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
#define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */
#define IORING_SETUP_SUBMIT_ALL	(1U << 7)	/* continue submit on error */
/*
 * Cooperative task running. When requests complete, they often require
 * forcing the submitter to transition to the kernel to complete. If this
 * flag is set, work will be done when the task transitions anyway, rather
 * than force an inter-processor interrupt reschedule. This avoids interrupting
 * a task running in userspace, and saves an IPI.
 */
#define IORING_SETUP_COOP_TASKRUN	(1U << 8)
/*
 * If COOP_TASKRUN is set, get notified if task work is available for
 * running and a kernel transition would be needed to run it. This sets
 * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
 */
#define IORING_SETUP_TASKRUN_FLAG	(1U << 9)
#define IORING_SETUP_SQE128		(1U << 10) /* SQEs are 128 byte */
#define IORING_SETUP_CQE32		(1U << 11) /* CQEs are 32 byte */
/*
 * Only one task is allowed to submit requests
 */
#define IORING_SETUP_SINGLE_ISSUER	(1U << 12)

/*
 * Defer running task work to get events.
 * Rather than running bits of task work whenever the task transitions
 * try to do it just before it is needed.
 */
#define IORING_SETUP_DEFER_TASKRUN	(1U << 13)

/*
 * Application provides the memory for the rings
 */
#define IORING_SETUP_NO_MMAP	(1U << 14)

/*
 * Register the ring fd in itself for use with
 * IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather
 * than an fd.
 */
#define IORING_SETUP_REGISTERED_FD_ONLY	(1U << 15)

/*
 * Removes indirection through the SQ index array.
 */
#define IORING_SETUP_NO_SQARRAY		(1U << 16)

/* sqe->opcode values; new opcodes are only ever appended (uAPI) */
enum io_uring_op {
	IORING_OP_NOP,
	IORING_OP_READV,
	IORING_OP_WRITEV,
	IORING_OP_FSYNC,
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED,
	IORING_OP_POLL_ADD,
	IORING_OP_POLL_REMOVE,
	IORING_OP_SYNC_FILE_RANGE,
	IORING_OP_SENDMSG,
	IORING_OP_RECVMSG,
	IORING_OP_TIMEOUT,
	IORING_OP_TIMEOUT_REMOVE,
	IORING_OP_ACCEPT,
	IORING_OP_ASYNC_CANCEL,
	IORING_OP_LINK_TIMEOUT,
	IORING_OP_CONNECT,
	IORING_OP_FALLOCATE,
	IORING_OP_OPENAT,
	IORING_OP_CLOSE,
	IORING_OP_FILES_UPDATE,
	IORING_OP_STATX,
	IORING_OP_READ,
	IORING_OP_WRITE,
	IORING_OP_FADVISE,
	IORING_OP_MADVISE,
	IORING_OP_SEND,
	IORING_OP_RECV,
	IORING_OP_OPENAT2,
	IORING_OP_EPOLL_CTL,
	IORING_OP_SPLICE,
	IORING_OP_PROVIDE_BUFFERS,
	IORING_OP_REMOVE_BUFFERS,
	IORING_OP_TEE,
	IORING_OP_SHUTDOWN,
	IORING_OP_RENAMEAT,
	IORING_OP_UNLINKAT,
	IORING_OP_MKDIRAT,
	IORING_OP_SYMLINKAT,
	IORING_OP_LINKAT,
	IORING_OP_MSG_RING,
	IORING_OP_FSETXATTR,
	IORING_OP_SETXATTR,
	IORING_OP_FGETXATTR,
	IORING_OP_GETXATTR,
	IORING_OP_SOCKET,
	IORING_OP_URING_CMD,
	IORING_OP_SEND_ZC,
	IORING_OP_SENDMSG_ZC,
	IORING_OP_READ_MULTISHOT,
	IORING_OP_WAITID,
	IORING_OP_FUTEX_WAIT,
	IORING_OP_FUTEX_WAKE,
	IORING_OP_FUTEX_WAITV,
	IORING_OP_FIXED_FD_INSTALL,
	IORING_OP_FTRUNCATE,

	/* this goes last, obviously */
	IORING_OP_LAST,
};

/*
 * sqe->uring_cmd_flags		top 8bits aren't available for userspace
 * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
 *				along with setting sqe->buf_index.
 */
#define IORING_URING_CMD_FIXED	(1U << 0)
#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED


/*
 * sqe->fsync_flags
 */
#define IORING_FSYNC_DATASYNC	(1U << 0)

/*
 * sqe->timeout_flags
 */
#define IORING_TIMEOUT_ABS		(1U << 0)
#define IORING_TIMEOUT_UPDATE		(1U << 1)
#define IORING_TIMEOUT_BOOTTIME		(1U << 2)
#define IORING_TIMEOUT_REALTIME		(1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE	(1U << 4)
#define IORING_TIMEOUT_ETIME_SUCCESS	(1U << 5)
#define IORING_TIMEOUT_MULTISHOT	(1U << 6)
#define IORING_TIMEOUT_CLOCK_MASK	(IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK	(IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
 * sqe->splice_flags
 * extends splice(2) flags
 */
#define SPLICE_F_FD_IN_FIXED	(1U << 31) /* the last bit of __u32 */

/*
 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
 * command flags for POLL_ADD are stored in sqe->len.
 *
 * IORING_POLL_ADD_MULTI	Multishot poll. Sets IORING_CQE_F_MORE if
 *				the poll handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_POLL_UPDATE		Update existing poll request, matching
 *				sqe->addr as the old user_data field.
 *
 * IORING_POLL_LEVEL		Level triggered poll.
 */
#define IORING_POLL_ADD_MULTI		(1U << 0)
#define IORING_POLL_UPDATE_EVENTS	(1U << 1)
#define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
#define IORING_POLL_ADD_LEVEL		(1U << 3)

/*
 * ASYNC_CANCEL flags.
 *
 * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
 * IORING_ASYNC_CANCEL_FD	Key off 'fd' for cancelation rather than the
 *				request 'user_data'
 * IORING_ASYNC_CANCEL_ANY	Match any request
 * IORING_ASYNC_CANCEL_FD_FIXED	'fd' passed in is a fixed descriptor
 * IORING_ASYNC_CANCEL_USERDATA	Match on user_data, default for no other key
 * IORING_ASYNC_CANCEL_OP	Match request based on opcode
 */
#define IORING_ASYNC_CANCEL_ALL		(1U << 0)
#define IORING_ASYNC_CANCEL_FD		(1U << 1)
#define IORING_ASYNC_CANCEL_ANY		(1U << 2)
#define IORING_ASYNC_CANCEL_FD_FIXED	(1U << 3)
#define IORING_ASYNC_CANCEL_USERDATA	(1U << 4)
#define IORING_ASYNC_CANCEL_OP		(1U << 5)

/*
 * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
 *
 * IORING_RECVSEND_POLL_FIRST	If set, instead of first attempting to send
 *				or receive and arm poll if that yields an
 *				-EAGAIN result, arm poll upfront and skip
 *				the initial transfer attempt.
 *
 * IORING_RECV_MULTISHOT	Multishot recv. Sets IORING_CQE_F_MORE if
 *				the handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_RECVSEND_FIXED_BUF	Use registered buffers, the index is stored in
 *				the buf_index field.
 *
 * IORING_SEND_ZC_REPORT_USAGE
 *				If set, SEND[MSG]_ZC should report
 *				the zerocopy usage in cqe.res
 *				for the IORING_CQE_F_NOTIF cqe.
 *				0 is reported if zerocopy was actually possible.
 *				IORING_NOTIF_USAGE_ZC_COPIED if data was copied
 *				(at least partially).
 *
 * IORING_RECVSEND_BUNDLE	Used with IOSQE_BUFFER_SELECT. If set, send or
 *				recv will grab as many buffers from the buffer
 *				group ID given and send them all. The completion
 *				result will be the number of buffers sent, with
 *				the starting buffer ID in cqe->flags as per
 *				usual for provided buffer usage. The buffers
 *				will be contiguous from the starting buffer ID.
 */
#define IORING_RECVSEND_POLL_FIRST	(1U << 0)
#define IORING_RECV_MULTISHOT		(1U << 1)
#define IORING_RECVSEND_FIXED_BUF	(1U << 2)
#define IORING_SEND_ZC_REPORT_USAGE	(1U << 3)
#define IORING_RECVSEND_BUNDLE		(1U << 4)

/*
 * cqe.res for IORING_CQE_F_NOTIF if
 * IORING_SEND_ZC_REPORT_USAGE was requested
 *
 * It should be treated as a flag, all other
 * bits of cqe.res should be treated as reserved!
 */
#define IORING_NOTIF_USAGE_ZC_COPIED	(1U << 31)

/*
 * accept flags stored in sqe->ioprio
 */
#define IORING_ACCEPT_MULTISHOT		(1U << 0)
#define IORING_ACCEPT_DONTWAIT		(1U << 1)
#define IORING_ACCEPT_POLL_FIRST	(1U << 2)

/*
 * IORING_OP_MSG_RING command types, stored in sqe->addr
 */
enum io_uring_msg_ring_flags {
	IORING_MSG_DATA,	/* pass sqe->len as 'res' and off as user_data */
	IORING_MSG_SEND_FD,	/* send a registered fd to another ring */
};

/*
 * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
 *
 * IORING_MSG_RING_CQE_SKIP	Don't post a CQE to the target ring. Not
 *				applicable for IORING_MSG_DATA, obviously.
 */
#define IORING_MSG_RING_CQE_SKIP	(1U << 0)
/* Pass through the flags from sqe->file_index to cqe->flags */
#define IORING_MSG_RING_FLAGS_PASS	(1U << 1)

/*
 * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags)
 *
 * IORING_FIXED_FD_NO_CLOEXEC	Don't mark the fd as O_CLOEXEC
 */
#define IORING_FIXED_FD_NO_CLOEXEC	(1U << 0)

/*
 * IORING_OP_NOP flags (sqe->nop_flags)
 *
 * IORING_NOP_INJECT_RESULT	Inject result from sqe->result
 */
#define IORING_NOP_INJECT_RESULT	(1U << 0)

/*
 * IO completion data structure (Completion Queue Entry)
 */
struct io_uring_cqe {
	__u64	user_data;	/* sqe->data submission passed back */
	__s32	res;		/* result code for this event */
	__u32	flags;

	/*
	 * If the ring is initialized with IORING_SETUP_CQE32, then this field
	 * contains 16-bytes of padding, doubling the size of the CQE.
	 */
	__u64 big_cqe[];
};

/*
 * cqe->flags
 *
 * IORING_CQE_F_BUFFER	If set, the upper 16 bits are the buffer ID
 * IORING_CQE_F_MORE	If set, parent SQE will generate more CQE entries
 * IORING_CQE_F_SOCK_NONEMPTY	If set, more data to read after socket recv
 * IORING_CQE_F_NOTIF	Set for notification CQEs. Can be used to distinguish
 * 			them from sends.
 */
#define IORING_CQE_F_BUFFER		(1U << 0)
#define IORING_CQE_F_MORE		(1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY	(1U << 2)
#define IORING_CQE_F_NOTIF		(1U << 3)

#define IORING_CQE_BUFFER_SHIFT		16

/*
 * Magic offsets for the application to mmap the data it needs
 */
#define IORING_OFF_SQ_RING		0ULL
#define IORING_OFF_CQ_RING		0x8000000ULL
#define IORING_OFF_SQES			0x10000000ULL
#define IORING_OFF_PBUF_RING		0x80000000ULL
#define IORING_OFF_PBUF_SHIFT		16
#define IORING_OFF_MMAP_MASK		0xf8000000ULL

/*
 * Filled with the offset for mmap(2)
 */
struct io_sqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 flags;
	__u32 dropped;
	__u32 array;
	__u32 resv1;
	__u64 user_addr;
};

/*
 * sq_ring->flags
 */
#define IORING_SQ_NEED_WAKEUP	(1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW	(1U << 1) /* CQ ring is overflown */
#define IORING_SQ_TASKRUN	(1U << 2) /* task should enter the kernel */

struct io_cqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 overflow;
	__u32 cqes;
	__u32 flags;
	__u32 resv1;
	__u64 user_addr;
};

/*
 * cq_ring->flags
 */

/* disable eventfd notifications */
#define IORING_CQ_EVENTFD_DISABLED	(1U << 0)

/*
 * io_uring_enter(2) flags
 */
#define IORING_ENTER_GETEVENTS		(1U << 0)
#define IORING_ENTER_SQ_WAKEUP		(1U << 1)
#define IORING_ENTER_SQ_WAIT		(1U << 2)
#define IORING_ENTER_EXT_ARG		(1U << 3)
#define IORING_ENTER_REGISTERED_RING	(1U << 4)

/*
 * Passed in for io_uring_setup(2). Copied back with updated info on success
 */
struct io_uring_params {
	__u32 sq_entries;
	__u32 cq_entries;
	__u32 flags;
	__u32 sq_thread_cpu;
	__u32 sq_thread_idle;
	__u32 features;
	__u32 wq_fd;
	__u32 resv[3];
	struct io_sqring_offsets sq_off;
	struct io_cqring_offsets cq_off;
};

/*
 * io_uring_params->features flags
 */
#define IORING_FEAT_SINGLE_MMAP		(1U << 0)
#define IORING_FEAT_NODROP		(1U << 1)
#define IORING_FEAT_SUBMIT_STABLE	(1U << 2)
#define IORING_FEAT_RW_CUR_POS		(1U << 3)
#define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
#define IORING_FEAT_FAST_POLL		(1U << 5)
#define IORING_FEAT_POLL_32BITS 	(1U << 6)
#define IORING_FEAT_SQPOLL_NONFIXED	(1U << 7)
#define IORING_FEAT_EXT_ARG		(1U << 8)
#define IORING_FEAT_NATIVE_WORKERS	(1U << 9)
#define IORING_FEAT_RSRC_TAGS		(1U << 10)
#define IORING_FEAT_CQE_SKIP		(1U << 11)
#define IORING_FEAT_LINKED_FILE		(1U << 12)
#define IORING_FEAT_REG_REG_RING	(1U << 13)
#define IORING_FEAT_RECVSEND_BUNDLE	(1U << 14)

/*
 * io_uring_register(2) opcodes and arguments
 */
enum io_uring_register_op {
	IORING_REGISTER_BUFFERS			= 0,
	IORING_UNREGISTER_BUFFERS		= 1,
	IORING_REGISTER_FILES			= 2,
	IORING_UNREGISTER_FILES			= 3,
	IORING_REGISTER_EVENTFD			= 4,
	IORING_UNREGISTER_EVENTFD		= 5,
	IORING_REGISTER_FILES_UPDATE		= 6,
	IORING_REGISTER_EVENTFD_ASYNC		= 7,
	IORING_REGISTER_PROBE			= 8,
	IORING_REGISTER_PERSONALITY		= 9,
	IORING_UNREGISTER_PERSONALITY		= 10,
	IORING_REGISTER_RESTRICTIONS		= 11,
	IORING_REGISTER_ENABLE_RINGS		= 12,

	/* extended with tagging */
	IORING_REGISTER_FILES2			= 13,
	IORING_REGISTER_FILES_UPDATE2		= 14,
	IORING_REGISTER_BUFFERS2		= 15,
	IORING_REGISTER_BUFFERS_UPDATE		= 16,

	/* set/clear io-wq thread affinities */
	IORING_REGISTER_IOWQ_AFF		= 17,
	IORING_UNREGISTER_IOWQ_AFF		= 18,

	/* set/get max number of io-wq workers */
	IORING_REGISTER_IOWQ_MAX_WORKERS	= 19,

	/* register/unregister io_uring fd with the ring */
	IORING_REGISTER_RING_FDS		= 20,
	IORING_UNREGISTER_RING_FDS		= 21,

	/* register ring based provide buffer group */
	IORING_REGISTER_PBUF_RING		= 22,
	IORING_UNREGISTER_PBUF_RING		= 23,

	/* sync cancelation API */
	IORING_REGISTER_SYNC_CANCEL		= 24,

	/* register a range of fixed file slots for automatic slot allocation */
	IORING_REGISTER_FILE_ALLOC_RANGE	= 25,

	/* return status information for a buffer group */
	IORING_REGISTER_PBUF_STATUS		= 26,

	/* set/clear busy poll settings */
	IORING_REGISTER_NAPI			= 27,
	IORING_UNREGISTER_NAPI			= 28,

	/* this goes last */
	IORING_REGISTER_LAST,

	/* flag added to the opcode to use a registered ring fd */
	IORING_REGISTER_USE_REGISTERED_RING	= 1U << 31
};

/* io-wq worker categories */
enum io_wq_type {
	IO_WQ_BOUND,
	IO_WQ_UNBOUND,
};

/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 /* __s32 * */ fds;
};

/*
 * Register a fully sparse file space, rather than pass in an array of all
 * -1 file descriptors.
 */
#define IORING_RSRC_REGISTER_SPARSE	(1U << 0)

struct io_uring_rsrc_register {
	__u32 nr;
	__u32 flags;
	__u64 resv2;
	__aligned_u64 data;
	__aligned_u64 tags;
};

struct io_uring_rsrc_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
};

struct io_uring_rsrc_update2 {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
	__aligned_u64 tags;
	__u32 nr;
	__u32 resv2;
};

/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP	(-2)

#define IO_URING_OP_SUPPORTED	(1U << 0)

struct io_uring_probe_op {
	__u8 op;
	__u8 resv;
	__u16 flags;	/* IO_URING_OP_* flags */
	__u32 resv2;
};

struct io_uring_probe {
	__u8 last_op;	/* last opcode supported */
	__u8 ops_len;	/* length of ops[] array below */
	__u16 resv;
	__u32 resv2[3];
	struct io_uring_probe_op ops[];
};

struct io_uring_restriction {
	__u16 opcode;
	union {
		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
		__u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
		__u8 sqe_flags;   /* IORING_RESTRICTION_SQE_FLAGS_* */
	};
	__u8 resv;
	__u32 resv2[3];
};

struct io_uring_buf {
	__u64	addr;
	__u32	len;
	__u16	bid;
	__u16	resv;
};

struct io_uring_buf_ring {
	union {
		/*
		 * To avoid spilling into more pages than we need to, the
		 * ring tail is overlaid with the io_uring_buf->resv field.
		 */
		struct {
			__u64	resv1;
			__u32	resv2;
			__u16	resv3;
			__u16	tail;
		};
		__DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
	};
};

/*
 * Flags for IORING_REGISTER_PBUF_RING.
 *
 * IOU_PBUF_RING_MMAP:	If set, kernel will allocate the memory for the ring.
 *			The application must not set a ring_addr in struct
 *			io_uring_buf_reg, instead it must subsequently call
 *			mmap(2) with the offset set as:
 *			IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
 *			to get a virtual mapping for the ring.
 */
enum io_uring_register_pbuf_ring_flags {
	IOU_PBUF_RING_MMAP	= 1,
};

/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
	__u64	ring_addr;	/* must be 0 if IOU_PBUF_RING_MMAP is set */
	__u32	ring_entries;
	__u16	bgid;		/* buffer group ID being registered */
	__u16	flags;		/* IOU_PBUF_RING_* flags */
	__u64	resv[3];	/* must be zero */
};

/* argument for IORING_REGISTER_PBUF_STATUS */
struct io_uring_buf_status {
	__u32	buf_group;	/* input */
	__u32	head;		/* output */
	__u32	resv[8];	/* must be zero */
};

/* argument for IORING_(UN)REGISTER_NAPI */
struct io_uring_napi {
	__u32	busy_poll_to;		/* busy poll timeout */
	__u8	prefer_busy_poll;
	__u8	pad[3];
	__u64	resv;			/* must be zero */
};

/*
 * io_uring_restriction->opcode values
 */
enum io_uring_register_restriction_op {
	/* Allow an io_uring_register(2) opcode */
	IORING_RESTRICTION_REGISTER_OP		= 0,

	/* Allow an sqe opcode */
	IORING_RESTRICTION_SQE_OP		= 1,

	/* Allow sqe flags */
	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,

	/* Require sqe flags (these flags must be set on each submission) */
	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,

	IORING_RESTRICTION_LAST
};

/*
 * Extended wait argument; sigmask and ts carry user pointers as 64-bit
 * values so the layout is the same for 32- and 64-bit userspace.
 */
struct io_uring_getevents_arg {
	__u64	sigmask;	/* const sigset_t *, may be 0 */
	__u32	sigmask_sz;
	__u32	pad;		/* must be zero */
	__u64	ts;		/* struct __kernel_timespec *, may be 0 */
};

/*
 * Argument for IORING_REGISTER_SYNC_CANCEL
 */
struct io_uring_sync_cancel_reg {
	__u64	addr;
	__s32	fd;
	__u32	flags;
	struct __kernel_timespec	timeout;
	__u8	opcode;
	__u8	pad[7];		/* must be zero */
	__u64	pad2[3];	/* must be zero */
};

/*
 * Argument for IORING_REGISTER_FILE_ALLOC_RANGE
 * The range is specified as [off, off + len)
 */
struct io_uring_file_index_range {
	__u32	off;
	__u32	len;
	__u64	resv;	/* must be zero */
};

/* Header written at the start of the buffer for multishot recvmsg results */
struct io_uring_recvmsg_out {
	__u32 namelen;
	__u32 controllen;
	__u32 payloadlen;
	__u32 flags;
};

/*
 * Argument for IORING_OP_URING_CMD when file is a socket
 */
enum io_uring_socket_op {
	SOCKET_URING_OP_SIOCINQ		= 0,
	SOCKET_URING_OP_SIOCOUTQ,
	SOCKET_URING_OP_GETSOCKOPT,
	SOCKET_URING_OP_SETSOCKOPT,
};

#ifdef __cplusplus
}
#endif

#endif