mirror of
https://codeberg.org/ziglang/zig.git
synced 2025-12-06 05:44:20 +00:00
Remove io_uring bits and pieces from linux.zig
now that IoUring.zig is self contained Signed-off-by: Bernard Assan <mega.alpha100@gmail.com>
This commit is contained in:
parent
6f40669413
commit
1ead5f2205
1 changed files with 1 additions and 658 deletions
|
|
@ -22,6 +22,7 @@ const iovec = std.posix.iovec;
|
|||
const iovec_const = std.posix.iovec_const;
|
||||
const winsize = std.posix.winsize;
|
||||
const ACCMODE = std.posix.ACCMODE;
|
||||
pub const IoUring = @import("linux/IoUring.zig");
|
||||
|
||||
test {
|
||||
if (builtin.os.tag == .linux) {
|
||||
|
|
@ -6664,664 +6665,6 @@ else
|
|||
fields: siginfo_fields_union,
|
||||
};
|
||||
|
||||
// io_uring_params.flags
|
||||
|
||||
/// io_context is polled
|
||||
pub const IORING_SETUP_IOPOLL = 1 << 0;
|
||||
|
||||
/// SQ poll thread
|
||||
pub const IORING_SETUP_SQPOLL = 1 << 1;
|
||||
|
||||
/// sq_thread_cpu is valid
|
||||
pub const IORING_SETUP_SQ_AFF = 1 << 2;
|
||||
|
||||
/// app defines CQ size
|
||||
pub const IORING_SETUP_CQSIZE = 1 << 3;
|
||||
|
||||
/// clamp SQ/CQ ring sizes
|
||||
pub const IORING_SETUP_CLAMP = 1 << 4;
|
||||
|
||||
/// attach to existing wq
|
||||
pub const IORING_SETUP_ATTACH_WQ = 1 << 5;
|
||||
|
||||
/// start with ring disabled
|
||||
pub const IORING_SETUP_R_DISABLED = 1 << 6;
|
||||
|
||||
/// continue submit on error
|
||||
pub const IORING_SETUP_SUBMIT_ALL = 1 << 7;
|
||||
|
||||
/// Cooperative task running. When requests complete, they often require
|
||||
/// forcing the submitter to transition to the kernel to complete. If this
|
||||
/// flag is set, work will be done when the task transitions anyway, rather
|
||||
/// than force an inter-processor interrupt reschedule. This avoids interrupting
|
||||
/// a task running in userspace, and saves an IPI.
|
||||
pub const IORING_SETUP_COOP_TASKRUN = 1 << 8;
|
||||
|
||||
/// If COOP_TASKRUN is set, get notified if task work is available for
|
||||
/// running and a kernel transition would be needed to run it. This sets
|
||||
/// IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
|
||||
pub const IORING_SETUP_TASKRUN_FLAG = 1 << 9;
|
||||
|
||||
/// SQEs are 128 bytes
|
||||
pub const IORING_SETUP_SQE128 = 1 << 10;
|
||||
/// CQEs are 32 bytes
|
||||
pub const IORING_SETUP_CQE32 = 1 << 11;
|
||||
|
||||
/// Only one task is allowed to submit requests
|
||||
pub const IORING_SETUP_SINGLE_ISSUER = 1 << 12;
|
||||
|
||||
/// Defer running task work to get events.
|
||||
/// Rather than running bits of task work whenever the task transitions
|
||||
/// try to do it just before it is needed.
|
||||
pub const IORING_SETUP_DEFER_TASKRUN = 1 << 13;
|
||||
|
||||
/// Application provides ring memory
|
||||
pub const IORING_SETUP_NO_MMAP = 1 << 14;
|
||||
|
||||
/// Register the ring fd in itself for use with
|
||||
/// IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather
|
||||
/// than an fd.
|
||||
pub const IORING_SETUP_REGISTERED_FD_ONLY = 1 << 15;
|
||||
|
||||
/// Removes indirection through the SQ index array.
|
||||
pub const IORING_SETUP_NO_SQARRAY = 1 << 16;
|
||||
|
||||
pub const IoUring = @import("linux/IoUring.zig");
|
||||
|
||||
/// If sqe->file_index is set to this for opcodes that instantiate a new
|
||||
/// direct descriptor (like openat/openat2/accept), then io_uring will allocate
|
||||
/// an available direct descriptor instead of having the application pass one
|
||||
/// in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
|
||||
/// if the space is full.
|
||||
/// Available since Linux 5.19
|
||||
pub const IORING_FILE_INDEX_ALLOC = maxInt(u32);
|
||||
|
||||
pub const IOSQE_BIT = enum(u8) {
|
||||
FIXED_FILE,
|
||||
IO_DRAIN,
|
||||
IO_LINK,
|
||||
IO_HARDLINK,
|
||||
ASYNC,
|
||||
BUFFER_SELECT,
|
||||
CQE_SKIP_SUCCESS,
|
||||
|
||||
_,
|
||||
};
|
||||
|
||||
// io_uring_sqe.flags
|
||||
|
||||
/// use fixed fileset
|
||||
pub const IOSQE_FIXED_FILE = 1 << @intFromEnum(IOSQE_BIT.FIXED_FILE);
|
||||
|
||||
/// issue after inflight IO
|
||||
pub const IOSQE_IO_DRAIN = 1 << @intFromEnum(IOSQE_BIT.IO_DRAIN);
|
||||
|
||||
/// links next sqe
|
||||
pub const IOSQE_IO_LINK = 1 << @intFromEnum(IOSQE_BIT.IO_LINK);
|
||||
|
||||
/// like LINK, but stronger
|
||||
pub const IOSQE_IO_HARDLINK = 1 << @intFromEnum(IOSQE_BIT.IO_HARDLINK);
|
||||
|
||||
/// always go async
|
||||
pub const IOSQE_ASYNC = 1 << @intFromEnum(IOSQE_BIT.ASYNC);
|
||||
|
||||
/// select buffer from buf_group
|
||||
pub const IOSQE_BUFFER_SELECT = 1 << @intFromEnum(IOSQE_BIT.BUFFER_SELECT);
|
||||
|
||||
/// don't post CQE if request succeeded
|
||||
/// Available since Linux 5.17
|
||||
pub const IOSQE_CQE_SKIP_SUCCESS = 1 << @intFromEnum(IOSQE_BIT.CQE_SKIP_SUCCESS);
|
||||
|
||||
pub const IORING_OP = enum(u8) {
|
||||
NOP,
|
||||
READV,
|
||||
WRITEV,
|
||||
FSYNC,
|
||||
READ_FIXED,
|
||||
WRITE_FIXED,
|
||||
POLL_ADD,
|
||||
POLL_REMOVE,
|
||||
SYNC_FILE_RANGE,
|
||||
SENDMSG,
|
||||
RECVMSG,
|
||||
TIMEOUT,
|
||||
TIMEOUT_REMOVE,
|
||||
ACCEPT,
|
||||
ASYNC_CANCEL,
|
||||
LINK_TIMEOUT,
|
||||
CONNECT,
|
||||
FALLOCATE,
|
||||
OPENAT,
|
||||
CLOSE,
|
||||
FILES_UPDATE,
|
||||
STATX,
|
||||
READ,
|
||||
WRITE,
|
||||
FADVISE,
|
||||
MADVISE,
|
||||
SEND,
|
||||
RECV,
|
||||
OPENAT2,
|
||||
EPOLL_CTL,
|
||||
SPLICE,
|
||||
PROVIDE_BUFFERS,
|
||||
REMOVE_BUFFERS,
|
||||
TEE,
|
||||
SHUTDOWN,
|
||||
RENAMEAT,
|
||||
UNLINKAT,
|
||||
MKDIRAT,
|
||||
SYMLINKAT,
|
||||
LINKAT,
|
||||
MSG_RING,
|
||||
FSETXATTR,
|
||||
SETXATTR,
|
||||
FGETXATTR,
|
||||
GETXATTR,
|
||||
SOCKET,
|
||||
URING_CMD,
|
||||
SEND_ZC,
|
||||
SENDMSG_ZC,
|
||||
READ_MULTISHOT,
|
||||
WAITID,
|
||||
FUTEX_WAIT,
|
||||
FUTEX_WAKE,
|
||||
FUTEX_WAITV,
|
||||
FIXED_FD_INSTALL,
|
||||
FTRUNCATE,
|
||||
BIND,
|
||||
LISTEN,
|
||||
RECV_ZC,
|
||||
|
||||
_,
|
||||
};
|
||||
// io_uring_sqe.uring_cmd_flags (rw_flags in the Zig struct)
|
||||
|
||||
/// use registered buffer; pass this flag along with setting sqe->buf_index.
|
||||
pub const IORING_URING_CMD_FIXED = 1 << 0;
|
||||
|
||||
// io_uring_sqe.fsync_flags (rw_flags in the Zig struct)
|
||||
pub const IORING_FSYNC_DATASYNC = 1 << 0;
|
||||
|
||||
// io_uring_sqe.timeout_flags (rw_flags in the Zig struct)
|
||||
pub const IORING_TIMEOUT_ABS = 1 << 0;
|
||||
pub const IORING_TIMEOUT_UPDATE = 1 << 1; // Available since Linux 5.11
|
||||
pub const IORING_TIMEOUT_BOOTTIME = 1 << 2; // Available since Linux 5.15
|
||||
pub const IORING_TIMEOUT_REALTIME = 1 << 3; // Available since Linux 5.15
|
||||
pub const IORING_LINK_TIMEOUT_UPDATE = 1 << 4; // Available since Linux 5.15
|
||||
pub const IORING_TIMEOUT_ETIME_SUCCESS = 1 << 5; // Available since Linux 5.16
|
||||
pub const IORING_TIMEOUT_CLOCK_MASK = IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME;
|
||||
pub const IORING_TIMEOUT_UPDATE_MASK = IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE;
|
||||
|
||||
// io_uring_sqe.splice_flags (rw_flags in the Zig struct)
|
||||
// extends splice(2) flags
|
||||
pub const IORING_SPLICE_F_FD_IN_FIXED = 1 << 31;
|
||||
|
||||
// POLL_ADD flags.
|
||||
// Note that since sqe->poll_events (rw_flags in the Zig struct) is the flag space, the command flags for POLL_ADD are stored in sqe->len.
|
||||
|
||||
/// Multishot poll. Sets IORING_CQE_F_MORE if the poll handler will continue to report CQEs on behalf of the same SQE.
|
||||
pub const IORING_POLL_ADD_MULTI = 1 << 0;
|
||||
/// Update existing poll request, matching sqe->addr as the old user_data field.
|
||||
pub const IORING_POLL_UPDATE_EVENTS = 1 << 1;
|
||||
pub const IORING_POLL_UPDATE_USER_DATA = 1 << 2;
|
||||
pub const IORING_POLL_ADD_LEVEL = 1 << 3;
|
||||
|
||||
// ASYNC_CANCEL flags.
|
||||
|
||||
/// Cancel all requests that match the given key
|
||||
pub const IORING_ASYNC_CANCEL_ALL = 1 << 0;
|
||||
/// Key off 'fd' for cancelation rather than the request 'user_data'.
|
||||
pub const IORING_ASYNC_CANCEL_FD = 1 << 1;
|
||||
/// Match any request
|
||||
pub const IORING_ASYNC_CANCEL_ANY = 1 << 2;
|
||||
/// 'fd' passed in is a fixed descriptor. Available since Linux 6.0
|
||||
pub const IORING_ASYNC_CANCEL_FD_FIXED = 1 << 3;
|
||||
|
||||
// send/sendmsg and recv/recvmsg flags (sqe->ioprio)
|
||||
|
||||
/// If set, instead of first attempting to send or receive and arm poll if that yields an -EAGAIN result,
|
||||
/// arm poll upfront and skip the initial transfer attempt.
|
||||
pub const IORING_RECVSEND_POLL_FIRST = 1 << 0;
|
||||
/// Multishot recv. Sets IORING_CQE_F_MORE if the handler will continue to report CQEs on behalf of the same SQE.
|
||||
pub const IORING_RECV_MULTISHOT = 1 << 1;
|
||||
/// Use registered buffers, the index is stored in the buf_index field.
|
||||
pub const IORING_RECVSEND_FIXED_BUF = 1 << 2;
|
||||
/// If set, SEND[MSG]_ZC should report the zerocopy usage in cqe.res for the IORING_CQE_F_NOTIF cqe.
|
||||
pub const IORING_SEND_ZC_REPORT_USAGE = 1 << 3;
|
||||
/// If set, send or recv will grab as many buffers from the buffer group ID given and send them all.
|
||||
/// The completion result will be the number of buffers send, with the starting buffer ID in cqe as per usual.
|
||||
/// The buffers must be contiguous from the starting buffer ID.
|
||||
/// Used with IOSQE_BUFFER_SELECT.
|
||||
pub const IORING_RECVSEND_BUNDLE = 1 << 4;
|
||||
/// CQE.RES FOR IORING_CQE_F_NOTIF if IORING_SEND_ZC_REPORT_USAGE was requested
|
||||
pub const IORING_NOTIF_USAGE_ZC_COPIED = 1 << 31;
|
||||
|
||||
/// accept flags stored in sqe->ioprio
|
||||
pub const IORING_ACCEPT_MULTISHOT = 1 << 0;
|
||||
|
||||
/// IORING_OP_MSG_RING command types, stored in sqe->addr
|
||||
pub const IORING_MSG_RING_COMMAND = enum(u8) {
|
||||
/// pass sqe->len as 'res' and off as user_data
|
||||
DATA,
|
||||
/// send a registered fd to another ring
|
||||
SEND_FD,
|
||||
};
|
||||
|
||||
// io_uring_sqe.msg_ring_flags (rw_flags in the Zig struct)
|
||||
|
||||
/// Don't post a CQE to the target ring. Not applicable for IORING_MSG_DATA, obviously.
|
||||
pub const IORING_MSG_RING_CQE_SKIP = 1 << 0;
|
||||
|
||||
/// Pass through the flags from sqe->file_index (splice_fd_in in the zig struct) to cqe->flags
|
||||
pub const IORING_MSG_RING_FLAGS_PASS = 1 << 1;
|
||||
|
||||
// IO completion data structure (Completion Queue Entry)
|
||||
pub const io_uring_cqe = extern struct {
|
||||
/// io_uring_sqe.data submission passed back
|
||||
user_data: u64,
|
||||
|
||||
/// result code for this event
|
||||
res: i32,
|
||||
flags: u32,
|
||||
|
||||
// Followed by 16 bytes of padding if initialized with IORING_SETUP_CQE32, doubling cqe size
|
||||
|
||||
pub fn err(self: io_uring_cqe) E {
|
||||
if (self.res > -4096 and self.res < 0) {
|
||||
return @as(E, @enumFromInt(-self.res));
|
||||
}
|
||||
return .SUCCESS;
|
||||
}
|
||||
|
||||
// On successful completion of the provided buffers IO request, the CQE flags field
|
||||
// will have IORING_CQE_F_BUFFER set and the selected buffer ID will be indicated by
|
||||
// the upper 16-bits of the flags field.
|
||||
pub fn buffer_id(self: io_uring_cqe) !u16 {
|
||||
if (self.flags & IORING_CQE_F_BUFFER != IORING_CQE_F_BUFFER) {
|
||||
return error.NoBufferSelected;
|
||||
}
|
||||
return @as(u16, @intCast(self.flags >> IORING_CQE_BUFFER_SHIFT));
|
||||
}
|
||||
};
|
||||
|
||||
// io_uring_cqe.flags
|
||||
|
||||
/// If set, the upper 16 bits are the buffer ID
|
||||
pub const IORING_CQE_F_BUFFER = 1 << 0;
|
||||
/// If set, parent SQE will generate more CQE entries.
|
||||
/// Available since Linux 5.13.
|
||||
pub const IORING_CQE_F_MORE = 1 << 1;
|
||||
/// If set, more data to read after socket recv
|
||||
pub const IORING_CQE_F_SOCK_NONEMPTY = 1 << 2;
|
||||
/// Set for notification CQEs. Can be used to distinguish them from sends.
|
||||
pub const IORING_CQE_F_NOTIF = 1 << 3;
|
||||
/// If set, the buffer ID set in the completion will get more completions.
|
||||
pub const IORING_CQE_F_BUF_MORE = 1 << 4;
|
||||
|
||||
pub const IORING_CQE_BUFFER_SHIFT = 16;
|
||||
|
||||
/// Magic offsets for the application to mmap the data it needs
|
||||
pub const IORING_OFF_SQ_RING = 0;
|
||||
pub const IORING_OFF_CQ_RING = 0x8000000;
|
||||
pub const IORING_OFF_SQES = 0x10000000;
|
||||
|
||||
/// Filled with the offset for mmap(2)
|
||||
pub const io_sqring_offsets = extern struct {
|
||||
/// offset of ring head
|
||||
head: u32,
|
||||
|
||||
/// offset of ring tail
|
||||
tail: u32,
|
||||
|
||||
/// ring mask value
|
||||
ring_mask: u32,
|
||||
|
||||
/// entries in ring
|
||||
ring_entries: u32,
|
||||
|
||||
/// ring flags
|
||||
flags: u32,
|
||||
|
||||
/// number of sqes not submitted
|
||||
dropped: u32,
|
||||
|
||||
/// sqe index array
|
||||
array: u32,
|
||||
|
||||
resv1: u32,
|
||||
user_addr: u64,
|
||||
};
|
||||
|
||||
// io_sqring_offsets.flags
|
||||
|
||||
/// needs io_uring_enter wakeup
|
||||
pub const IORING_SQ_NEED_WAKEUP = 1 << 0;
|
||||
/// kernel has cqes waiting beyond the cq ring
|
||||
pub const IORING_SQ_CQ_OVERFLOW = 1 << 1;
|
||||
/// task should enter the kernel
|
||||
pub const IORING_SQ_TASKRUN = 1 << 2;
|
||||
|
||||
pub const io_cqring_offsets = extern struct {
|
||||
head: u32,
|
||||
tail: u32,
|
||||
ring_mask: u32,
|
||||
ring_entries: u32,
|
||||
overflow: u32,
|
||||
cqes: u32,
|
||||
flags: u32,
|
||||
resv: u32,
|
||||
user_addr: u64,
|
||||
};
|
||||
|
||||
// io_cqring_offsets.flags
|
||||
|
||||
/// disable eventfd notifications
|
||||
pub const IORING_CQ_EVENTFD_DISABLED = 1 << 0;
|
||||
|
||||
// io_uring_enter flags
|
||||
pub const IORING_ENTER_GETEVENTS = 1 << 0;
|
||||
pub const IORING_ENTER_SQ_WAKEUP = 1 << 1;
|
||||
pub const IORING_ENTER_SQ_WAIT = 1 << 2;
|
||||
pub const IORING_ENTER_EXT_ARG = 1 << 3;
|
||||
pub const IORING_ENTER_REGISTERED_RING = 1 << 4;
|
||||
|
||||
pub const io_uring_params = extern struct {
|
||||
sq_entries: u32,
|
||||
cq_entries: u32,
|
||||
flags: u32,
|
||||
sq_thread_cpu: u32,
|
||||
sq_thread_idle: u32,
|
||||
features: u32,
|
||||
wq_fd: u32,
|
||||
resv: [3]u32,
|
||||
sq_off: io_sqring_offsets,
|
||||
cq_off: io_cqring_offsets,
|
||||
};
|
||||
|
||||
// io_uring_params.features flags
|
||||
|
||||
pub const IORING_FEAT_SINGLE_MMAP = 1 << 0;
|
||||
pub const IORING_FEAT_NODROP = 1 << 1;
|
||||
pub const IORING_FEAT_SUBMIT_STABLE = 1 << 2;
|
||||
pub const IORING_FEAT_RW_CUR_POS = 1 << 3;
|
||||
pub const IORING_FEAT_CUR_PERSONALITY = 1 << 4;
|
||||
pub const IORING_FEAT_FAST_POLL = 1 << 5;
|
||||
pub const IORING_FEAT_POLL_32BITS = 1 << 6;
|
||||
pub const IORING_FEAT_SQPOLL_NONFIXED = 1 << 7;
|
||||
pub const IORING_FEAT_EXT_ARG = 1 << 8;
|
||||
pub const IORING_FEAT_NATIVE_WORKERS = 1 << 9;
|
||||
pub const IORING_FEAT_RSRC_TAGS = 1 << 10;
|
||||
pub const IORING_FEAT_CQE_SKIP = 1 << 11;
|
||||
pub const IORING_FEAT_LINKED_FILE = 1 << 12;
|
||||
|
||||
// io_uring_register opcodes and arguments
|
||||
pub const IORING_REGISTER = enum(u32) {
|
||||
REGISTER_BUFFERS,
|
||||
UNREGISTER_BUFFERS,
|
||||
REGISTER_FILES,
|
||||
UNREGISTER_FILES,
|
||||
REGISTER_EVENTFD,
|
||||
UNREGISTER_EVENTFD,
|
||||
REGISTER_FILES_UPDATE,
|
||||
REGISTER_EVENTFD_ASYNC,
|
||||
REGISTER_PROBE,
|
||||
REGISTER_PERSONALITY,
|
||||
UNREGISTER_PERSONALITY,
|
||||
REGISTER_RESTRICTIONS,
|
||||
REGISTER_ENABLE_RINGS,
|
||||
|
||||
// extended with tagging
|
||||
REGISTER_FILES2,
|
||||
REGISTER_FILES_UPDATE2,
|
||||
REGISTER_BUFFERS2,
|
||||
REGISTER_BUFFERS_UPDATE,
|
||||
|
||||
// set/clear io-wq thread affinities
|
||||
REGISTER_IOWQ_AFF,
|
||||
UNREGISTER_IOWQ_AFF,
|
||||
|
||||
// set/get max number of io-wq workers
|
||||
REGISTER_IOWQ_MAX_WORKERS,
|
||||
|
||||
// register/unregister io_uring fd with the ring
|
||||
REGISTER_RING_FDS,
|
||||
UNREGISTER_RING_FDS,
|
||||
|
||||
// register ring based provide buffer group
|
||||
REGISTER_PBUF_RING,
|
||||
UNREGISTER_PBUF_RING,
|
||||
|
||||
// sync cancelation API
|
||||
REGISTER_SYNC_CANCEL,
|
||||
|
||||
// register a range of fixed file slots for automatic slot allocation
|
||||
REGISTER_FILE_ALLOC_RANGE,
|
||||
|
||||
// return status information for a buffer group
|
||||
REGISTER_PBUF_STATUS,
|
||||
|
||||
// set/clear busy poll settings
|
||||
REGISTER_NAPI,
|
||||
UNREGISTER_NAPI,
|
||||
|
||||
REGISTER_CLOCK,
|
||||
|
||||
// clone registered buffers from source ring to current ring
|
||||
REGISTER_CLONE_BUFFERS,
|
||||
|
||||
// send MSG_RING without having a ring
|
||||
REGISTER_SEND_MSG_RING,
|
||||
|
||||
// register a netdev hw rx queue for zerocopy
|
||||
REGISTER_ZCRX_IFQ,
|
||||
|
||||
// resize CQ ring
|
||||
REGISTER_RESIZE_RINGS,
|
||||
|
||||
REGISTER_MEM_REGION,
|
||||
|
||||
// flag added to the opcode to use a registered ring fd
|
||||
REGISTER_USE_REGISTERED_RING = 1 << 31,
|
||||
|
||||
_,
|
||||
};
|
||||
|
||||
/// io_uring_restriction->opcode values
|
||||
pub const IOWQ_CATEGORIES = enum(u8) {
|
||||
BOUND,
|
||||
UNBOUND,
|
||||
};
|
||||
|
||||
/// deprecated, see struct io_uring_rsrc_update
|
||||
pub const io_uring_files_update = extern struct {
|
||||
offset: u32,
|
||||
resv: u32,
|
||||
fds: u64,
|
||||
};
|
||||
|
||||
/// Register a fully sparse file space, rather than pass in an array of all -1 file descriptors.
|
||||
pub const IORING_RSRC_REGISTER_SPARSE = 1 << 0;
|
||||
|
||||
pub const io_uring_rsrc_register = extern struct {
|
||||
nr: u32,
|
||||
flags: u32,
|
||||
resv2: u64,
|
||||
data: u64,
|
||||
tags: u64,
|
||||
};
|
||||
|
||||
pub const io_uring_rsrc_update = extern struct {
|
||||
offset: u32,
|
||||
resv: u32,
|
||||
data: u64,
|
||||
};
|
||||
|
||||
pub const io_uring_rsrc_update2 = extern struct {
|
||||
offset: u32,
|
||||
resv: u32,
|
||||
data: u64,
|
||||
tags: u64,
|
||||
nr: u32,
|
||||
resv2: u32,
|
||||
};
|
||||
|
||||
pub const io_uring_notification_slot = extern struct {
|
||||
tag: u64,
|
||||
resv: [3]u64,
|
||||
};
|
||||
|
||||
pub const io_uring_notification_register = extern struct {
|
||||
nr_slots: u32,
|
||||
resv: u32,
|
||||
resv2: u64,
|
||||
data: u64,
|
||||
resv3: u64,
|
||||
};
|
||||
|
||||
pub const io_uring_napi = extern struct {
|
||||
busy_poll_to: u32,
|
||||
prefer_busy_poll: u8,
|
||||
_pad: [3]u8,
|
||||
resv: u64,
|
||||
};
|
||||
|
||||
/// Skip updating fd indexes set to this value in the fd table
|
||||
pub const IORING_REGISTER_FILES_SKIP = -2;
|
||||
|
||||
pub const IO_URING_OP_SUPPORTED = 1 << 0;
|
||||
|
||||
pub const io_uring_probe_op = extern struct {
|
||||
op: IORING_OP,
|
||||
resv: u8,
|
||||
/// IO_URING_OP_* flags
|
||||
flags: u16,
|
||||
resv2: u32,
|
||||
|
||||
pub fn is_supported(self: @This()) bool {
|
||||
return self.flags & IO_URING_OP_SUPPORTED != 0;
|
||||
}
|
||||
};
|
||||
|
||||
pub const io_uring_probe = extern struct {
|
||||
/// Last opcode supported
|
||||
last_op: IORING_OP,
|
||||
/// Length of ops[] array below
|
||||
ops_len: u8,
|
||||
resv: u16,
|
||||
resv2: [3]u32,
|
||||
ops: [256]io_uring_probe_op,
|
||||
|
||||
/// Is the operation supported on the running kernel.
|
||||
pub fn is_supported(self: @This(), op: IORING_OP) bool {
|
||||
const i = @intFromEnum(op);
|
||||
if (i > @intFromEnum(self.last_op) or i >= self.ops_len)
|
||||
return false;
|
||||
return self.ops[i].is_supported();
|
||||
}
|
||||
};
|
||||
|
||||
pub const io_uring_restriction = extern struct {
|
||||
opcode: IORING_RESTRICTION,
|
||||
arg: extern union {
|
||||
/// IORING_RESTRICTION_REGISTER_OP
|
||||
register_op: IORING_REGISTER,
|
||||
|
||||
/// IORING_RESTRICTION_SQE_OP
|
||||
sqe_op: IORING_OP,
|
||||
|
||||
/// IORING_RESTRICTION_SQE_FLAGS_*
|
||||
sqe_flags: u8,
|
||||
},
|
||||
resv: u8,
|
||||
resv2: [3]u32,
|
||||
};
|
||||
|
||||
/// io_uring_restriction->opcode values
|
||||
pub const IORING_RESTRICTION = enum(u16) {
|
||||
/// Allow an io_uring_register(2) opcode
|
||||
REGISTER_OP = 0,
|
||||
|
||||
/// Allow an sqe opcode
|
||||
SQE_OP = 1,
|
||||
|
||||
/// Allow sqe flags
|
||||
SQE_FLAGS_ALLOWED = 2,
|
||||
|
||||
/// Require sqe flags (these flags must be set on each submission)
|
||||
SQE_FLAGS_REQUIRED = 3,
|
||||
|
||||
_,
|
||||
};
|
||||
|
||||
pub const IO_URING_SOCKET_OP = enum(u16) {
|
||||
SIOCIN = 0,
|
||||
SIOCOUTQ = 1,
|
||||
GETSOCKOPT = 2,
|
||||
SETSOCKOPT = 3,
|
||||
};
|
||||
|
||||
pub const io_uring_buf = extern struct {
|
||||
addr: u64,
|
||||
len: u32,
|
||||
bid: u16,
|
||||
resv: u16,
|
||||
};
|
||||
|
||||
pub const io_uring_buf_ring = extern struct {
|
||||
resv1: u64,
|
||||
resv2: u32,
|
||||
resv3: u16,
|
||||
tail: u16,
|
||||
};
|
||||
|
||||
/// argument for IORING_(UN)REGISTER_PBUF_RING
|
||||
pub const io_uring_buf_reg = extern struct {
|
||||
ring_addr: u64,
|
||||
ring_entries: u32,
|
||||
bgid: u16,
|
||||
flags: Flags,
|
||||
resv: [3]u64,
|
||||
|
||||
pub const Flags = packed struct {
|
||||
_0: u1 = 0,
|
||||
/// Incremental buffer consumption.
|
||||
inc: bool,
|
||||
_: u14 = 0,
|
||||
};
|
||||
};
|
||||
|
||||
pub const io_uring_getevents_arg = extern struct {
|
||||
sigmask: u64,
|
||||
sigmask_sz: u32,
|
||||
pad: u32,
|
||||
ts: u64,
|
||||
};
|
||||
|
||||
/// Argument for IORING_REGISTER_SYNC_CANCEL
|
||||
pub const io_uring_sync_cancel_reg = extern struct {
|
||||
addr: u64,
|
||||
fd: i32,
|
||||
flags: u32,
|
||||
timeout: kernel_timespec,
|
||||
pad: [4]u64,
|
||||
};
|
||||
|
||||
/// Argument for IORING_REGISTER_FILE_ALLOC_RANGE
|
||||
/// The range is specified as [off, off + len)
|
||||
pub const io_uring_file_index_range = extern struct {
|
||||
off: u32,
|
||||
len: u32,
|
||||
resv: u64,
|
||||
};
|
||||
|
||||
pub const io_uring_recvmsg_out = extern struct {
|
||||
namelen: u32,
|
||||
controllen: u32,
|
||||
payloadlen: u32,
|
||||
flags: u32,
|
||||
};
|
||||
|
||||
pub const utsname = extern struct {
|
||||
sysname: [64:0]u8,
|
||||
nodename: [64:0]u8,
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue