Mirror of https://codeberg.org/ziglang/zig.git, synced 2025-12-06 13:54:21 +00:00
replace some more fn flags with Typed Flags

and use linux Flags where appropriate

constants is a namespace
make AsyncCancel flags more descriptive
move cqe.flags into the Cqe type
make io_uring_sqe ioprio a packed union of SendRecv & Accept

TODO: move some of the used linux Flags to be packed structs

Signed-off-by: Bernard Assan <mega.alpha100@gmail.com>
parent c0a97b7d5f
commit 3b715fda9a

1 changed file with 156 additions and 143 deletions
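The pattern behind most of this diff: flag parameters that were bare u32 masks become packed struct(u32) flag sets, which @bitCast losslessly to the integer the kernel ABI expects. Below is a minimal, self-contained sketch of that round-trip, using the AsyncCancel field names from the hunks that follow; it is illustrative only, not the module itself.

const std = @import("std");

// Mirrors Flags.AsyncCancel from the diff: each bool is one IORING_ASYNC_CANCEL_*
// bit, starting at bit 0, with the remaining bits left as zeroed padding.
const AsyncCancel = packed struct(u32) {
    CANCEL_ALL: bool = false, // IORING_ASYNC_CANCEL_ALL (1 << 0)
    CANCEL_FD: bool = false, // IORING_ASYNC_CANCEL_FD (1 << 1)
    CANCEL_ANY: bool = false, // IORING_ASYNC_CANCEL_ANY (1 << 2)
    CANCEL_FD_FIXED: bool = false, // IORING_ASYNC_CANCEL_FD_FIXED (1 << 3)
    CANCEL_USERDATA: bool = false, // IORING_ASYNC_CANCEL_USERDATA (1 << 4)
    CANCEL_OP: bool = false, // IORING_ASYNC_CANCEL_OP (1 << 5)
    _unused: u26 = 0,
};

test "typed flag set bitcasts to the raw mask" {
    const flags: AsyncCancel = .{ .CANCEL_ALL = true, .CANCEL_FD = true };
    // Bit 0 (ALL) plus bit 1 (FD) is 0b11 — the same value the u32 API took.
    try std.testing.expectEqual(@as(u32, 0b11), @as(u32, @bitCast(flags)));
}

Because every field defaults to false, the empty mask is just .{}, and sqe.rw_flags = @bitCast(flags) stays a plain store.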
@@ -17,7 +17,7 @@ flags: Flags.Setup,
 features: Flags.Features,
 
 // COMMIT: move IoUring constants to Constants
-pub const Constants = struct {
+pub const constants = struct {
     /// If sqe.file_index (splice_fd_in in Zig Struct) is set to this for opcodes that instantiate a new
     /// direct descriptor (like openat/openat2/accept), then io_uring will allocate
     /// an available direct descriptor instead of having the application pass one
@@ -204,23 +204,23 @@ pub const Flags = struct {
     pub const AsyncCancel = packed struct(u32) {
         /// IORING_ASYNC_CANCEL_ALL
         /// Cancel all requests that match the given key
-        ALL: bool = false,
+        CANCEL_ALL: bool = false,
         /// IORING_ASYNC_CANCEL_FD
         /// Key off 'fd' for cancelation rather than the request 'user_data'
-        FD: bool = false,
+        CANCEL_FD: bool = false,
         /// IORING_ASYNC_CANCEL_ANY
         /// Match any request
-        ANY: bool = false,
+        CANCEL_ANY: bool = false,
         /// IORING_ASYNC_CANCEL_FD_FIXED
         /// 'fd' passed in is a fixed descriptor
-        FD_FIXED: bool = false,
+        CANCEL_FD_FIXED: bool = false,
         // COMMIT: new AsyncCancel Flags
         /// IORING_ASYNC_CANCEL_USERDATA
         /// Match on user_data, default for no other key
-        USERDATA: bool = false,
+        CANCEL_USERDATA: bool = false,
         /// IORING_ASYNC_CANCEL_OP
         /// Match request based on opcode
-        OP: bool = false,
+        CANCEL_OP: bool = false,
         _unused: u26 = 0,
     };
 
@@ -295,42 +295,6 @@ pub const Flags = struct {
         _unused_1: u26 = 0,
     };
 
-    /// cqe.flags
-    pub const Cqe = packed struct(u32) {
-        /// IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
-        F_BUFFER: bool = false,
-        /// IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
-        F_MORE: bool = false,
-        /// IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
-        F_SOCK_NONEMPTY: bool = false,
-        /// IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinct
-        /// them from sends.
-        F_NOTIF: bool = false,
-        /// IORING_CQE_F_BUF_MORE If set, the buffer ID set in the completion will get
-        /// more completions. In other words, the buffer is being
-        /// partially consumed, and will be used by the kernel for
-        /// more completions. This is only set for buffers used via
-        /// the incremental buffer consumption, as provided by
-        /// a ring buffer setup with IOU_PBUF_RING_INC. For any
-        /// other provided buffer type, all completions with a
-        /// buffer passed back is automatically returned to the
-        /// application.
-        F_BUF_MORE: bool = false,
-        // COMMIT: new flags
-        /// IORING_CQE_F_SKIP If set, then the application/liburing must ignore this
-        /// CQE. It's only purpose is to fill a gap in the ring,
-        /// if a large CQE is attempted posted when the ring has
-        /// just a single small CQE worth of space left before
-        /// wrapping.
-        F_SKIP: bool = false,
-        _unused: u9 = 0,
-        /// IORING_CQE_F_32 If set, this is a 32b/big-cqe posting. Use with rings
-        /// setup in a mixed CQE mode, where both 16b and 32b
-        /// CQEs may be posted to the CQ ring.
-        F_32: bool = false,
-        _unused_1: u16 = 0,
-    };
-
     /// sq_ring.flags
     pub const Sq = packed struct(u32) {
         /// needs io_uring_enter wakeup
@@ -394,13 +358,47 @@ pub const io_uring_cqe = extern struct {
     user_data: u64,
     /// result code for this event
     res: i32,
-    flags: Flags.Cqe,
+    flags: CqeFlags,
     // COMMIT: add big_cqe which was missing in io_uring_cqe type declaration
     /// If the ring is initialized with IORING_SETUP_CQE32, then this field
     /// contains 16-bytes of padding, doubling the size of the CQE.
     big_cqe: []u64,
 
-    // Followed by 16 bytes of padding if initialized with IORING_SETUP_CQE32, doubling cqe size
+    /// cqe.flags
+    pub const CqeFlags = packed struct(u32) {
+        /// IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
+        F_BUFFER: bool = false,
+        /// IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
+        F_MORE: bool = false,
+        /// IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
+        F_SOCK_NONEMPTY: bool = false,
+        /// IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinct
+        /// them from sends.
+        F_NOTIF: bool = false,
+        /// IORING_CQE_F_BUF_MORE If set, the buffer ID set in the completion will get
+        /// more completions. In other words, the buffer is being
+        /// partially consumed, and will be used by the kernel for
+        /// more completions. This is only set for buffers used via
+        /// the incremental buffer consumption, as provided by
+        /// a ring buffer setup with IOU_PBUF_RING_INC. For any
+        /// other provided buffer type, all completions with a
+        /// buffer passed back is automatically returned to the
+        /// application.
+        F_BUF_MORE: bool = false,
+        // COMMIT: new flags
+        /// IORING_CQE_F_SKIP If set, then the application/liburing must ignore this
+        /// CQE. It's only purpose is to fill a gap in the ring,
+        /// if a large CQE is attempted posted when the ring has
+        /// just a single small CQE worth of space left before
+        /// wrapping.
+        F_SKIP: bool = false,
+        _unused: u9 = 0,
+        /// IORING_CQE_F_32 If set, this is a 32b/big-cqe posting. Use with rings
+        /// setup in a mixed CQE mode, where both 16b and 32b
+        /// CQEs may be posted to the CQ ring.
+        F_32: bool = false,
+        _unused_1: u16 = 0,
+    };
 
     pub fn err(self: io_uring_cqe) linux.E {
         if (self.res > -4096 and self.res < 0) {
@@ -416,7 +414,7 @@ pub const io_uring_cqe = extern struct {
         if (!self.flags.F_BUFFER) {
             return error.NoBufferSelected;
         }
-        return @as(u16, @intCast(@as(u32, @bitCast(self.flags)) >> Constants.CQE_BUFFER_SHIFT));
+        return @intCast(@as(u32, @bitCast(self.flags)) >> constants.CQE_BUFFER_SHIFT);
     }
 };
 
@@ -427,7 +425,10 @@ pub const io_uring_sqe = extern struct {
     /// IOSQE_* flags
     flags: SqeFlags,
     /// ioprio for the request
-    ioprio: u16,
+    ioprio: packed union {
+        send_recv: Flags.SendRecv,
+        accept: Flags.Accept,
+    },
     /// file descriptor to do IO on
     fd: i32,
     /// offset into file
@@ -474,8 +475,8 @@ pub const io_uring_sqe = extern struct {
     pub fn prep_nop(sqe: *io_uring_sqe) void {
         sqe.* = .{
             .opcode = .NOP,
-            .flags = @bitCast(0),
-            .ioprio = 0,
+            .flags = .{},
+            .ioprio = @bitCast(@as(u16, 0)),
             .fd = 0,
             .off = 0,
             .addr = 0,
@@ -490,16 +491,16 @@ pub const io_uring_sqe = extern struct {
         };
     }
 
-    pub fn prep_fsync(sqe: *io_uring_sqe, fd: linux.fd_t, flags: u32) void {
+    pub fn prep_fsync(sqe: *io_uring_sqe, fd: linux.fd_t, flags: Flags.Fsync) void {
         sqe.* = .{
             .opcode = .FSYNC,
-            .flags = 0,
-            .ioprio = 0,
+            .flags = .{},
+            .ioprio = @bitCast(@as(u16, 0)),
             .fd = fd,
             .off = 0,
             .addr = 0,
             .len = 0,
-            .rw_flags = flags,
+            .rw_flags = @bitCast(flags),
             .user_data = 0,
             .buf_index = 0,
             .personality = 0,
@@ -519,8 +520,8 @@ pub const io_uring_sqe = extern struct {
     ) void {
         sqe.* = .{
             .opcode = op,
-            .flags = @bitCast(0),
-            .ioprio = 0,
+            .flags = .{},
+            .ioprio = @bitCast(@as(u16, 0)),
             .fd = fd,
             .off = offset,
             .addr = addr,
@@ -582,7 +583,7 @@ pub const io_uring_sqe = extern struct {
         fd: linux.fd_t,
         addr: ?*linux.sockaddr,
         addrlen: ?*linux.socklen_t,
-        flags: u32,
+        flags: linux.SOCK,
     ) void {
         // `addr` holds a pointer to `sockaddr`, and `addr2` holds a pointer to socklen_t`.
         // `addr2` maps to `sqe.off` (u64) instead of `sqe.len` (which is only a u32).
@@ -590,32 +591,45 @@ pub const io_uring_sqe = extern struct {
         sqe.rw_flags = flags;
     }
 
+    /// accept directly into the fixed file table
     pub fn prep_accept_direct(
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
         addr: ?*linux.sockaddr,
         addrlen: ?*linux.socklen_t,
-        flags: u32,
+        flags: linux.SOCK,
         file_index: u32,
     ) void {
         prep_accept(sqe, fd, addr, addrlen, flags);
         set_target_fixed_file(sqe, file_index);
     }
 
+    pub fn prep_multishot_accept(
+        sqe: *io_uring_sqe,
+        fd: linux.fd_t,
+        addr: ?*linux.sockaddr,
+        addrlen: ?*linux.socklen_t,
+        flags: linux.SOCK,
+    ) void {
+        prep_accept(sqe, fd, addr, addrlen, flags);
+        sqe.ioprio = .{ .accept = .{ .MULTISHOT = true } };
+    }
+
+    /// multishot accept directly into the fixed file table
     pub fn prep_multishot_accept_direct(
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
         addr: ?*linux.sockaddr,
         addrlen: ?*linux.socklen_t,
-        flags: u32,
+        flags: linux.SOCK,
     ) void {
         prep_multishot_accept(sqe, fd, addr, addrlen, flags);
-        set_target_fixed_file(sqe, linux.IORING_FILE_INDEX_ALLOC);
+        set_target_fixed_file(sqe, constants.FILE_INDEX_ALLOC);
     }
 
     fn set_target_fixed_file(sqe: *io_uring_sqe, file_index: u32) void {
-        const sqe_file_index: u32 = if (file_index == Constants.FILE_INDEX_ALLOC)
-            Constants.FILE_INDEX_ALLOC
+        const sqe_file_index: u32 = if (file_index == constants.FILE_INDEX_ALLOC)
+            constants.FILE_INDEX_ALLOC
         else
             // 0 means no fixed files, indexes should be encoded as "index + 1"
             file_index + 1;
@@ -645,81 +659,84 @@ pub const io_uring_sqe = extern struct {
         sqe.prep_rw(.EPOLL_CTL, epfd, @intFromPtr(ev), op, @intCast(fd));
     }
 
-    pub fn prep_recv(sqe: *io_uring_sqe, fd: linux.fd_t, buffer: []u8, flags: Flags.SendRecv) void {
+    pub fn prep_recv(sqe: *io_uring_sqe, fd: linux.fd_t, buffer: []u8, flags: linux.MSG) void {
         sqe.prep_rw(.RECV, fd, @intFromPtr(buffer.ptr), buffer.len, 0);
-        sqe.rw_flags = @bitCast(flags);
+        sqe.rw_flags = flags;
     }
 
+    // TODO: review recv `flags`
     pub fn prep_recv_multishot(
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
         buffer: []u8,
-        flags: Flags.SendRecv,
+        flags: linux.MSG,
     ) void {
         sqe.prep_recv(fd, buffer, flags);
-        const enable_multishot: Flags.SendRecv = .{ .RECV_MULTISHOT = true };
-        sqe.ioprio |= @bitCast(enable_multishot);
+        sqe.ioprio = .{ .send_recv = .{ .RECV_MULTISHOT = true } };
     }
 
     pub fn prep_recvmsg(
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
         msg: *linux.msghdr,
-        flags: Flags.SendRecv,
+        flags: linux.MSG,
     ) void {
         sqe.prep_rw(.RECVMSG, fd, @intFromPtr(msg), 1, 0);
-        sqe.rw_flags = @bitCast(flags);
+        sqe.rw_flags = flags;
     }
 
     pub fn prep_recvmsg_multishot(
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
         msg: *linux.msghdr,
-        flags: Flags.SendRecv,
+        flags: linux.MSG,
     ) void {
         sqe.prep_recvmsg(fd, msg, flags);
-        const enable_multishot: Flags.SendRecv = .{ .RECV_MULTISHOT = true };
-        sqe.ioprio |= @bitCast(enable_multishot);
+        sqe.ioprio = .{ .send_recv = .{ .RECV_MULTISHOT = true } };
     }
 
     // COMMIT: fix send[|recv] flag param type
-    pub fn prep_send(sqe: *io_uring_sqe, fd: linux.fd_t, buffer: []const u8, flags: Flags.SendRecv) void {
+    pub fn prep_send(sqe: *io_uring_sqe, fd: linux.fd_t, buffer: []const u8, flags: linux.MSG) void {
         sqe.prep_rw(.SEND, fd, @intFromPtr(buffer.ptr), buffer.len, 0);
-        sqe.rw_flags = @bitCast(flags);
+        sqe.rw_flags = flags;
     }
 
-    pub fn prep_send_zc(sqe: *io_uring_sqe, fd: linux.fd_t, buffer: []const u8, flags: u32, zc_flags: u16) void {
+    pub fn prep_send_zc(sqe: *io_uring_sqe, fd: linux.fd_t, buffer: []const u8, flags: linux.MSG, zc_flags: Flags.SendRecv) void {
         sqe.prep_rw(.SEND_ZC, fd, @intFromPtr(buffer.ptr), buffer.len, 0);
         sqe.rw_flags = flags;
-        sqe.ioprio = zc_flags;
+        sqe.ioprio = .{ .send_recv = zc_flags };
     }
 
-    pub fn prep_send_zc_fixed(sqe: *io_uring_sqe, fd: linux.fd_t, buffer: []const u8, flags: u32, zc_flags: u16, buf_index: u16) void {
-        prep_send_zc(sqe, fd, buffer, flags, zc_flags);
-        sqe.ioprio |= linux.IORING_RECVSEND_FIXED_BUF;
+    pub fn prep_send_zc_fixed(sqe: *io_uring_sqe, fd: linux.fd_t, buffer: []const u8, flags: linux.MSG, zc_flags: Flags.SendRecv, buf_index: u16) void {
+        const zc_flags_fixed = blk: {
+            var updated_flags = zc_flags;
+            updated_flags.RECVSEND_FIXED_BUF = true;
+            break :blk updated_flags;
+        };
+        prep_send_zc(sqe, fd, buffer, flags, zc_flags_fixed);
         sqe.buf_index = buf_index;
     }
 
-    pub fn prep_sendmsg_zc(
-        sqe: *io_uring_sqe,
-        fd: linux.fd_t,
-        msg: *const linux.msghdr_const,
-        flags: u32,
-    ) void {
-        prep_sendmsg(sqe, fd, msg, flags);
-        sqe.opcode = .SENDMSG_ZC;
-    }
-
     pub fn prep_sendmsg(
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
         msg: *const linux.msghdr_const,
-        flags: u32,
+        flags: linux.MSG,
     ) void {
         sqe.prep_rw(.SENDMSG, fd, @intFromPtr(msg), 1, 0);
         sqe.rw_flags = flags;
     }
 
+    pub fn prep_sendmsg_zc(
+        sqe: *io_uring_sqe,
+        fd: linux.fd_t,
+        msg: *const linux.msghdr_const,
+        flags: linux.MSG,
+    ) void {
+        prep_sendmsg(sqe, fd, msg, flags);
+        sqe.opcode = .SENDMSG_ZC;
+    }
+
     pub fn prep_openat(
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
@@ -747,7 +764,7 @@ pub const io_uring_sqe = extern struct {
         sqe.* = .{
             .opcode = .CLOSE,
             .flags = .{},
-            .ioprio = 0,
+            .ioprio = @bitCast(@as(u16, 0)),
             .fd = fd,
             .off = 0,
             .addr = 0,
@@ -771,22 +788,22 @@ pub const io_uring_sqe = extern struct {
         sqe: *io_uring_sqe,
         ts: *const linux.kernel_timespec,
         count: u32,
-        flags: u32,
+        flags: Flags.Timeout,
     ) void {
         sqe.prep_rw(.TIMEOUT, -1, @intFromPtr(ts), 1, count);
-        sqe.rw_flags = flags;
+        sqe.rw_flags = @bitCast(flags);
     }
 
-    pub fn prep_timeout_remove(sqe: *io_uring_sqe, timeout_user_data: u64, flags: u32) void {
+    pub fn prep_timeout_remove(sqe: *io_uring_sqe, timeout_user_data: u64, flags: Flags.Timeout) void {
         sqe.* = .{
             .opcode = .TIMEOUT_REMOVE,
             .flags = .{},
-            .ioprio = 0,
+            .ioprio = @bitCast(@as(u16, 0)),
             .fd = -1,
             .off = 0,
             .addr = timeout_user_data,
             .len = 0,
-            .rw_flags = flags,
+            .rw_flags = @bitCast(flags),
             .user_data = 0,
             .buf_index = 0,
             .personality = 0,
@@ -799,7 +816,7 @@ pub const io_uring_sqe = extern struct {
     pub fn prep_link_timeout(
         sqe: *io_uring_sqe,
         ts: *const linux.kernel_timespec,
-        flags: u32,
+        flags: Flags.Timeout,
     ) void {
         sqe.prep_rw(.LINK_TIMEOUT, -1, @intFromPtr(ts), 1, 0);
         sqe.rw_flags = flags;
@@ -808,7 +825,7 @@ pub const io_uring_sqe = extern struct {
     pub fn prep_poll_add(
         sqe: *io_uring_sqe,
        fd: linux.fd_t,
-        poll_mask: u32,
+        poll_mask: linux.POLL,
    ) void {
         sqe.prep_rw(.POLL_ADD, fd, @intFromPtr(@as(?*anyopaque, null)), 0, 0);
         // Poll masks previously used to comprise of 16 bits in the flags union of
@@ -831,8 +848,8 @@ pub const io_uring_sqe = extern struct {
         sqe: *io_uring_sqe,
         old_user_data: u64,
         new_user_data: u64,
-        poll_mask: u32,
-        flags: u32,
+        poll_mask: linux.POLL,
+        flags: Flags.Poll,
     ) void {
         sqe.prep_rw(.POLL_REMOVE, -1, old_user_data, flags, new_user_data);
         // Poll masks previously used to comprise of 16 bits in the flags union of
@@ -854,7 +871,7 @@ pub const io_uring_sqe = extern struct {
         sqe.* = .{
             .opcode = .FALLOCATE,
             .flags = .{},
-            .ioprio = 0,
+            .ioprio = @bitCast(@as(u16, 0)),
             .fd = fd,
             .off = offset,
             .addr = len,
@@ -873,8 +890,8 @@ pub const io_uring_sqe = extern struct {
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
         path: [*:0]const u8,
-        flags: u32,
-        mask: u32,
+        flags: linux.AT,
+        mask: linux.STATX, // TODO: compose linux.STATX
         buf: *linux.Statx,
     ) void {
         sqe.prep_rw(.STATX, fd, @intFromPtr(path), mask, @intFromPtr(buf));
@@ -884,25 +901,30 @@ pub const io_uring_sqe = extern struct {
     pub fn prep_cancel(
         sqe: *io_uring_sqe,
         cancel_user_data: u64,
-        flags: u32,
+        flags: Flags.AsyncCancel,
     ) void {
         sqe.prep_rw(.ASYNC_CANCEL, -1, cancel_user_data, 0, 0);
-        sqe.rw_flags = flags;
+        sqe.rw_flags = @bitCast(flags);
     }
 
     pub fn prep_cancel_fd(
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
-        flags: u32,
+        flags: Flags.AsyncCancel,
     ) void {
         sqe.prep_rw(.ASYNC_CANCEL, fd, 0, 0, 0);
-        sqe.rw_flags = flags | linux.IORING_ASYNC_CANCEL_FD;
+        const enable_cancel_fd = blk: {
+            var update_flags = flags;
+            update_flags.CANCEL_FD = true;
+            break :blk update_flags;
+        };
+        sqe.rw_flags = @bitCast(enable_cancel_fd);
     }
 
     pub fn prep_shutdown(
         sqe: *io_uring_sqe,
         sockfd: linux.socket_t,
-        how: u32,
+        how: linux.SHUT,
     ) void {
         sqe.prep_rw(.SHUTDOWN, sockfd, 0, how, 0);
     }
@@ -913,7 +935,7 @@ pub const io_uring_sqe = extern struct {
         old_path: [*:0]const u8,
         new_dir_fd: linux.fd_t,
         new_path: [*:0]const u8,
-        flags: u32,
+        flags: linux.RENAME,
     ) void {
         sqe.prep_rw(
             .RENAMEAT,
@@ -930,7 +952,7 @@ pub const io_uring_sqe = extern struct {
         sqe: *io_uring_sqe,
         dir_fd: linux.fd_t,
         path: [*:0]const u8,
-        flags: u32,
+        flags: linux.AT, // TODO: unlink flags only AT_REMOVEDIR
     ) void {
         sqe.prep_rw(.UNLINKAT, dir_fd, @intFromPtr(path), 0, 0);
         sqe.rw_flags = flags;
@@ -966,7 +988,7 @@ pub const io_uring_sqe = extern struct {
         old_path: [*:0]const u8,
         new_dir_fd: linux.fd_t,
         new_path: [*:0]const u8,
-        flags: u32,
+        flags: linux.AT, // only AT_EMPTY_PATH, AT_SYMLINK_FOLLOW
     ) void {
         sqe.prep_rw(
             .LINKAT,
@@ -991,9 +1013,10 @@ pub const io_uring_sqe = extern struct {
         sqe: *io_uring_sqe,
         fds: []linux.fd_t,
     ) void {
-        sqe.prep_rw(.FILES_UPDATE, -1, @intFromPtr(fds.ptr), fds.len, linux.IORING_FILE_INDEX_ALLOC);
+        sqe.prep_rw(.FILES_UPDATE, -1, @intFromPtr(fds.ptr), fds.len, constants.FILE_INDEX_ALLOC);
     }
 
+    // TODO: why can't slice be used here ?
     pub fn prep_provide_buffers(
         sqe: *io_uring_sqe,
         buffers: [*]u8,
@@ -1016,23 +1039,12 @@ pub const io_uring_sqe = extern struct {
         sqe.buf_index = @intCast(group_id);
     }
 
-    pub fn prep_multishot_accept(
-        sqe: *io_uring_sqe,
-        fd: linux.fd_t,
-        addr: ?*linux.sockaddr,
-        addrlen: ?*linux.socklen_t,
-        flags: u32,
-    ) void {
-        prep_accept(sqe, fd, addr, addrlen, flags);
-        sqe.ioprio |= linux.IORING_ACCEPT_MULTISHOT;
-    }
-
     pub fn prep_socket(
         sqe: *io_uring_sqe,
-        domain: u32,
-        socket_type: u32,
-        protocol: u32,
-        flags: u32,
+        domain: linux.AF,
+        socket_type: linux.SOCK,
+        protocol: u32, // Enumerate https://github.com/kraj/musl/blob/kraj/master/src/network/proto.c#L7
+        flags: u32, // flags is unused
     ) void {
         sqe.prep_rw(.SOCKET, @intCast(domain), 0, protocol, socket_type);
         sqe.rw_flags = flags;
@@ -1040,10 +1052,10 @@ pub const io_uring_sqe = extern struct {
 
     pub fn prep_socket_direct(
         sqe: *io_uring_sqe,
-        domain: u32,
-        socket_type: u32,
-        protocol: u32,
-        flags: u32,
+        domain: linux.AF,
+        socket_type: linux.SOCK,
+        protocol: u32, // Enumerate https://github.com/kraj/musl/blob/kraj/master/src/network/proto.c#L7
+        flags: u32, // flags is unused
         file_index: u32,
     ) void {
         prep_socket(sqe, domain, socket_type, protocol, flags);
@@ -1052,13 +1064,13 @@ pub const io_uring_sqe = extern struct {
 
     pub fn prep_socket_direct_alloc(
         sqe: *io_uring_sqe,
-        domain: u32,
-        socket_type: u32,
-        protocol: u32,
-        flags: u32,
+        domain: linux.AF,
+        socket_type: linux.SOCK,
+        protocol: u32, // Enumerate https://github.com/kraj/musl/blob/kraj/master/src/network/proto.c#L7
+        flags: u32, // flags is unused
     ) void {
         prep_socket(sqe, domain, socket_type, protocol, flags);
-        set_target_fixed_file(sqe, linux.IORING_FILE_INDEX_ALLOC);
+        set_target_fixed_file(sqe, constants.FILE_INDEX_ALLOC);
     }
 
     pub fn prep_waitid(
@@ -1066,20 +1078,21 @@ pub const io_uring_sqe = extern struct {
         id_type: linux.P,
         id: i32,
         infop: *linux.siginfo_t,
-        options: u32,
-        flags: u32,
+        options: linux.W,
+        flags: u32, // flags is unused
     ) void {
         sqe.prep_rw(.WAITID, id, 0, @intFromEnum(id_type), @intFromPtr(infop));
         sqe.rw_flags = flags;
         sqe.splice_fd_in = @bitCast(options);
     }
 
+    // TODO: maybe remove unused flag fields?
     pub fn prep_bind(
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
         addr: *const linux.sockaddr,
         addrlen: linux.socklen_t,
-        flags: u32,
+        flags: u32, // flags is unused and does't exist in io_uring's api
     ) void {
         sqe.prep_rw(.BIND, fd, @intFromPtr(addr), 0, addrlen);
         sqe.rw_flags = flags;
@@ -1089,7 +1102,7 @@ pub const io_uring_sqe = extern struct {
         sqe: *io_uring_sqe,
         fd: linux.fd_t,
         backlog: usize,
-        flags: u32,
+        flags: u32, // flags is unused and does't exist in io_uring's api
     ) void {
         sqe.prep_rw(.LISTEN, fd, 0, backlog, 0);
         sqe.rw_flags = flags;
@@ -1097,10 +1110,10 @@ pub const io_uring_sqe = extern struct {
 
     pub fn prep_cmd_sock(
         sqe: *io_uring_sqe,
-        cmd_op: linux.IO_URING_SOCKET_OP,
+        cmd_op: SocketOp,
         fd: linux.fd_t,
-        level: u32,
-        optname: u32,
+        level: linux.SOL,
+        optname: linux.SO,
         optval: u64,
         optlen: u32,
     ) void {
@@ -1314,7 +1327,7 @@ pub const io_uring_probe_op = extern struct {
     resv2: u32,
 
     pub fn is_supported(self: @This()) bool {
-        return self.flags & Constants.IO_URING_OP_SUPPORTED != 0;
+        return self.flags & constants.IO_URING_OP_SUPPORTED != 0;
    }
 };
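Taken together, call sites move from integer masks to typed values. A hypothetical before/after sketch under the signatures above — field names are taken from the diff, not verified against the rest of the file:

// Before: sqe.prep_cancel(user_data, linux.IORING_ASYNC_CANCEL_ALL);
// After: the default-initialized struct is the empty mask, fields opt in.
sqe.prep_cancel(user_data, .{ .CANCEL_ALL = true });

// prep_cancel_fd no longer asks the caller to OR in IORING_ASYNC_CANCEL_FD;
// it sets CANCEL_FD on a copy of the caller's flags internally.
sqe.prep_cancel_fd(fd, .{});

// ioprio is now a packed union, written per opcode rather than OR-ed bit by bit:
sqe.ioprio = .{ .accept = .{ .MULTISHOT = true } };
sqe.ioprio = .{ .send_recv = .{ .RECV_MULTISHOT = true } };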