Replace EPOLL struct with an EpollOp enum and Epoll packed struct type

Prefer using aliases over a packed union field

Fix bug in HANDLE_FID declaration

Use EpollOp op type and Epoll packed struct flag type in IoUring

Signed-off-by: Bernard Assan <mega.alpha100@gmail.com>
This commit is contained in:
Bernard Assan 2025-10-09 23:26:15 +00:00
parent 9d460e7a78
commit 2584301978
No known key found for this signature in database
GPG key ID: C2A2C53574321095
2 changed files with 116 additions and 59 deletions

View file

@ -3481,17 +3481,11 @@ pub const STDERR_FILENO = 2;
pub const AT = At;
/// matches AT_* and AT_STATX_*
pub const At = packed struct(u32) {
_reserved: u8 = 0,
_u1: u8 = 0,
/// Do not follow symbolic links
symlink_nofollow: bool = false,
/// Remove directory instead of unlinking file
/// Or
/// File handle is needed to compare object identity and may not be usable
/// with open_by_handle_at(2)
removedir_or_handle_fid: packed union {
removedir: bool,
handle_fid: bool,
} = @bitCast(false),
removedir: bool = false,
/// Follow symbolic links.
symlink_follow: bool = false,
/// Suppress terminal automount traversal
@ -3504,7 +3498,11 @@ pub const At = packed struct(u32) {
statx_dont_sync: bool = false,
/// Apply to the entire subtree
recursive: bool = false,
_reserved_1: u16 = 0,
_17: u16 = 0,
/// File handle is needed to compare object identity and may not be usable
/// with open_by_handle_at(2)
pub const handle_fid: At = .{ .removedir = true };
/// Special value used to indicate openat should use the current working directory
pub const fdcwd = -100;
@ -3524,36 +3522,25 @@ pub const At = packed struct(u32) {
//
/// Special value used to indicate openat should use the current working directory
pub const FDCWD = fdcwd;
/// Do not follow symbolic links
pub const SYMLINK_NOFOLLOW: u32 = @bitCast(At{ .symlink_nofollow = true });
/// Remove directory instead of unlinking file
pub const REMOVEDIR: u32 = @bitCast(At{ .removedir_or_handle_fid = .{ .removedir = true } });
pub const HANDLE_FID = At{ .removedir_or_handle_fid = .{ .handle_fid = true } };
pub const REMOVEDIR: u32 = @bitCast(At{ .removedir = true });
pub const HANDLE_FID: u32 = @bitCast(handle_fid);
/// Follow symbolic links.
pub const SYMLINK_FOLLOW: u32 = @bitCast(At{ .symlink_follow = true });
/// Suppress terminal automount traversal
pub const NO_AUTOMOUNT: u32 = @bitCast(At{ .no_automount = true });
/// Allow empty relative pathname
pub const EMPTY_PATH: u32 = @bitCast(At{ .empty_path = true });
/// Type of synchronisation required from statx()
pub const STATX_SYNC_TYPE: u32 = @bitCast(statx_sync_type);
/// - Do whatever stat() does
pub const STATX_SYNC_AS_STAT: u32 = @bitCast(statx_sync_as_stat);
/// - Force the attributes to be sync'd with the server
pub const STATX_FORCE_SYNC: u32 = @bitCast(At{ .statx_force_sync = true });
/// - Don't sync attributes with the server
pub const STATX_DONT_SYNC: u32 = @bitCast(At{ .statx_dont_sync = true });
/// Apply to the entire subtree
pub const RECURSIVE: u32 = @bitCast(At{ .recursive = true });
};
@ -5668,28 +5655,96 @@ pub const SER = struct {
};
};
pub const EPOLL = struct {
/// Valid opcodes to issue to sys_epoll_ctl()
// Values match the kernel's EPOLL_CTL_* constants in <sys/epoll.h>.
pub const EpollOp = enum(u32) {
/// Register the target fd on the epoll instance
ctl_add = 1,
/// Remove (deregister) the target fd from the epoll instance
ctl_del = 2,
/// Change the event mask associated with the target fd
ctl_mod = 3,
// Non-exhaustive: tolerates op values introduced by future kernels.
_,
// Deprecated constants; prefer the enum tags above.
pub const CTL_ADD: u32 = @intFromEnum(EpollOp.ctl_add);
pub const CTL_DEL: u32 = @intFromEnum(EpollOp.ctl_del);
pub const CTL_MOD: u32 = @intFromEnum(EpollOp.ctl_mod);
};
/// Deprecated alias for Epoll
pub const EPOLL = Epoll;
/// Epoll event masks
// https://github.com/torvalds/linux/blob/18a7e218cfcdca6666e1f7356533e4c988780b57/include/uapi/linux/eventpoll.h#L30
// FIX: all flag fields now default to `false` (they defaulted to `true`,
// which made `Epoll{}` nonzero and polluted every `@bitCast(Epoll{ .x = true })`
// constant with bits 27-31), and `ET` referenced a nonexistent `.let` field.
pub const Epoll = packed struct(u32) {
// EPOLL event types (lower 16 bits)
//
/// The associated file is available for read(2) operations
in: bool = false, // bit 0, EPOLLIN = 0x001
/// There is an exceptional condition on the file descriptor
pri: bool = false, // bit 1, EPOLLPRI = 0x002
/// The associated file is available for write(2) operations
out: bool = false, // bit 2, EPOLLOUT = 0x004
/// Error condition happened on the associated file descriptor
err: bool = false, // bit 3, EPOLLERR = 0x008
/// Hang up happened on the associated file descriptor
hup: bool = false, // bit 4, EPOLLHUP = 0x010
/// Invalid request: fd not open
nval: bool = false, // bit 5, EPOLLNVAL = 0x020
/// Normal data may be read
rdnorm: bool = false, // bit 6, EPOLLRDNORM = 0x040
/// Priority data may be read
rdband: bool = false, // bit 7, EPOLLRDBAND = 0x080
/// Writing is now possible (normal data)
/// NOTE(review): the legacy constant was arch-dependent (0x004 on mips,
/// 0x100 elsewhere); this layout pins bit 8 unconditionally — confirm mips.
wrnorm: bool = false, // bit 8, EPOLLWRNORM = 0x100
/// Priority data may be written
/// NOTE(review): legacy value was 0x100 on mips, 0x200 elsewhere — confirm mips.
wrband: bool = false, // bit 9, EPOLLWRBAND = 0x200
/// Message available (unused on Linux)
msg: bool = false, // bit 10, EPOLLMSG = 0x400
_12: u2 = 0, // bits 11-12 unused
/// Stream socket peer closed connection
rdhup: bool = false, // bit 13, EPOLLRDHUP = 0x2000
_15: u13 = 0, // bits 14-26 unused
// EPOLL input flags (higher-order bits are included as internal state)
//
/// Internal flag - wakeup generated by io_uring, used to detect
/// recursion back into the io_uring poll handler
uring_wake: bool = false, // bit 27
/// Set exclusive wakeup mode for the target file descriptor
exclusive: bool = false, // bit 28
/// Request the handling of system wakeup events so as to prevent system
/// suspends from happening while those events are being processed.
/// Assuming neither EPOLLET nor EPOLLONESHOT is set, system suspends will
/// not be re-allowed until epoll_wait is called again after consuming the
/// wakeup event(s).
/// Requires CAP_BLOCK_SUSPEND
wakeup: bool = false, // bit 29
/// Set the One Shot behaviour for the target file descriptor
oneshot: bool = false, // bit 30
/// Set the Edge Triggered behaviour for the target file descriptor
et: bool = false, // bit 31
// Deprecated named constants; prefer the struct fields above.
// EPOLL event types
pub const IN: u32 = @bitCast(Epoll{ .in = true });
pub const PRI: u32 = @bitCast(Epoll{ .pri = true });
pub const OUT: u32 = @bitCast(Epoll{ .out = true });
pub const ERR: u32 = @bitCast(Epoll{ .err = true });
pub const HUP: u32 = @bitCast(Epoll{ .hup = true });
pub const NVAL: u32 = @bitCast(Epoll{ .nval = true });
pub const RDNORM: u32 = @bitCast(Epoll{ .rdnorm = true });
pub const RDBAND: u32 = @bitCast(Epoll{ .rdband = true });
pub const WRNORM: u32 = @bitCast(Epoll{ .wrnorm = true });
pub const WRBAND: u32 = @bitCast(Epoll{ .wrband = true });
pub const MSG: u32 = @bitCast(Epoll{ .msg = true });
pub const RDHUP: u32 = @bitCast(Epoll{ .rdhup = true });
// EPOLL input flags
pub const URING_WAKE: u32 = @bitCast(Epoll{ .uring_wake = true });
pub const EXCLUSIVE: u32 = @bitCast(Epoll{ .exclusive = true });
pub const WAKEUP: u32 = @bitCast(Epoll{ .wakeup = true });
pub const ONESHOT: u32 = @bitCast(Epoll{ .oneshot = true });
// FIX: was `.let`, a nonexistent field (compile error); the field is `.et`.
pub const ET: u32 = @bitCast(Epoll{ .et = true });
/// Flags for epoll_create1
pub const CLOEXEC = 1 << @bitOffsetOf(O, "CLOEXEC");
};
pub const CLOCK = clockid_t;

View file

@ -585,7 +585,7 @@ pub fn epoll_ctl(
user_data: u64,
epfd: linux.fd_t,
fd: linux.fd_t,
op: u32,
op: linux.EpollOp,
ev: ?*linux.epoll_event,
) !*Sqe {
const sqe = try self.get_sqe();
@ -871,7 +871,7 @@ pub fn poll_add(
self: *IoUring,
user_data: u64,
fd: linux.fd_t,
poll_mask: u32,
poll_mask: linux.Epoll,
) !*Sqe {
const sqe = try self.get_sqe();
sqe.prep_poll_add(fd, poll_mask);
@ -899,8 +899,8 @@ pub fn poll_update(
user_data: u64,
old_user_data: u64,
new_user_data: u64,
poll_mask: u32,
flags: u32, // TODO: what are the flags
poll_mask: linux.Epoll,
flags: uflags.Poll,
) !*Sqe {
const sqe = try self.get_sqe();
sqe.prep_poll_update(old_user_data, new_user_data, poll_mask, flags);
@ -1100,7 +1100,7 @@ pub fn waitid(
id: i32,
infop: *linux.siginfo_t,
options: linux.W,
flags: u32, // TODO: wait flags
flags: u32, // They are currently unused, and hence 0 should be passed
) !*Sqe {
const sqe = try self.get_sqe();
sqe.prep_waitid(id_type, id, infop, options, flags);
@ -1383,7 +1383,9 @@ pub fn bind(
fd: linux.fd_t,
addr: *const posix.sockaddr,
addrlen: posix.socklen_t,
flags: u32, // TODO: bind flags
// liburing doesn't have this flag, hence 0 should be passed
// TODO: consider removing this and all flags like this
flags: u32,
) !*Sqe {
const sqe = try self.get_sqe();
sqe.prep_bind(fd, addr, addrlen, flags);
@ -1399,7 +1401,9 @@ pub fn listen(
user_data: u64,
fd: linux.fd_t,
backlog: usize,
flags: u32, // TODO: listen flags
// liburing doesn't have this flag, hence 0 should be passed
// TODO: consider removing this and all flags like this
flags: u32,
) !*Sqe {
const sqe = try self.get_sqe();
sqe.prep_listen(fd, backlog, flags);
@ -1599,7 +1603,6 @@ pub const Cqe = extern struct {
/// result code for this event
res: i32,
flags: Flags,
// COMMIT: add big_cqe which was missing in io_uring_cqe type declaration
// TODO: add support for the IORING_SETUP_CQE32 case
/// If the ring is initialized with IORING_SETUP_CQE32, then this field
/// contains 16-bytes of padding, doubling the size of the CQE.
@ -1944,10 +1947,10 @@ pub const Sqe = extern struct {
sqe: *Sqe,
epfd: linux.fd_t,
fd: linux.fd_t,
op: u32, // TODO: what is the type of OP
op: linux.EpollOp,
ev: ?*linux.epoll_event,
) void {
sqe.prep_rw(.EPOLL_CTL, epfd, @intFromPtr(ev), op, @intCast(fd));
sqe.prep_rw(.EPOLL_CTL, epfd, @intFromPtr(ev), @intFromEnum(op), @intCast(fd));
}
pub fn prep_recv(sqe: *Sqe, fd: linux.fd_t, buffer: []u8, flags: linux.Msg) void {
@ -1955,7 +1958,6 @@ pub const Sqe = extern struct {
sqe.rw_flags = @bitCast(flags);
}
// TODO: review recv `flags`
pub fn prep_recv_multishot(
sqe: *Sqe,
fd: linux.fd_t,
@ -2116,7 +2118,7 @@ pub const Sqe = extern struct {
pub fn prep_poll_add(
sqe: *Sqe,
fd: linux.fd_t,
poll_mask: linux.POLL, // TODO: Poll mask typed
poll_mask: linux.Epoll,
) void {
sqe.prep_rw(.POLL_ADD, fd, @intFromPtr(@as(?*anyopaque, null)), 0, 0);
// Poll masks previously used to comprise of 16 bits in the flags union of
@ -2125,7 +2127,7 @@ pub const Sqe = extern struct {
// poll masks are consistently and properly read across multiple kernel
// versions, poll masks are enforced to be little-endian.
// https://www.spinics.net/lists/io-uring/msg02848.html
sqe.rw_flags = std.mem.nativeToLittle(u32, poll_mask);
sqe.rw_flags = std.mem.nativeToLittle(u32, @as(u32, @bitCast(poll_mask)));
}
pub fn prep_poll_remove(
@ -2139,7 +2141,7 @@ pub const Sqe = extern struct {
sqe: *Sqe,
old_user_data: u64,
new_user_data: u64,
poll_mask: linux.POLL, //TODO: Poll mask
poll_mask: linux.Epoll,
flags: uflags.Poll,
) void {
sqe.prep_rw(.POLL_REMOVE, -1, old_user_data, flags, new_user_data);
@ -2149,7 +2151,7 @@ pub const Sqe = extern struct {
// poll masks are consistently and properly read across multiple kernel
// versions, poll masks are enforced to be little-endian.
// https://www.spinics.net/lists/io-uring/msg02848.html
sqe.rw_flags = std.mem.nativeToLittle(u32, poll_mask);
sqe.rw_flags = std.mem.nativeToLittle(u32, @as(u32, @bitCast(poll_mask)));
}
pub fn prep_fallocate(
@ -2243,7 +2245,7 @@ pub const Sqe = extern struct {
sqe: *Sqe,
dir_fd: linux.fd_t,
path: [*:0]const u8,
flags: linux.At, // TODO: unlink flags only AT_REMOVEDIR
flags: linux.At,
) void {
sqe.prep_rw(.UNLINKAT, dir_fd, @intFromPtr(path), 0, 0);
sqe.rw_flags = @bitCast(flags);