EventLoop: remove broken mechanism for making deinit block on detached

Author: Andrew Kelley
Date:   2025-03-31 17:06:05 -07:00
parent e5b2df0c9b
commit 34e85db4a2

@@ -27,7 +27,6 @@ const Thread = struct {
     current_context: *Context,
     ready_queue: ?*Fiber,
     free_queue: ?*Fiber,
-    detached_queue: ?*Fiber,
     io_uring: IoUring,
     idle_search_index: u32,
     steal_ready_search_index: u32,
@@ -209,7 +208,6 @@ pub fn init(el: *EventLoop, gpa: Allocator) !void {
         .current_context = &main_fiber.context,
         .ready_queue = null,
         .free_queue = null,
-        .detached_queue = null,
         .io_uring = try IoUring.init(io_uring_entries, 0),
         .idle_search_index = 1,
         .steal_ready_search_index = 1,
@@ -220,16 +218,7 @@ pub fn init(el: *EventLoop, gpa: Allocator) !void {
 }
 
 pub fn deinit(el: *EventLoop) void {
-    // Wait for detached fibers.
     const active_threads = @atomicLoad(u32, &el.threads.active, .acquire);
-    for (el.threads.allocated[0..active_threads]) |*thread| {
-        while (thread.detached_queue) |detached_fiber| {
-            if (@atomicLoad(?*Fiber, &detached_fiber.awaiter, .acquire) != Fiber.finished)
-                el.yield(null, .{ .register_awaiter = &detached_fiber.awaiter });
-            detached_fiber.recycle();
-        }
-    }
-
     for (el.threads.allocated[0..active_threads]) |*thread| {
         const ready_fiber = @atomicLoad(?*Fiber, &thread.ready_queue, .monotonic);
         assert(ready_fiber == null or ready_fiber == Fiber.finished); // pending async
@@ -347,7 +336,6 @@ fn schedule(el: *EventLoop, thread: *Thread, ready_queue: Fiber.Queue) void {
         .current_context = &new_thread.idle_context,
         .ready_queue = ready_queue.head,
         .free_queue = null,
-        .detached_queue = null,
         .io_uring = IoUring.init(io_uring_entries, 0) catch |err| {
             @atomicStore(u32, &el.threads.reserved, new_thread_index, .release);
             // no more access to `thread` after giving up reservation
@@ -673,8 +661,6 @@ const DetachedClosure = struct {
         message.handle(closure.event_loop);
         std.log.debug("{*} performing async detached", .{closure.fiber});
         closure.start(closure.contextPointer());
-        const current_thread: *Thread = .current();
-        current_thread.detached_queue = closure.fiber.queue_next;
         const awaiter = @atomicRmw(?*Fiber, &closure.fiber.awaiter, .Xchg, Fiber.finished, .acq_rel);
         if (awaiter) |a| {
             closure.event_loop.yield(a, .nothing);
@@ -766,11 +752,10 @@ fn go(
             else => |arch| @compileError("unimplemented architecture: " ++ @tagName(arch)),
         },
         .awaiter = null,
-        .queue_next = current_thread.detached_queue,
+        .queue_next = null,
         .cancel_thread = null,
         .awaiting_completions = .initEmpty(),
     };
-    current_thread.detached_queue = fiber;
     closure.* = .{
         .event_loop = event_loop,
         .fiber = fiber,