mirror of
https://codeberg.org/ziglang/zig.git
synced 2025-12-09 23:29:03 +00:00
link: fix obvious race condition
Did you know that allocators reuse addresses? If not, then don't feel bad, because apparently I didn't either! This dumb mistake was probably responsible for the CI failures on `master` yesterday.
This commit is contained in:
parent
121d620443
commit
55b7187429
2 changed files with 7 additions and 6 deletions
|
|
@@ -4395,7 +4395,7 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, ou
|
|||
};
|
||||
// release `out.value` with this store; synchronizes with acquire loads in `link`
|
||||
out.status.store(if (success) .ready else .failed, .release);
|
||||
zcu.comp.link_task_queue.mirReady(zcu.comp, out);
|
||||
zcu.comp.link_task_queue.mirReady(zcu.comp, func_index, out);
|
||||
if (zcu.pending_codegen_jobs.rmw(.Sub, 1, .monotonic) == 1) {
|
||||
// Decremented to 0, so all done.
|
||||
zcu.codegen_prog_node.end();
|
||||
|
|
|
|||
|
|
@@ -64,7 +64,7 @@ state: union(enum) {
|
|||
finished,
|
||||
/// The link thread is not running or queued, because it is waiting for this MIR to be populated.
|
||||
/// Once codegen completes, it must call `mirReady` which will restart the link thread.
|
||||
wait_for_mir: *ZcuTask.LinkFunc.SharedMir,
|
||||
wait_for_mir: InternPool.Index,
|
||||
},
|
||||
|
||||
/// In the worst observed case, MIR is around 50 times as large as AIR. More typically, the ratio is
|
||||
|
|
@@ -113,7 +113,7 @@ pub fn start(q: *Queue, comp: *Compilation) void {
|
|||
|
||||
/// Called by codegen workers after they have populated a `ZcuTask.LinkFunc.SharedMir`. If the link
|
||||
/// thread was waiting for this MIR, it can resume.
|
||||
pub fn mirReady(q: *Queue, comp: *Compilation, mir: *ZcuTask.LinkFunc.SharedMir) void {
|
||||
pub fn mirReady(q: *Queue, comp: *Compilation, func_index: InternPool.Index, mir: *ZcuTask.LinkFunc.SharedMir) void {
|
||||
// We would like to assert that `mir` is not pending, but that would race with a worker thread
|
||||
// potentially freeing it.
|
||||
{
|
||||
|
|
@@ -121,7 +121,7 @@ pub fn mirReady(q: *Queue, comp: *Compilation, mir: *ZcuTask.LinkFunc.SharedMir)
|
|||
defer q.mutex.unlock();
|
||||
switch (q.state) {
|
||||
.finished, .running => return,
|
||||
.wait_for_mir => |wait_for| if (wait_for != mir) return,
|
||||
.wait_for_mir => |wait_for| if (wait_for != func_index) return,
|
||||
}
|
||||
// We were waiting for `mir`, so we will restart the linker thread.
|
||||
q.state = .running;
|
||||
|
|
@@ -171,7 +171,7 @@ pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!
|
|||
}
|
||||
// Restart the linker thread, unless it would immediately be blocked
|
||||
if (task == .link_func and task.link_func.mir.status.load(.acquire) == .pending) {
|
||||
q.state = .{ .wait_for_mir = task.link_func.mir };
|
||||
q.state = .{ .wait_for_mir = task.link_func.func };
|
||||
return;
|
||||
}
|
||||
q.state = .running;
|
||||
|
|
@@ -248,7 +248,7 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
|
|||
defer q.mutex.unlock();
|
||||
if (status_ptr.load(.acquire) != .pending) break :pending;
|
||||
// We will stop for now, and get restarted once this MIR is ready.
|
||||
q.state = .{ .wait_for_mir = task.link_func.mir };
|
||||
q.state = .{ .wait_for_mir = task.link_func.func };
|
||||
q.flush_safety.unlock();
|
||||
return;
|
||||
}
|
||||
|
|
@@ -273,6 +273,7 @@ const std = @import("std");
|
|||
const assert = std.debug.assert;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const InternPool = @import("../InternPool.zig");
|
||||
const link = @import("../link.zig");
|
||||
const PrelinkTask = link.PrelinkTask;
|
||||
const ZcuTask = link.ZcuTask;
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue