mirror of https://codeberg.org/ziglang/zig.git (synced 2025-12-06 13:54:21 +00:00)

build runner: update from std.Thread.Pool to std.Io

parent 32dc46aae5
commit a242292644

6 changed files with 40 additions and 68 deletions
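
The core of the change: work the build runner used to fan out through an explicit `std.Thread.Pool` guarded by a `std.Thread.WaitGroup` now goes through the `std.Io` interface and its task groups. A minimal sketch of the new shape, assuming the `std.Io.Threaded` API as it stands at this commit (`init`, `io()`, and `Io.Group` as used in the diff below); `worker` is an illustrative name, not part of the commit:

    const std = @import("std");

    fn worker(n: usize) void {
        std.debug.print("task {d} done\n", .{n});
    }

    pub fn main() !void {
        // Thread-pool-backed Io implementation (assumed API at this commit).
        var threaded: std.Io.Threaded = .init(std.heap.page_allocator);
        defer threaded.deinit();
        const io = threaded.io();

        // Io.Group replaces the Thread.Pool.spawnWg/WaitGroup pairing.
        var group: std.Io.Group = .init;
        defer group.wait(io);
        for (0..4) |n| group.async(io, worker, .{n});
    }
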
lib/compiler/build_runner.zig

@@ -107,7 +107,6 @@ pub fn main() !void {
 
     var targets = std.array_list.Managed([]const u8).init(arena);
     var debug_log_scopes = std.array_list.Managed([]const u8).init(arena);
-    var thread_pool_options: std.Thread.Pool.Options = .{ .allocator = arena };
 
     var install_prefix: ?[]const u8 = null;
     var dir_list = std.Build.DirList{};

@@ -413,19 +412,11 @@ pub fn main() !void {
             };
         } else if (mem.eql(u8, arg, "-fno-reference-trace")) {
             builder.reference_trace = null;
-        } else if (mem.startsWith(u8, arg, "-j")) {
-            const num = arg["-j".len..];
-            const n_jobs = std.fmt.parseUnsigned(u32, num, 10) catch |err| {
-                std.debug.print("unable to parse jobs count '{s}': {s}", .{
-                    num, @errorName(err),
-                });
-                process.exit(1);
-            };
-            if (n_jobs < 1) {
-                std.debug.print("number of jobs must be at least 1\n", .{});
-                process.exit(1);
-            }
-            thread_pool_options.n_jobs = n_jobs;
+        } else if (mem.cutPrefix(u8, arg, "-j")) |text| {
+            const n = std.fmt.parseUnsigned(u32, text, 10) catch |err|
+                fatal("unable to parse jobs count '{s}': {t}", .{ text, err });
+            if (n < 1) fatal("number of jobs must be at least 1", .{});
+            threaded.setAsyncLimit(.limited(n));
         } else if (mem.eql(u8, arg, "--")) {
             builder.args = argsRest(args, arg_idx);
             break;

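The `-j` handling shrinks because `mem.cutPrefix` folds the `startsWith` test and the manual slice into one optional capture, and because the parsed count now feeds `Threaded.setAsyncLimit` (added in lib/std/Io/Threaded.zig below) instead of `Thread.Pool.Options.n_jobs`. A small test of the `cutPrefix` semantics the new branch relies on, assuming the argument order shown in the diff:

    const std = @import("std");

    test "cutPrefix captures the value of a -j flag" {
        // Returns the remainder after the prefix, or null when it does not match.
        try std.testing.expectEqualStrings("4", std.mem.cutPrefix(u8, "-j4", "-j").?);
        try std.testing.expect(std.mem.cutPrefix(u8, "--watch", "-j") == null);
    }
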
@@ -516,7 +507,6 @@ pub fn main() !void {
         .error_style = error_style,
         .multiline_errors = multiline_errors,
         .summary = summary orelse if (watch or webui_listen != null) .line else .failures,
-        .thread_pool = undefined,
 
         .ttyconf = ttyconf,
     };

@@ -547,16 +537,12 @@ pub fn main() !void {
             break :w try .init();
         };
 
-        try run.thread_pool.init(thread_pool_options);
-        defer run.thread_pool.deinit();
-
         const now = Io.Clock.Timestamp.now(io, .awake) catch |err| fatal("failed to collect timestamp: {t}", .{err});
 
         run.web_server = if (webui_listen) |listen_address| ws: {
             if (builtin.single_threaded) unreachable; // `fatal` above
             break :ws .init(.{
                 .gpa = gpa,
-                .thread_pool = &run.thread_pool,
                 .ttyconf = ttyconf,
                 .graph = &graph,
                 .all_steps = run.step_stack.keys(),

@@ -675,7 +661,6 @@ const Run = struct {
     memory_blocked_steps: std.ArrayList(*Step),
     /// Allocated into `gpa`.
     step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
-    thread_pool: std.Thread.Pool,
     /// Similar to the `tty.Config` returned by `std.debug.lockStderrWriter`,
     /// but also respects the '--color' flag.
     ttyconf: tty.Config,

@@ -754,14 +739,13 @@ fn runStepNames(
     const gpa = run.gpa;
     const io = b.graph.io;
     const step_stack = &run.step_stack;
-    const thread_pool = &run.thread_pool;
 
     {
         const step_prog = parent_prog_node.start("steps", step_stack.count());
         defer step_prog.end();
 
-        var wait_group: std.Thread.WaitGroup = .{};
-        defer wait_group.wait();
+        var group: Io.Group = .init;
+        defer group.wait(io);
 
         // Here we spawn the initial set of tasks with a nice heuristic -
         // dependency order. Each worker when it finishes a step will then

@@ -771,9 +755,7 @@ fn runStepNames(
             const step = steps_slice[steps_slice.len - i - 1];
             if (step.state == .skipped_oom) continue;
 
-            thread_pool.spawnWg(&wait_group, workerMakeOneStep, .{
-                &wait_group, b, step, step_prog, run,
-            });
+            group.async(io, workerMakeOneStep, .{ &group, b, step, step_prog, run });
         }
     }
 

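Workers re-enter this same spawning pattern: `workerMakeOneStep` receives a pointer to the group it was spawned into and uses `group.async` to queue the dependants of each finished step, so the single `defer group.wait(io)` above outlives the whole cascade. A sketch of that recursive shape (names and the `depth` parameter are illustrative, not the build runner's actual logic):

    const std = @import("std");

    // Each task knows its group and may enqueue follow-up work into it;
    // the caller's one group.wait(io) returns only after the cascade ends.
    fn makeOne(group: *std.Io.Group, io: std.Io, depth: usize) void {
        if (depth == 0) return;
        group.async(io, makeOne, .{ group, io, depth - 1 });
    }
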
@@ -855,7 +837,6 @@ fn runStepNames(
     var f = std.Build.Fuzz.init(
         gpa,
         io,
-        thread_pool,
         run.ttyconf,
         step_stack.keys(),
         parent_prog_node,

@@ -1318,14 +1299,12 @@ fn constructGraphAndCheckForDependencyLoop(
 }
 
 fn workerMakeOneStep(
-    wg: *std.Thread.WaitGroup,
+    group: *Io.Group,
     b: *std.Build,
     s: *Step,
     prog_node: std.Progress.Node,
     run: *Run,
 ) void {
-    const thread_pool = &run.thread_pool;
-
     // First, check the conditions for running this step. If they are not met,
     // then we return without doing the step, relying on another worker to
     // queue this step up again when dependencies are met.

@@ -1381,7 +1360,6 @@ fn workerMakeOneStep(
 
     const make_result = s.make(.{
         .progress_node = sub_prog_node,
-        .thread_pool = thread_pool,
         .watch = run.watch,
         .web_server = if (run.web_server) |*ws| ws else null,
         .ttyconf = run.ttyconf,

@@ -1400,6 +1378,8 @@ fn workerMakeOneStep(
             printErrorMessages(run.gpa, s, .{}, bw, ttyconf, run.error_style, run.multiline_errors) catch {};
         }
 
+    const io = b.graph.io;
+
     handle_result: {
         if (make_result) |_| {
             @atomicStore(Step.State, &s.state, .success, .seq_cst);

@@ -1419,9 +1399,7 @@ fn workerMakeOneStep(
 
             // Successful completion of a step, so we queue up its dependants as well.
             for (s.dependants.items) |dep| {
-                thread_pool.spawnWg(wg, workerMakeOneStep, .{
-                    wg, b, dep, prog_node, run,
-                });
+                group.async(io, workerMakeOneStep, .{ group, b, dep, prog_node, run });
             }
         }
 

@@ -1444,9 +1422,7 @@ fn workerMakeOneStep(
         if (dep.max_rss <= remaining) {
             remaining -= dep.max_rss;
 
-            thread_pool.spawnWg(wg, workerMakeOneStep, .{
-                wg, b, dep, prog_node, run,
-            });
+            group.async(io, workerMakeOneStep, .{ group, b, dep, prog_node, run });
         } else {
             run.memory_blocked_steps.items[i] = dep;
             i += 1;

lib/std/Build/Fuzz.zig

@@ -22,10 +22,9 @@ mode: Mode,
 /// Allocated into `gpa`.
 run_steps: []const *Step.Run,
 
-wait_group: std.Thread.WaitGroup,
+group: Io.Group,
 root_prog_node: std.Progress.Node,
 prog_node: std.Progress.Node,
-thread_pool: *std.Thread.Pool,
 
 /// Protects `coverage_files`.
 coverage_mutex: std.Thread.Mutex,

@@ -78,7 +77,6 @@ const CoverageMap = struct {
 pub fn init(
     gpa: Allocator,
     io: Io,
-    thread_pool: *std.Thread.Pool,
     ttyconf: tty.Config,
     all_steps: []const *Build.Step,
     root_prog_node: std.Progress.Node,

@@ -89,20 +87,22 @@ pub fn init(
         defer steps.deinit(gpa);
         const rebuild_node = root_prog_node.start("Rebuilding Unit Tests", 0);
         defer rebuild_node.end();
-        var rebuild_wg: std.Thread.WaitGroup = .{};
-        defer rebuild_wg.wait();
+        var rebuild_group: Io.Group = .init;
+        defer rebuild_group.cancel(io);
 
         for (all_steps) |step| {
             const run = step.cast(Step.Run) orelse continue;
             if (run.producer == null) continue;
             if (run.fuzz_tests.items.len == 0) continue;
             try steps.append(gpa, run);
-            thread_pool.spawnWg(&rebuild_wg, rebuildTestsWorkerRun, .{ run, gpa, ttyconf, rebuild_node });
+            rebuild_group.async(io, rebuildTestsWorkerRun, .{ run, gpa, ttyconf, rebuild_node });
         }
 
         if (steps.items.len == 0) fatal("no fuzz tests found", .{});
         rebuild_node.setEstimatedTotalItems(steps.items.len);
-        break :steps try gpa.dupe(*Step.Run, steps.items);
+        const run_steps = try gpa.dupe(*Step.Run, steps.items);
+        rebuild_group.wait(io);
+        break :steps run_steps;
     };
     errdefer gpa.free(run_steps);
 

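Note the error-path handling here: the `defer` switched from `wait` to `cancel`, and the success path now waits explicitly before breaking out with the duplicated slice. If `gpa.dupe` fails, the deferred `rebuild_group.cancel(io)` tears down the in-flight rebuilds rather than blocking on them; once `wait` has returned, the deferred cancel presumably has nothing left to do. The same control flow in isolation, as a sketch that assumes cancel-after-wait is benign and uses an invented `rebuildOne` task:

    const std = @import("std");

    fn rebuildOne(id: usize) void {
        std.debug.print("rebuilt {d}\n", .{id});
    }

    pub fn main() !void {
        var threaded: std.Io.Threaded = .init(std.heap.page_allocator);
        defer threaded.deinit();
        const io = threaded.io();

        var group: std.Io.Group = .init;
        // Takes effect only if an error unwinds before the explicit wait below.
        defer group.cancel(io);

        for (0..3) |id| group.async(io, rebuildOne, .{id});

        // Success path: block until every task finishes, then use the results.
        group.wait(io);
    }
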
@@ -118,8 +118,7 @@ pub fn init(
         .ttyconf = ttyconf,
         .mode = mode,
         .run_steps = run_steps,
-        .wait_group = .{},
-        .thread_pool = thread_pool,
+        .group = .init,
         .root_prog_node = root_prog_node,
         .prog_node = .none,
         .coverage_files = .empty,

@@ -131,29 +130,26 @@ pub fn init(
 }
 
 pub fn start(fuzz: *Fuzz) void {
+    const io = fuzz.io;
     fuzz.prog_node = fuzz.root_prog_node.start("Fuzzing", fuzz.run_steps.len);
 
     if (fuzz.mode == .forever) {
         // For polling messages and sending updates to subscribers.
-        fuzz.wait_group.start();
-        _ = std.Thread.spawn(.{}, coverageRun, .{fuzz}) catch |err| {
-            fuzz.wait_group.finish();
-            fatal("unable to spawn coverage thread: {s}", .{@errorName(err)});
-        };
+        fuzz.group.concurrent(io, coverageRun, .{fuzz}) catch |err|
+            fatal("unable to spawn coverage task: {t}", .{err});
     }
 
     for (fuzz.run_steps) |run| {
         for (run.fuzz_tests.items) |unit_test_index| {
             assert(run.rebuilt_executable != null);
-            fuzz.thread_pool.spawnWg(&fuzz.wait_group, fuzzWorkerRun, .{
-                fuzz, run, unit_test_index,
-            });
+            fuzz.group.async(io, fuzzWorkerRun, .{ fuzz, run, unit_test_index });
         }
     }
 }
 
 pub fn deinit(fuzz: *Fuzz) void {
-    if (!fuzz.wait_group.isDone()) @panic("TODO: terminate the fuzzer processes");
+    const io = fuzz.io;
+    fuzz.group.cancel(io);
     fuzz.prog_node.end();
     fuzz.gpa.free(fuzz.run_steps);
 }

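The coverage poller is the one task that moves to `group.concurrent` rather than `group.async`: it loops for the lifetime of the fuzzing session, so it must run on its own execution context. That also appears to be why only this call site can fail: judging by the `Threaded.async` fast path later in this diff, `async` may run the callee inline when no extra concurrency is available and returns void, while `concurrent` must create a new context and reports an error when it cannot. A self-contained sketch of the fallible variant, with `poll` standing in for `coverageRun`:

    const std = @import("std");

    fn poll() void {
        // Stand-in for the long-lived coverageRun loop.
    }

    pub fn main() !void {
        var threaded: std.Io.Threaded = .init(std.heap.page_allocator);
        defer threaded.deinit();
        const io = threaded.io();

        var group: std.Io.Group = .init;
        defer group.wait(io);

        // Must land on its own execution context, hence the error union.
        try group.concurrent(io, poll, .{});
    }
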
@@ -335,8 +331,6 @@ pub fn sendUpdate(
 }
 
 fn coverageRun(fuzz: *Fuzz) void {
-    defer fuzz.wait_group.finish();
-
     fuzz.queue_mutex.lock();
     defer fuzz.queue_mutex.unlock();
 

@@ -511,8 +505,8 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void {
     assert(fuzz.mode == .limit);
     const io = fuzz.io;
 
-    fuzz.wait_group.wait();
-    fuzz.wait_group.reset();
+    fuzz.group.wait(io);
+    fuzz.group = .init;
 
     std.debug.print("======= FUZZING REPORT =======\n", .{});
     for (fuzz.msg_queue.items) |msg| {

lib/std/Build/Step.zig

@@ -110,7 +110,6 @@ pub const TestResults = struct {
 
 pub const MakeOptions = struct {
     progress_node: std.Progress.Node,
-    thread_pool: *std.Thread.Pool,
     watch: bool,
     web_server: switch (builtin.target.cpu.arch) {
         else => ?*Build.WebServer,

lib/std/Build/Step/Run.zig

@@ -1151,7 +1151,6 @@ pub fn rerunInFuzzMode(
     const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
     try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, .{
         .progress_node = prog_node,
-        .thread_pool = undefined, // not used by `runCommand`
         .watch = undefined, // not used by `runCommand`
         .web_server = null, // only needed for time reports
         .ttyconf = fuzz.ttyconf,

lib/std/Build/WebServer.zig

@@ -1,5 +1,4 @@
 gpa: Allocator,
-thread_pool: *std.Thread.Pool,
 graph: *const Build.Graph,
 all_steps: []const *Build.Step,
 listen_address: net.IpAddress,

@@ -53,7 +52,6 @@ pub fn notifyUpdate(ws: *WebServer) void {
 
 pub const Options = struct {
     gpa: Allocator,
-    thread_pool: *std.Thread.Pool,
     ttyconf: Io.tty.Config,
     graph: *const std.Build.Graph,
     all_steps: []const *Build.Step,

@@ -100,7 +98,6 @@ pub fn init(opts: Options) WebServer {
 
     return .{
         .gpa = opts.gpa,
-        .thread_pool = opts.thread_pool,
         .ttyconf = opts.ttyconf,
         .graph = opts.graph,
         .all_steps = all_steps,

@@ -235,7 +232,6 @@ pub fn finishBuild(ws: *WebServer, opts: struct {
     ws.fuzz = Fuzz.init(
         ws.gpa,
         ws.graph.io,
-        ws.thread_pool,
         ws.ttyconf,
         ws.all_steps,
         ws.root_prog_node,

lib/std/Io/Threaded.zig

@@ -33,6 +33,9 @@ wait_group: std.Thread.WaitGroup = .{},
 /// immediately.
 ///
 /// Defaults to a number equal to logical CPU cores.
+///
+/// Protected by `mutex` once the I/O instance is already in use. See
+/// `setAsyncLimit`.
 async_limit: Io.Limit,
 /// Maximum thread pool size (excluding main thread) for dispatching concurrent
 /// tasks. Until this limit, calls to `Io.concurrent` will increase the thread

@@ -168,6 +171,12 @@ pub const init_single_threaded: Threaded = .{
     .have_signal_handler = false,
 };
 
+pub fn setAsyncLimit(t: *Threaded, new_limit: Io.Limit) void {
+    t.mutex.lock();
+    defer t.mutex.unlock();
+    t.async_limit = new_limit;
+}
+
 pub fn deinit(t: *Threaded) void {
     t.join();
     if (is_windows and t.wsa.status == .initialized) {

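This new `setAsyncLimit` is what the build runner's `-j` flag reaches via `threaded.setAsyncLimit(.limited(n))` above; it takes the mutex because, per the doc comment added earlier, `async_limit` may now be adjusted after the Io instance is already dispatching tasks. Hypothetical usage mirroring a `-j4` invocation (setup calls assumed from the API at this commit):

    const std = @import("std");

    pub fn main() !void {
        var threaded: std.Io.Threaded = .init(std.heap.page_allocator);
        defer threaded.deinit();

        // What `zig build -j4` now does: cap concurrently dispatched tasks.
        threaded.setAsyncLimit(.limited(4));

        const io = threaded.io();
        _ = io; // hand this to std.Build and spawn tasks as usual
    }
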
@@ -507,7 +516,7 @@ fn async(
     start: *const fn (context: *const anyopaque, result: *anyopaque) void,
 ) ?*Io.AnyFuture {
     const t: *Threaded = @ptrCast(@alignCast(userdata));
-    if (builtin.single_threaded or t.async_limit == .nothing) {
+    if (builtin.single_threaded) {
         start(context.ptr, result.ptr);
         return null;
     }

@ -684,8 +693,7 @@ fn groupAsync(
|
||||||
start: *const fn (*Io.Group, context: *const anyopaque) void,
|
start: *const fn (*Io.Group, context: *const anyopaque) void,
|
||||||
) void {
|
) void {
|
||||||
const t: *Threaded = @ptrCast(@alignCast(userdata));
|
const t: *Threaded = @ptrCast(@alignCast(userdata));
|
||||||
if (builtin.single_threaded or t.async_limit == .nothing)
|
if (builtin.single_threaded) return start(group, context.ptr);
|
||||||
return start(group, context.ptr);
|
|
||||||
|
|
||||||
const gpa = t.allocator;
|
const gpa = t.allocator;
|
||||||
const gc = GroupClosure.init(gpa, t, group, context, context_alignment, start) catch
|
const gc = GroupClosure.init(gpa, t, group, context, context_alignment, start) catch
|
||||||
|
|
|
||||||