changes to accommodate std.Thread update

kprotty 2021-06-19 21:31:43 -05:00
parent e16d3d162a
commit 0a1def7833
16 changed files with 130 additions and 128 deletions
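In short: `std.Thread.spawn` now takes a `SpawnConfig` value first and an argument tuple last, spawn functions receive their arguments directly instead of a single context parameter, `wait()` is renamed to `join()`, and handles are stored by value (`std.Thread`) rather than as `*std.Thread`. A minimal sketch of the migration (the `worker` function and test are illustrative, not part of this commit):

```zig
const std = @import("std");

fn worker(value: *i32) void {
    value.* += 1;
}

test "new std.Thread.spawn API" {
    var value: i32 = 0;

    // Old API: try std.Thread.spawn(worker, &value); ... thread.wait();
    // New API: config struct first (.{} for defaults, e.g. stack size),
    // then the function, then its arguments as a tuple.
    const thread = try std.Thread.spawn(.{}, worker, .{&value});
    thread.join(); // join() replaces wait()

    try std.testing.expect(value == 1);
}
```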


@@ -220,9 +220,9 @@ test "basic usage" {
     };
 
     var context = Context{};
-    const send_thread = try std.Thread.spawn(Context.sender, &context);
-    const recv_thread = try std.Thread.spawn(Context.receiver, &context);
+    const send_thread = try std.Thread.spawn(.{}, Context.sender, .{&context});
+    const recv_thread = try std.Thread.spawn(.{}, Context.receiver, .{&context});
 
-    send_thread.wait();
-    recv_thread.wait();
+    send_thread.join();
+    recv_thread.join();
 }


@@ -413,32 +413,27 @@ test "Futex - Signal" {
             }
         }
 
-        const Thread = struct {
-            tx: *Self,
-            rx: *Self,
-
-            const start_value = 1;
-
-            fn run(self: Thread) void {
-                var iterations: u32 = start_value;
-                while (iterations < 10) : (iterations += 1) {
-                    self.rx.recv(iterations);
-                    self.tx.send(iterations);
-                }
-            }
-        };
+        const start_value = 1;
+
+        fn runThread(rx: *Self, tx: *Self) void {
+            var iterations: u32 = start_value;
+            while (iterations < 10) : (iterations += 1) {
+                rx.recv(iterations);
+                tx.send(iterations);
+            }
+        }
 
         fn run() !void {
             var ping = Self{};
             var pong = Self{};
 
-            const t1 = try std.Thread.spawn(Thread.run, .{ .rx = &ping, .tx = &pong });
-            defer t1.wait();
+            const t1 = try std.Thread.spawn(.{}, runThread, .{ &ping, &pong });
+            defer t1.join();
 
-            const t2 = try std.Thread.spawn(Thread.run, .{ .rx = &pong, .tx = &ping });
-            defer t2.wait();
+            const t2 = try std.Thread.spawn(.{}, runThread, .{ &pong, &ping });
+            defer t2.join();
 
-            ping.send(Thread.start_value);
+            ping.send(start_value);
         }
     }).run();
 }
@@ -507,7 +502,7 @@ test "Futex - Chain" {
     try (struct {
         completed: Signal = .{},
         threads: [10]struct {
-            thread: *std.Thread,
+            thread: std.Thread,
             signal: Signal,
         } = undefined,
@@ -531,39 +526,32 @@ test "Futex - Chain" {
         };
 
         const Self = @This();
 
-        const Chain = struct {
-            self: *Self,
-            index: usize,
-
-            fn run(chain: Chain) void {
-                const this_signal = &chain.self.threads[chain.index].signal;
-
-                var next_signal = &chain.self.completed;
-                if (chain.index + 1 < chain.self.threads.len) {
-                    next_signal = &chain.self.threads[chain.index + 1].signal;
-                }
-
-                this_signal.wait();
-                next_signal.notify();
-            }
-        };
+        fn runThread(self: *Self, index: usize) void {
+            const this_signal = &self.threads[index].signal;
+
+            var next_signal = &self.completed;
+            if (index + 1 < self.threads.len) {
+                next_signal = &self.threads[index + 1].signal;
+            }
+
+            this_signal.wait();
+            next_signal.notify();
+        }
 
         fn run() !void {
            var self = Self{};
 
            for (self.threads) |*entry, index| {
                entry.signal = .{};
-                entry.thread = try std.Thread.spawn(Chain.run, .{
-                    .self = &self,
-                    .index = index,
-                });
+                entry.thread = try std.Thread.spawn(.{}, runThread, .{ &self, index });
            }
 
            self.threads[0].signal.notify();
            self.completed.wait();
 
            for (self.threads) |entry| {
-                entry.thread.wait();
+                entry.thread.join();
            }
        }
    }).run();
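The hunks above also switch from heap-allocated `*std.Thread` handles to plain `std.Thread` values. A minimal sketch of that pattern under the new API (illustrative; `work` and the test are not from the commit, and the `for (…) |*t, i|` capture is this era's Zig syntax):

```zig
const std = @import("std");

fn work(slot: *u32) void {
    slot.* += 1;
}

test "thread handles stored by value" {
    var results = [_]u32{0} ** 4;
    var threads: [4]std.Thread = undefined; // values, not *std.Thread
    for (threads) |*t, i| {
        t.* = try std.Thread.spawn(.{}, work, .{&results[i]});
    }
    for (threads) |t| t.join();
    for (results) |r| try std.testing.expect(r == 1);
}
```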


@@ -297,12 +297,12 @@ test "basic usage" {
         try testing.expect(context.data == TestContext.incr_count);
     } else {
         const thread_count = 10;
-        var threads: [thread_count]*std.Thread = undefined;
+        var threads: [thread_count]std.Thread = undefined;
         for (threads) |*t| {
-            t.* = try std.Thread.spawn(worker, &context);
+            t.* = try std.Thread.spawn(.{}, worker, .{&context});
         }
         for (threads) |t|
-            t.wait();
+            t.join();
 
         try testing.expect(context.data == thread_count * TestContext.incr_count);
     }


@@ -281,8 +281,8 @@ test "basic usage" {
     var context: Context = undefined;
     try context.init();
     defer context.deinit();
-    const receiver = try std.Thread.spawn(Context.receiver, &context);
-    defer receiver.wait();
+    const receiver = try std.Thread.spawn(.{}, Context.receiver, .{&context});
+    defer receiver.join();
     try context.sender();
 
     if (false) {
@@ -290,8 +290,8 @@ test "basic usage" {
         // https://github.com/ziglang/zig/issues/7009
         var timed = Context.init();
         defer timed.deinit();
-        const sleeper = try std.Thread.spawn(Context.sleeper, &timed);
-        defer sleeper.wait();
+        const sleeper = try std.Thread.spawn(.{}, Context.sleeper, .{&timed});
+        defer sleeper.join();
         try timed.timedWaiter();
     }
 }


@@ -384,8 +384,8 @@ test "basic usage" {
     };
 
     var context = Context{};
-    const receiver = try std.Thread.spawn(Context.receiver, &context);
-    defer receiver.wait();
+    const receiver = try std.Thread.spawn(.{}, Context.receiver, .{&context});
+    defer receiver.join();
     try context.sender();
 
     if (false) {
@@ -393,8 +393,8 @@ test "basic usage" {
         // https://github.com/ziglang/zig/issues/7009
         var timed = Context.init();
         defer timed.deinit();
-        const sleeper = try std.Thread.spawn(Context.sleeper, &timed);
-        defer sleeper.wait();
+        const sleeper = try std.Thread.spawn(.{}, Context.sleeper, .{&timed});
+        defer sleeper.join();
         try timed.timedWaiter();
     }
 }


@@ -214,20 +214,20 @@ test "std.atomic.Queue" {
     } else {
         try expect(context.queue.isEmpty());
 
-        var putters: [put_thread_count]*std.Thread = undefined;
+        var putters: [put_thread_count]std.Thread = undefined;
         for (putters) |*t| {
-            t.* = try std.Thread.spawn(startPuts, &context);
+            t.* = try std.Thread.spawn(.{}, startPuts, .{&context});
         }
-        var getters: [put_thread_count]*std.Thread = undefined;
+        var getters: [put_thread_count]std.Thread = undefined;
         for (getters) |*t| {
-            t.* = try std.Thread.spawn(startGets, &context);
+            t.* = try std.Thread.spawn(.{}, startGets, .{&context});
         }
 
         for (putters) |t|
-            t.wait();
+            t.join();
         @atomicStore(bool, &context.puts_done, true, .SeqCst);
         for (getters) |t|
-            t.wait();
+            t.join();
 
         try expect(context.queue.isEmpty());
     }


@@ -121,20 +121,20 @@ test "std.atomic.stack" {
             }
         }
     } else {
-        var putters: [put_thread_count]*std.Thread = undefined;
+        var putters: [put_thread_count]std.Thread = undefined;
         for (putters) |*t| {
-            t.* = try std.Thread.spawn(startPuts, &context);
+            t.* = try std.Thread.spawn(.{}, startPuts, .{&context});
         }
-        var getters: [put_thread_count]*std.Thread = undefined;
+        var getters: [put_thread_count]std.Thread = undefined;
         for (getters) |*t| {
-            t.* = try std.Thread.spawn(startGets, &context);
+            t.* = try std.Thread.spawn(.{}, startGets, .{&context});
         }
 
         for (putters) |t|
-            t.wait();
+            t.join();
         @atomicStore(bool, &context.puts_done, true, .SeqCst);
         for (getters) |t|
-            t.wait();
+            t.join();
     }
 
     if (context.put_sum != context.get_sum) {


@@ -273,8 +273,8 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
     if (builtin.single_threaded) {
         stderr.print("panic: ", .{}) catch os.abort();
     } else {
-        const current_thread_id = std.Thread.getCurrentThreadId();
-        stderr.print("thread {d} panic: ", .{current_thread_id}) catch os.abort();
+        const current_thread_id = std.Thread.getCurrentId();
+        stderr.print("thread {} panic: ", .{current_thread_id}) catch os.abort();
     }
     stderr.print(format ++ "\n", args) catch os.abort();
     if (trace) |t| {
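For reference, `getCurrentThreadId` is renamed `getCurrentId`, and the `{d}` format specifier is dropped because the id type can differ by platform. A tiny illustrative sketch (not from the commit):

```zig
const std = @import("std");

test "current thread id" {
    const id = std.Thread.getCurrentId();
    // {} rather than {d}: the id type is platform-dependent.
    std.debug.print("running on thread {}\n", .{id});
}
```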


@@ -21,12 +21,12 @@ pub const Loop = struct {
     os_data: OsData,
     final_resume_node: ResumeNode,
     pending_event_count: usize,
-    extra_threads: []*Thread,
+    extra_threads: []Thread,
 
     /// TODO change this to a pool of configurable number of threads
     /// and rename it to be not file-system-specific. it will become
     /// a thread pool for turning non-CPU-bound blocking things into
     /// async things. A fallback for any missing OS-specific API.
-    fs_thread: *Thread,
+    fs_thread: Thread,
     fs_queue: std.atomic.Queue(Request),
     fs_end_request: Request.Node,
     fs_thread_wakeup: std.Thread.ResetEvent,
@@ -189,11 +189,11 @@ pub const Loop = struct {
         errdefer self.deinitOsData();
 
         if (!builtin.single_threaded) {
-            self.fs_thread = try Thread.spawn(posixFsRun, self);
+            self.fs_thread = try Thread.spawn(.{}, posixFsRun, .{self});
         }
         errdefer if (!builtin.single_threaded) {
             self.posixFsRequest(&self.fs_end_request);
-            self.fs_thread.wait();
+            self.fs_thread.join();
         };
 
         if (!std.builtin.single_threaded)
@@ -264,11 +264,11 @@ pub const Loop = struct {
                 assert(amt == wakeup_bytes.len);
                 while (extra_thread_index != 0) {
                     extra_thread_index -= 1;
-                    self.extra_threads[extra_thread_index].wait();
+                    self.extra_threads[extra_thread_index].join();
                 }
             }
             while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
-                self.extra_threads[extra_thread_index] = try Thread.spawn(workerRun, self);
+                self.extra_threads[extra_thread_index] = try Thread.spawn(.{}, workerRun, .{self});
             }
         },
         .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
@@ -329,11 +329,11 @@ pub const Loop = struct {
                 _ = os.kevent(self.os_data.kqfd, final_kev_arr, empty_kevs, null) catch unreachable;
                 while (extra_thread_index != 0) {
                     extra_thread_index -= 1;
-                    self.extra_threads[extra_thread_index].wait();
+                    self.extra_threads[extra_thread_index].join();
                 }
             }
             while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
-                self.extra_threads[extra_thread_index] = try Thread.spawn(workerRun, self);
+                self.extra_threads[extra_thread_index] = try Thread.spawn(.{}, workerRun, .{self});
             }
         },
         .windows => {
@@ -378,11 +378,11 @@ pub const Loop = struct {
                 }
                 while (extra_thread_index != 0) {
                     extra_thread_index -= 1;
-                    self.extra_threads[extra_thread_index].wait();
+                    self.extra_threads[extra_thread_index].join();
                 }
             }
             while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
-                self.extra_threads[extra_thread_index] = try Thread.spawn(workerRun, self);
+                self.extra_threads[extra_thread_index] = try Thread.spawn(.{}, workerRun, .{self});
             }
         },
         else => {},
@@ -651,18 +651,18 @@ pub const Loop = struct {
                 .netbsd,
                 .dragonfly,
                 .openbsd,
-                => self.fs_thread.wait(),
+                => self.fs_thread.join(),
                 else => {},
             }
         }
 
         for (self.extra_threads) |extra_thread| {
-            extra_thread.wait();
+            extra_thread.join();
        }
 
        @atomicStore(bool, &self.delay_queue.is_running, false, .SeqCst);
        self.delay_queue.event.set();
-        self.delay_queue.thread.wait();
+        self.delay_queue.thread.join();
    }
 
    /// Runs the provided function asynchronously. The function's frame is allocated
@@ -787,7 +787,7 @@ pub const Loop = struct {
     const DelayQueue = struct {
         timer: std.time.Timer,
         waiters: Waiters,
-        thread: *std.Thread,
+        thread: std.Thread,
         event: std.Thread.AutoResetEvent,
         is_running: bool,
 
@@ -802,7 +802,7 @@ pub const Loop = struct {
             .event = std.Thread.AutoResetEvent{},
             .is_running = true,
             // Must be last so that it can read the other state, such as `is_running`.
-            .thread = try std.Thread.spawn(DelayQueue.run, self),
+            .thread = try std.Thread.spawn(.{}, DelayQueue.run, .{self}),
         };
     }


@@ -862,11 +862,10 @@ test "open file with exclusive lock twice, make sure it waits" {
     errdefer file.close();
 
     const S = struct {
-        const C = struct { dir: *fs.Dir, evt: *std.Thread.ResetEvent };
-
-        fn checkFn(ctx: C) !void {
-            const file1 = try ctx.dir.createFile(filename, .{ .lock = .Exclusive });
+        fn checkFn(dir: *fs.Dir, evt: *std.Thread.ResetEvent) !void {
+            const file1 = try dir.createFile(filename, .{ .lock = .Exclusive });
             defer file1.close();
-            ctx.evt.set();
+            evt.set();
         }
     };
 
@@ -874,8 +873,8 @@ test "open file with exclusive lock twice, make sure it waits" {
     try evt.init();
     defer evt.deinit();
 
-    const t = try std.Thread.spawn(S.checkFn, S.C{ .dir = &tmp.dir, .evt = &evt });
-    defer t.wait();
+    const t = try std.Thread.spawn(.{}, S.checkFn, .{ &tmp.dir, &evt });
+    defer t.join();
 
     const SLEEP_TIMEOUT_NS = 10 * std.time.ns_per_ms;
 
     // Make sure we've slept enough.


@@ -161,8 +161,8 @@ test "listen on a port, send bytes, receive bytes" {
         }
     };
 
-    const t = try std.Thread.spawn(S.clientFn, server.listen_address);
-    defer t.wait();
+    const t = try std.Thread.spawn(.{}, S.clientFn, .{server.listen_address});
+    defer t.join();
 
     var client = try server.accept();
     defer client.stream.close();
@@ -277,7 +277,7 @@ test "listen on a unix socket, send bytes, receive bytes" {
     try server.listen(socket_addr);
 
     const S = struct {
-        fn clientFn(_: void) !void {
+        fn clientFn() !void {
             const socket = try net.connectUnixSocket(socket_path);
             defer socket.close();
 
@@ -285,8 +285,8 @@ test "listen on a unix socket, send bytes, receive bytes" {
         }
     };
 
-    const t = try std.Thread.spawn(S.clientFn, {});
-    defer t.wait();
+    const t = try std.Thread.spawn(.{}, S.clientFn, .{});
+    defer t.join();
 
     var client = try server.accept();
     defer client.stream.close();


@@ -55,16 +55,16 @@ test "Once executes its function just once" {
         global_once.call();
         global_once.call();
     } else {
-        var threads: [10]*std.Thread = undefined;
-        defer for (threads) |handle| handle.wait();
+        var threads: [10]std.Thread = undefined;
+        defer for (threads) |handle| handle.join();
 
         for (threads) |*handle| {
-            handle.* = try std.Thread.spawn(struct {
+            handle.* = try std.Thread.spawn(.{}, struct {
                 fn thread_fn(x: u8) void {
                     _ = x;
                     global_once.call();
                 }
-            }.thread_fn, 0);
+            }.thread_fn, .{0});
         }
     }


@@ -320,9 +320,9 @@ test "std.Thread.getCurrentId" {
     if (builtin.single_threaded) return error.SkipZigTest;
 
     var thread_current_id: Thread.Id = undefined;
-    const thread = try Thread.spawn(testThreadIdFn, &thread_current_id);
-    const thread_id = thread.handle();
-    thread.wait();
+    const thread = try Thread.spawn(.{}, testThreadIdFn, .{&thread_current_id});
+    const thread_id = thread.getHandle();
+    thread.join();
     if (Thread.use_pthreads) {
         try expect(thread_current_id == thread_id);
     } else if (native_os == .windows) {
@@ -339,21 +339,20 @@ test "spawn threads" {
     var shared_ctx: i32 = 1;
 
-    const thread1 = try Thread.spawn(start1, {});
-    const thread2 = try Thread.spawn(start2, &shared_ctx);
-    const thread3 = try Thread.spawn(start2, &shared_ctx);
-    const thread4 = try Thread.spawn(start2, &shared_ctx);
+    const thread1 = try Thread.spawn(.{}, start1, .{});
+    const thread2 = try Thread.spawn(.{}, start2, .{&shared_ctx});
+    const thread3 = try Thread.spawn(.{}, start2, .{&shared_ctx});
+    const thread4 = try Thread.spawn(.{}, start2, .{&shared_ctx});
 
-    thread1.wait();
-    thread2.wait();
-    thread3.wait();
-    thread4.wait();
+    thread1.join();
+    thread2.join();
+    thread3.join();
+    thread4.join();
 
     try expect(shared_ctx == 4);
 }
 
-fn start1(ctx: void) u8 {
-    _ = ctx;
+fn start1() u8 {
     return 0;
 }
@@ -371,16 +370,15 @@ test "cpu count" {
 test "thread local storage" {
     if (builtin.single_threaded) return error.SkipZigTest;
-    const thread1 = try Thread.spawn(testTls, {});
-    const thread2 = try Thread.spawn(testTls, {});
-    try testTls({});
-    thread1.wait();
-    thread2.wait();
+    const thread1 = try Thread.spawn(.{}, testTls, .{});
+    const thread2 = try Thread.spawn(.{}, testTls, .{});
+    try testTls();
+    thread1.join();
+    thread2.join();
 }
 
 threadlocal var x: i32 = 1234;
 
-fn testTls(context: void) !void {
-    _ = context;
+fn testTls() !void {
     if (x != 1234) return error.TlsBadStartValue;
     x += 1;
     if (x != 1235) return error.TlsBadEndValue;


@@ -69,6 +69,13 @@ pub const Target = struct {
             };
         }
 
+        pub fn isBSD(tag: Tag) bool {
+            return tag.isDarwin() or switch (tag) {
+                .kfreebsd, .freebsd, .openbsd, .netbsd, .dragonfly => true,
+                else => false,
+            };
+        }
+
         pub fn dynamicLibSuffix(tag: Tag) [:0]const u8 {
             if (tag.isDarwin()) {
                 return ".dylib";
@@ -787,6 +794,13 @@ pub const Target = struct {
             };
         }
 
+        pub fn isAARCH64(arch: Arch) bool {
+            return switch (arch) {
+                .aarch64, .aarch64_be, .aarch64_32 => true,
+                else => false,
+            };
+        }
+
         pub fn isThumb(arch: Arch) bool {
             return switch (arch) {
                 .thumb, .thumbeb => true,
@@ -1365,10 +1379,7 @@ pub const Target = struct {
     }
 
     pub fn isAndroid(self: Target) bool {
-        return switch (self.abi) {
-            .android => true,
-            else => false,
-        };
+        return self.abi == .android;
     }
 
     pub fn isWasm(self: Target) bool {
@@ -1379,6 +1390,10 @@ pub const Target = struct {
         return self.os.tag.isDarwin();
     }
 
+    pub fn isBSD(self: Target) bool {
+        return self.os.tag.isBSD();
+    }
+
     pub fn isGnuLibC_os_tag_abi(os_tag: Os.Tag, abi: Abi) bool {
         return os_tag == .linux and abi.isGnu();
     }
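Aside from the thread changes, this file gains `Os.Tag.isBSD`, `Arch.isAARCH64`, and a `Target.isBSD` wrapper. A hypothetical usage sketch (the `describe` function is illustrative, not from the commit):

```zig
const std = @import("std");

// Illustrative only: exercises the query helpers added above.
fn describe(target: std.Target) []const u8 {
    if (target.isBSD()) return "Darwin or BSD"; // os.tag.isBSD() via the wrapper
    if (target.cpu.arch.isAARCH64()) return "aarch64 family";
    if (target.isAndroid()) return "Android ABI";
    return "other";
}
```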


@@ -74,13 +74,13 @@ pub fn init(self: *ThreadPool, allocator: *std.mem.Allocator) !void {
         try worker.idle_node.data.init();
         errdefer worker.idle_node.data.deinit();
 
-        worker.thread = try std.Thread.spawn(Worker.run, worker);
+        worker.thread = try std.Thread.spawn(.{}, Worker.run, .{worker});
     }
 }
 
 fn destroyWorkers(self: *ThreadPool, spawned: usize) void {
     for (self.workers[0..spawned]) |*worker| {
-        worker.thread.wait();
+        worker.thread.join();
         worker.idle_node.data.deinit();
     }
 }


@@ -816,18 +816,20 @@ pub fn main() anyerror!void {
             });
         }
     } else {
-        var threads = try arena.alloc(*std.Thread, llvm_targets.len);
+        var threads = try arena.alloc(std.Thread, llvm_targets.len);
         for (llvm_targets) |llvm_target, i| {
-            threads[i] = try std.Thread.spawn(processOneTarget, .{
-                .llvm_tblgen_exe = llvm_tblgen_exe,
-                .llvm_src_root = llvm_src_root,
-                .zig_src_dir = zig_src_dir,
-                .root_progress = root_progress,
-                .llvm_target = llvm_target,
+            threads[i] = try std.Thread.spawn(.{}, processOneTarget, .{
+                Job{
+                    .llvm_tblgen_exe = llvm_tblgen_exe,
+                    .llvm_src_root = llvm_src_root,
+                    .zig_src_dir = zig_src_dir,
+                    .root_progress = root_progress,
+                    .llvm_target = llvm_target,
+                },
             });
         }
         for (threads) |thread| {
-            thread.wait();
+            thread.join();
        }
    }
 }
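One detail worth noting in this last hunk: the argument tuple is captured by value at spawn time, so the `Job` literal safely outlives the loop iteration that created it. A minimal sketch of the same pattern (the `Job` stand-in and test are illustrative, not from the commit):

```zig
const std = @import("std");

const Job = struct { id: usize }; // stand-in for the tool's Job struct

fn processOne(job: Job) void {
    std.debug.print("job {}\n", .{job.id});
}

test "struct passed through the args tuple" {
    const t = try std.Thread.spawn(.{}, processOne, .{Job{ .id = 1 }});
    t.join(); // the tuple (and the Job inside it) was copied at spawn time
}
```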