std.Io.poll: update to new I/O API
commit b8955a2e0a (parent bc8e1a74c5)
9 changed files with 391 additions and 384 deletions
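The same change repeats across every client of the poller: the FIFO-based std.io.Poller surface (poller.fifo(...), readableLength(), reader().readStruct(...), readableSliceOfLen(), discard()) is replaced by std.Io.Reader-backed streams (poller.reader(...), buffered(), takeStruct(), take()), which consume bytes as they are taken. A minimal sketch of the new message-pump shape that recurs in the hunks below, with poller setup and message dispatch assumed:

    const std = @import("std");

    // Sketch only: `poller` stands for a concrete std.Io.Poller(StreamEnum)
    // value with a `stdout` stream; error handling and dispatch are elided.
    fn pumpMessages(poller: anytype) !void {
        const Header = std.zig.Server.Message.Header;
        const stdout = poller.reader(.stdout);
        poll: while (true) {
            // Wait until a full header is buffered, then consume it in place.
            while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
            const header = stdout.takeStruct(Header, .little) catch unreachable;
            // Same for the body; take() consumes, so no trailing discard() is needed.
            while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
            const body = stdout.take(header.bytes_len) catch unreachable;
            _ = body; // switch (header.tag) { ... }
        }
    }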
@@ -273,21 +273,17 @@ fn buildWasmBinary(
     try sendMessage(child.stdin.?, .update);
     try sendMessage(child.stdin.?, .exit);
 
-    const Header = std.zig.Server.Message.Header;
     var result: ?Path = null;
     var result_error_bundle = std.zig.ErrorBundle.empty;
 
-    const stdout = poller.fifo(.stdout);
+    const stdout = poller.reader(.stdout);
 
     poll: while (true) {
-        while (stdout.readableLength() < @sizeOf(Header)) {
-            if (!(try poller.poll())) break :poll;
-        }
-        const header = stdout.reader().readStruct(Header) catch unreachable;
-        while (stdout.readableLength() < header.bytes_len) {
-            if (!(try poller.poll())) break :poll;
-        }
-        const body = stdout.readableSliceOfLen(header.bytes_len);
+        const Header = std.zig.Server.Message.Header;
+        while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
+        const header = stdout.takeStruct(Header, .little) catch unreachable;
+        while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+        const body = stdout.take(header.bytes_len) catch unreachable;
 
         switch (header.tag) {
             .zig_version => {
@@ -325,15 +321,11 @@ fn buildWasmBinary(
             },
             else => {}, // ignore other messages
         }
-
-        stdout.discard(body.len);
     }
 
-    const stderr = poller.fifo(.stderr);
-    if (stderr.readableLength() > 0) {
-        const owned_stderr = try stderr.toOwnedSlice();
-        defer gpa.free(owned_stderr);
-        std.debug.print("{s}", .{owned_stderr});
+    const stderr_contents = try poller.toOwnedSlice(.stderr);
+    if (stderr_contents.len > 0) {
+        std.debug.print("{s}", .{stderr_contents});
     }
 
     // Send EOF to stdin.
@@ -286,7 +286,7 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
 }
 
 /// For debugging purposes, prints identifying information about this Step.
-pub fn dump(step: *Step, w: *std.io.Writer, tty_config: std.io.tty.Config) void {
+pub fn dump(step: *Step, w: *std.Io.Writer, tty_config: std.Io.tty.Config) void {
     const debug_info = std.debug.getSelfDebugInfo() catch |err| {
         w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
             @errorName(err),
@@ -359,7 +359,7 @@ pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutO
 
 pub const ZigProcess = struct {
     child: std.process.Child,
-    poller: std.io.Poller(StreamEnum),
+    poller: std.Io.Poller(StreamEnum),
     progress_ipc_fd: if (std.Progress.have_ipc) ?std.posix.fd_t else void,
 
     pub const StreamEnum = enum { stdout, stderr };
 
@@ -428,7 +428,7 @@ pub fn evalZigProcess(
     const zp = try gpa.create(ZigProcess);
     zp.* = .{
         .child = child,
-        .poller = std.io.poll(gpa, ZigProcess.StreamEnum, .{
+        .poller = std.Io.poll(gpa, ZigProcess.StreamEnum, .{
             .stdout = child.stdout.?,
             .stderr = child.stderr.?,
         }),
 
@@ -508,20 +508,16 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
     try sendMessage(zp.child.stdin.?, .update);
     if (!watch) try sendMessage(zp.child.stdin.?, .exit);
 
-    const Header = std.zig.Server.Message.Header;
     var result: ?Path = null;
 
-    const stdout = zp.poller.fifo(.stdout);
+    const stdout = zp.poller.reader(.stdout);
 
     poll: while (true) {
-        while (stdout.readableLength() < @sizeOf(Header)) {
-            if (!(try zp.poller.poll())) break :poll;
-        }
-        const header = stdout.reader().readStruct(Header) catch unreachable;
-        while (stdout.readableLength() < header.bytes_len) {
-            if (!(try zp.poller.poll())) break :poll;
-        }
-        const body = stdout.readableSliceOfLen(header.bytes_len);
+        const Header = std.zig.Server.Message.Header;
+        while (stdout.buffered().len < @sizeOf(Header)) if (!try zp.poller.poll()) break :poll;
+        const header = stdout.takeStruct(Header, .little) catch unreachable;
+        while (stdout.buffered().len < header.bytes_len) if (!try zp.poller.poll()) break :poll;
+        const body = stdout.take(header.bytes_len) catch unreachable;
 
         switch (header.tag) {
             .zig_version => {
@@ -547,11 +543,8 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
                     .string_bytes = try arena.dupe(u8, string_bytes),
                     .extra = extra_array,
                 };
-                if (watch) {
-                    // This message indicates the end of the update.
-                    stdout.discard(body.len);
-                    break;
-                }
+                // This message indicates the end of the update.
+                if (watch) break :poll;
             },
             .emit_digest => {
                 const EmitDigest = std.zig.Server.Message.EmitDigest;
@@ -611,15 +604,13 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
             },
             else => {}, // ignore other messages
         }
-
-        stdout.discard(body.len);
     }
 
     s.result_duration_ns = timer.read();
 
-    const stderr = zp.poller.fifo(.stderr);
-    if (stderr.readableLength() > 0) {
-        try s.result_error_msgs.append(arena, try stderr.toOwnedSlice());
+    const stderr_contents = try zp.poller.toOwnedSlice(.stderr);
+    if (stderr_contents.len > 0) {
+        try s.result_error_msgs.append(arena, try arena.dupe(u8, stderr_contents));
     }
 
     return result;
 
@@ -736,7 +727,7 @@ pub fn allocPrintCmd2(
     argv: []const []const u8,
 ) Allocator.Error![]u8 {
     const shell = struct {
-        fn escape(writer: anytype, string: []const u8, is_argv0: bool) !void {
+        fn escape(writer: *std.Io.Writer, string: []const u8, is_argv0: bool) !void {
             for (string) |c| {
                 if (switch (c) {
                     else => true,
@@ -770,9 +761,9 @@ pub fn allocPrintCmd2(
         }
     };
 
-    var buf: std.ArrayListUnmanaged(u8) = .empty;
-    const writer = buf.writer(arena);
-    if (opt_cwd) |cwd| try writer.print("cd {s} && ", .{cwd});
+    var aw: std.Io.Writer.Allocating = .init(arena);
+    const writer = &aw.writer;
+    if (opt_cwd) |cwd| writer.print("cd {s} && ", .{cwd}) catch return error.OutOfMemory;
     if (opt_env) |env| {
         const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena);
         var it = env.iterator();
@@ -782,17 +773,17 @@ pub fn allocPrintCmd2(
             if (process_env_map.get(key)) |process_value| {
                 if (std.mem.eql(u8, value, process_value)) continue;
             }
-            try writer.print("{s}=", .{key});
-            try shell.escape(writer, value, false);
-            try writer.writeByte(' ');
+            writer.print("{s}=", .{key}) catch return error.OutOfMemory;
+            shell.escape(writer, value, false) catch return error.OutOfMemory;
+            writer.writeByte(' ') catch return error.OutOfMemory;
         }
     }
-    try shell.escape(writer, argv[0], true);
+    shell.escape(writer, argv[0], true) catch return error.OutOfMemory;
     for (argv[1..]) |arg| {
-        try writer.writeByte(' ');
-        try shell.escape(writer, arg, false);
+        writer.writeByte(' ') catch return error.OutOfMemory;
+        shell.escape(writer, arg, false) catch return error.OutOfMemory;
     }
-    return buf.toOwnedSlice(arena);
+    return aw.toOwnedSlice();
 }
 
 /// Prefer `cacheHitAndWatch` unless you already added watch inputs
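A side effect visible in allocPrintCmd2 above: the new *std.Io.Writer interface reports error.WriteFailed rather than the allocator's error set, so each call site maps failures back to error.OutOfMemory explicitly. A hedged sketch of the Writer.Allocating pattern the diff adopts, assuming an arena allocator in scope:

    const std = @import("std");

    fn renderCmd(arena: std.mem.Allocator, cwd: []const u8) error{OutOfMemory}![]u8 {
        var aw: std.Io.Writer.Allocating = .init(arena);
        const writer = &aw.writer; // a generic *std.Io.Writer backed by growable memory
        // For an allocating writer the only underlying failure is OOM,
        // hence the explicit mapping at each write.
        writer.print("cd {s} && ", .{cwd}) catch return error.OutOfMemory;
        return aw.toOwnedSlice(); // caller owns the rendered bytes
    }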
@@ -73,9 +73,12 @@ skip_foreign_checks: bool,
 /// external executor (such as qemu) but not fail if the executor is unavailable.
 failing_to_execute_foreign_is_an_error: bool,
 
+/// Deprecated in favor of `stdio_limit`.
+max_stdio_size: usize,
+
 /// If stderr or stdout exceeds this amount, the child process is killed and
 /// the step fails.
-max_stdio_size: usize,
+stdio_limit: std.Io.Limit,
 
 captured_stdout: ?*Output,
 captured_stderr: ?*Output,
@@ -186,6 +189,7 @@ pub fn create(owner: *std.Build, name: []const u8) *Run {
         .skip_foreign_checks = false,
         .failing_to_execute_foreign_is_an_error = true,
         .max_stdio_size = 10 * 1024 * 1024,
+        .stdio_limit = .unlimited,
         .captured_stdout = null,
         .captured_stderr = null,
         .dep_output_file = null,
 
@@ -1011,7 +1015,7 @@ fn populateGeneratedPaths(
     }
 }
 
-fn formatTerm(term: ?std.process.Child.Term, w: *std.io.Writer) std.io.Writer.Error!void {
+fn formatTerm(term: ?std.process.Child.Term, w: *std.Io.Writer) std.Io.Writer.Error!void {
     if (term) |t| switch (t) {
         .Exited => |code| try w.print("exited with code {d}", .{code}),
         .Signal => |sig| try w.print("terminated with signal {d}", .{sig}),
 
@@ -1500,7 +1504,7 @@ fn evalZigTest(
     const gpa = run.step.owner.allocator;
     const arena = run.step.owner.allocator;
 
-    var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
+    var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
         .stdout = child.stdout.?,
         .stderr = child.stderr.?,
     });
 
@@ -1524,11 +1528,6 @@ fn evalZigTest(
         break :failed false;
     };
 
-    const Header = std.zig.Server.Message.Header;
-
-    const stdout = poller.fifo(.stdout);
-    const stderr = poller.fifo(.stderr);
-
     var fail_count: u32 = 0;
     var skip_count: u32 = 0;
     var leak_count: u32 = 0;
 
@@ -1541,16 +1540,14 @@ fn evalZigTest(
     var sub_prog_node: ?std.Progress.Node = null;
     defer if (sub_prog_node) |n| n.end();
 
+    const stdout = poller.reader(.stdout);
+    const stderr = poller.reader(.stderr);
     const any_write_failed = first_write_failed or poll: while (true) {
-        while (stdout.readableLength() < @sizeOf(Header)) {
-            if (!(try poller.poll())) break :poll false;
-        }
-        const header = stdout.reader().readStruct(Header) catch unreachable;
-        while (stdout.readableLength() < header.bytes_len) {
-            if (!(try poller.poll())) break :poll false;
-        }
-        const body = stdout.readableSliceOfLen(header.bytes_len);
+        const Header = std.zig.Server.Message.Header;
+        while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll false;
+        const header = stdout.takeStruct(Header, .little) catch unreachable;
+        while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll false;
+        const body = stdout.take(header.bytes_len) catch unreachable;
 
         switch (header.tag) {
             .zig_version => {
                 if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
 
@@ -1607,9 +1604,9 @@ fn evalZigTest(
 
                 if (tr_hdr.flags.fail or tr_hdr.flags.leak or tr_hdr.flags.log_err_count > 0) {
                     const name = std.mem.sliceTo(md.string_bytes[md.names[tr_hdr.index]..], 0);
-                    const orig_msg = stderr.readableSlice(0);
-                    defer stderr.discard(orig_msg.len);
-                    const msg = std.mem.trim(u8, orig_msg, "\n");
+                    const stderr_contents = stderr.buffered();
+                    stderr.toss(stderr_contents.len);
+                    const msg = std.mem.trim(u8, stderr_contents, "\n");
                     const label = if (tr_hdr.flags.fail)
                         "failed"
                     else if (tr_hdr.flags.leak)
 
@@ -1660,8 +1657,6 @@ fn evalZigTest(
             },
             else => {}, // ignore other messages
         }
-
-        stdout.discard(body.len);
     };
 
     if (any_write_failed) {
 
@@ -1670,9 +1665,9 @@ fn evalZigTest(
         while (try poller.poll()) {}
     }
 
-    if (stderr.readableLength() > 0) {
-        const msg = std.mem.trim(u8, try stderr.toOwnedSlice(), "\n");
-        if (msg.len > 0) run.step.result_stderr = msg;
+    const stderr_contents = std.mem.trim(u8, stderr.buffered(), "\n");
+    if (stderr_contents.len > 0) {
+        run.step.result_stderr = try arena.dupe(u8, stderr_contents);
     }
 
     // Send EOF to stdin.
@@ -1795,28 +1790,43 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
     var stdout_bytes: ?[]const u8 = null;
     var stderr_bytes: ?[]const u8 = null;
 
+    run.stdio_limit = run.stdio_limit.min(.limited(run.max_stdio_size));
     if (child.stdout) |stdout| {
         if (child.stderr) |stderr| {
-            var poller = std.io.poll(arena, enum { stdout, stderr }, .{
+            var poller = std.Io.poll(arena, enum { stdout, stderr }, .{
                 .stdout = stdout,
                 .stderr = stderr,
             });
             defer poller.deinit();
 
             while (try poller.poll()) {
-                if (poller.fifo(.stdout).count > run.max_stdio_size)
-                    return error.StdoutStreamTooLong;
-                if (poller.fifo(.stderr).count > run.max_stdio_size)
-                    return error.StderrStreamTooLong;
+                if (run.stdio_limit.toInt()) |limit| {
+                    if (poller.reader(.stdout).buffered().len > limit)
+                        return error.StdoutStreamTooLong;
+                    if (poller.reader(.stderr).buffered().len > limit)
+                        return error.StderrStreamTooLong;
+                }
             }
 
-            stdout_bytes = try poller.fifo(.stdout).toOwnedSlice();
-            stderr_bytes = try poller.fifo(.stderr).toOwnedSlice();
+            stdout_bytes = try poller.toOwnedSlice(.stdout);
+            stderr_bytes = try poller.toOwnedSlice(.stderr);
         } else {
-            stdout_bytes = try stdout.deprecatedReader().readAllAlloc(arena, run.max_stdio_size);
+            var small_buffer: [1]u8 = undefined;
+            var stdout_reader = stdout.readerStreaming(&small_buffer);
+            stdout_bytes = stdout_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
+                error.ReadFailed => return stdout_reader.err.?,
+                error.StreamTooLong => return error.StdoutStreamTooLong,
+            };
         }
     } else if (child.stderr) |stderr| {
-        stderr_bytes = try stderr.deprecatedReader().readAllAlloc(arena, run.max_stdio_size);
+        var small_buffer: [1]u8 = undefined;
+        var stderr_reader = stderr.readerStreaming(&small_buffer);
+        stderr_bytes = stderr_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
+            error.OutOfMemory => return error.OutOfMemory,
+            error.ReadFailed => return stderr_reader.err.?,
+            error.StreamTooLong => return error.StderrStreamTooLong,
+        };
     }
 
     if (stderr_bytes) |bytes| if (bytes.len > 0) {
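The Run step above folds the deprecated max_stdio_size into the new std.Io.Limit type: .unlimited by default, clamped with min(.limited(...)), and queried with toInt(). A small sketch of that combination as used in evalGeneric; the 10 MiB value mirrors the default above:

    const std = @import("std");

    test "combining stdio limits" {
        var limit: std.Io.Limit = .unlimited;
        // Clamp to the legacy byte cap, as evalGeneric does above.
        limit = limit.min(.limited(10 * 1024 * 1024));
        // toInt() yields null for .unlimited, otherwise the cap in bytes.
        if (limit.toInt()) |cap| {
            try std.testing.expect(cap == 10 * 1024 * 1024);
        }
    }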
lib/std/Io.zig (408 changed lines)
@@ -1,16 +1,11 @@
-const std = @import("std.zig");
 const builtin = @import("builtin");
-const root = @import("root");
-const c = std.c;
 const is_windows = builtin.os.tag == .windows;
 
+const std = @import("std.zig");
 const windows = std.os.windows;
 const posix = std.posix;
 const math = std.math;
 const assert = std.debug.assert;
-const fs = std.fs;
-const mem = std.mem;
-const meta = std.meta;
-const File = std.fs.File;
 const Allocator = std.mem.Allocator;
 const Alignment = std.mem.Alignment;
 
@@ -493,54 +488,51 @@ test null_writer {
 }
 
 pub fn poll(
-    allocator: Allocator,
+    gpa: Allocator,
     comptime StreamEnum: type,
     files: PollFiles(StreamEnum),
 ) Poller(StreamEnum) {
     const enum_fields = @typeInfo(StreamEnum).@"enum".fields;
-    var result: Poller(StreamEnum) = undefined;
-
-    if (is_windows) result.windows = .{
-        .first_read_done = false,
-        .overlapped = [1]windows.OVERLAPPED{
-            mem.zeroes(windows.OVERLAPPED),
-        } ** enum_fields.len,
-        .small_bufs = undefined,
-        .active = .{
-            .count = 0,
-            .handles_buf = undefined,
-            .stream_map = undefined,
-        },
-    };
+    var result: Poller(StreamEnum) = .{
+        .gpa = gpa,
+        .readers = @splat(.failing),
+        .poll_fds = undefined,
+        .windows = if (is_windows) .{
+            .first_read_done = false,
+            .overlapped = [1]windows.OVERLAPPED{
+                std.mem.zeroes(windows.OVERLAPPED),
+            } ** enum_fields.len,
+            .small_bufs = undefined,
+            .active = .{
+                .count = 0,
+                .handles_buf = undefined,
+                .stream_map = undefined,
+            },
+        } else {},
+    };
 
-    inline for (0..enum_fields.len) |i| {
-        result.fifos[i] = .{
-            .allocator = allocator,
-            .buf = &.{},
-            .head = 0,
-            .count = 0,
-        };
+    inline for (enum_fields, 0..) |field, i| {
         if (is_windows) {
-            result.windows.active.handles_buf[i] = @field(files, enum_fields[i].name).handle;
+            result.windows.active.handles_buf[i] = @field(files, field.name).handle;
         } else {
             result.poll_fds[i] = .{
-                .fd = @field(files, enum_fields[i].name).handle,
+                .fd = @field(files, field.name).handle,
                 .events = posix.POLL.IN,
                 .revents = undefined,
             };
         }
     }
 
     return result;
 }
 
-pub const PollFifo = std.fifo.LinearFifo(u8, .Dynamic);
-
 pub fn Poller(comptime StreamEnum: type) type {
     return struct {
         const enum_fields = @typeInfo(StreamEnum).@"enum".fields;
         const PollFd = if (is_windows) void else posix.pollfd;
 
-        fifos: [enum_fields.len]PollFifo,
+        gpa: Allocator,
+        readers: [enum_fields.len]Reader,
         poll_fds: [enum_fields.len]PollFd,
         windows: if (is_windows) struct {
            first_read_done: bool,
@@ -552,7 +544,7 @@ pub fn Poller(comptime StreamEnum: type) type {
            stream_map: [enum_fields.len]StreamEnum,
 
            pub fn removeAt(self: *@This(), index: u32) void {
-                std.debug.assert(index < self.count);
+                assert(index < self.count);
                for (index + 1..self.count) |i| {
                    self.handles_buf[i - 1] = self.handles_buf[i];
                    self.stream_map[i - 1] = self.stream_map[i];
@@ -565,13 +557,14 @@ pub fn Poller(comptime StreamEnum: type) type {
        const Self = @This();
 
        pub fn deinit(self: *Self) void {
+            const gpa = self.gpa;
            if (is_windows) {
                // cancel any pending IO to prevent clobbering OVERLAPPED value
                for (self.windows.active.handles_buf[0..self.windows.active.count]) |h| {
                    _ = windows.kernel32.CancelIo(h);
                }
            }
-            inline for (&self.fifos) |*q| q.deinit();
+            inline for (&self.readers) |*r| gpa.free(r.buffer);
            self.* = undefined;
        }
 
@@ -591,21 +584,40 @@ pub fn Poller(comptime StreamEnum: type) type {
            }
        }
 
-        pub inline fn fifo(self: *Self, comptime which: StreamEnum) *PollFifo {
-            return &self.fifos[@intFromEnum(which)];
+        pub fn reader(self: *Self, which: StreamEnum) *Reader {
+            return &self.readers[@intFromEnum(which)];
+        }
+
+        pub fn toOwnedSlice(self: *Self, which: StreamEnum) error{OutOfMemory}![]u8 {
+            const gpa = self.gpa;
+            const r = reader(self, which);
+            if (r.seek == 0) {
+                const new = try gpa.realloc(r.buffer, r.end);
+                r.buffer = &.{};
+                r.end = 0;
+                return new;
+            }
+            const new = try gpa.dupe(u8, r.buffered());
+            gpa.free(r.buffer);
+            r.buffer = &.{};
+            r.seek = 0;
+            r.end = 0;
+            return new;
        }
 
        fn pollWindows(self: *Self, nanoseconds: ?u64) !bool {
            const bump_amt = 512;
+            const gpa = self.gpa;
 
            if (!self.windows.first_read_done) {
                var already_read_data = false;
                for (0..enum_fields.len) |i| {
                    const handle = self.windows.active.handles_buf[i];
                    switch (try windowsAsyncReadToFifoAndQueueSmallRead(
+                        gpa,
                        handle,
                        &self.windows.overlapped[i],
-                        &self.fifos[i],
+                        &self.readers[i],
                        &self.windows.small_bufs[i],
                        bump_amt,
                    )) {
@@ -652,7 +664,7 @@ pub fn Poller(comptime StreamEnum: type) type {
            const handle = self.windows.active.handles_buf[active_idx];
 
            const overlapped = &self.windows.overlapped[stream_idx];
-            const stream_fifo = &self.fifos[stream_idx];
+            const stream_reader = &self.readers[stream_idx];
            const small_buf = &self.windows.small_bufs[stream_idx];
 
            const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
@@ -663,12 +675,16 @@ pub fn Poller(comptime StreamEnum: type) type {
                },
                .aborted => unreachable,
            };
-            try stream_fifo.write(small_buf[0..num_bytes_read]);
+            const buf = small_buf[0..num_bytes_read];
+            const dest = try writableSliceGreedyAlloc(stream_reader, gpa, buf.len);
+            @memcpy(dest[0..buf.len], buf);
+            advanceBufferEnd(stream_reader, buf.len);
 
            switch (try windowsAsyncReadToFifoAndQueueSmallRead(
+                gpa,
                handle,
                overlapped,
-                stream_fifo,
+                stream_reader,
                small_buf,
                bump_amt,
            )) {
@@ -683,6 +699,7 @@ pub fn Poller(comptime StreamEnum: type) type {
        }
 
        fn pollPosix(self: *Self, nanoseconds: ?u64) !bool {
+            const gpa = self.gpa;
            // We ask for ensureUnusedCapacity with this much extra space. This
            // has more of an effect on small reads because once the reads
            // start to get larger the amount of space an ArrayList will
@@ -702,18 +719,18 @@ pub fn Poller(comptime StreamEnum: type) type {
            }
 
            var keep_polling = false;
-            inline for (&self.poll_fds, &self.fifos) |*poll_fd, *q| {
+            for (&self.poll_fds, &self.readers) |*poll_fd, *r| {
                // Try reading whatever is available before checking the error
                // conditions.
                // It's still possible to read after a POLL.HUP is received,
                // always check if there's some data waiting to be read first.
                if (poll_fd.revents & posix.POLL.IN != 0) {
-                    const buf = try q.writableWithSize(bump_amt);
+                    const buf = try writableSliceGreedyAlloc(r, gpa, bump_amt);
                    const amt = posix.read(poll_fd.fd, buf) catch |err| switch (err) {
                        error.BrokenPipe => 0, // Handle the same as EOF.
                        else => |e| return e,
                    };
-                    q.update(amt);
+                    advanceBufferEnd(r, amt);
                    if (amt == 0) {
                        // Remove the fd when the EOF condition is met.
                        poll_fd.fd = -1;
@@ -729,146 +746,181 @@ pub fn Poller(comptime StreamEnum: type) type {
            }
            return keep_polling;
        }
-    };
-}
 
-/// The `ReadFile` docuementation states that `lpNumberOfBytesRead` does not have a meaningful
-/// result when using overlapped I/O, but also that it cannot be `null` on Windows 7. For
-/// compatibility, we point it to this dummy variables, which we never otherwise access.
-/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
-var win_dummy_bytes_read: u32 = undefined;
-
-/// Read as much data as possible from `handle` with `overlapped`, and write it to the FIFO. Before
-/// returning, queue a read into `small_buf` so that `WaitForMultipleObjects` returns when more data
-/// is available. `handle` must have no pending asynchronous operation.
-fn windowsAsyncReadToFifoAndQueueSmallRead(
-    handle: windows.HANDLE,
-    overlapped: *windows.OVERLAPPED,
-    fifo: *PollFifo,
-    small_buf: *[128]u8,
-    bump_amt: usize,
-) !enum { empty, populated, closed_populated, closed } {
-    var read_any_data = false;
-    while (true) {
-        const fifo_read_pending = while (true) {
-            const buf = try fifo.writableWithSize(bump_amt);
-            const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32);
-
-            if (0 == windows.kernel32.ReadFile(
-                handle,
-                buf.ptr,
-                buf_len,
-                &win_dummy_bytes_read,
-                overlapped,
-            )) switch (windows.GetLastError()) {
-                .IO_PENDING => break true,
-                .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
-                else => |err| return windows.unexpectedError(err),
-            };
-
-            const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
-                .success => |n| n,
-                .closed => return if (read_any_data) .closed_populated else .closed,
-                .aborted => unreachable,
-            };
-
-            read_any_data = true;
-            fifo.update(num_bytes_read);
-
-            if (num_bytes_read == buf_len) {
-                // We filled the buffer, so there's probably more data available.
-                continue;
-            } else {
-                // We didn't fill the buffer, so assume we're out of data.
-                // There is no pending read.
-                break false;
-            }
-        };
-
-        if (fifo_read_pending) cancel_read: {
-            // Cancel the pending read into the FIFO.
-            _ = windows.kernel32.CancelIo(handle);
-
-            // We have to wait for the handle to be signalled, i.e. for the cancellation to complete.
-            switch (windows.kernel32.WaitForSingleObject(handle, windows.INFINITE)) {
-                windows.WAIT_OBJECT_0 => {},
-                windows.WAIT_FAILED => return windows.unexpectedError(windows.GetLastError()),
-                else => unreachable,
-            }
-
-            // If it completed before we canceled, make sure to tell the FIFO!
-            const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, true)) {
-                .success => |n| n,
-                .closed => return if (read_any_data) .closed_populated else .closed,
-                .aborted => break :cancel_read,
-            };
-            read_any_data = true;
-            fifo.update(num_bytes_read);
-        }
-
-        // Try to queue the 1-byte read.
-        if (0 == windows.kernel32.ReadFile(
-            handle,
-            small_buf,
-            small_buf.len,
-            &win_dummy_bytes_read,
-            overlapped,
-        )) switch (windows.GetLastError()) {
-            .IO_PENDING => {
-                // 1-byte read pending as intended
-                return if (read_any_data) .populated else .empty;
-            },
-            .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
-            else => |err| return windows.unexpectedError(err),
-        };
-
-        // We got data back this time. Write it to the FIFO and run the main loop again.
-        const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
-            .success => |n| n,
-            .closed => return if (read_any_data) .closed_populated else .closed,
-            .aborted => unreachable,
-        };
-        try fifo.write(small_buf[0..num_bytes_read]);
-        read_any_data = true;
-    }
-}
-
-/// Simple wrapper around `GetOverlappedResult` to determine the result of a `ReadFile` operation.
-/// If `!allow_aborted`, then `aborted` is never returned (`OPERATION_ABORTED` is considered unexpected).
-///
-/// The `ReadFile` documentation states that the number of bytes read by an overlapped `ReadFile` must be determined using `GetOverlappedResult`, even if the
-/// operation immediately returns data:
-/// "Use NULL for [lpNumberOfBytesRead] if this is an asynchronous operation to avoid potentially
-/// erroneous results."
-/// "If `hFile` was opened with `FILE_FLAG_OVERLAPPED`, the following conditions are in effect: [...]
-/// The lpNumberOfBytesRead parameter should be set to NULL. Use the GetOverlappedResult function to
-/// get the actual number of bytes read."
-/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
-fn windowsGetReadResult(
-    handle: windows.HANDLE,
-    overlapped: *windows.OVERLAPPED,
-    allow_aborted: bool,
-) !union(enum) {
-    success: u32,
-    closed,
-    aborted,
-} {
-    var num_bytes_read: u32 = undefined;
-    if (0 == windows.kernel32.GetOverlappedResult(
-        handle,
-        overlapped,
-        &num_bytes_read,
-        0,
-    )) switch (windows.GetLastError()) {
-        .BROKEN_PIPE => return .closed,
-        .OPERATION_ABORTED => |err| if (allow_aborted) {
-            return .aborted;
-        } else {
-            return windows.unexpectedError(err);
-        },
-        else => |err| return windows.unexpectedError(err),
-    };
-    return .{ .success = num_bytes_read };
-}
+        /// Returns a slice into the unused capacity of `buffer` with at least
+        /// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary.
+        ///
+        /// After calling this function, typically the caller will follow up with a
+        /// call to `advanceBufferEnd` to report the actual number of bytes buffered.
+        fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
+            {
+                const unused = r.buffer[r.end..];
+                if (unused.len >= min_len) return unused;
+            }
+            if (r.seek > 0) r.rebase();
+            {
+                var list: std.ArrayListUnmanaged(u8) = .{
+                    .items = r.buffer[0..r.end],
+                    .capacity = r.buffer.len,
+                };
+                defer r.buffer = list.allocatedSlice();
+                try list.ensureUnusedCapacity(allocator, min_len);
+            }
+            const unused = r.buffer[r.end..];
+            assert(unused.len >= min_len);
+            return unused;
+        }
+
+        /// After writing directly into the unused capacity of `buffer`, this function
+        /// updates `end` so that users of `Reader` can receive the data.
+        fn advanceBufferEnd(r: *Reader, n: usize) void {
+            assert(n <= r.buffer.len - r.end);
+            r.end += n;
+        }
+
+        /// The `ReadFile` documentation states that `lpNumberOfBytesRead` does not have a meaningful
+        /// result when using overlapped I/O, but also that it cannot be `null` on Windows 7. For
+        /// compatibility, we point it to this dummy variable, which we never otherwise access.
+        /// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
+        var win_dummy_bytes_read: u32 = undefined;
+
+        /// Read as much data as possible from `handle` with `overlapped`, and write it to the FIFO. Before
+        /// returning, queue a read into `small_buf` so that `WaitForMultipleObjects` returns when more data
+        /// is available. `handle` must have no pending asynchronous operation.
+        fn windowsAsyncReadToFifoAndQueueSmallRead(
+            gpa: Allocator,
+            handle: windows.HANDLE,
+            overlapped: *windows.OVERLAPPED,
+            r: *Reader,
+            small_buf: *[128]u8,
+            bump_amt: usize,
+        ) !enum { empty, populated, closed_populated, closed } {
+            var read_any_data = false;
+            while (true) {
+                const fifo_read_pending = while (true) {
+                    const buf = try writableSliceGreedyAlloc(r, gpa, bump_amt);
+                    const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32);
+
+                    if (0 == windows.kernel32.ReadFile(
+                        handle,
+                        buf.ptr,
+                        buf_len,
+                        &win_dummy_bytes_read,
+                        overlapped,
+                    )) switch (windows.GetLastError()) {
+                        .IO_PENDING => break true,
+                        .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
+                        else => |err| return windows.unexpectedError(err),
+                    };
+
+                    const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
+                        .success => |n| n,
+                        .closed => return if (read_any_data) .closed_populated else .closed,
+                        .aborted => unreachable,
+                    };
+
+                    read_any_data = true;
+                    advanceBufferEnd(r, num_bytes_read);
+
+                    if (num_bytes_read == buf_len) {
+                        // We filled the buffer, so there's probably more data available.
+                        continue;
+                    } else {
+                        // We didn't fill the buffer, so assume we're out of data.
+                        // There is no pending read.
+                        break false;
+                    }
+                };
+
+                if (fifo_read_pending) cancel_read: {
+                    // Cancel the pending read into the FIFO.
+                    _ = windows.kernel32.CancelIo(handle);
+
+                    // We have to wait for the handle to be signalled, i.e. for the cancellation to complete.
+                    switch (windows.kernel32.WaitForSingleObject(handle, windows.INFINITE)) {
+                        windows.WAIT_OBJECT_0 => {},
+                        windows.WAIT_FAILED => return windows.unexpectedError(windows.GetLastError()),
+                        else => unreachable,
+                    }
+
+                    // If it completed before we canceled, make sure to tell the FIFO!
+                    const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, true)) {
+                        .success => |n| n,
+                        .closed => return if (read_any_data) .closed_populated else .closed,
+                        .aborted => break :cancel_read,
+                    };
+                    read_any_data = true;
+                    advanceBufferEnd(r, num_bytes_read);
+                }
+
+                // Try to queue the 1-byte read.
+                if (0 == windows.kernel32.ReadFile(
+                    handle,
+                    small_buf,
+                    small_buf.len,
+                    &win_dummy_bytes_read,
+                    overlapped,
+                )) switch (windows.GetLastError()) {
+                    .IO_PENDING => {
+                        // 1-byte read pending as intended
+                        return if (read_any_data) .populated else .empty;
+                    },
+                    .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
+                    else => |err| return windows.unexpectedError(err),
+                };
+
+                // We got data back this time. Write it to the FIFO and run the main loop again.
+                const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
+                    .success => |n| n,
+                    .closed => return if (read_any_data) .closed_populated else .closed,
+                    .aborted => unreachable,
+                };
+                const buf = small_buf[0..num_bytes_read];
+                const dest = try writableSliceGreedyAlloc(r, gpa, buf.len);
+                @memcpy(dest[0..buf.len], buf);
+                advanceBufferEnd(r, buf.len);
+                read_any_data = true;
+            }
+        }
+
+        /// Simple wrapper around `GetOverlappedResult` to determine the result of a `ReadFile` operation.
+        /// If `!allow_aborted`, then `aborted` is never returned (`OPERATION_ABORTED` is considered unexpected).
+        ///
+        /// The `ReadFile` documentation states that the number of bytes read by an overlapped `ReadFile` must be determined using `GetOverlappedResult`, even if the
+        /// operation immediately returns data:
+        /// "Use NULL for [lpNumberOfBytesRead] if this is an asynchronous operation to avoid potentially
+        /// erroneous results."
+        /// "If `hFile` was opened with `FILE_FLAG_OVERLAPPED`, the following conditions are in effect: [...]
+        /// The lpNumberOfBytesRead parameter should be set to NULL. Use the GetOverlappedResult function to
+        /// get the actual number of bytes read."
+        /// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
+        fn windowsGetReadResult(
+            handle: windows.HANDLE,
+            overlapped: *windows.OVERLAPPED,
+            allow_aborted: bool,
+        ) !union(enum) {
+            success: u32,
+            closed,
+            aborted,
+        } {
+            var num_bytes_read: u32 = undefined;
+            if (0 == windows.kernel32.GetOverlappedResult(
+                handle,
+                overlapped,
+                &num_bytes_read,
+                0,
+            )) switch (windows.GetLastError()) {
+                .BROKEN_PIPE => return .closed,
+                .OPERATION_ABORTED => |err| if (allow_aborted) {
+                    return .aborted;
+                } else {
+                    return windows.unexpectedError(err);
+                },
+                else => |err| return windows.unexpectedError(err),
+            };
+            return .{ .success = num_bytes_read };
+        }
+    };
+}
 
 /// Given an enum, returns a struct with fields of that enum, each field
@@ -879,10 +931,10 @@ pub fn PollFiles(comptime StreamEnum: type) type {
     for (&struct_fields, enum_fields) |*struct_field, enum_field| {
         struct_field.* = .{
             .name = enum_field.name,
-            .type = fs.File,
+            .type = std.fs.File,
             .default_value_ptr = null,
             .is_comptime = false,
-            .alignment = @alignOf(fs.File),
+            .alignment = @alignOf(std.fs.File),
         };
     }
     return @Type(.{ .@"struct" = .{
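Taken together, the new std.Io.Poller surface defined in lib/std/Io.zig is: poll() to pump all streams, reader(stream) for a *std.Io.Reader over the buffered data, and toOwnedSlice(stream) to steal a stream's buffer. A minimal usage sketch under the new API, with child-process setup assumed:

    const std = @import("std");

    fn collectStdout(gpa: std.mem.Allocator, child: *std.process.Child) ![]u8 {
        var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
            .stdout = child.stdout.?,
            .stderr = child.stderr.?,
        });
        defer poller.deinit();

        // Each poll() reads whatever is available into per-stream Reader buffers;
        // it returns false once every stream has reached EOF.
        while (try poller.poll()) {}

        // Steal the accumulated stdout buffer; the caller frees it with gpa.
        return poller.toOwnedSlice(.stdout);
    }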
@@ -1241,37 +1241,6 @@ pub fn fillAlloc(r: *Reader, allocator: Allocator, n: usize) FillAllocError!void
     return fill(r, n);
 }
 
-/// Returns a slice into the unused capacity of `buffer` with at least
-/// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary.
-///
-/// After calling this function, typically the caller will follow up with a
-/// call to `advanceBufferEnd` to report the actual number of bytes buffered.
-pub fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
-    {
-        const unused = r.buffer[r.end..];
-        if (unused.len >= min_len) return unused;
-    }
-    if (r.seek > 0) rebase(r);
-    {
-        var list: ArrayList(u8) = .{
-            .items = r.buffer[0..r.end],
-            .capacity = r.buffer.len,
-        };
-        defer r.buffer = list.allocatedSlice();
-        try list.ensureUnusedCapacity(allocator, min_len);
-    }
-    const unused = r.buffer[r.end..];
-    assert(unused.len >= min_len);
-    return unused;
-}
-
-/// After writing directly into the unused capacity of `buffer`, this function
-/// updates `end` so that users of `Reader` can receive the data.
-pub fn advanceBufferEnd(r: *Reader, n: usize) void {
-    assert(n <= r.buffer.len - r.end);
-    r.end += n;
-}
-
 fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
     const result_info = @typeInfo(Result).int;
     comptime assert(result_info.bits % 7 == 0);
@@ -14,6 +14,7 @@ const assert = std.debug.assert;
 const native_os = builtin.os.tag;
 const Allocator = std.mem.Allocator;
 const ChildProcess = @This();
+const ArrayList = std.ArrayListUnmanaged;
 
 pub const Id = switch (native_os) {
     .windows => windows.HANDLE,
 
@@ -348,19 +349,6 @@ pub const RunResult = struct {
     stderr: []u8,
 };
 
-fn writeFifoDataToArrayList(allocator: Allocator, list: *std.ArrayListUnmanaged(u8), fifo: *std.io.PollFifo) !void {
-    if (fifo.head != 0) fifo.realign();
-    if (list.capacity == 0) {
-        list.* = .{
-            .items = fifo.buf[0..fifo.count],
-            .capacity = fifo.buf.len,
-        };
-        fifo.* = std.io.PollFifo.init(fifo.allocator);
-    } else {
-        try list.appendSlice(allocator, fifo.buf[0..fifo.count]);
-    }
-}
-
 /// Collect the output from the process's stdout and stderr. Will return once all output
 /// has been collected. This does not mean that the process has ended. `wait` should still
 /// be called to wait for and clean up the process.
 
@@ -370,28 +358,48 @@ pub fn collectOutput(
     child: ChildProcess,
     /// Used for `stdout` and `stderr`.
     allocator: Allocator,
-    stdout: *std.ArrayListUnmanaged(u8),
-    stderr: *std.ArrayListUnmanaged(u8),
+    stdout: *ArrayList(u8),
+    stderr: *ArrayList(u8),
     max_output_bytes: usize,
 ) !void {
     assert(child.stdout_behavior == .Pipe);
     assert(child.stderr_behavior == .Pipe);
 
-    var poller = std.io.poll(allocator, enum { stdout, stderr }, .{
+    var poller = std.Io.poll(allocator, enum { stdout, stderr }, .{
         .stdout = child.stdout.?,
         .stderr = child.stderr.?,
     });
     defer poller.deinit();
 
-    while (try poller.poll()) {
-        if (poller.fifo(.stdout).count > max_output_bytes)
-            return error.StdoutStreamTooLong;
-        if (poller.fifo(.stderr).count > max_output_bytes)
-            return error.StderrStreamTooLong;
+    const stdout_r = poller.reader(.stdout);
+    stdout_r.buffer = stdout.allocatedSlice();
+    stdout_r.seek = 0;
+    stdout_r.end = stdout.items.len;
+
+    const stderr_r = poller.reader(.stderr);
+    stderr_r.buffer = stderr.allocatedSlice();
+    stderr_r.seek = 0;
+    stderr_r.end = stderr.items.len;
+
+    defer {
+        stdout.* = .{
+            .items = stdout_r.buffer[0..stdout_r.end],
+            .capacity = stdout_r.buffer.len,
+        };
+        stderr.* = .{
+            .items = stderr_r.buffer[0..stderr_r.end],
+            .capacity = stderr_r.buffer.len,
+        };
+        stdout_r.buffer = &.{};
+        stderr_r.buffer = &.{};
     }
 
-    try writeFifoDataToArrayList(allocator, stdout, poller.fifo(.stdout));
-    try writeFifoDataToArrayList(allocator, stderr, poller.fifo(.stderr));
+    while (try poller.poll()) {
+        if (stdout_r.bufferedLen() > max_output_bytes)
+            return error.StdoutStreamTooLong;
+        if (stderr_r.bufferedLen() > max_output_bytes)
+            return error.StderrStreamTooLong;
+    }
 }
 
 pub const RunError = posix.GetCwdError || posix.ReadError || SpawnError || posix.PollError || error{
 
@@ -421,10 +429,10 @@ pub fn run(args: struct {
     child.expand_arg0 = args.expand_arg0;
     child.progress_node = args.progress_node;
 
-    var stdout: std.ArrayListUnmanaged(u8) = .empty;
-    errdefer stdout.deinit(args.allocator);
-    var stderr: std.ArrayListUnmanaged(u8) = .empty;
-    errdefer stderr.deinit(args.allocator);
+    var stdout: ArrayList(u8) = .empty;
+    defer stdout.deinit(args.allocator);
+    var stderr: ArrayList(u8) = .empty;
+    defer stderr.deinit(args.allocator);
 
     try child.spawn();
     errdefer {
 
@@ -432,7 +440,7 @@ pub fn run(args: struct {
     }
     try child.collectOutput(args.allocator, &stdout, &stderr, args.max_output_bytes);
 
-    return RunResult{
+    return .{
         .stdout = try stdout.toOwnedSlice(args.allocator),
         .stderr = try stderr.toOwnedSlice(args.allocator),
         .term = try child.wait(),
 
@@ -878,12 +886,12 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
     var cmd_line_cache = WindowsCommandLineCache.init(self.allocator, self.argv);
     defer cmd_line_cache.deinit();
 
-    var app_buf: std.ArrayListUnmanaged(u16) = .empty;
+    var app_buf: ArrayList(u16) = .empty;
     defer app_buf.deinit(self.allocator);
 
     try app_buf.appendSlice(self.allocator, app_name_w);
 
-    var dir_buf: std.ArrayListUnmanaged(u16) = .empty;
+    var dir_buf: ArrayList(u16) = .empty;
     defer dir_buf.deinit(self.allocator);
 
     if (cwd_path_w.len > 0) {
 
@@ -1003,13 +1011,16 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
 }
 
 fn writeIntFd(fd: i32, value: ErrInt) !void {
-    const file: File = .{ .handle = fd };
-    file.deprecatedWriter().writeInt(u64, @intCast(value), .little) catch return error.SystemResources;
+    var buffer: [8]u8 = undefined;
+    var fw: std.fs.File.Writer = .initMode(.{ .handle = fd }, &buffer, .streaming);
+    fw.interface.writeInt(u64, value, .little) catch unreachable;
+    fw.interface.flush() catch return error.SystemResources;
 }
 
 fn readIntFd(fd: i32) !ErrInt {
-    const file: File = .{ .handle = fd };
-    return @intCast(file.deprecatedReader().readInt(u64, .little) catch return error.SystemResources);
+    var buffer: [8]u8 = undefined;
+    var fr: std.fs.File.Reader = .initMode(.{ .handle = fd }, &buffer, .streaming);
+    return @intCast(fr.interface.takeInt(u64, .little) catch return error.SystemResources);
 }
 
 const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);
 
@@ -1020,8 +1031,8 @@ const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);
 /// Note: If the dir is the cwd, dir_buf should be empty (len = 0).
 fn windowsCreateProcessPathExt(
     allocator: mem.Allocator,
-    dir_buf: *std.ArrayListUnmanaged(u16),
-    app_buf: *std.ArrayListUnmanaged(u16),
+    dir_buf: *ArrayList(u16),
+    app_buf: *ArrayList(u16),
     pathext: [:0]const u16,
     cmd_line_cache: *WindowsCommandLineCache,
     envp_ptr: ?[*]u16,
 
@@ -1504,7 +1515,7 @@ const WindowsCommandLineCache = struct {
 /// Returns the absolute path of `cmd.exe` within the Windows system directory.
 /// The caller owns the returned slice.
 fn windowsCmdExePath(allocator: mem.Allocator) error{ OutOfMemory, Unexpected }![:0]u16 {
-    var buf = try std.ArrayListUnmanaged(u16).initCapacity(allocator, 128);
+    var buf = try ArrayList(u16).initCapacity(allocator, 128);
     errdefer buf.deinit(allocator);
     while (true) {
         const unused_slice = buf.unusedCapacitySlice();
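Worth noting about the new collectOutput above: rather than copying FIFO contents into the caller's lists afterward (writeFifoDataToArrayList is deleted), the pollers' Reader buffers are aliased to the caller-supplied list storage up front, and a defer hands the possibly reallocated buffers back, so output lands in place. The call site is unchanged; a sketch, with spawn details elided:

    const std = @import("std");

    fn runAndCapture(gpa: std.mem.Allocator, argv: []const []const u8) !void {
        var child = std.process.Child.init(argv, gpa);
        child.stdout_behavior = .Pipe;
        child.stderr_behavior = .Pipe;
        try child.spawn();

        var stdout: std.ArrayListUnmanaged(u8) = .empty;
        defer stdout.deinit(gpa);
        var stderr: std.ArrayListUnmanaged(u8) = .empty;
        defer stderr.deinit(gpa);

        // Output accumulates directly in the caller's lists (10 MiB cap each).
        try child.collectOutput(gpa, &stdout, &stderr, 10 * 1024 * 1024);
        _ = try child.wait();
    }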
@@ -6215,19 +6215,20 @@ fn spawnZigRc(
         return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {s}", .{ argv[0], @errorName(err) });
     };
 
-    var poller = std.io.poll(comp.gpa, enum { stdout }, .{
+    var poller = std.Io.poll(comp.gpa, enum { stdout, stderr }, .{
         .stdout = child.stdout.?,
+        .stderr = child.stderr.?,
     });
     defer poller.deinit();
 
-    const stdout = poller.fifo(.stdout);
+    const stdout = poller.reader(.stdout);
 
     poll: while (true) {
-        while (stdout.readableLength() < @sizeOf(std.zig.Server.Message.Header)) if (!try poller.poll()) break :poll;
-        var header: std.zig.Server.Message.Header = undefined;
-        assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(std.zig.Server.Message.Header));
-        while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll;
-        const body = stdout.readableSliceOfLen(header.bytes_len);
+        const MessageHeader = std.zig.Server.Message.Header;
+        while (stdout.buffered().len < @sizeOf(MessageHeader)) if (!try poller.poll()) break :poll;
+        const header = stdout.takeStruct(MessageHeader, .little) catch unreachable;
+        while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+        const body = stdout.take(header.bytes_len) catch unreachable;
 
         switch (header.tag) {
             // We expect exactly one ErrorBundle, and if any error_bundle header is
@@ -6250,13 +6251,10 @@ fn spawnZigRc(
             },
             else => {}, // ignore other messages
         }
-
-        stdout.discard(body.len);
     }
 
     // Just in case there's a failure that didn't send an ErrorBundle (e.g. an error return trace)
-    const stderr_reader = child.stderr.?.deprecatedReader();
-    const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
+    const stderr = poller.reader(.stderr);
 
     const term = child.wait() catch |err| {
         return comp.failWin32Resource(win32_resource, "unable to wait for {s} rc: {s}", .{ argv[0], @errorName(err) });
@@ -6265,12 +6263,12 @@ fn spawnZigRc(
     switch (term) {
         .Exited => |code| {
             if (code != 0) {
-                log.err("zig rc failed with stderr:\n{s}", .{stderr});
+                log.err("zig rc failed with stderr:\n{s}", .{stderr.buffered()});
                 return comp.failWin32Resource(win32_resource, "zig rc exited with code {d}", .{code});
             }
         },
         else => {
-            log.err("zig rc terminated with stderr:\n{s}", .{stderr});
+            log.err("zig rc terminated with stderr:\n{s}", .{stderr.buffered()});
            return comp.failWin32Resource(win32_resource, "zig rc terminated unexpectedly", .{});
         },
     }
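Across these three hunks the framing logic is unchanged: wait for a full `Header`, decode it, wait for the body, consume it. What changes is the vocabulary: the old fifo's `readableLength()`/`readableSliceOfLen()`/`discard()` gives way to a `std.Io.Reader` obtained via `poller.reader(...)`, whose `takeStruct` and `take` both decode and advance, so the trailing `discard` disappears. Below is a condensed sketch of the receive loop, built only from calls that appear in this commit; the `header.tag` dispatch and the rc-specific error handling are elided:

    const std = @import("std");

    // Sketch, not the compiler's exact code: drain server messages from a
    // spawned child until it closes stdout.
    fn drainServerMessages(gpa: std.mem.Allocator, child: *std.process.Child) !void {
        var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
            .stdout = child.stdout.?,
            .stderr = child.stderr.?,
        });
        defer poller.deinit();

        const stdout = poller.reader(.stdout);
        poll: while (true) {
            const Header = std.zig.Server.Message.Header;
            // Block until a whole header is buffered, then decode-and-advance in one call.
            while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
            const header = stdout.takeStruct(Header, .little) catch unreachable;
            // Likewise for the body; `take` consumes it, so no explicit discard follows.
            while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
            const body = stdout.take(header.bytes_len) catch unreachable;
            _ = body; // dispatch on header.tag here
        }
    }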
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
 const io = std.io;
 const fs = std.fs;
 const process = std.process;
-const ChildProcess = std.process.Child;
 const Progress = std.Progress;
 const print = std.debug.print;
 const mem = std.mem;
@@ -186,7 +186,7 @@ pub fn main() !void {
 
     try child.spawn();
 
-    var poller = std.io.poll(arena, Eval.StreamEnum, .{
+    var poller = std.Io.poll(arena, Eval.StreamEnum, .{
         .stdout = child.stdout.?,
         .stderr = child.stderr.?,
     });
@@ -247,19 +247,15 @@ const Eval = struct {
 
     fn check(eval: *Eval, poller: *Poller, update: Case.Update, prog_node: std.Progress.Node) !void {
         const arena = eval.arena;
-        const Header = std.zig.Server.Message.Header;
-        const stdout = poller.fifo(.stdout);
-        const stderr = poller.fifo(.stderr);
+        const stdout = poller.reader(.stdout);
+        const stderr = poller.reader(.stderr);
 
         poll: while (true) {
-            while (stdout.readableLength() < @sizeOf(Header)) {
-                if (!(try poller.poll())) break :poll;
-            }
-            const header = stdout.reader().readStruct(Header) catch unreachable;
-            while (stdout.readableLength() < header.bytes_len) {
-                if (!(try poller.poll())) break :poll;
-            }
-            const body = stdout.readableSliceOfLen(header.bytes_len);
+            const Header = std.zig.Server.Message.Header;
+            while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
+            const header = stdout.takeStruct(Header, .little) catch unreachable;
+            while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+            const body = stdout.take(header.bytes_len) catch unreachable;
 
             switch (header.tag) {
                 .error_bundle => {
@@ -277,8 +273,8 @@ const Eval = struct {
                         .string_bytes = try arena.dupe(u8, string_bytes),
                         .extra = extra_array,
                     };
-                    if (stderr.readableLength() > 0) {
-                        const stderr_data = try stderr.toOwnedSlice();
+                    if (stderr.bufferedLen() > 0) {
+                        const stderr_data = try poller.toOwnedSlice(.stderr);
                         if (eval.allow_stderr) {
                             std.log.info("error_bundle included stderr:\n{s}", .{stderr_data});
                         } else {
@@ -289,15 +285,14 @@ const Eval = struct {
                         try eval.checkErrorOutcome(update, result_error_bundle);
                     }
                     // This message indicates the end of the update.
-                    stdout.discard(body.len);
                     return;
                 },
                 .emit_digest => {
                     const EbpHdr = std.zig.Server.Message.EmitDigest;
                     const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body));
                     _ = ebp_hdr;
-                    if (stderr.readableLength() > 0) {
-                        const stderr_data = try stderr.toOwnedSlice();
+                    if (stderr.bufferedLen() > 0) {
+                        const stderr_data = try poller.toOwnedSlice(.stderr);
                         if (eval.allow_stderr) {
                             std.log.info("emit_digest included stderr:\n{s}", .{stderr_data});
                         } else {
@@ -308,7 +303,6 @@ const Eval = struct {
                     if (eval.target.backend == .sema) {
                         try eval.checkSuccessOutcome(update, null, prog_node);
                         // This message indicates the end of the update.
-                        stdout.discard(body.len);
                     }
 
                     const digest = body[@sizeOf(EbpHdr)..][0..Cache.bin_digest_len];
@@ -323,21 +317,18 @@ const Eval = struct {
 
                     try eval.checkSuccessOutcome(update, bin_path, prog_node);
                     // This message indicates the end of the update.
-                    stdout.discard(body.len);
                 },
                 else => {
                     // Ignore other messages.
-                    stdout.discard(body.len);
                 },
             }
         }
 
-        if (stderr.readableLength() > 0) {
-            const stderr_data = try stderr.toOwnedSlice();
+        if (stderr.bufferedLen() > 0) {
             if (eval.allow_stderr) {
-                std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr_data });
+                std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr.buffered() });
             } else {
-                eval.fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data });
+                eval.fatal("update '{s}' failed:\n{s}", .{ update.name, stderr.buffered() });
             }
         }
 
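Two stderr idioms coexist in the new code: `stderr.buffered()` borrows whatever bytes currently sit in the poller's buffer (enough for a log line), while `poller.toOwnedSlice(.stderr)` drains them into an allocated slice the caller keeps. A small sketch of the distinction; the log text is illustrative:

    const std = @import("std");

    const StreamEnum = enum { stdout, stderr };

    fn reportStderr(poller: *std.Io.Poller(StreamEnum)) !void {
        const stderr = poller.reader(.stderr);
        if (stderr.bufferedLen() > 0) {
            // Borrow: inspect the buffered bytes in place, without consuming them.
            std.log.info("stderr so far:\n{s}", .{stderr.buffered()});
            // Or take ownership: drain the buffer into an allocated slice, as the
            // error_bundle/emit_digest branches above do. Freeing is elided here.
            const owned = try poller.toOwnedSlice(.stderr);
            _ = owned;
        }
    }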
@@ -537,25 +528,19 @@ const Eval = struct {
     fn end(eval: *Eval, poller: *Poller) !void {
         requestExit(eval.child, eval);
 
-        const Header = std.zig.Server.Message.Header;
-        const stdout = poller.fifo(.stdout);
-        const stderr = poller.fifo(.stderr);
+        const stdout = poller.reader(.stdout);
+        const stderr = poller.reader(.stderr);
 
         poll: while (true) {
-            while (stdout.readableLength() < @sizeOf(Header)) {
-                if (!(try poller.poll())) break :poll;
-            }
-            const header = stdout.reader().readStruct(Header) catch unreachable;
-            while (stdout.readableLength() < header.bytes_len) {
-                if (!(try poller.poll())) break :poll;
-            }
-            const body = stdout.readableSliceOfLen(header.bytes_len);
-            stdout.discard(body.len);
+            const Header = std.zig.Server.Message.Header;
+            while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
+            const header = stdout.takeStruct(Header, .little) catch unreachable;
+            while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
+            stdout.toss(header.bytes_len);
        }
 
-        if (stderr.readableLength() > 0) {
-            const stderr_data = try stderr.toOwnedSlice();
-            eval.fatal("unexpected stderr:\n{s}", .{stderr_data});
+        if (stderr.bufferedLen() > 0) {
+            eval.fatal("unexpected stderr:\n{s}", .{stderr.buffered()});
         }
     }
 
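One detail worth noting in `end()`: since the shutdown loop never inspects message bodies, it no longer materializes them at all. `stdout.toss(header.bytes_len)` simply advances the reader past the buffered bytes, collapsing the old `readableSliceOfLen` plus `discard` pair into a single skip.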