Mirror of https://codeberg.org/ziglang/zig.git (synced 2025-12-06 05:44:20 +00:00)
zig build: many enhancements related to parallel building
Rework std.Build.Step to have an `owner: *Build` field. This simplified the implementation of installation steps, as well as provided some much-needed common API for the new parallelized build system.

--verbose is now defined very concretely: it prints to stderr just before spawning a child process. Child process execution is updated to conform to the new parallel-friendly make() function semantics.

DRY up the failWithCacheError handling code. It now integrates properly with the step graph instead of incorrectly dumping to stderr and calling process exit.

In the main CLI, fix `zig fmt` crash when there are no errors and stdin is used.

Deleted steps:
* EmulatableRunStep - this entire thing can be removed in favor of a flag added to std.Build.RunStep called `skip_foreign_checks`.
* LogStep - this doesn't really fit with a multi-threaded build runner and is effectively superseded by the new build summary output.

build runner:
* add -fsummary and -fno-summary to override the default behavior, which is to print a summary if any of the build steps fail.
* print the dep prefix when emitting error messages for steps.

std.Build.FmtStep:
* This step now supports exclude paths as well as a check flag.
* The check flag decides between two modes: modify mode and check mode. These can be used to update source files in place, or to fail the build, respectively.

Zig's own build.zig:
* The `test-fmt` step will do all the `zig fmt` checking that we expect to be done. Since the `test` step depends on this one, we can simply remove the explicit call to `zig fmt` in the CI.
* The new `fmt` step will actually perform `zig fmt` and update source files in place.

std.Build.RunStep:
* expose max_stdio_size as a field (previously an unchangeable hard-coded value).
* rework the API. Instead of configuring each stream independently, there is a `stdio` field where you can choose between `infer_from_args`, `inherit`, or `check`. These determine whether the RunStep is considered to have side effects or not. The previous field, `condition`, is gone.
* when the stdio mode is set to `check` there is a slice of any number of checks to make, which include things like exit code, stderr matching, or stdout matching.
* remove the ill-defined `print` field.
* when adding an output arg, the step takes the opportunity to give itself a better name.
* The flag `skip_foreign_checks` is added. If this is true, a RunStep which is configured to check the output of the executed binary will not fail the build if the binary cannot be executed because it is foreign to the host system that is running the build graph. Command-line arguments such as -fqemu and -fwasmtime may affect whether a binary is detected as foreign, as can system configuration such as Rosetta (macOS) and binfmt_misc (Linux).
  - This makes EmulatableRunStep no longer needed.
* Fix the child process handling to properly integrate with the new build API and to avoid deadlocks in stdout/stderr streams by polling if necessary.

std.Build.RemoveDirStep now uses the open build_root directory handle instead of an absolute path.
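To make the reworked RunStep behavior concrete, here is a minimal build.zig sketch. Only addRunArtifact, skip_foreign_checks, installArtifact, and b.step come from this commit; the addExecutable call, the "demo" name, and the src/main.zig layout are assumptions added for illustration.

```zig
const std = @import("std");

pub fn build(b: *std.Build) void {
    // Assumed project layout: a single executable at src/main.zig.
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = b.standardTargetOptions(.{}),
        .optimize = b.standardOptimizeOption(.{}),
    });
    b.installArtifact(exe);

    // Run the built binary as part of the build graph. With
    // `skip_foreign_checks = true`, a binary that is foreign to the host
    // (and cannot be run via -fqemu, -fwasmtime, Rosetta, binfmt_misc, ...)
    // is skipped instead of failing the build.
    const run = b.addRunArtifact(exe);
    run.skip_foreign_checks = true;

    const run_step = b.step("run", "Build and run the demo executable");
    run_step.dependOn(&run.step);
}
```

Invoking `zig build run -fsummary` would additionally print the build summary even when every step succeeds, per the new build-runner flags described above.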
This commit is contained in:
parent d0f675827c
commit 58edefc6d1

23 changed files with 1109 additions and 1163 deletions

build.zig (24 changes)
|
|
@ -61,8 +61,6 @@ pub fn build(b: *std.Build) !void {
|
|||
test_cases.stack_size = stack_size;
|
||||
test_cases.single_threaded = single_threaded;
|
||||
|
||||
const fmt_build_zig = b.addFmt(&[_][]const u8{"build.zig"});
|
||||
|
||||
const skip_debug = b.option(bool, "skip-debug", "Main test suite skips debug builds") orelse false;
|
||||
const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
|
||||
const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;
|
||||
|
|
@ -386,10 +384,24 @@ pub fn build(b: *std.Build) !void {
|
|||
}
|
||||
const optimization_modes = chosen_opt_modes_buf[0..chosen_mode_index];
|
||||
|
||||
// run stage1 `zig fmt` on this build.zig file just to make sure it works
|
||||
test_step.dependOn(&fmt_build_zig.step);
|
||||
const fmt_step = b.step("test-fmt", "Run zig fmt against build.zig to make sure it works");
|
||||
fmt_step.dependOn(&fmt_build_zig.step);
|
||||
const fmt_include_paths = &.{ "doc", "lib", "src", "test", "tools", "build.zig" };
|
||||
const fmt_exclude_paths = &.{ "test/cases" };
|
||||
const check_fmt = b.addFmt(.{
|
||||
.paths = fmt_include_paths,
|
||||
.exclude_paths = fmt_exclude_paths,
|
||||
.check = true,
|
||||
});
|
||||
const do_fmt = b.addFmt(.{
|
||||
.paths = fmt_include_paths,
|
||||
.exclude_paths = fmt_exclude_paths,
|
||||
});
|
||||
|
||||
const test_fmt_step = b.step("test-fmt", "Check whether source files have conforming formatting");
|
||||
test_fmt_step.dependOn(&check_fmt.step);
|
||||
|
||||
const do_fmt_step = b.step("fmt", "Modify source files in place to have conforming formatting");
|
||||
do_fmt_step.dependOn(&do_fmt.step);
|
||||
|
||||
|
||||
test_step.dependOn(tests.addPkgTests(
|
||||
b,
|
||||
|
|
|
|||
|
|
@ -93,6 +93,7 @@ pub fn main() !void {
|
|||
|
||||
var install_prefix: ?[]const u8 = null;
|
||||
var dir_list = std.Build.DirList{};
|
||||
var enable_summary: ?bool = null;
|
||||
|
||||
const Color = enum { auto, off, on };
|
||||
var color: Color = .auto;
|
||||
|
|
@ -217,6 +218,10 @@ pub fn main() !void {
|
|||
builder.enable_darling = true;
|
||||
} else if (mem.eql(u8, arg, "-fno-darling")) {
|
||||
builder.enable_darling = false;
|
||||
} else if (mem.eql(u8, arg, "-fsummary")) {
|
||||
enable_summary = true;
|
||||
} else if (mem.eql(u8, arg, "-fno-summary")) {
|
||||
enable_summary = false;
|
||||
} else if (mem.eql(u8, arg, "-freference-trace")) {
|
||||
builder.reference_trace = 256;
|
||||
} else if (mem.startsWith(u8, arg, "-freference-trace=")) {
|
||||
|
|
@ -252,8 +257,9 @@ pub fn main() !void {
|
|||
}
|
||||
}
|
||||
|
||||
const stderr = std.io.getStdErr();
|
||||
const ttyconf: std.debug.TTY.Config = switch (color) {
|
||||
.auto => std.debug.detectTTYConfig(std.io.getStdErr()),
|
||||
.auto => std.debug.detectTTYConfig(stderr),
|
||||
.on => .escape_codes,
|
||||
.off => .no_color,
|
||||
};
|
||||
|
|
@ -279,6 +285,8 @@ pub fn main() !void {
|
|||
main_progress_node,
|
||||
thread_pool_options,
|
||||
ttyconf,
|
||||
stderr,
|
||||
enable_summary,
|
||||
) catch |err| switch (err) {
|
||||
error.UncleanExit => process.exit(1),
|
||||
else => return err,
|
||||
|
|
@ -292,6 +300,8 @@ fn runStepNames(
|
|||
parent_prog_node: *std.Progress.Node,
|
||||
thread_pool_options: std.Thread.Pool.Options,
|
||||
ttyconf: std.debug.TTY.Config,
|
||||
stderr: std.fs.File,
|
||||
enable_summary: ?bool,
|
||||
) !void {
|
||||
const gpa = b.allocator;
|
||||
var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{};
|
||||
|
|
@ -382,28 +392,35 @@ fn runStepNames(
|
|||
|
||||
// A proper command line application defaults to silently succeeding.
|
||||
// The user may request verbose mode if they have a different preference.
|
||||
if (failure_count == 0 and !b.verbose) return cleanExit();
|
||||
if (failure_count == 0 and enable_summary != true) return cleanExit();
|
||||
|
||||
const stderr = std.io.getStdErr();
|
||||
if (enable_summary != false) {
|
||||
const total_count = success_count + failure_count + pending_count;
|
||||
ttyconf.setColor(stderr, .Cyan) catch {};
|
||||
stderr.writeAll("Build Summary:") catch {};
|
||||
ttyconf.setColor(stderr, .Reset) catch {};
|
||||
stderr.writer().print(" {d}/{d} steps succeeded; {d} failed", .{
|
||||
success_count, total_count, failure_count,
|
||||
}) catch {};
|
||||
|
||||
const total_count = success_count + failure_count + pending_count;
|
||||
ttyconf.setColor(stderr, .Cyan) catch {};
|
||||
stderr.writeAll("Build Summary: ") catch {};
|
||||
ttyconf.setColor(stderr, .Reset) catch {};
|
||||
stderr.writer().print("{d}/{d} steps succeeded; {d} failed; {d} total compile errors\n", .{
|
||||
success_count, total_count, failure_count, total_compile_errors,
|
||||
}) catch {};
|
||||
if (enable_summary == null) {
|
||||
ttyconf.setColor(stderr, .Dim) catch {};
|
||||
stderr.writeAll(" (disable with -fno-summary)") catch {};
|
||||
ttyconf.setColor(stderr, .Reset) catch {};
|
||||
}
|
||||
stderr.writeAll("\n") catch {};
|
||||
|
||||
// Print a fancy tree with build results.
|
||||
var print_node: PrintNode = .{ .parent = null };
|
||||
if (step_names.len == 0) {
|
||||
print_node.last = true;
|
||||
printTreeStep(b, b.default_step, stderr, ttyconf, &print_node, &step_stack) catch {};
|
||||
} else {
|
||||
for (step_names, 0..) |step_name, i| {
|
||||
const tls = b.top_level_steps.get(step_name).?;
|
||||
print_node.last = i + 1 == b.top_level_steps.count();
|
||||
printTreeStep(b, &tls.step, stderr, ttyconf, &print_node, &step_stack) catch {};
|
||||
// Print a fancy tree with build results.
|
||||
var print_node: PrintNode = .{ .parent = null };
|
||||
if (step_names.len == 0) {
|
||||
print_node.last = true;
|
||||
printTreeStep(b, b.default_step, stderr, ttyconf, &print_node, &step_stack) catch {};
|
||||
} else {
|
||||
for (step_names, 0..) |step_name, i| {
|
||||
const tls = b.top_level_steps.get(step_name).?;
|
||||
print_node.last = i + 1 == b.top_level_steps.count();
|
||||
printTreeStep(b, &tls.step, stderr, ttyconf, &print_node, &step_stack) catch {};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -453,9 +470,9 @@ fn printTreeStep(
|
|||
step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
|
||||
) !void {
|
||||
const first = step_stack.swapRemove(s);
|
||||
if (!first) try ttyconf.setColor(stderr, .Dim);
|
||||
try printPrefix(parent_node, stderr);
|
||||
|
||||
if (!first) try ttyconf.setColor(stderr, .Dim);
|
||||
if (parent_node.parent != null) {
|
||||
if (parent_node.last) {
|
||||
try stderr.writeAll("└─ ");
|
||||
|
|
@ -464,7 +481,7 @@ fn printTreeStep(
|
|||
}
|
||||
}
|
||||
|
||||
// TODO print the dep prefix too?
|
||||
// dep_prefix omitted here because it is redundant with the tree.
|
||||
try stderr.writeAll(s.name);
|
||||
|
||||
if (first) {
|
||||
|
|
@ -608,8 +625,10 @@ fn workerMakeOneStep(
|
|||
const stderr = std.io.getStdErr();
|
||||
|
||||
for (s.result_error_msgs.items) |msg| {
|
||||
// TODO print the dep prefix too
|
||||
// Sometimes it feels like you just can't catch a break. Finally,
|
||||
// with Zig, you can.
|
||||
ttyconf.setColor(stderr, .Bold) catch break;
|
||||
stderr.writeAll(s.owner.dep_prefix) catch break;
|
||||
stderr.writeAll(s.name) catch break;
|
||||
stderr.writeAll(": ") catch break;
|
||||
ttyconf.setColor(stderr, .Red) catch break;
|
||||
|
|
@ -735,6 +754,8 @@ fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !voi
|
|||
\\Advanced Options:
|
||||
\\ -freference-trace[=num] How many lines of reference trace should be shown per compile error
|
||||
\\ -fno-reference-trace Disable reference trace
|
||||
\\ -fsummary Print the build summary, even on success
|
||||
\\ -fno-summary Omit the build summary, even on failure
|
||||
\\ --build-file [file] Override path to build.zig
|
||||
\\ --cache-dir [path] Override path to local Zig cache directory
|
||||
\\ --global-cache-dir [path] Override path to global Zig cache directory
|
||||
|
|
|
|||
|
|
@ -32,14 +32,12 @@ pub const Step = @import("Build/Step.zig");
|
|||
pub const CheckFileStep = @import("Build/CheckFileStep.zig");
|
||||
pub const CheckObjectStep = @import("Build/CheckObjectStep.zig");
|
||||
pub const ConfigHeaderStep = @import("Build/ConfigHeaderStep.zig");
|
||||
pub const EmulatableRunStep = @import("Build/EmulatableRunStep.zig");
|
||||
pub const FmtStep = @import("Build/FmtStep.zig");
|
||||
pub const InstallArtifactStep = @import("Build/InstallArtifactStep.zig");
|
||||
pub const InstallDirStep = @import("Build/InstallDirStep.zig");
|
||||
pub const InstallFileStep = @import("Build/InstallFileStep.zig");
|
||||
pub const ObjCopyStep = @import("Build/ObjCopyStep.zig");
|
||||
pub const CompileStep = @import("Build/CompileStep.zig");
|
||||
pub const LogStep = @import("Build/LogStep.zig");
|
||||
pub const OptionsStep = @import("Build/OptionsStep.zig");
|
||||
pub const RemoveDirStep = @import("Build/RemoveDirStep.zig");
|
||||
pub const RunStep = @import("Build/RunStep.zig");
|
||||
|
|
@ -195,7 +193,7 @@ pub fn create(
|
|||
env_map.* = try process.getEnvMap(allocator);
|
||||
|
||||
const self = try allocator.create(Build);
|
||||
self.* = Build{
|
||||
self.* = .{
|
||||
.zig_exe = zig_exe,
|
||||
.build_root = build_root,
|
||||
.cache_root = cache_root,
|
||||
|
|
@ -224,16 +222,18 @@ pub fn create(
|
|||
.dest_dir = env_map.get("DESTDIR"),
|
||||
.installed_files = ArrayList(InstalledFile).init(allocator),
|
||||
.install_tls = .{
|
||||
.step = Step.init(allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = .top_level,
|
||||
.name = "install",
|
||||
.owner = self,
|
||||
}),
|
||||
.description = "Copy build artifacts to prefix path",
|
||||
},
|
||||
.uninstall_tls = .{
|
||||
.step = Step.init(allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = .top_level,
|
||||
.name = "uninstall",
|
||||
.owner = self,
|
||||
.makeFn = makeUninstall,
|
||||
}),
|
||||
.description = "Remove build artifacts from prefix path",
|
||||
|
|
@ -267,16 +267,18 @@ fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: Cache.Direc
|
|||
child.* = .{
|
||||
.allocator = allocator,
|
||||
.install_tls = .{
|
||||
.step = Step.init(allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = .top_level,
|
||||
.name = "install",
|
||||
.owner = child,
|
||||
}),
|
||||
.description = "Copy build artifacts to prefix path",
|
||||
},
|
||||
.uninstall_tls = .{
|
||||
.step = Step.init(allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = .top_level,
|
||||
.name = "uninstall",
|
||||
.owner = child,
|
||||
.makeFn = makeUninstall,
|
||||
}),
|
||||
.description = "Remove build artifacts from prefix path",
|
||||
|
|
@ -689,21 +691,14 @@ pub fn addWriteFiles(self: *Build) *WriteFileStep {
|
|||
return write_file_step;
|
||||
}
|
||||
|
||||
pub fn addLog(self: *Build, comptime format: []const u8, args: anytype) *LogStep {
|
||||
const data = self.fmt(format, args);
|
||||
const log_step = self.allocator.create(LogStep) catch @panic("OOM");
|
||||
log_step.* = LogStep.init(self, data);
|
||||
return log_step;
|
||||
}
|
||||
|
||||
pub fn addRemoveDirTree(self: *Build, dir_path: []const u8) *RemoveDirStep {
|
||||
const remove_dir_step = self.allocator.create(RemoveDirStep) catch @panic("OOM");
|
||||
remove_dir_step.* = RemoveDirStep.init(self, dir_path);
|
||||
return remove_dir_step;
|
||||
}
|
||||
|
||||
pub fn addFmt(self: *Build, paths: []const []const u8) *FmtStep {
|
||||
return FmtStep.create(self, paths);
|
||||
pub fn addFmt(b: *Build, options: FmtStep.Options) *FmtStep {
|
||||
return FmtStep.create(b, options);
|
||||
}
|
||||
|
||||
pub fn addTranslateC(self: *Build, options: TranslateCStep.Options) *TranslateCStep {
|
||||
|
|
@ -870,10 +865,11 @@ pub fn option(self: *Build, comptime T: type, name_raw: []const u8, description_
|
|||
|
||||
pub fn step(self: *Build, name: []const u8, description: []const u8) *Step {
|
||||
const step_info = self.allocator.create(TopLevelStep) catch @panic("OOM");
|
||||
step_info.* = TopLevelStep{
|
||||
.step = Step.init(self.allocator, .{
|
||||
step_info.* = .{
|
||||
.step = Step.init(.{
|
||||
.id = .top_level,
|
||||
.name = name,
|
||||
.owner = self,
|
||||
}),
|
||||
.description = self.dupe(description),
|
||||
};
|
||||
|
|
@ -1145,10 +1141,6 @@ pub fn validateUserInputDidItFail(self: *Build) bool {
|
|||
return self.invalid_user_input;
|
||||
}
|
||||
|
||||
pub fn spawnChild(self: *Build, argv: []const []const u8) !void {
|
||||
return self.spawnChildEnvMap(null, self.env_map, argv);
|
||||
}
|
||||
|
||||
fn allocPrintCmd(ally: Allocator, opt_cwd: ?[]const u8, argv: []const []const u8) ![]u8 {
|
||||
var buf = ArrayList(u8).init(ally);
|
||||
if (opt_cwd) |cwd| try buf.writer().print("cd {s} && ", .{cwd});
|
||||
|
|
@ -1163,40 +1155,6 @@ fn printCmd(ally: Allocator, cwd: ?[]const u8, argv: []const []const u8) void {
|
|||
std.debug.print("{s}\n", .{text});
|
||||
}
|
||||
|
||||
pub fn spawnChildEnvMap(self: *Build, cwd: ?[]const u8, env_map: *const EnvMap, argv: []const []const u8) !void {
|
||||
if (self.verbose) {
|
||||
printCmd(self.allocator, cwd, argv);
|
||||
}
|
||||
|
||||
if (!process.can_spawn)
|
||||
return error.ExecNotSupported;
|
||||
|
||||
var child = std.ChildProcess.init(argv, self.allocator);
|
||||
child.cwd = cwd;
|
||||
child.env_map = env_map;
|
||||
|
||||
const term = child.spawnAndWait() catch |err| {
|
||||
log.err("Unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
|
||||
return err;
|
||||
};
|
||||
|
||||
switch (term) {
|
||||
.Exited => |code| {
|
||||
if (code != 0) {
|
||||
log.err("The following command exited with error code {}:", .{code});
|
||||
printCmd(self.allocator, cwd, argv);
|
||||
return error.UncleanExit;
|
||||
}
|
||||
},
|
||||
else => {
|
||||
log.err("The following command terminated unexpectedly:", .{});
|
||||
printCmd(self.allocator, cwd, argv);
|
||||
|
||||
return error.UncleanExit;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn installArtifact(self: *Build, artifact: *CompileStep) void {
|
||||
self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step);
|
||||
}
|
||||
|
|
@ -1403,160 +1361,6 @@ pub fn execAllowFail(
|
|||
}
|
||||
}
|
||||
|
||||
/// This function is used exclusively for spawning and communicating with the zig compiler.
|
||||
/// TODO: move to build_runner.zig
|
||||
pub fn execFromStep(b: *Build, argv: []const []const u8, s: *Step, prog_node: *std.Progress.Node) ![]const u8 {
|
||||
assert(argv.len != 0);
|
||||
|
||||
if (b.verbose) {
|
||||
const text = try allocPrintCmd(b.allocator, null, argv);
|
||||
try s.result_error_msgs.append(b.allocator, text);
|
||||
}
|
||||
|
||||
if (!process.can_spawn) {
|
||||
try s.result_error_msgs.append(b.allocator, b.fmt("Unable to spawn the following command: cannot spawn child processes\n{s}", .{
|
||||
try allocPrintCmd(b.allocator, null, argv),
|
||||
}));
|
||||
return error.MakeFailed;
|
||||
}
|
||||
|
||||
var child = std.ChildProcess.init(argv, b.allocator);
|
||||
child.env_map = b.env_map;
|
||||
child.stdin_behavior = .Pipe;
|
||||
child.stdout_behavior = .Pipe;
|
||||
child.stderr_behavior = .Pipe;
|
||||
|
||||
try child.spawn();
|
||||
|
||||
var poller = std.io.poll(b.allocator, enum { stdout, stderr }, .{
|
||||
.stdout = child.stdout.?,
|
||||
.stderr = child.stderr.?,
|
||||
});
|
||||
defer poller.deinit();
|
||||
|
||||
try sendMessage(child.stdin.?, .update);
|
||||
try sendMessage(child.stdin.?, .exit);
|
||||
|
||||
const Header = std.zig.Server.Message.Header;
|
||||
var result: ?[]const u8 = null;
|
||||
|
||||
var node_name: std.ArrayListUnmanaged(u8) = .{};
|
||||
defer node_name.deinit(b.allocator);
|
||||
var sub_prog_node: ?std.Progress.Node = null;
|
||||
defer if (sub_prog_node) |*n| n.end();
|
||||
|
||||
while (try poller.poll()) {
|
||||
const stdout = poller.fifo(.stdout);
|
||||
const buf = stdout.readableSlice(0);
|
||||
assert(stdout.readableLength() == buf.len);
|
||||
if (buf.len >= @sizeOf(Header)) {
|
||||
const header = @ptrCast(*align(1) const Header, buf[0..@sizeOf(Header)]);
|
||||
const header_and_msg_len = header.bytes_len + @sizeOf(Header);
|
||||
if (buf.len >= header_and_msg_len) {
|
||||
const body = buf[@sizeOf(Header)..][0..header.bytes_len];
|
||||
switch (header.tag) {
|
||||
.zig_version => {
|
||||
if (!mem.eql(u8, builtin.zig_version_string, body)) {
|
||||
try s.result_error_msgs.append(
|
||||
b.allocator,
|
||||
b.fmt("zig version mismatch build runner vs compiler: '{s}' vs '{s}'", .{
|
||||
builtin.zig_version_string, body,
|
||||
}),
|
||||
);
|
||||
return error.MakeFailed;
|
||||
}
|
||||
},
|
||||
.error_bundle => {
|
||||
const EbHdr = std.zig.Server.Message.ErrorBundle;
|
||||
const eb_hdr = @ptrCast(*align(1) const EbHdr, body);
|
||||
const extra_bytes =
|
||||
body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
|
||||
const string_bytes =
|
||||
body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
|
||||
// TODO: use @ptrCast when the compiler supports it
|
||||
const unaligned_extra = mem.bytesAsSlice(u32, extra_bytes);
|
||||
const extra_array = try b.allocator.alloc(u32, unaligned_extra.len);
|
||||
// TODO: use @memcpy when it supports slices
|
||||
for (extra_array, unaligned_extra) |*dst, src| dst.* = src;
|
||||
s.result_error_bundle = .{
|
||||
.string_bytes = try b.allocator.dupe(u8, string_bytes),
|
||||
.extra = extra_array,
|
||||
};
|
||||
},
|
||||
.progress => {
|
||||
if (sub_prog_node) |*n| n.end();
|
||||
node_name.clearRetainingCapacity();
|
||||
try node_name.appendSlice(b.allocator, body);
|
||||
sub_prog_node = prog_node.start(node_name.items, 0);
|
||||
sub_prog_node.?.activate();
|
||||
},
|
||||
.emit_bin_path => {
|
||||
result = try b.allocator.dupe(u8, body);
|
||||
},
|
||||
_ => {
|
||||
// Unrecognized message.
|
||||
},
|
||||
}
|
||||
stdout.discard(header_and_msg_len);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const stderr = poller.fifo(.stderr);
|
||||
if (stderr.readableLength() > 0) {
|
||||
try s.result_error_msgs.append(b.allocator, try stderr.toOwnedSlice());
|
||||
}
|
||||
|
||||
// Send EOF to stdin.
|
||||
child.stdin.?.close();
|
||||
child.stdin = null;
|
||||
|
||||
const term = try child.wait();
|
||||
switch (term) {
|
||||
.Exited => |code| {
|
||||
if (code != 0) {
|
||||
try s.result_error_msgs.append(b.allocator, b.fmt("the following command exited with error code {d}:\n{s}", .{
|
||||
code, try allocPrintCmd(b.allocator, null, argv),
|
||||
}));
|
||||
return error.MakeFailed;
|
||||
}
|
||||
},
|
||||
.Signal, .Stopped, .Unknown => |code| {
|
||||
_ = code;
|
||||
try s.result_error_msgs.append(b.allocator, b.fmt("the following command terminated unexpectedly:\n{s}", .{
|
||||
try allocPrintCmd(b.allocator, null, argv),
|
||||
}));
|
||||
return error.MakeFailed;
|
||||
},
|
||||
}
|
||||
|
||||
if (s.result_error_bundle.errorMessageCount() > 0) {
|
||||
try s.result_error_msgs.append(
|
||||
b.allocator,
|
||||
b.fmt("the following command failed with {d} compilation errors:\n{s}", .{
|
||||
s.result_error_bundle.errorMessageCount(),
|
||||
try allocPrintCmd(b.allocator, null, argv),
|
||||
}),
|
||||
);
|
||||
return error.MakeFailed;
|
||||
}
|
||||
|
||||
return result orelse {
|
||||
try s.result_error_msgs.append(b.allocator, b.fmt("the following command failed to communicate the compilation result:\n{s}", .{
|
||||
try allocPrintCmd(b.allocator, null, argv),
|
||||
}));
|
||||
return error.MakeFailed;
|
||||
};
|
||||
}
|
||||
|
||||
fn sendMessage(file: fs.File, tag: std.zig.Client.Message.Tag) !void {
|
||||
const header: std.zig.Client.Message.Header = .{
|
||||
.tag = tag,
|
||||
.bytes_len = 0,
|
||||
};
|
||||
try file.writeAll(std.mem.asBytes(&header));
|
||||
}
|
||||
|
||||
/// This is a helper function to be called from build.zig scripts, *not* from
|
||||
/// inside step make() functions. If any errors occur, it fails the build with
|
||||
/// a helpful message.
|
||||
|
|
@ -1910,14 +1714,12 @@ pub fn serializeCpu(allocator: Allocator, cpu: std.Target.Cpu) ![]const u8 {
|
|||
test {
|
||||
_ = CheckFileStep;
|
||||
_ = CheckObjectStep;
|
||||
_ = EmulatableRunStep;
|
||||
_ = FmtStep;
|
||||
_ = InstallArtifactStep;
|
||||
_ = InstallDirStep;
|
||||
_ = InstallFileStep;
|
||||
_ = ObjCopyStep;
|
||||
_ = CompileStep;
|
||||
_ = LogStep;
|
||||
_ = OptionsStep;
|
||||
_ = RemoveDirStep;
|
||||
_ = RunStep;
|
||||
|
|
|
|||
|
|
@ -8,26 +8,25 @@ const CheckFileStep = @This();
|
|||
pub const base_id = .check_file;
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
expected_matches: []const []const u8,
|
||||
source: std.Build.FileSource,
|
||||
max_bytes: usize = 20 * 1024 * 1024,
|
||||
|
||||
pub fn create(
|
||||
builder: *std.Build,
|
||||
owner: *std.Build,
|
||||
source: std.Build.FileSource,
|
||||
expected_matches: []const []const u8,
|
||||
) *CheckFileStep {
|
||||
const self = builder.allocator.create(CheckFileStep) catch @panic("OOM");
|
||||
const self = owner.allocator.create(CheckFileStep) catch @panic("OOM");
|
||||
self.* = CheckFileStep{
|
||||
.builder = builder,
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = .check_file,
|
||||
.name = "CheckFile",
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.source = source.dupe(builder),
|
||||
.expected_matches = builder.dupeStrings(expected_matches),
|
||||
.source = source.dupe(owner),
|
||||
.expected_matches = owner.dupeStrings(expected_matches),
|
||||
};
|
||||
self.source.addStepDependencies(&self.step);
|
||||
return self;
|
||||
|
|
@ -35,10 +34,11 @@ pub fn create(
|
|||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
_ = prog_node;
|
||||
const b = step.owner;
|
||||
const self = @fieldParentPtr(CheckFileStep, "step", step);
|
||||
|
||||
const src_path = self.source.getPath(self.builder);
|
||||
const contents = try fs.cwd().readFileAlloc(self.builder.allocator, src_path, self.max_bytes);
|
||||
const src_path = self.source.getPath(b);
|
||||
const contents = try fs.cwd().readFileAlloc(b.allocator, src_path, self.max_bytes);
|
||||
|
||||
for (self.expected_matches) |expected_match| {
|
||||
if (mem.indexOf(u8, contents, expected_match) == null) {
|
||||
|
|
|
|||
|
|
@ -10,29 +10,31 @@ const CheckObjectStep = @This();
|
|||
|
||||
const Allocator = mem.Allocator;
|
||||
const Step = std.Build.Step;
|
||||
const EmulatableRunStep = std.Build.EmulatableRunStep;
|
||||
|
||||
pub const base_id = .check_object;
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
source: std.Build.FileSource,
|
||||
max_bytes: usize = 20 * 1024 * 1024,
|
||||
checks: std.ArrayList(Check),
|
||||
dump_symtab: bool = false,
|
||||
obj_format: std.Target.ObjectFormat,
|
||||
|
||||
pub fn create(builder: *std.Build, source: std.Build.FileSource, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
|
||||
const gpa = builder.allocator;
|
||||
pub fn create(
|
||||
owner: *std.Build,
|
||||
source: std.Build.FileSource,
|
||||
obj_format: std.Target.ObjectFormat,
|
||||
) *CheckObjectStep {
|
||||
const gpa = owner.allocator;
|
||||
const self = gpa.create(CheckObjectStep) catch @panic("OOM");
|
||||
self.* = .{
|
||||
.builder = builder,
|
||||
.step = Step.init(gpa, .{
|
||||
.step = Step.init(.{
|
||||
.id = .check_file,
|
||||
.name = "CheckObject",
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.source = source.dupe(builder),
|
||||
.source = source.dupe(owner),
|
||||
.checks = std.ArrayList(Check).init(gpa),
|
||||
.obj_format = obj_format,
|
||||
};
|
||||
|
|
@ -42,14 +44,18 @@ pub fn create(builder: *std.Build, source: std.Build.FileSource, obj_format: std
|
|||
|
||||
/// Runs and (optionally) compares the output of a binary.
|
||||
/// Asserts `self` was generated from an executable step.
|
||||
pub fn runAndCompare(self: *CheckObjectStep) *EmulatableRunStep {
|
||||
/// TODO this doesn't actually compare, and there's no apparent reason for it
|
||||
/// to depend on the check object step. I don't see why this function should exist,
|
||||
/// the caller could just add the run step directly.
|
||||
pub fn runAndCompare(self: *CheckObjectStep) *std.Build.RunStep {
|
||||
const dependencies_len = self.step.dependencies.items.len;
|
||||
assert(dependencies_len > 0);
|
||||
const exe_step = self.step.dependencies.items[dependencies_len - 1];
|
||||
const exe = exe_step.cast(std.Build.CompileStep).?;
|
||||
const emulatable_step = EmulatableRunStep.create(self.builder, "EmulatableRun", exe);
|
||||
emulatable_step.step.dependOn(&self.step);
|
||||
return emulatable_step;
|
||||
const run = self.step.owner.addRunArtifact(exe);
|
||||
run.skip_foreign_checks = true;
|
||||
run.step.dependOn(&self.step);
|
||||
return run;
|
||||
}
|
||||
|
||||
/// There two types of actions currently suported:
|
||||
|
|
@ -253,7 +259,7 @@ const Check = struct {
|
|||
|
||||
/// Creates a new sequence of actions with `phrase` as the first anchor searched phrase.
|
||||
pub fn checkStart(self: *CheckObjectStep, phrase: []const u8) void {
|
||||
var new_check = Check.create(self.builder);
|
||||
var new_check = Check.create(self.step.owner);
|
||||
new_check.match(phrase);
|
||||
self.checks.append(new_check) catch @panic("OOM");
|
||||
}
|
||||
|
|
@ -295,17 +301,18 @@ pub fn checkComputeCompare(
|
|||
program: []const u8,
|
||||
expected: ComputeCompareExpected,
|
||||
) void {
|
||||
var new_check = Check.create(self.builder);
|
||||
var new_check = Check.create(self.step.owner);
|
||||
new_check.computeCmp(program, expected);
|
||||
self.checks.append(new_check) catch @panic("OOM");
|
||||
}
|
||||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
_ = prog_node;
|
||||
const b = step.owner;
|
||||
const gpa = b.allocator;
|
||||
const self = @fieldParentPtr(CheckObjectStep, "step", step);
|
||||
|
||||
const gpa = self.builder.allocator;
|
||||
const src_path = self.source.getPath(self.builder);
|
||||
const src_path = self.source.getPath(b);
|
||||
const contents = try fs.cwd().readFileAllocOptions(
|
||||
gpa,
|
||||
src_path,
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -34,7 +34,6 @@ pub const Value = union(enum) {
|
|||
};
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
values: std.StringArrayHashMap(Value),
|
||||
output_file: std.Build.GeneratedFile,
|
||||
|
||||
|
|
@ -49,8 +48,8 @@ pub const Options = struct {
|
|||
first_ret_addr: ?usize = null,
|
||||
};
|
||||
|
||||
pub fn create(builder: *std.Build, options: Options) *ConfigHeaderStep {
|
||||
const self = builder.allocator.create(ConfigHeaderStep) catch @panic("OOM");
|
||||
pub fn create(owner: *std.Build, options: Options) *ConfigHeaderStep {
|
||||
const self = owner.allocator.create(ConfigHeaderStep) catch @panic("OOM");
|
||||
|
||||
var include_path: []const u8 = "config.h";
|
||||
|
||||
|
|
@ -69,29 +68,28 @@ pub fn create(builder: *std.Build, options: Options) *ConfigHeaderStep {
|
|||
}
|
||||
|
||||
const name = if (options.style.getFileSource()) |s|
|
||||
builder.fmt("configure {s} header {s} to {s}", .{
|
||||
owner.fmt("configure {s} header {s} to {s}", .{
|
||||
@tagName(options.style), s.getDisplayName(), include_path,
|
||||
})
|
||||
else
|
||||
builder.fmt("configure {s} header to {s}", .{@tagName(options.style), include_path});
|
||||
owner.fmt("configure {s} header to {s}", .{ @tagName(options.style), include_path });
|
||||
|
||||
self.* = .{
|
||||
.builder = builder,
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = base_id,
|
||||
.name = name,
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
.first_ret_addr = options.first_ret_addr orelse @returnAddress(),
|
||||
}),
|
||||
.style = options.style,
|
||||
.values = std.StringArrayHashMap(Value).init(builder.allocator),
|
||||
.values = std.StringArrayHashMap(Value).init(owner.allocator),
|
||||
|
||||
.max_bytes = options.max_bytes,
|
||||
.include_path = include_path,
|
||||
.output_file = .{ .step = &self.step },
|
||||
};
|
||||
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
|
|
@ -161,8 +159,9 @@ fn putValue(self: *ConfigHeaderStep, field_name: []const u8, comptime T: type, v
|
|||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
_ = prog_node;
|
||||
const b = step.owner;
|
||||
const self = @fieldParentPtr(ConfigHeaderStep, "step", step);
|
||||
const gpa = self.builder.allocator;
|
||||
const gpa = b.allocator;
|
||||
|
||||
// The cache is used here not really as a way to speed things up - because writing
|
||||
// the data to a file would probably be very fast - but as a way to find a canonical
|
||||
|
|
@ -191,13 +190,13 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
switch (self.style) {
|
||||
.autoconf => |file_source| {
|
||||
try output.appendSlice(c_generated_line);
|
||||
const src_path = file_source.getPath(self.builder);
|
||||
const src_path = file_source.getPath(b);
|
||||
const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
|
||||
try render_autoconf(contents, &output, self.values, src_path);
|
||||
},
|
||||
.cmake => |file_source| {
|
||||
try output.appendSlice(c_generated_line);
|
||||
const src_path = file_source.getPath(self.builder);
|
||||
const src_path = file_source.getPath(b);
|
||||
const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
|
||||
try render_cmake(contents, &output, self.values, src_path);
|
||||
},
|
||||
|
|
@ -222,7 +221,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
.{std.fmt.fmtSliceHexLower(&digest)},
|
||||
) catch unreachable;
|
||||
|
||||
const output_dir = try self.builder.cache_root.join(gpa, &.{ "o", &hash_basename });
|
||||
const output_dir = try b.cache_root.join(gpa, &.{ "o", &hash_basename });
|
||||
|
||||
// If output_path has directory parts, deal with them. Example:
|
||||
// output_dir is zig-cache/o/HASH
|
||||
|
|
@ -242,7 +241,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
|
||||
try dir.writeFile(std.fs.path.basename(self.include_path), output.items);
|
||||
|
||||
self.output_file.path = try std.fs.path.join(self.builder.allocator, &.{
|
||||
self.output_file.path = try std.fs.path.join(b.allocator, &.{
|
||||
output_dir, self.include_path,
|
||||
});
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,218 +0,0 @@
|
|||
//! Unlike `RunStep` this step will provide emulation, when enabled, to run foreign binaries.
|
||||
//! When a binary is foreign, but emulation for the target is disabled, the specified binary
|
||||
//! will not be run and therefore also not validated against its output.
|
||||
//! This step can be useful when wishing to run a built binary on multiple platforms,
|
||||
//! without having to verify if it's possible to be ran against.
|
||||
|
||||
const std = @import("../std.zig");
|
||||
const Step = std.Build.Step;
|
||||
const CompileStep = std.Build.CompileStep;
|
||||
const RunStep = std.Build.RunStep;
|
||||
|
||||
const fs = std.fs;
|
||||
const process = std.process;
|
||||
const EnvMap = process.EnvMap;
|
||||
|
||||
const EmulatableRunStep = @This();
|
||||
|
||||
pub const base_id = .emulatable_run;
|
||||
|
||||
const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
|
||||
/// The artifact (executable) to be run by this step
|
||||
exe: *CompileStep,
|
||||
|
||||
/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
|
||||
expected_term: ?std.ChildProcess.Term = .{ .Exited = 0 },
|
||||
|
||||
/// Override this field to modify the environment
|
||||
env_map: ?*EnvMap,
|
||||
|
||||
/// Set this to modify the current working directory
|
||||
cwd: ?[]const u8,
|
||||
|
||||
stdout_action: RunStep.StdIoAction = .inherit,
|
||||
stderr_action: RunStep.StdIoAction = .inherit,
|
||||
|
||||
/// When set to true, hides the warning of skipping a foreign binary which cannot be run on the host
|
||||
/// or through emulation.
|
||||
hide_foreign_binaries_warning: bool,
|
||||
|
||||
/// Creates a step that will execute the given artifact. This step will allow running the
|
||||
/// binary through emulation when any of the emulation options such as `enable_rosetta` are set to true.
|
||||
/// When set to false, and the binary is foreign, running the executable is skipped.
|
||||
/// Asserts given artifact is an executable.
|
||||
pub fn create(builder: *std.Build, name: []const u8, artifact: *CompileStep) *EmulatableRunStep {
|
||||
std.debug.assert(artifact.kind == .exe or artifact.kind == .test_exe);
|
||||
const self = builder.allocator.create(EmulatableRunStep) catch @panic("OOM");
|
||||
|
||||
const option_name = "hide-foreign-warnings";
|
||||
const hide_warnings = if (builder.available_options_map.get(option_name) == null) warn: {
|
||||
break :warn builder.option(bool, option_name, "Hide the warning when a foreign binary which is incompatible is skipped") orelse false;
|
||||
} else false;
|
||||
|
||||
self.* = .{
|
||||
.builder = builder,
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.id = .emulatable_run,
|
||||
.name = name,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.exe = artifact,
|
||||
.env_map = null,
|
||||
.cwd = null,
|
||||
.hide_foreign_binaries_warning = hide_warnings,
|
||||
};
|
||||
self.step.dependOn(&artifact.step);
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
_ = prog_node;
|
||||
const self = @fieldParentPtr(EmulatableRunStep, "step", step);
|
||||
const host_info = self.builder.host;
|
||||
|
||||
var argv_list = std.ArrayList([]const u8).init(self.builder.allocator);
|
||||
defer argv_list.deinit();
|
||||
|
||||
const need_cross_glibc = self.exe.target.isGnuLibC() and self.exe.is_linking_libc;
|
||||
switch (host_info.getExternalExecutor(self.exe.target_info, .{
|
||||
.qemu_fixes_dl = need_cross_glibc and self.builder.glibc_runtimes_dir != null,
|
||||
.link_libc = self.exe.is_linking_libc,
|
||||
})) {
|
||||
.native => {},
|
||||
.rosetta => if (!self.builder.enable_rosetta) return warnAboutForeignBinaries(self),
|
||||
.wine => |bin_name| if (self.builder.enable_wine) {
|
||||
try argv_list.append(bin_name);
|
||||
} else return,
|
||||
.qemu => |bin_name| if (self.builder.enable_qemu) {
|
||||
const glibc_dir_arg = if (need_cross_glibc)
|
||||
self.builder.glibc_runtimes_dir orelse return
|
||||
else
|
||||
null;
|
||||
try argv_list.append(bin_name);
|
||||
if (glibc_dir_arg) |dir| {
|
||||
// TODO look into making this a call to `linuxTriple`. This
|
||||
// needs the directory to be called "i686" rather than
|
||||
// "x86" which is why we do it manually here.
|
||||
const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}";
|
||||
const cpu_arch = self.exe.target.getCpuArch();
|
||||
const os_tag = self.exe.target.getOsTag();
|
||||
const abi = self.exe.target.getAbi();
|
||||
const cpu_arch_name: []const u8 = if (cpu_arch == .x86)
|
||||
"i686"
|
||||
else
|
||||
@tagName(cpu_arch);
|
||||
const full_dir = try std.fmt.allocPrint(self.builder.allocator, fmt_str, .{
|
||||
dir, cpu_arch_name, @tagName(os_tag), @tagName(abi),
|
||||
});
|
||||
|
||||
try argv_list.append("-L");
|
||||
try argv_list.append(full_dir);
|
||||
}
|
||||
} else return warnAboutForeignBinaries(self),
|
||||
.darling => |bin_name| if (self.builder.enable_darling) {
|
||||
try argv_list.append(bin_name);
|
||||
} else return warnAboutForeignBinaries(self),
|
||||
.wasmtime => |bin_name| if (self.builder.enable_wasmtime) {
|
||||
try argv_list.append(bin_name);
|
||||
try argv_list.append("--dir=.");
|
||||
} else return warnAboutForeignBinaries(self),
|
||||
else => return warnAboutForeignBinaries(self),
|
||||
}
|
||||
|
||||
if (self.exe.target.isWindows()) {
|
||||
// On Windows we don't have rpaths so we have to add .dll search paths to PATH
|
||||
RunStep.addPathForDynLibsInternal(&self.step, self.builder, self.exe);
|
||||
}
|
||||
|
||||
const executable_path = self.exe.installed_path orelse self.exe.getOutputSource().getPath(self.builder);
|
||||
try argv_list.append(executable_path);
|
||||
|
||||
try RunStep.runCommand(
|
||||
argv_list.items,
|
||||
self.builder,
|
||||
self.expected_term,
|
||||
self.stdout_action,
|
||||
self.stderr_action,
|
||||
.Inherit,
|
||||
self.env_map,
|
||||
self.cwd,
|
||||
false,
|
||||
);
|
||||
}
|
||||
|
||||
pub fn expectStdErrEqual(self: *EmulatableRunStep, bytes: []const u8) void {
|
||||
self.stderr_action = .{ .expect_exact = self.builder.dupe(bytes) };
|
||||
}
|
||||
|
||||
pub fn expectStdOutEqual(self: *EmulatableRunStep, bytes: []const u8) void {
|
||||
self.stdout_action = .{ .expect_exact = self.builder.dupe(bytes) };
|
||||
}
|
||||
|
||||
fn warnAboutForeignBinaries(step: *EmulatableRunStep) void {
|
||||
if (step.hide_foreign_binaries_warning) return;
|
||||
const builder = step.builder;
|
||||
const artifact = step.exe;
|
||||
|
||||
const host_name = builder.host.target.zigTriple(builder.allocator) catch @panic("unhandled error");
|
||||
const foreign_name = artifact.target.zigTriple(builder.allocator) catch @panic("unhandled error");
|
||||
const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch @panic("unhandled error");
|
||||
const need_cross_glibc = artifact.target.isGnuLibC() and artifact.is_linking_libc;
|
||||
switch (builder.host.getExternalExecutor(target_info, .{
|
||||
.qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null,
|
||||
.link_libc = artifact.is_linking_libc,
|
||||
})) {
|
||||
.native => unreachable,
|
||||
.bad_dl => |foreign_dl| {
|
||||
const host_dl = builder.host.dynamic_linker.get() orelse "(none)";
|
||||
std.debug.print("the host system does not appear to be capable of executing binaries from the target because the host dynamic linker is '{s}', while the target dynamic linker is '{s}'. Consider setting the dynamic linker as '{s}'.\n", .{
|
||||
host_dl, foreign_dl, host_dl,
|
||||
});
|
||||
},
|
||||
.bad_os_or_cpu => {
|
||||
std.debug.print("the host system ({s}) does not appear to be capable of executing binaries from the target ({s}).\n", .{
|
||||
host_name, foreign_name,
|
||||
});
|
||||
},
|
||||
.darling => if (!builder.enable_darling) {
|
||||
std.debug.print(
|
||||
"the host system ({s}) does not appear to be capable of executing binaries " ++
|
||||
"from the target ({s}). Consider enabling darling.\n",
|
||||
.{ host_name, foreign_name },
|
||||
);
|
||||
},
|
||||
.rosetta => if (!builder.enable_rosetta) {
|
||||
std.debug.print(
|
||||
"the host system ({s}) does not appear to be capable of executing binaries " ++
|
||||
"from the target ({s}). Consider enabling rosetta.\n",
|
||||
.{ host_name, foreign_name },
|
||||
);
|
||||
},
|
||||
.wine => if (!builder.enable_wine) {
|
||||
std.debug.print(
|
||||
"the host system ({s}) does not appear to be capable of executing binaries " ++
|
||||
"from the target ({s}). Consider enabling wine.\n",
|
||||
.{ host_name, foreign_name },
|
||||
);
|
||||
},
|
||||
.qemu => if (!builder.enable_qemu) {
|
||||
std.debug.print(
|
||||
"the host system ({s}) does not appear to be capable of executing binaries " ++
|
||||
"from the target ({s}). Consider enabling qemu.\n",
|
||||
.{ host_name, foreign_name },
|
||||
);
|
||||
},
|
||||
.wasmtime => {
|
||||
std.debug.print(
|
||||
"the host system ({s}) does not appear to be capable of executing binaries " ++
|
||||
"from the target ({s}). Consider enabling wasmtime.\n",
|
||||
.{ host_name, foreign_name },
|
||||
);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -1,37 +1,73 @@
|
|||
const std = @import("../std.zig");
|
||||
const Step = std.Build.Step;
|
||||
const FmtStep = @This();
|
||||
//! This step has two modes:
|
||||
//! * Modify mode: directly modify source files, formatting them in place.
|
||||
//! * Check mode: fail the step if a non-conforming file is found.
|
||||
|
||||
step: Step,
|
||||
paths: []const []const u8,
|
||||
exclude_paths: []const []const u8,
|
||||
check: bool,
|
||||
|
||||
pub const base_id = .fmt;
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
argv: [][]const u8,
|
||||
pub const Options = struct {
|
||||
paths: []const []const u8 = &.{},
|
||||
exclude_paths: []const []const u8 = &.{},
|
||||
/// If true, fails the build step when any non-conforming files are encountered.
|
||||
check: bool = false,
|
||||
};
|
||||
|
||||
pub fn create(builder: *std.Build, paths: []const []const u8) *FmtStep {
|
||||
const self = builder.allocator.create(FmtStep) catch @panic("OOM");
|
||||
const name = "zig fmt";
|
||||
self.* = FmtStep{
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.id = .fmt,
|
||||
pub fn create(owner: *std.Build, options: Options) *FmtStep {
|
||||
const self = owner.allocator.create(FmtStep) catch @panic("OOM");
|
||||
const name = if (options.check) "zig fmt --check" else "zig fmt";
|
||||
self.* = .{
|
||||
.step = Step.init(.{
|
||||
.id = base_id,
|
||||
.name = name,
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.builder = builder,
|
||||
.argv = builder.allocator.alloc([]u8, paths.len + 2) catch @panic("OOM"),
|
||||
.paths = options.paths,
|
||||
.exclude_paths = options.exclude_paths,
|
||||
.check = options.check,
|
||||
};
|
||||
|
||||
self.argv[0] = builder.zig_exe;
|
||||
self.argv[1] = "fmt";
|
||||
for (paths, 0..) |path, i| {
|
||||
self.argv[2 + i] = builder.pathFromRoot(path);
|
||||
}
|
||||
return self;
|
||||
}
|
||||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
// zig fmt is fast enough that no progress is needed.
|
||||
_ = prog_node;
|
||||
|
||||
// TODO: if check=false, this means we are modifying source files in place, which
|
||||
// is an operation that could race against other operations also modifying source files
|
||||
// in place. In this case, this step should obtain a write lock while making those
|
||||
// modifications.
|
||||
|
||||
const b = step.owner;
|
||||
const arena = b.allocator;
|
||||
const self = @fieldParentPtr(FmtStep, "step", step);
|
||||
|
||||
return self.builder.spawnChild(self.argv);
|
||||
var argv: std.ArrayListUnmanaged([]const u8) = .{};
|
||||
try argv.ensureUnusedCapacity(arena, 2 + 1 + self.paths.len + 2 * self.exclude_paths.len);
|
||||
|
||||
argv.appendAssumeCapacity(b.zig_exe);
|
||||
argv.appendAssumeCapacity("fmt");
|
||||
|
||||
if (self.check) {
|
||||
argv.appendAssumeCapacity("--check");
|
||||
}
|
||||
|
||||
for (self.paths) |p| {
|
||||
argv.appendAssumeCapacity(b.pathFromRoot(p));
|
||||
}
|
||||
|
||||
for (self.exclude_paths) |p| {
|
||||
argv.appendAssumeCapacity("--exclude");
|
||||
argv.appendAssumeCapacity(b.pathFromRoot(p));
|
||||
}
|
||||
|
||||
return step.evalChildProcess(argv.items);
|
||||
}
|
||||
|
||||
const std = @import("../std.zig");
|
||||
const Step = std.Build.Step;
|
||||
const FmtStep = @This();
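For comparison with the old path-slice API being replaced above, a minimal sketch of how a project's build.zig could wire up the new options-based FmtStep. The path list and step descriptions are illustrative; the wiring mirrors the test-fmt/fmt steps added to Zig's own build.zig earlier in this commit.

```zig
const std = @import("std");

pub fn build(b: *std.Build) void {
    const fmt_paths = &[_][]const u8{ "src", "build.zig" };

    // Check mode: the step fails when a non-conforming file is found.
    const check_fmt = b.addFmt(.{ .paths = fmt_paths, .check = true });
    b.step("test-fmt", "Check source formatting").dependOn(&check_fmt.step);

    // Modify mode (the default): rewrites the listed files in place.
    const do_fmt = b.addFmt(.{ .paths = fmt_paths });
    b.step("fmt", "Format source files in place").dependOn(&do_fmt.step);
}
```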
|
||||
|
|
|
|||
|
|
@ -7,23 +7,24 @@ const InstallArtifactStep = @This();
|
|||
pub const base_id = .install_artifact;
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
dest_builder: *std.Build,
|
||||
artifact: *CompileStep,
|
||||
dest_dir: InstallDir,
|
||||
pdb_dir: ?InstallDir,
|
||||
h_dir: ?InstallDir,
|
||||
|
||||
pub fn create(builder: *std.Build, artifact: *CompileStep) *InstallArtifactStep {
|
||||
pub fn create(owner: *std.Build, artifact: *CompileStep) *InstallArtifactStep {
|
||||
if (artifact.install_step) |s| return s;
|
||||
|
||||
const self = builder.allocator.create(InstallArtifactStep) catch @panic("OOM");
|
||||
const self = owner.allocator.create(InstallArtifactStep) catch @panic("OOM");
|
||||
self.* = InstallArtifactStep{
|
||||
.builder = builder,
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = base_id,
|
||||
.name = builder.fmt("install {s}", .{artifact.name}),
|
||||
.name = owner.fmt("install {s}", .{artifact.name}),
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.dest_builder = owner,
|
||||
.artifact = artifact,
|
||||
.dest_dir = artifact.override_dest_dir orelse switch (artifact.kind) {
|
||||
.obj => @panic("Cannot install a .obj build artifact."),
|
||||
|
|
@ -43,48 +44,52 @@ pub fn create(builder: *std.Build, artifact: *CompileStep) *InstallArtifactStep
|
|||
self.step.dependOn(&artifact.step);
|
||||
artifact.install_step = self;
|
||||
|
||||
builder.pushInstalledFile(self.dest_dir, artifact.out_filename);
|
||||
owner.pushInstalledFile(self.dest_dir, artifact.out_filename);
|
||||
if (self.artifact.isDynamicLibrary()) {
|
||||
if (artifact.major_only_filename) |name| {
|
||||
builder.pushInstalledFile(.lib, name);
|
||||
owner.pushInstalledFile(.lib, name);
|
||||
}
|
||||
if (artifact.name_only_filename) |name| {
|
||||
builder.pushInstalledFile(.lib, name);
|
||||
owner.pushInstalledFile(.lib, name);
|
||||
}
|
||||
if (self.artifact.target.isWindows()) {
|
||||
builder.pushInstalledFile(.lib, artifact.out_lib_filename);
|
||||
owner.pushInstalledFile(.lib, artifact.out_lib_filename);
|
||||
}
|
||||
}
|
||||
if (self.pdb_dir) |pdb_dir| {
|
||||
builder.pushInstalledFile(pdb_dir, artifact.out_pdb_filename);
|
||||
owner.pushInstalledFile(pdb_dir, artifact.out_pdb_filename);
|
||||
}
|
||||
if (self.h_dir) |h_dir| {
|
||||
builder.pushInstalledFile(h_dir, artifact.out_h_filename);
|
||||
owner.pushInstalledFile(h_dir, artifact.out_h_filename);
|
||||
}
|
||||
return self;
|
||||
}
|
||||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
_ = prog_node;
|
||||
const src_builder = step.owner;
|
||||
const self = @fieldParentPtr(InstallArtifactStep, "step", step);
|
||||
const builder = self.builder;
|
||||
const dest_builder = self.dest_builder;
|
||||
|
||||
const full_dest_path = builder.getInstallPath(self.dest_dir, self.artifact.out_filename);
|
||||
try builder.updateFile(self.artifact.getOutputSource().getPath(builder), full_dest_path);
|
||||
const full_dest_path = dest_builder.getInstallPath(self.dest_dir, self.artifact.out_filename);
|
||||
try src_builder.updateFile(
|
||||
self.artifact.getOutputSource().getPath(src_builder),
|
||||
full_dest_path,
|
||||
);
|
||||
if (self.artifact.isDynamicLibrary() and self.artifact.version != null and self.artifact.target.wantSharedLibSymLinks()) {
|
||||
try CompileStep.doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
|
||||
try CompileStep.doAtomicSymLinks(src_builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
|
||||
}
|
||||
if (self.artifact.isDynamicLibrary() and self.artifact.target.isWindows() and self.artifact.emit_implib != .no_emit) {
|
||||
const full_implib_path = builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename);
|
||||
try builder.updateFile(self.artifact.getOutputLibSource().getPath(builder), full_implib_path);
|
||||
const full_implib_path = dest_builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename);
|
||||
try src_builder.updateFile(self.artifact.getOutputLibSource().getPath(src_builder), full_implib_path);
|
||||
}
|
||||
if (self.pdb_dir) |pdb_dir| {
|
||||
const full_pdb_path = builder.getInstallPath(pdb_dir, self.artifact.out_pdb_filename);
|
||||
try builder.updateFile(self.artifact.getOutputPdbSource().getPath(builder), full_pdb_path);
|
||||
const full_pdb_path = dest_builder.getInstallPath(pdb_dir, self.artifact.out_pdb_filename);
|
||||
try src_builder.updateFile(self.artifact.getOutputPdbSource().getPath(src_builder), full_pdb_path);
|
||||
}
|
||||
if (self.h_dir) |h_dir| {
|
||||
const full_h_path = builder.getInstallPath(h_dir, self.artifact.out_h_filename);
|
||||
try builder.updateFile(self.artifact.getOutputHSource().getPath(builder), full_h_path);
|
||||
const full_h_path = dest_builder.getInstallPath(h_dir, self.artifact.out_h_filename);
|
||||
try src_builder.updateFile(self.artifact.getOutputHSource().getPath(src_builder), full_h_path);
|
||||
}
|
||||
self.artifact.installed_path = full_dest_path;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,11 +7,10 @@ const InstallDirStep = @This();
|
|||
const log = std.log;
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
options: Options,
|
||||
/// This is used by the build system when a file being installed comes from one
|
||||
/// package but is being installed by another.
|
||||
override_source_builder: ?*std.Build = null,
|
||||
dest_builder: *std.Build,
|
||||
|
||||
pub const base_id = .install_dir;
|
||||
|
||||
|
|
@ -40,27 +39,26 @@ pub const Options = struct {
|
|||
}
|
||||
};
|
||||
|
||||
pub fn init(
|
||||
builder: *std.Build,
|
||||
options: Options,
|
||||
) InstallDirStep {
|
||||
builder.pushInstalledFile(options.install_dir, options.install_subdir);
|
||||
pub fn init(owner: *std.Build, options: Options) InstallDirStep {
|
||||
owner.pushInstalledFile(options.install_dir, options.install_subdir);
|
||||
return .{
|
||||
.builder = builder,
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = .install_dir,
|
||||
.name = builder.fmt("install {s}/", .{options.source_dir}),
|
||||
.name = owner.fmt("install {s}/", .{options.source_dir}),
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.options = options.dupe(builder),
|
||||
.options = options.dupe(owner),
|
||||
.dest_builder = owner,
|
||||
};
|
||||
}
|
||||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
_ = prog_node;
|
||||
const self = @fieldParentPtr(InstallDirStep, "step", step);
|
||||
const dest_prefix = self.builder.getInstallPath(self.options.install_dir, self.options.install_subdir);
|
||||
const src_builder = self.override_source_builder orelse self.builder;
|
||||
const dest_builder = self.dest_builder;
|
||||
const dest_prefix = dest_builder.getInstallPath(self.options.install_dir, self.options.install_subdir);
|
||||
const src_builder = self.step.owner;
|
||||
const full_src_dir = src_builder.pathFromRoot(self.options.source_dir);
|
||||
var src_dir = std.fs.cwd().openIterableDir(full_src_dir, .{}) catch |err| {
|
||||
log.err("InstallDirStep: unable to open source directory '{s}': {s}", .{
|
||||
|
|
@ -69,7 +67,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
return error.StepFailed;
|
||||
};
|
||||
defer src_dir.close();
|
||||
var it = try src_dir.walk(self.builder.allocator);
|
||||
var it = try src_dir.walk(dest_builder.allocator);
|
||||
next_entry: while (try it.next()) |entry| {
|
||||
for (self.options.exclude_extensions) |ext| {
|
||||
if (mem.endsWith(u8, entry.path, ext)) {
|
||||
|
|
@ -77,20 +75,20 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
}
|
||||
}
|
||||
|
||||
const full_path = self.builder.pathJoin(&.{ full_src_dir, entry.path });
|
||||
const dest_path = self.builder.pathJoin(&.{ dest_prefix, entry.path });
|
||||
const full_path = dest_builder.pathJoin(&.{ full_src_dir, entry.path });
|
||||
const dest_path = dest_builder.pathJoin(&.{ dest_prefix, entry.path });
|
||||
|
||||
switch (entry.kind) {
|
||||
.Directory => try fs.cwd().makePath(dest_path),
|
||||
.File => {
|
||||
for (self.options.blank_extensions) |ext| {
|
||||
if (mem.endsWith(u8, entry.path, ext)) {
|
||||
try self.builder.truncateFile(dest_path);
|
||||
try dest_builder.truncateFile(dest_path);
|
||||
continue :next_entry;
|
||||
}
|
||||
}
|
||||
|
||||
try self.builder.updateFile(full_path, dest_path);
|
||||
try dest_builder.updateFile(full_path, dest_path);
|
||||
},
|
||||
else => continue,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,39 +7,40 @@ const InstallFileStep = @This();
|
|||
pub const base_id = .install_file;
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
source: FileSource,
|
||||
dir: InstallDir,
|
||||
dest_rel_path: []const u8,
|
||||
/// This is used by the build system when a file being installed comes from one
|
||||
/// package but is being installed by another.
|
||||
override_source_builder: ?*std.Build = null,
|
||||
dest_builder: *std.Build,
|
||||
|
||||
pub fn init(
|
||||
builder: *std.Build,
|
||||
owner: *std.Build,
|
||||
source: FileSource,
|
||||
dir: InstallDir,
|
||||
dest_rel_path: []const u8,
|
||||
) InstallFileStep {
|
||||
builder.pushInstalledFile(dir, dest_rel_path);
|
||||
owner.pushInstalledFile(dir, dest_rel_path);
|
||||
return InstallFileStep{
|
||||
.builder = builder,
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.id = .install_file,
|
||||
.name = builder.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }),
|
||||
.step = Step.init(.{
|
||||
.id = base_id,
|
||||
.name = owner.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }),
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.source = source.dupe(builder),
|
||||
.dir = dir.dupe(builder),
|
||||
.dest_rel_path = builder.dupePath(dest_rel_path),
|
||||
.source = source.dupe(owner),
|
||||
.dir = dir.dupe(owner),
|
||||
.dest_rel_path = owner.dupePath(dest_rel_path),
|
||||
.dest_builder = owner,
|
||||
};
|
||||
}
|
||||
|
||||
 fn make(step: *Step, prog_node: *std.Progress.Node) !void {
     _ = prog_node;
+    const src_builder = step.owner;
     const self = @fieldParentPtr(InstallFileStep, "step", step);
-    const src_builder = self.override_source_builder orelse self.builder;
+    const dest_builder = self.dest_builder;
     const full_src_path = self.source.getPath2(src_builder, step);
-    const full_dest_path = self.builder.getInstallPath(self.dir, self.dest_rel_path);
-    try self.builder.updateFile(full_src_path, full_dest_path);
+    const full_dest_path = dest_builder.getInstallPath(self.dir, self.dest_rel_path);
+    try dest_builder.updateFile(full_src_path, full_dest_path);
 }
|
||||
|
|
|
|||
|
|
@@ -1,28 +0,0 @@
-const std = @import("../std.zig");
-const log = std.log;
-const Step = std.Build.Step;
-const LogStep = @This();
-
-pub const base_id = .log;
-
-step: Step,
-builder: *std.Build,
-data: []const u8,
-
-pub fn init(builder: *std.Build, data: []const u8) LogStep {
-    return LogStep{
-        .builder = builder,
-        .step = Step.init(builder.allocator, .{
-            .id = .log,
-            .name = builder.fmt("log {s}", .{data}),
-            .makeFn = make,
-        }),
-        .data = builder.dupe(data),
-    };
-}
-
-fn make(step: *Step, prog_node: *std.Progress.Node) anyerror!void {
-    _ = prog_node;
-    const self = @fieldParentPtr(LogStep, "step", step);
-    log.info("{s}", .{self.data});
-}

@ -21,7 +21,6 @@ pub const RawFormat = enum {
|
|||
};
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
file_source: std.Build.FileSource,
|
||||
basename: []const u8,
|
||||
output_file: std.Build.GeneratedFile,
|
||||
|
|
@ -38,18 +37,18 @@ pub const Options = struct {
|
|||
};
|
||||
|
||||
pub fn create(
|
||||
builder: *std.Build,
|
||||
owner: *std.Build,
|
||||
file_source: std.Build.FileSource,
|
||||
options: Options,
|
||||
) *ObjCopyStep {
|
||||
const self = builder.allocator.create(ObjCopyStep) catch @panic("OOM");
|
||||
const self = owner.allocator.create(ObjCopyStep) catch @panic("OOM");
|
||||
self.* = ObjCopyStep{
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = base_id,
|
||||
.name = builder.fmt("objcopy {s}", .{file_source.getDisplayName()}),
|
||||
.name = owner.fmt("objcopy {s}", .{file_source.getDisplayName()}),
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.builder = builder,
|
||||
.file_source = file_source,
|
||||
.basename = options.basename orelse file_source.getDisplayName(),
|
||||
.output_file = std.Build.GeneratedFile{ .step = &self.step },
|
||||
|
|
@ -67,9 +66,8 @@ pub fn getOutputSource(self: *const ObjCopyStep) std.Build.FileSource {
|
|||
}
|
||||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
_ = prog_node;
|
||||
const b = step.owner;
|
||||
const self = @fieldParentPtr(ObjCopyStep, "step", step);
|
||||
const b = self.builder;
|
||||
|
||||
var man = b.cache.obtain();
|
||||
defer man.deinit();
|
||||
|
|
@ -84,7 +82,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
man.hash.addOptional(self.pad_to);
|
||||
man.hash.addOptional(self.format);
|
||||
|
||||
if (man.hit() catch |err| failWithCacheError(man, err)) {
|
||||
if (try step.cacheHit(&man)) {
|
||||
// Cache hit, skip subprocess execution.
|
||||
const digest = man.final();
|
||||
self.output_file.path = try b.cache_root.join(b.allocator, &.{
|
||||
|
|
@ -116,23 +114,8 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
};
|
||||
|
||||
try argv.appendSlice(&.{ full_src_path, full_dest_path });
|
||||
_ = try self.builder.execFromStep(argv.items, &self.step);
|
||||
_ = try step.spawnZigProcess(argv.items, prog_node);
|
||||
|
||||
self.output_file.path = full_dest_path;
|
||||
try man.writeManifest();
|
||||
}
|
||||
|
||||
/// TODO consolidate this with the same function in RunStep?
|
||||
/// Also properly deal with concurrency (see open PR)
|
||||
fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
|
||||
const i = man.failed_file_index orelse failWithSimpleError(err);
|
||||
const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
|
||||
const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
|
||||
std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
|
||||
std.process.exit(1);
|
||||
}
|
||||
|
||||
fn failWithSimpleError(err: anyerror) noreturn {
|
||||
std.debug.print("{s}\n", .{@errorName(err)});
|
||||
std.process.exit(1);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -12,25 +12,24 @@ pub const base_id = .options;
|
|||
|
||||
step: Step,
|
||||
generated_file: GeneratedFile,
|
||||
builder: *std.Build,
|
||||
|
||||
contents: std.ArrayList(u8),
|
||||
artifact_args: std.ArrayList(OptionArtifactArg),
|
||||
file_source_args: std.ArrayList(OptionFileSourceArg),
|
||||
|
||||
pub fn create(builder: *std.Build) *OptionsStep {
|
||||
const self = builder.allocator.create(OptionsStep) catch @panic("OOM");
|
||||
pub fn create(owner: *std.Build) *OptionsStep {
|
||||
const self = owner.allocator.create(OptionsStep) catch @panic("OOM");
|
||||
self.* = .{
|
||||
.builder = builder,
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = base_id,
|
||||
.name = "options",
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.generated_file = undefined,
|
||||
.contents = std.ArrayList(u8).init(builder.allocator),
|
||||
.artifact_args = std.ArrayList(OptionArtifactArg).init(builder.allocator),
|
||||
.file_source_args = std.ArrayList(OptionFileSourceArg).init(builder.allocator),
|
||||
.contents = std.ArrayList(u8).init(owner.allocator),
|
||||
.artifact_args = std.ArrayList(OptionArtifactArg).init(owner.allocator),
|
||||
.file_source_args = std.ArrayList(OptionFileSourceArg).init(owner.allocator),
|
||||
};
|
||||
self.generated_file = .{ .step = &self.step };
|
||||
|
||||
|
|
@ -196,7 +195,7 @@ pub fn addOptionFileSource(
|
|||
) void {
|
||||
self.file_source_args.append(.{
|
||||
.name = name,
|
||||
.source = source.dupe(self.builder),
|
||||
.source = source.dupe(self.step.owner),
|
||||
}) catch @panic("OOM");
|
||||
source.addStepDependencies(&self.step);
|
||||
}
|
||||
|
|
@ -204,12 +203,12 @@ pub fn addOptionFileSource(
|
|||
/// The value is the path in the cache dir.
|
||||
/// Adds a dependency automatically.
|
||||
pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *CompileStep) void {
|
||||
self.artifact_args.append(.{ .name = self.builder.dupe(name), .artifact = artifact }) catch @panic("OOM");
|
||||
self.artifact_args.append(.{ .name = self.step.owner.dupe(name), .artifact = artifact }) catch @panic("OOM");
|
||||
self.step.dependOn(&artifact.step);
|
||||
}
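For orientation, a build script consumes OptionsStep roughly as follows; a hedged sketch in which the option names, file path, `helper_exe`, and `exe` compile step are invented for the example:

    // Hypothetical build.zig fragment.
    const opts = std.Build.OptionsStep.create(b);
    opts.addOption(bool, "enable_tracing", true);
    opts.addOptionFileSource("config_path", .{ .path = "config/default.json" });
    opts.addOptionArtifact("helper_tool", helper_exe);
    exe.addOptions("build_options", opts);
    // In the compiled program: @import("build_options").enable_tracing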
|
||||
|
||||
pub fn createModule(self: *OptionsStep) *std.Build.Module {
|
||||
return self.builder.createModule(.{
|
||||
return self.step.owner.createModule(.{
|
||||
.source_file = self.getSource(),
|
||||
.dependencies = &.{},
|
||||
});
|
||||
|
|
@ -220,14 +219,17 @@ pub fn getSource(self: *OptionsStep) FileSource {
|
|||
}
|
||||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
// This step completes so quickly that no progress is necessary.
|
||||
_ = prog_node;
|
||||
|
||||
const b = step.owner;
|
||||
const self = @fieldParentPtr(OptionsStep, "step", step);
|
||||
|
||||
for (self.artifact_args.items) |item| {
|
||||
self.addOption(
|
||||
[]const u8,
|
||||
item.name,
|
||||
self.builder.pathFromRoot(item.artifact.getOutputSource().getPath(self.builder)),
|
||||
b.pathFromRoot(item.artifact.getOutputSource().getPath(b)),
|
||||
);
|
||||
}
|
||||
|
||||
|
|
@ -235,20 +237,18 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
self.addOption(
|
||||
[]const u8,
|
||||
item.name,
|
||||
item.source.getPath(self.builder),
|
||||
item.source.getPath(b),
|
||||
);
|
||||
}
|
||||
|
||||
var options_dir = try self.builder.cache_root.handle.makeOpenPath("options", .{});
|
||||
var options_dir = try b.cache_root.handle.makeOpenPath("options", .{});
|
||||
defer options_dir.close();
|
||||
|
||||
const basename = self.hashContentsToFileName();
|
||||
|
||||
try options_dir.writeFile(&basename, self.contents.items);
|
||||
|
||||
self.generated_file.path = try self.builder.cache_root.join(self.builder.allocator, &.{
|
||||
"options", &basename,
|
||||
});
|
||||
self.generated_file.path = try b.cache_root.join(b.allocator, &.{ "options", &basename });
|
||||
}
|
||||
|
||||
fn hashContentsToFileName(self: *OptionsStep) [64]u8 {
|
||||
|
|
|
|||
|
|
@@ -7,28 +7,37 @@ const RemoveDirStep = @This();
 pub const base_id = .remove_dir;

 step: Step,
-builder: *std.Build,
 dir_path: []const u8,

-pub fn init(builder: *std.Build, dir_path: []const u8) RemoveDirStep {
+pub fn init(owner: *std.Build, dir_path: []const u8) RemoveDirStep {
     return RemoveDirStep{
-        .builder = builder,
-        .step = Step.init(builder.allocator, .{
+        .step = Step.init(.{
             .id = .remove_dir,
-            .name = builder.fmt("RemoveDir {s}", .{dir_path}),
+            .name = owner.fmt("RemoveDir {s}", .{dir_path}),
+            .owner = owner,
             .makeFn = make,
         }),
-        .dir_path = builder.dupePath(dir_path),
+        .dir_path = owner.dupePath(dir_path),
     };
 }

 fn make(step: *Step, prog_node: *std.Progress.Node) !void {
+    // TODO update progress node while walking file system.
+    // Should the standard library support this use case??
     _ = prog_node;
+
+    const b = step.owner;
     const self = @fieldParentPtr(RemoveDirStep, "step", step);

-    const full_path = self.builder.pathFromRoot(self.dir_path);
-    fs.cwd().deleteTree(full_path) catch |err| {
-        log.err("Unable to remove {s}: {s}", .{ full_path, @errorName(err) });
-        return err;
+    b.build_root.handle.deleteTree(self.dir_path) catch |err| {
+        if (b.build_root.path) |base| {
+            return step.fail("unable to recursively delete path '{s}/{s}': {s}", .{
+                base, self.dir_path, @errorName(err),
+            });
+        } else {
+            return step.fail("unable to recursively delete path '{s}': {s}", .{
+                self.dir_path, @errorName(err),
+            });
+        }
     };
 }
|
|
|
|||
|
|
@ -11,14 +11,11 @@ const EnvMap = process.EnvMap;
|
|||
const Allocator = mem.Allocator;
|
||||
const ExecError = std.Build.ExecError;
|
||||
|
||||
const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
|
||||
|
||||
const RunStep = @This();
|
||||
|
||||
pub const base_id: Step.Id = .run;
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
|
||||
/// See also addArg and addArgs to modifying this directly
|
||||
argv: ArrayList(Arg),
|
||||
|
|
@ -29,35 +26,68 @@ cwd: ?[]const u8,
|
|||
/// Override this field to modify the environment, or use setEnvironmentVariable
|
||||
env_map: ?*EnvMap,
|
||||
|
||||
-stdout_action: StdIoAction = .inherit,
-stderr_action: StdIoAction = .inherit,
-
-stdin_behavior: std.ChildProcess.StdIo = .Inherit,
-
-/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
-expected_term: ?std.ChildProcess.Term = .{ .Exited = 0 },
-
-/// Print the command before running it
-print: bool,
-/// Controls whether execution is skipped if the output file is up-to-date.
-/// The default is to always run if there is no output file, and to skip
-/// running if all output files are up-to-date.
-condition: enum { output_outdated, always } = .output_outdated,
+/// Configures whether the RunStep is considered to have side-effects, and also
+/// whether the RunStep will inherit stdio streams, forwarding them to the
+/// parent process, in which case will require a global lock to prevent other
+/// steps from interfering with stdio while the subprocess associated with this
+/// RunStep is running.
+/// If the RunStep is determined to not have side-effects, then execution will
+/// be skipped if all output files are up-to-date and input files are
+/// unchanged.
+stdio: StdIo = .infer_from_args,

 /// Additional file paths relative to build.zig that, when modified, indicate
 /// that the RunStep should be re-executed.
 /// If the RunStep is determined to have side-effects, this field is ignored
 /// and the RunStep is always executed when it appears in the build graph.
 extra_file_dependencies: []const []const u8 = &.{},

 /// After adding an output argument, this step will by default rename itself
 /// for a better display name in the build summary.
 /// This can be disabled by setting this to false.
-rename_step_with_output_arg: bool,
+rename_step_with_output_arg: bool = true,

-pub const StdIoAction = union(enum) {
+/// If this is true, a RunStep which is configured to check the output of the
+/// executed binary will not fail the build if the binary cannot be executed
+/// due to being for a foreign binary to the host system which is running the
+/// build graph.
+/// Command-line arguments such as -fqemu and -fwasmtime may affect whether a
+/// binary is detected as foreign, as well as system configuration such as
+/// Rosetta (macOS) and binfmt_misc (Linux).
+skip_foreign_checks: bool = false,
+
+/// If stderr or stdout exceeds this amount, the child process is killed and
+/// the step fails.
+max_stdio_size: usize = 10 * 1024 * 1024,
+
+pub const StdIo = union(enum) {
+    /// Whether the RunStep has side-effects will be determined by whether or not one
+    /// of the args is an output file (added with `addOutputFileArg`).
+    /// If the RunStep is determined to have side-effects, this is the same as `inherit`.
+    /// The step will fail if the subprocess crashes or returns a non-zero exit code.
+    infer_from_args,
+    /// Causes the RunStep to be considered to have side-effects, and therefore
+    /// always execute when it appears in the build graph.
+    /// It also means that this step will obtain a global lock to prevent other
+    /// steps from running in the meantime.
+    /// The step will fail if the subprocess crashes or returns a non-zero exit code.
+    inherit,
-    ignore,
-    expect_exact: []const u8,
-    expect_matches: []const []const u8,
+    /// Causes the RunStep to be considered to *not* have side-effects. The
+    /// process will be re-executed if any of the input dependencies are
+    /// modified. The exit code and standard I/O streams will be checked for
+    /// certain conditions, and the step will succeed or fail based on these
+    /// conditions.
+    /// Note that an explicit check for exit code 0 needs to be added to this
+    /// list if such a check is desireable.
+    check: []const Check,
+
+    pub const Check = union(enum) {
+        expect_stderr_exact: []const u8,
+        expect_stderr_match: []const u8,
+        expect_stdout_exact: []const u8,
+        expect_stdout_match: []const u8,
+        expect_term: std.ChildProcess.Term,
+    };
+};
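For orientation, this is roughly how a build.zig would drive the new stdio modes; a hedged sketch in which the `exe` artifact, the expected output, and the `test_step` are invented for the example:

    // Hypothetical build.zig fragment using the API declared above.
    const run = std.Build.RunStep.create(b, "run my_app");
    run.addArtifactArg(exe);
    // The helper methods switch stdio into `.check` mode and append checks:
    run.expectStdOutEqual("hello\n"); // .{ .expect_stdout_exact = "hello\n" }
    run.expectExitCode(0);            // .{ .expect_term = .{ .Exited = 0 } }
    // If the binary targets a foreign architecture the host cannot execute,
    // skip the checks instead of failing the build:
    run.skip_foreign_checks = true;
    test_step.dependOn(&run.step);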
pub const Arg = union(enum) {
|
||||
|
|
@ -72,20 +102,20 @@ pub const Arg = union(enum) {
|
|||
};
|
||||
};
|
||||
|
||||
pub fn create(builder: *std.Build, name: []const u8) *RunStep {
|
||||
const self = builder.allocator.create(RunStep) catch @panic("OOM");
|
||||
pub fn create(owner: *std.Build, name: []const u8) *RunStep {
|
||||
const self = owner.allocator.create(RunStep) catch @panic("OOM");
|
||||
self.* = .{
|
||||
.builder = builder,
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = base_id,
|
||||
.name = name,
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.argv = ArrayList(Arg).init(builder.allocator),
|
||||
.argv = ArrayList(Arg).init(owner.allocator),
|
||||
.cwd = null,
|
||||
.env_map = null,
|
||||
.print = builder.verbose,
|
||||
.rename_step_with_output_arg = true,
|
||||
.max_stdio_size = 10 * 1024 * 1024,
|
||||
};
|
||||
return self;
|
||||
}
|
||||
|
|
@ -99,16 +129,17 @@ pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void {
|
|||
/// run, and returns a FileSource which can be used as inputs to other APIs
|
||||
/// throughout the build system.
|
||||
pub fn addOutputFileArg(rs: *RunStep, basename: []const u8) std.Build.FileSource {
|
||||
const generated_file = rs.builder.allocator.create(std.Build.GeneratedFile) catch @panic("OOM");
|
||||
const b = rs.step.owner;
|
||||
const generated_file = b.allocator.create(std.Build.GeneratedFile) catch @panic("OOM");
|
||||
generated_file.* = .{ .step = &rs.step };
|
||||
rs.argv.append(.{ .output = .{
|
||||
.generated_file = generated_file,
|
||||
.basename = rs.builder.dupe(basename),
|
||||
.basename = b.dupe(basename),
|
||||
} }) catch @panic("OOM");
|
||||
|
||||
if (rs.rename_step_with_output_arg) {
|
||||
rs.rename_step_with_output_arg = false;
|
||||
rs.step.name = rs.builder.fmt("{s} ({s})", .{ rs.step.name, basename });
|
||||
rs.step.name = b.fmt("{s} ({s})", .{ rs.step.name, basename });
|
||||
}
|
||||
|
||||
return .{ .generated = generated_file };
|
||||
|
|
@ -116,13 +147,13 @@ pub fn addOutputFileArg(rs: *RunStep, basename: []const u8) std.Build.FileSource
|
|||
|
||||
pub fn addFileSourceArg(self: *RunStep, file_source: std.Build.FileSource) void {
|
||||
self.argv.append(Arg{
|
||||
.file_source = file_source.dupe(self.builder),
|
||||
.file_source = file_source.dupe(self.step.owner),
|
||||
}) catch @panic("OOM");
|
||||
file_source.addStepDependencies(&self.step);
|
||||
}
|
||||
|
||||
pub fn addArg(self: *RunStep, arg: []const u8) void {
|
||||
self.argv.append(Arg{ .bytes = self.builder.dupe(arg) }) catch @panic("OOM");
|
||||
self.argv.append(Arg{ .bytes = self.step.owner.dupe(arg) }) catch @panic("OOM");
|
||||
}
|
||||
|
||||
pub fn addArgs(self: *RunStep, args: []const []const u8) void {
|
||||
|
|
@ -132,13 +163,14 @@ pub fn addArgs(self: *RunStep, args: []const []const u8) void {
|
|||
}
|
||||
|
||||
pub fn clearEnvironment(self: *RunStep) void {
|
||||
const new_env_map = self.builder.allocator.create(EnvMap) catch @panic("OOM");
|
||||
new_env_map.* = EnvMap.init(self.builder.allocator);
|
||||
const b = self.step.owner;
|
||||
const new_env_map = b.allocator.create(EnvMap) catch @panic("OOM");
|
||||
new_env_map.* = EnvMap.init(b.allocator);
|
||||
self.env_map = new_env_map;
|
||||
}
|
||||
|
||||
pub fn addPathDir(self: *RunStep, search_path: []const u8) void {
|
||||
addPathDirInternal(&self.step, self.builder, search_path);
|
||||
addPathDirInternal(&self.step, self.step.owner, search_path);
|
||||
}
|
||||
|
||||
/// For internal use only, users of `RunStep` should use `addPathDir` directly.
|
||||
|
|
@ -157,13 +189,12 @@ pub fn addPathDirInternal(step: *Step, builder: *std.Build, search_path: []const
|
|||
}
|
||||
|
||||
pub fn getEnvMap(self: *RunStep) *EnvMap {
|
||||
return getEnvMapInternal(&self.step, self.builder.allocator);
|
||||
return getEnvMapInternal(&self.step, self.step.owner.allocator);
|
||||
}
|
||||
|
||||
fn getEnvMapInternal(step: *Step, allocator: Allocator) *EnvMap {
|
||||
const maybe_env_map = switch (step.id) {
|
||||
.run => step.cast(RunStep).?.env_map,
|
||||
.emulatable_run => step.cast(std.Build.EmulatableRunStep).?.env_map,
|
||||
else => unreachable,
|
||||
};
|
||||
return maybe_env_map orelse {
|
||||
|
|
@ -171,7 +202,6 @@ fn getEnvMapInternal(step: *Step, allocator: Allocator) *EnvMap {
|
|||
env_map.* = process.getEnvMap(allocator) catch @panic("unhandled error");
|
||||
switch (step.id) {
|
||||
.run => step.cast(RunStep).?.env_map = env_map,
|
||||
.emulatable_run => step.cast(RunStep).?.env_map = env_map,
|
||||
else => unreachable,
|
||||
}
|
||||
return env_map;
|
||||
|
|
@ -179,41 +209,85 @@ fn getEnvMapInternal(step: *Step, allocator: Allocator) *EnvMap {
|
|||
}
|
||||
|
||||
pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8) void {
|
||||
const b = self.step.owner;
|
||||
const env_map = self.getEnvMap();
|
||||
env_map.put(
|
||||
self.builder.dupe(key),
|
||||
self.builder.dupe(value),
|
||||
) catch @panic("unhandled error");
|
||||
env_map.put(b.dupe(key), b.dupe(value)) catch @panic("unhandled error");
|
||||
}
|
||||
|
||||
 pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void {
-    self.stderr_action = .{ .expect_exact = self.builder.dupe(bytes) };
+    const new_check: StdIo.Check = .{ .expect_stderr_exact = self.step.owner.dupe(bytes) };
+    self.addCheck(new_check);
 }

 pub fn expectStdOutEqual(self: *RunStep, bytes: []const u8) void {
-    self.stdout_action = .{ .expect_exact = self.builder.dupe(bytes) };
+    const new_check: StdIo.Check = .{ .expect_stdout_exact = self.step.owner.dupe(bytes) };
+    self.addCheck(new_check);
 }

-fn stdIoActionToBehavior(action: StdIoAction) std.ChildProcess.StdIo {
-    return switch (action) {
-        .ignore => .Ignore,
-        .inherit => .Inherit,
-        .expect_exact, .expect_matches => .Pipe,
+pub fn expectExitCode(self: *RunStep, code: u8) void {
+    const new_check: StdIo.Check = .{ .expect_term = .{ .Exited = code } };
+    self.addCheck(new_check);
 }

+pub fn addCheck(self: *RunStep, new_check: StdIo.Check) void {
+    const arena = self.step.owner.allocator;
+    switch (self.stdio) {
+        .infer_from_args => {
+            const list = arena.create([1]StdIo.Check) catch @panic("OOM");
+            list.* = .{new_check};
+            self.stdio = .{ .check = list };
+        },
+        .check => |checks| {
+            const new_list = arena.alloc(StdIo.Check, checks.len + 1) catch @panic("OOM");
+            std.mem.copy(StdIo.Check, new_list, checks);
+            new_list[checks.len] = new_check;
+            // Store the grown list back, otherwise the appended check is lost.
+            self.stdio = .{ .check = new_list };
+        },
+        else => @panic("illegal call to addCheck: conflicting helper method calls. Suggest to directly set stdio field of RunStep instead"),
+    }
+}
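Checks can also be appended directly, without the helper wrappers above; a short sketch (the matched string is invented):

    run.addCheck(.{ .expect_stderr_match = "error: FileNotFound" });
    run.addCheck(.{ .expect_term = .{ .Exited = 1 } });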
||||
|
||||
/// Returns whether the RunStep has side effects *other than* updating the output arguments.
|
||||
fn hasSideEffects(self: RunStep) bool {
|
||||
return switch (self.stdio) {
|
||||
.infer_from_args => !self.hasAnyOutputArgs(),
|
||||
.inherit => true,
|
||||
.check => false,
|
||||
};
|
||||
}
|
||||
|
||||
fn needOutputCheck(self: RunStep) bool {
|
||||
switch (self.condition) {
|
||||
.always => return false,
|
||||
.output_outdated => {},
|
||||
}
|
||||
if (self.extra_file_dependencies.len > 0) return true;
|
||||
|
||||
fn hasAnyOutputArgs(self: RunStep) bool {
|
||||
for (self.argv.items) |arg| switch (arg) {
|
||||
.output => return true,
|
||||
else => continue,
|
||||
};
|
||||
return false;
|
||||
}
|
||||
|
||||
fn checksContainStdout(checks: []const StdIo.Check) bool {
|
||||
for (checks) |check| switch (check) {
|
||||
.expect_stderr_exact,
|
||||
.expect_stderr_match,
|
||||
.expect_term,
|
||||
=> continue,
|
||||
|
||||
.expect_stdout_exact,
|
||||
.expect_stdout_match,
|
||||
=> return true,
|
||||
};
|
||||
return false;
|
||||
}
|
||||
|
||||
fn checksContainStderr(checks: []const StdIo.Check) bool {
|
||||
for (checks) |check| switch (check) {
|
||||
.expect_stdout_exact,
|
||||
.expect_stdout_match,
|
||||
.expect_term,
|
||||
=> continue,
|
||||
|
||||
.expect_stderr_exact,
|
||||
.expect_stderr_match,
|
||||
=> return true,
|
||||
};
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
@ -223,16 +297,17 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
// processes could use to supply progress updates.
|
||||
_ = prog_node;
|
||||
|
||||
const b = step.owner;
|
||||
const self = @fieldParentPtr(RunStep, "step", step);
|
||||
const need_output_check = self.needOutputCheck();
|
||||
const has_side_effects = self.hasSideEffects();
|
||||
|
||||
var argv_list = ArrayList([]const u8).init(self.builder.allocator);
|
||||
var argv_list = ArrayList([]const u8).init(b.allocator);
|
||||
var output_placeholders = ArrayList(struct {
|
||||
index: usize,
|
||||
output: Arg.Output,
|
||||
}).init(self.builder.allocator);
|
||||
}).init(b.allocator);
|
||||
|
||||
var man = self.builder.cache.obtain();
|
||||
var man = b.cache.obtain();
|
||||
defer man.deinit();
|
||||
|
||||
for (self.argv.items) |arg| {
|
||||
|
|
@ -242,7 +317,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
man.hash.addBytes(bytes);
|
||||
},
|
||||
.file_source => |file| {
|
||||
const file_path = file.getPath(self.builder);
|
||||
const file_path = file.getPath(b);
|
||||
try argv_list.append(file_path);
|
||||
_ = try man.addFile(file_path, null);
|
||||
},
|
||||
|
|
@ -252,7 +327,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
self.addPathForDynLibs(artifact);
|
||||
}
|
||||
const file_path = artifact.installed_path orelse
|
||||
artifact.getOutputSource().getPath(self.builder);
|
||||
artifact.getOutputSource().getPath(b);
|
||||
|
||||
try argv_list.append(file_path);
|
||||
|
||||
|
|
@ -272,17 +347,17 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
}
|
||||
}
|
||||
|
||||
if (need_output_check) {
|
||||
if (!has_side_effects) {
|
||||
for (self.extra_file_dependencies) |file_path| {
|
||||
_ = try man.addFile(self.builder.pathFromRoot(file_path), null);
|
||||
_ = try man.addFile(b.pathFromRoot(file_path), null);
|
||||
}
|
||||
|
||||
if (man.hit() catch |err| failWithCacheError(man, err)) {
|
||||
if (try step.cacheHit(&man)) {
|
||||
// cache hit, skip running command
|
||||
const digest = man.final();
|
||||
for (output_placeholders.items) |placeholder| {
|
||||
placeholder.output.generated_file.path = try self.builder.cache_root.join(
|
||||
self.builder.allocator,
|
||||
placeholder.output.generated_file.path = try b.cache_root.join(
|
||||
b.allocator,
|
||||
&.{ "o", &digest, placeholder.output.basename },
|
||||
);
|
||||
}
|
||||
|
|
@ -292,8 +367,8 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
const digest = man.final();
|
||||
|
||||
for (output_placeholders.items) |placeholder| {
|
||||
const output_path = try self.builder.cache_root.join(
|
||||
self.builder.allocator,
|
||||
const output_path = try b.cache_root.join(
|
||||
b.allocator,
|
||||
&.{ "o", &digest, placeholder.output.basename },
|
||||
);
|
||||
const output_dir = fs.path.dirname(output_path).?;
|
||||
|
|
@ -308,18 +383,16 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
}
|
||||
|
||||
try runCommand(
|
||||
argv_list.items,
|
||||
self.builder,
|
||||
self.expected_term,
|
||||
self.stdout_action,
|
||||
self.stderr_action,
|
||||
self.stdin_behavior,
|
||||
self.env_map,
|
||||
step,
|
||||
self.cwd,
|
||||
self.print,
|
||||
argv_list.items,
|
||||
self.env_map,
|
||||
self.stdio,
|
||||
has_side_effects,
|
||||
self.max_stdio_size,
|
||||
);
|
||||
|
||||
if (need_output_check) {
|
||||
if (!has_side_effects) {
|
||||
try man.writeManifest();
|
||||
}
|
||||
}
|
||||
|
|
@ -369,165 +442,171 @@ fn termMatches(expected: ?std.ChildProcess.Term, actual: std.ChildProcess.Term)
|
|||
};
|
||||
}
|
||||
|
||||
pub fn runCommand(
|
||||
fn runCommand(
|
||||
step: *Step,
|
||||
opt_cwd: ?[]const u8,
|
||||
argv: []const []const u8,
|
||||
builder: *std.Build,
|
||||
expected_term: ?std.ChildProcess.Term,
|
||||
stdout_action: StdIoAction,
|
||||
stderr_action: StdIoAction,
|
||||
stdin_behavior: std.ChildProcess.StdIo,
|
||||
env_map: ?*EnvMap,
|
||||
maybe_cwd: ?[]const u8,
|
||||
print: bool,
|
||||
stdio: StdIo,
|
||||
has_side_effects: bool,
|
||||
max_stdio_size: usize,
|
||||
) !void {
|
||||
const cwd = if (maybe_cwd) |cwd| builder.pathFromRoot(cwd) else builder.build_root.path;
|
||||
const b = step.owner;
|
||||
const arena = b.allocator;
|
||||
const cwd = if (opt_cwd) |cwd| b.pathFromRoot(cwd) else b.build_root.path;
|
||||
|
||||
if (!std.process.can_spawn) {
|
||||
const cmd = try std.mem.join(builder.allocator, " ", argv);
|
||||
std.debug.print("the following command cannot be executed ({s} does not support spawning a child process):\n{s}", .{
|
||||
@tagName(builtin.os.tag), cmd,
|
||||
});
|
||||
builder.allocator.free(cmd);
|
||||
return ExecError.ExecNotSupported;
|
||||
}
|
||||
try step.handleChildProcUnsupported(opt_cwd, argv);
|
||||
try Step.handleVerbose(step.owner, opt_cwd, argv);
|
||||
|
||||
var child = std.ChildProcess.init(argv, builder.allocator);
|
||||
var child = std.ChildProcess.init(argv, arena);
|
||||
child.cwd = cwd;
|
||||
child.env_map = env_map orelse builder.env_map;
|
||||
child.env_map = env_map orelse b.env_map;
|
||||
|
||||
child.stdin_behavior = stdin_behavior;
|
||||
child.stdout_behavior = stdIoActionToBehavior(stdout_action);
|
||||
child.stderr_behavior = stdIoActionToBehavior(stderr_action);
|
||||
|
||||
if (print)
|
||||
printCmd(cwd, argv);
|
||||
|
||||
child.spawn() catch |err| {
|
||||
std.debug.print("Unable to spawn {s}: {s}\n", .{ argv[0], @errorName(err) });
|
||||
return err;
|
||||
child.stdin_behavior = switch (stdio) {
|
||||
.infer_from_args => if (has_side_effects) .Inherit else .Ignore,
|
||||
.inherit => .Inherit,
|
||||
.check => .Close,
|
||||
};
|
||||
child.stdout_behavior = switch (stdio) {
|
||||
.infer_from_args => if (has_side_effects) .Inherit else .Ignore,
|
||||
.inherit => .Inherit,
|
||||
.check => |checks| if (checksContainStdout(checks)) .Pipe else .Ignore,
|
||||
};
|
||||
child.stderr_behavior = switch (stdio) {
|
||||
.infer_from_args => if (has_side_effects) .Inherit else .Pipe,
|
||||
.inherit => .Inherit,
|
||||
.check => .Pipe,
|
||||
};
|
||||
|
||||
// TODO need to poll to read these streams to prevent a deadlock (or rely on evented I/O).
|
||||
child.spawn() catch |err| return step.fail("unable to spawn {s}: {s}", .{
|
||||
argv[0], @errorName(err),
|
||||
});
|
||||
|
||||
var stdout: ?[]const u8 = null;
|
||||
defer if (stdout) |s| builder.allocator.free(s);
|
||||
var stdout_bytes: ?[]const u8 = null;
|
||||
var stderr_bytes: ?[]const u8 = null;
|
||||
|
||||
switch (stdout_action) {
|
||||
.expect_exact, .expect_matches => {
|
||||
stdout = try child.stdout.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
|
||||
},
|
||||
.inherit, .ignore => {},
|
||||
if (child.stdout) |stdout| {
|
||||
if (child.stderr) |stderr| {
|
||||
var poller = std.io.poll(arena, enum { stdout, stderr }, .{
|
||||
.stdout = stdout,
|
||||
.stderr = stderr,
|
||||
});
|
||||
defer poller.deinit();
|
||||
|
||||
while (try poller.poll()) {
|
||||
if (poller.fifo(.stdout).count > max_stdio_size)
|
||||
return error.StdoutStreamTooLong;
|
||||
if (poller.fifo(.stderr).count > max_stdio_size)
|
||||
return error.StderrStreamTooLong;
|
||||
}
|
||||
|
||||
stdout_bytes = try poller.fifo(.stdout).toOwnedSlice();
|
||||
stderr_bytes = try poller.fifo(.stderr).toOwnedSlice();
|
||||
} else {
|
||||
stdout_bytes = try stdout.reader().readAllAlloc(arena, max_stdio_size);
|
||||
}
|
||||
} else if (child.stderr) |stderr| {
|
||||
stderr_bytes = try stderr.reader().readAllAlloc(arena, max_stdio_size);
|
||||
}
|
||||
|
||||
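The same polling pattern can be exercised outside the build runner; a self-contained sketch that assumes a POSIX-like host where `echo` exists, draining both pipes so a child that fills one of them cannot deadlock the parent:

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        const gpa = gpa_state.allocator();

        var child = std.ChildProcess.init(&.{ "echo", "hello" }, gpa);
        child.stdout_behavior = .Pipe;
        child.stderr_behavior = .Pipe;
        try child.spawn();

        var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
            .stdout = child.stdout.?,
            .stderr = child.stderr.?,
        });
        defer poller.deinit();

        // Keep both FIFOs drained; bail out if either stream grows too large.
        while (try poller.poll()) {
            if (poller.fifo(.stdout).count > 10 * 1024 * 1024) return error.StdoutStreamTooLong;
            if (poller.fifo(.stderr).count > 10 * 1024 * 1024) return error.StderrStreamTooLong;
        }
        const stdout_bytes = try poller.fifo(.stdout).toOwnedSlice();
        defer gpa.free(stdout_bytes);

        _ = try child.wait();
        std.debug.print("captured: {s}", .{stdout_bytes});
    }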
var stderr: ?[]const u8 = null;
|
||||
defer if (stderr) |s| builder.allocator.free(s);
|
||||
|
||||
switch (stderr_action) {
|
||||
.expect_exact, .expect_matches => {
|
||||
stderr = try child.stderr.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
|
||||
},
|
||||
.inherit, .ignore => {},
|
||||
}
|
||||
if (stderr_bytes) |stderr| if (stderr.len > 0) {
|
||||
const stderr_is_diagnostic = switch (stdio) {
|
||||
.check => |checks| !checksContainStderr(checks),
|
||||
else => true,
|
||||
};
|
||||
if (stderr_is_diagnostic) {
|
||||
try step.result_error_msgs.append(arena, stderr);
|
||||
}
|
||||
};
|
||||
|
||||
const term = child.wait() catch |err| {
|
||||
std.debug.print("Unable to spawn {s}: {s}\n", .{ argv[0], @errorName(err) });
|
||||
return err;
|
||||
return step.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(err) });
|
||||
};
|
||||
|
||||
if (!termMatches(expected_term, term)) {
|
||||
std.debug.print("The following command {} (expected {}):\n", .{ fmtTerm(term), fmtTerm(expected_term) });
|
||||
printCmd(cwd, argv);
|
||||
return error.UnexpectedExit;
|
||||
}
|
||||
|
||||
switch (stderr_action) {
|
||||
.inherit, .ignore => {},
|
||||
.expect_exact => |expected_bytes| {
|
||||
if (!mem.eql(u8, expected_bytes, stderr.?)) {
|
||||
std.debug.print(
|
||||
\\
|
||||
\\========= Expected this stderr: =========
|
||||
\\{s}
|
||||
\\========= But found: ====================
|
||||
\\{s}
|
||||
\\
|
||||
, .{ expected_bytes, stderr.? });
|
||||
printCmd(cwd, argv);
|
||||
return error.TestFailed;
|
||||
}
|
||||
switch (stdio) {
|
||||
.check => |checks| for (checks) |check| switch (check) {
|
||||
.expect_stderr_exact => |expected_bytes| {
|
||||
if (!mem.eql(u8, expected_bytes, stderr_bytes.?)) {
|
||||
return step.fail(
|
||||
\\========= expected this stderr: =========
|
||||
\\{s}
|
||||
\\========= but found: ====================
|
||||
\\{s}
|
||||
\\========= from the following command: ===
|
||||
\\{s}
|
||||
, .{
|
||||
expected_bytes,
|
||||
stderr_bytes.?,
|
||||
try Step.allocPrintCmd(arena, opt_cwd, argv),
|
||||
});
|
||||
}
|
||||
},
|
||||
.expect_stderr_match => |match| {
|
||||
if (mem.indexOf(u8, stderr_bytes.?, match) == null) {
|
||||
return step.fail(
|
||||
\\========= expected to find in stderr: =========
|
||||
\\{s}
|
||||
\\========= but stderr does not contain it: =====
|
||||
\\{s}
|
||||
\\========= from the following command: =========
|
||||
\\{s}
|
||||
, .{
|
||||
match,
|
||||
stderr_bytes.?,
|
||||
try Step.allocPrintCmd(arena, opt_cwd, argv),
|
||||
});
|
||||
}
|
||||
},
|
||||
.expect_stdout_exact => |expected_bytes| {
|
||||
if (!mem.eql(u8, expected_bytes, stdout_bytes.?)) {
|
||||
return step.fail(
|
||||
\\========= expected this stdout: =========
|
||||
\\{s}
|
||||
\\========= but found: ====================
|
||||
\\{s}
|
||||
\\========= from the following command: ===
|
||||
\\{s}
|
||||
, .{
|
||||
expected_bytes,
|
||||
stdout_bytes.?,
|
||||
try Step.allocPrintCmd(arena, opt_cwd, argv),
|
||||
});
|
||||
}
|
||||
},
|
||||
.expect_stdout_match => |match| {
|
||||
if (mem.indexOf(u8, stdout_bytes.?, match) == null) {
|
||||
return step.fail(
|
||||
\\========= expected to find in stdout: =========
|
||||
\\{s}
|
||||
\\========= but stdout does not contain it: =====
|
||||
\\{s}
|
||||
\\========= from the following command: =========
|
||||
\\{s}
|
||||
, .{
|
||||
match,
|
||||
stdout_bytes.?,
|
||||
try Step.allocPrintCmd(arena, opt_cwd, argv),
|
||||
});
|
||||
}
|
||||
},
|
||||
.expect_term => |expected_term| {
|
||||
if (!termMatches(expected_term, term)) {
|
||||
return step.fail("the following command {} (expected {}):\n{s}", .{
|
||||
fmtTerm(term),
|
||||
fmtTerm(expected_term),
|
||||
try Step.allocPrintCmd(arena, opt_cwd, argv),
|
||||
});
|
||||
}
|
||||
},
|
||||
},
|
||||
.expect_matches => |matches| for (matches) |match| {
|
||||
if (mem.indexOf(u8, stderr.?, match) == null) {
|
||||
std.debug.print(
|
||||
\\
|
||||
\\========= Expected to find in stderr: =========
|
||||
\\{s}
|
||||
\\========= But stderr does not contain it: =====
|
||||
\\{s}
|
||||
\\
|
||||
, .{ match, stderr.? });
|
||||
printCmd(cwd, argv);
|
||||
return error.TestFailed;
|
||||
}
|
||||
else => {
|
||||
try step.handleChildProcessTerm(term, opt_cwd, argv);
|
||||
},
|
||||
}
|
||||
|
||||
switch (stdout_action) {
|
||||
.inherit, .ignore => {},
|
||||
.expect_exact => |expected_bytes| {
|
||||
if (!mem.eql(u8, expected_bytes, stdout.?)) {
|
||||
std.debug.print(
|
||||
\\
|
||||
\\========= Expected this stdout: =========
|
||||
\\{s}
|
||||
\\========= But found: ====================
|
||||
\\{s}
|
||||
\\
|
||||
, .{ expected_bytes, stdout.? });
|
||||
printCmd(cwd, argv);
|
||||
return error.TestFailed;
|
||||
}
|
||||
},
|
||||
.expect_matches => |matches| for (matches) |match| {
|
||||
if (mem.indexOf(u8, stdout.?, match) == null) {
|
||||
std.debug.print(
|
||||
\\
|
||||
\\========= Expected to find in stdout: =========
|
||||
\\{s}
|
||||
\\========= But stdout does not contain it: =====
|
||||
\\{s}
|
||||
\\
|
||||
, .{ match, stdout.? });
|
||||
printCmd(cwd, argv);
|
||||
return error.TestFailed;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
|
||||
const i = man.failed_file_index orelse failWithSimpleError(err);
|
||||
const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
|
||||
const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
|
||||
std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
|
||||
std.process.exit(1);
|
||||
}
|
||||
|
||||
fn failWithSimpleError(err: anyerror) noreturn {
|
||||
std.debug.print("{s}\n", .{@errorName(err)});
|
||||
std.process.exit(1);
|
||||
}
|
||||
|
||||
fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void {
|
||||
if (cwd) |yes_cwd| std.debug.print("cd {s} && ", .{yes_cwd});
|
||||
for (argv) |arg| {
|
||||
std.debug.print("{s} ", .{arg});
|
||||
}
|
||||
std.debug.print("\n", .{});
|
||||
}
|
||||
|
||||
fn addPathForDynLibs(self: *RunStep, artifact: *CompileStep) void {
|
||||
addPathForDynLibsInternal(&self.step, self.builder, artifact);
|
||||
addPathForDynLibsInternal(&self.step, self.step.owner, artifact);
|
||||
}
|
||||
|
||||
/// This should only be used for internal usage, this is called automatically
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
id: Id,
|
||||
name: []const u8,
|
||||
owner: *Build,
|
||||
makeFn: MakeFn,
|
||||
dependencies: std.ArrayList(*Step),
|
||||
/// This field is empty during execution of the user's build script, and
|
||||
|
|
@ -39,7 +40,6 @@ pub const Id = enum {
|
|||
translate_c,
|
||||
write_file,
|
||||
run,
|
||||
emulatable_run,
|
||||
check_file,
|
||||
check_object,
|
||||
config_header,
|
||||
|
|
@ -60,7 +60,6 @@ pub const Id = enum {
|
|||
.translate_c => Build.TranslateCStep,
|
||||
.write_file => Build.WriteFileStep,
|
||||
.run => Build.RunStep,
|
||||
.emulatable_run => Build.EmulatableRunStep,
|
||||
.check_file => Build.CheckFileStep,
|
||||
.check_object => Build.CheckObjectStep,
|
||||
.config_header => Build.ConfigHeaderStep,
|
||||
|
|
@ -74,11 +73,14 @@ pub const Id = enum {
|
|||
pub const Options = struct {
|
||||
id: Id,
|
||||
name: []const u8,
|
||||
owner: *Build,
|
||||
makeFn: MakeFn = makeNoOp,
|
||||
first_ret_addr: ?usize = null,
|
||||
};
|
||||
|
||||
pub fn init(allocator: Allocator, options: Options) Step {
|
||||
pub fn init(options: Options) Step {
|
||||
const arena = options.owner.allocator;
|
||||
|
||||
var addresses = [1]usize{0} ** n_debug_stack_frames;
|
||||
const first_ret_addr = options.first_ret_addr orelse @returnAddress();
|
||||
var stack_trace = std.builtin.StackTrace{
|
||||
|
|
@ -89,9 +91,10 @@ pub fn init(allocator: Allocator, options: Options) Step {
|
|||
|
||||
return .{
|
||||
.id = options.id,
|
||||
.name = allocator.dupe(u8, options.name) catch @panic("OOM"),
|
||||
.name = arena.dupe(u8, options.name) catch @panic("OOM"),
|
||||
.owner = options.owner,
|
||||
.makeFn = options.makeFn,
|
||||
.dependencies = std.ArrayList(*Step).init(allocator),
|
||||
.dependencies = std.ArrayList(*Step).init(arena),
|
||||
.dependants = .{},
|
||||
.state = .precheck_unstarted,
|
||||
.debug_stack_trace = addresses,
|
||||
|
|
@ -168,3 +171,231 @@ const std = @import("../std.zig");
|
|||
const Build = std.Build;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const builtin = @import("builtin");
|
||||
|
||||
pub fn evalChildProcess(s: *Step, argv: []const []const u8) !void {
|
||||
const arena = s.owner.allocator;
|
||||
|
||||
try handleChildProcUnsupported(s, null, argv);
|
||||
try handleVerbose(s.owner, null, argv);
|
||||
|
||||
const result = std.ChildProcess.exec(.{
|
||||
.allocator = arena,
|
||||
.argv = argv,
|
||||
}) catch |err| return s.fail("unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
|
||||
|
||||
if (result.stderr.len > 0) {
|
||||
try s.result_error_msgs.append(arena, result.stderr);
|
||||
}
|
||||
|
||||
try handleChildProcessTerm(s, result.term, null, argv);
|
||||
}
|
||||
|
||||
pub fn fail(step: *Step, comptime fmt: []const u8, args: anytype) error{ OutOfMemory, MakeFailed } {
|
||||
const arena = step.owner.allocator;
|
||||
const msg = try std.fmt.allocPrint(arena, fmt, args);
|
||||
try step.result_error_msgs.append(arena, msg);
|
||||
return error.MakeFailed;
|
||||
}
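A custom step written against this reworked API would look roughly like the following; a hedged sketch in which `MyStep`, the `.custom` id, and the probed command are invented for the example:

    const MyStep = struct {
        step: Step,

        pub fn create(owner: *std.Build) *MyStep {
            const self = owner.allocator.create(MyStep) catch @panic("OOM");
            self.* = .{
                .step = Step.init(.{
                    .id = .custom,
                    .name = "my step",
                    .owner = owner,
                    .makeFn = make,
                }),
            };
            return self;
        }

        fn make(step: *Step, prog_node: *std.Progress.Node) !void {
            _ = prog_node;
            const b = step.owner;
            // Child stderr and a non-zero exit code are reported through the
            // step's error list instead of being printed directly.
            step.evalChildProcess(&.{ b.zig_exe, "version" }) catch |err| {
                return step.fail("zig version probe failed: {s}", .{@errorName(err)});
            };
        }
    };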
|
||||
|
||||
/// Assumes that argv contains `--listen=-` and that the process being spawned
|
||||
/// is the zig compiler - the same version that compiled the build runner.
|
||||
pub fn evalZigProcess(
|
||||
s: *Step,
|
||||
argv: []const []const u8,
|
||||
prog_node: *std.Progress.Node,
|
||||
) ![]const u8 {
|
||||
assert(argv.len != 0);
|
||||
const b = s.owner;
|
||||
const arena = b.allocator;
|
||||
const gpa = arena;
|
||||
|
||||
try handleChildProcUnsupported(s, null, argv);
|
||||
try handleVerbose(s.owner, null, argv);
|
||||
|
||||
var child = std.ChildProcess.init(argv, arena);
|
||||
child.env_map = b.env_map;
|
||||
child.stdin_behavior = .Pipe;
|
||||
child.stdout_behavior = .Pipe;
|
||||
child.stderr_behavior = .Pipe;
|
||||
|
||||
child.spawn() catch |err| return s.fail("unable to spawn {s}: {s}", .{
|
||||
argv[0], @errorName(err),
|
||||
});
|
||||
|
||||
var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
|
||||
.stdout = child.stdout.?,
|
||||
.stderr = child.stderr.?,
|
||||
});
|
||||
defer poller.deinit();
|
||||
|
||||
try sendMessage(child.stdin.?, .update);
|
||||
try sendMessage(child.stdin.?, .exit);
|
||||
|
||||
const Header = std.zig.Server.Message.Header;
|
||||
var result: ?[]const u8 = null;
|
||||
|
||||
var node_name: std.ArrayListUnmanaged(u8) = .{};
|
||||
defer node_name.deinit(gpa);
|
||||
var sub_prog_node: ?std.Progress.Node = null;
|
||||
defer if (sub_prog_node) |*n| n.end();
|
||||
|
||||
while (try poller.poll()) {
|
||||
const stdout = poller.fifo(.stdout);
|
||||
const buf = stdout.readableSlice(0);
|
||||
assert(stdout.readableLength() == buf.len);
|
||||
if (buf.len >= @sizeOf(Header)) {
|
||||
const header = @ptrCast(*align(1) const Header, buf[0..@sizeOf(Header)]);
|
||||
const header_and_msg_len = header.bytes_len + @sizeOf(Header);
|
||||
if (buf.len >= header_and_msg_len) {
|
||||
const body = buf[@sizeOf(Header)..][0..header.bytes_len];
|
||||
switch (header.tag) {
|
||||
.zig_version => {
|
||||
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
|
||||
return s.fail(
|
||||
"zig version mismatch build runner vs compiler: '{s}' vs '{s}'",
|
||||
.{ builtin.zig_version_string, body },
|
||||
);
|
||||
}
|
||||
},
|
||||
.error_bundle => {
|
||||
const EbHdr = std.zig.Server.Message.ErrorBundle;
|
||||
const eb_hdr = @ptrCast(*align(1) const EbHdr, body);
|
||||
const extra_bytes =
|
||||
body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
|
||||
const string_bytes =
|
||||
body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
|
||||
// TODO: use @ptrCast when the compiler supports it
|
||||
const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes);
|
||||
const extra_array = try arena.alloc(u32, unaligned_extra.len);
|
||||
// TODO: use @memcpy when it supports slices
|
||||
for (extra_array, unaligned_extra) |*dst, src| dst.* = src;
|
||||
s.result_error_bundle = .{
|
||||
.string_bytes = try arena.dupe(u8, string_bytes),
|
||||
.extra = extra_array,
|
||||
};
|
||||
},
|
||||
.progress => {
|
||||
if (sub_prog_node) |*n| n.end();
|
||||
node_name.clearRetainingCapacity();
|
||||
try node_name.appendSlice(gpa, body);
|
||||
sub_prog_node = prog_node.start(node_name.items, 0);
|
||||
sub_prog_node.?.activate();
|
||||
},
|
||||
.emit_bin_path => {
|
||||
result = try arena.dupe(u8, body);
|
||||
},
|
||||
_ => {
|
||||
// Unrecognized message.
|
||||
},
|
||||
}
|
||||
stdout.discard(header_and_msg_len);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const stderr = poller.fifo(.stderr);
|
||||
if (stderr.readableLength() > 0) {
|
||||
try s.result_error_msgs.append(arena, try stderr.toOwnedSlice());
|
||||
}
|
||||
|
||||
// Send EOF to stdin.
|
||||
child.stdin.?.close();
|
||||
child.stdin = null;
|
||||
|
||||
const term = child.wait() catch |err| {
|
||||
return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(err) });
|
||||
};
|
||||
try handleChildProcessTerm(s, term, null, argv);
|
||||
|
||||
if (s.result_error_bundle.errorMessageCount() > 0) {
|
||||
return s.fail("the following command failed with {d} compilation errors:\n{s}", .{
|
||||
s.result_error_bundle.errorMessageCount(),
|
||||
try allocPrintCmd(arena, null, argv),
|
||||
});
|
||||
}
|
||||
|
||||
return result orelse return s.fail(
|
||||
"the following command failed to communicate the compilation result:\n{s}",
|
||||
.{try allocPrintCmd(arena, null, argv)},
|
||||
);
|
||||
}
|
||||
|
||||
fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
|
||||
const header: std.zig.Client.Message.Header = .{
|
||||
.tag = tag,
|
||||
.bytes_len = 0,
|
||||
};
|
||||
try file.writeAll(std.mem.asBytes(&header));
|
||||
}
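For a message that carries a payload, the same header framing parsed by evalZigProcess applies; a hedged sketch (the helper name is invented, and the two-argument @intCast form matches this era of the language):

    fn sendMessageWithBody(file: std.fs.File, tag: std.zig.Client.Message.Tag, body: []const u8) !void {
        const header: std.zig.Client.Message.Header = .{
            .tag = tag,
            .bytes_len = @intCast(u32, body.len),
        };
        // Header first, then exactly bytes_len payload bytes.
        try file.writeAll(std.mem.asBytes(&header));
        try file.writeAll(body);
    }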
|
||||
|
||||
pub fn handleVerbose(
|
||||
b: *Build,
|
||||
opt_cwd: ?[]const u8,
|
||||
argv: []const []const u8,
|
||||
) error{OutOfMemory}!void {
|
||||
if (b.verbose) {
|
||||
// Intention of verbose is to print all sub-process command lines to
|
||||
// stderr before spawning them.
|
||||
const text = try allocPrintCmd(b.allocator, opt_cwd, argv);
|
||||
std.debug.print("{s}\n", .{text});
|
||||
}
|
||||
}
|
||||
|
||||
pub inline fn handleChildProcUnsupported(
|
||||
s: *Step,
|
||||
opt_cwd: ?[]const u8,
|
||||
argv: []const []const u8,
|
||||
) error{ OutOfMemory, MakeFailed }!void {
|
||||
if (!std.process.can_spawn) {
|
||||
return s.fail(
|
||||
"unable to execute the following command: host cannot spawn child processes\n{s}",
|
||||
.{try allocPrintCmd(s.owner.allocator, opt_cwd, argv)},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handleChildProcessTerm(
|
||||
s: *Step,
|
||||
term: std.ChildProcess.Term,
|
||||
opt_cwd: ?[]const u8,
|
||||
argv: []const []const u8,
|
||||
) error{ MakeFailed, OutOfMemory }!void {
|
||||
const arena = s.owner.allocator;
|
||||
switch (term) {
|
||||
.Exited => |code| {
|
||||
if (code != 0) {
|
||||
return s.fail(
|
||||
"the following command exited with error code {d}:\n{s}",
|
||||
.{ code, try allocPrintCmd(arena, opt_cwd, argv) },
|
||||
);
|
||||
}
|
||||
},
|
||||
.Signal, .Stopped, .Unknown => {
|
||||
return s.fail(
|
||||
"the following command terminated unexpectedly:\n{s}",
|
||||
.{try allocPrintCmd(arena, opt_cwd, argv)},
|
||||
);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn allocPrintCmd(arena: Allocator, opt_cwd: ?[]const u8, argv: []const []const u8) ![]u8 {
|
||||
var buf: std.ArrayListUnmanaged(u8) = .{};
|
||||
if (opt_cwd) |cwd| try buf.writer(arena).print("cd {s} && ", .{cwd});
|
||||
for (argv) |arg| {
|
||||
try buf.writer(arena).print("{s} ", .{arg});
|
||||
}
|
||||
return buf.toOwnedSlice(arena);
|
||||
}
|
||||
|
||||
+pub fn cacheHit(s: *Step, man: *std.Build.Cache.Manifest) !bool {
+    return man.hit() catch |err| return failWithCacheError(s, man, err);
+}
+
+fn failWithCacheError(s: *Step, man: *const std.Build.Cache.Manifest, err: anyerror) anyerror {
+    const i = man.failed_file_index orelse return err;
+    const pp = man.files.items[i].prefixed_path orelse return err;
+    const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
+    return s.fail("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
+}
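The manifest pattern used by the reworked steps above (ObjCopyStep, RunStep, WriteFileStep) then reads roughly as follows; a condensed sketch with invented names:

    fn makeWithCache(step: *Step, some_input_bytes: []const u8) !void {
        const b = step.owner;
        var man = b.cache.obtain();
        defer man.deinit();

        // Hash everything that influences the outputs.
        man.hash.addBytes(some_input_bytes);

        if (try step.cacheHit(&man)) {
            // Up to date: derive output paths from man.final() and return early.
            return;
        }

        // ... produce the outputs under b.cache_root, using man.final() as the subdirectory ...

        try man.writeManifest();
    }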
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@ const TranslateCStep = @This();
|
|||
pub const base_id = .translate_c;
|
||||
|
||||
step: Step,
|
||||
builder: *std.Build,
|
||||
source: std.Build.FileSource,
|
||||
include_dirs: std.ArrayList([]const u8),
|
||||
c_macros: std.ArrayList([]const u8),
|
||||
|
|
@ -26,19 +25,19 @@ pub const Options = struct {
|
|||
optimize: std.builtin.OptimizeMode,
|
||||
};
|
||||
|
||||
pub fn create(builder: *std.Build, options: Options) *TranslateCStep {
|
||||
const self = builder.allocator.create(TranslateCStep) catch @panic("OOM");
|
||||
const source = options.source_file.dupe(builder);
|
||||
pub fn create(owner: *std.Build, options: Options) *TranslateCStep {
|
||||
const self = owner.allocator.create(TranslateCStep) catch @panic("OOM");
|
||||
const source = options.source_file.dupe(owner);
|
||||
self.* = TranslateCStep{
|
||||
.step = Step.init(builder.allocator, .{
|
||||
.step = Step.init(.{
|
||||
.id = .translate_c,
|
||||
.name = "translate-c",
|
||||
.owner = owner,
|
||||
.makeFn = make,
|
||||
}),
|
||||
.builder = builder,
|
||||
.source = source,
|
||||
.include_dirs = std.ArrayList([]const u8).init(builder.allocator),
|
||||
.c_macros = std.ArrayList([]const u8).init(builder.allocator),
|
||||
.include_dirs = std.ArrayList([]const u8).init(owner.allocator),
|
||||
.c_macros = std.ArrayList([]const u8).init(owner.allocator),
|
||||
.out_basename = undefined,
|
||||
.target = options.target,
|
||||
.optimize = options.optimize,
|
||||
|
|
@ -58,7 +57,7 @@ pub const AddExecutableOptions = struct {
|
|||
|
||||
/// Creates a step to build an executable from the translated source.
|
||||
pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *CompileStep {
|
||||
return self.builder.addExecutable(.{
|
||||
return self.step.owner.addExecutable(.{
|
||||
.root_source_file = .{ .generated = &self.output_file },
|
||||
.name = options.name orelse "translated_c",
|
||||
.version = options.version,
|
||||
|
|
@ -69,30 +68,31 @@ pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *Comp
|
|||
}
|
||||
|
||||
pub fn addIncludeDir(self: *TranslateCStep, include_dir: []const u8) void {
|
||||
self.include_dirs.append(self.builder.dupePath(include_dir)) catch @panic("OOM");
|
||||
self.include_dirs.append(self.step.owner.dupePath(include_dir)) catch @panic("OOM");
|
||||
}
|
||||
|
||||
pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8) *CheckFileStep {
|
||||
return CheckFileStep.create(self.builder, .{ .generated = &self.output_file }, self.builder.dupeStrings(expected_matches));
|
||||
return CheckFileStep.create(self.step.owner, .{ .generated = &self.output_file }, self.step.owner.dupeStrings(expected_matches));
|
||||
}
|
||||
|
||||
/// If the value is omitted, it is set to 1.
|
||||
/// `name` and `value` need not live longer than the function call.
|
||||
pub fn defineCMacro(self: *TranslateCStep, name: []const u8, value: ?[]const u8) void {
|
||||
const macro = std.Build.constructCMacro(self.builder.allocator, name, value);
|
||||
const macro = std.Build.constructCMacro(self.step.owner.allocator, name, value);
|
||||
self.c_macros.append(macro) catch @panic("OOM");
|
||||
}
|
||||
|
||||
/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
|
||||
pub fn defineCMacroRaw(self: *TranslateCStep, name_and_value: []const u8) void {
|
||||
self.c_macros.append(self.builder.dupe(name_and_value)) catch @panic("OOM");
|
||||
self.c_macros.append(self.step.owner.dupe(name_and_value)) catch @panic("OOM");
|
||||
}
|
||||
|
||||
fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
||||
const b = step.owner;
|
||||
const self = @fieldParentPtr(TranslateCStep, "step", step);
|
||||
|
||||
var argv_list = std.ArrayList([]const u8).init(self.builder.allocator);
|
||||
try argv_list.append(self.builder.zig_exe);
|
||||
var argv_list = std.ArrayList([]const u8).init(b.allocator);
|
||||
try argv_list.append(b.zig_exe);
|
||||
try argv_list.append("translate-c");
|
||||
try argv_list.append("-lc");
|
||||
|
||||
|
|
@ -101,12 +101,12 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
|
||||
if (!self.target.isNative()) {
|
||||
try argv_list.append("-target");
|
||||
try argv_list.append(try self.target.zigTriple(self.builder.allocator));
|
||||
try argv_list.append(try self.target.zigTriple(b.allocator));
|
||||
}
|
||||
|
||||
switch (self.optimize) {
|
||||
.Debug => {}, // Skip since it's the default.
|
||||
else => try argv_list.append(self.builder.fmt("-O{s}", .{@tagName(self.optimize)})),
|
||||
else => try argv_list.append(b.fmt("-O{s}", .{@tagName(self.optimize)})),
|
||||
}
|
||||
|
||||
for (self.include_dirs.items) |include_dir| {
|
||||
|
|
@ -119,15 +119,15 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
|
|||
try argv_list.append(c_macro);
|
||||
}
|
||||
|
||||
try argv_list.append(self.source.getPath(self.builder));
|
||||
try argv_list.append(self.source.getPath(b));
|
||||
|
||||
const output_path = try self.builder.execFromStep(argv_list.items, &self.step, prog_node);
|
||||
const output_path = try step.evalZigProcess(argv_list.items, prog_node);
|
||||
|
||||
self.out_basename = fs.path.basename(output_path);
|
||||
const output_dir = fs.path.dirname(output_path).?;
|
||||
|
||||
self.output_file.path = try fs.path.join(
|
||||
self.builder.allocator,
|
||||
b.allocator,
|
||||
         &[_][]const u8{ output_dir, self.out_basename },
     );
 }

@@ -10,7 +10,6 @@
 //! control.
 
 step: Step,
-builder: *std.Build,
 /// The elements here are pointers because we need stable pointers for the
 /// GeneratedFile field.
 files: std.ArrayListUnmanaged(*File),

@@ -34,12 +33,12 @@ pub const Contents = union(enum) {
     copy: std.Build.FileSource,
 };
 
-pub fn init(builder: *std.Build) WriteFileStep {
+pub fn init(owner: *std.Build) WriteFileStep {
     return .{
-        .builder = builder,
-        .step = Step.init(builder.allocator, .{
+        .step = Step.init(.{
            .id = .write_file,
            .name = "writefile",
+           .owner = owner,
            .makeFn = make,
        }),
        .files = .{},

@@ -48,12 +47,13 @@ pub fn init(builder: *std.Build) WriteFileStep {
 }
 
 pub fn add(wf: *WriteFileStep, sub_path: []const u8, bytes: []const u8) void {
-    const gpa = wf.builder.allocator;
+    const b = wf.step.owner;
+    const gpa = b.allocator;
     const file = gpa.create(File) catch @panic("OOM");
     file.* = .{
         .generated_file = .{ .step = &wf.step },
-        .sub_path = wf.builder.dupePath(sub_path),
-        .contents = .{ .bytes = wf.builder.dupe(bytes) },
+        .sub_path = b.dupePath(sub_path),
+        .contents = .{ .bytes = b.dupe(bytes) },
     };
     wf.files.append(gpa, file) catch @panic("OOM");
 }

@@ -66,11 +66,12 @@ pub fn add(wf: *WriteFileStep, sub_path: []const u8, bytes: []const u8) void {
 /// required sub-path exists.
 /// This is the option expected to be used most commonly with `addCopyFile`.
 pub fn addCopyFile(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void {
-    const gpa = wf.builder.allocator;
+    const b = wf.step.owner;
+    const gpa = b.allocator;
     const file = gpa.create(File) catch @panic("OOM");
     file.* = .{
         .generated_file = .{ .step = &wf.step },
-        .sub_path = wf.builder.dupePath(sub_path),
+        .sub_path = b.dupePath(sub_path),
         .contents = .{ .copy = source },
     };
     wf.files.append(gpa, file) catch @panic("OOM");

@@ -83,7 +84,8 @@ pub fn addCopyFile(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: [
 /// those changes to version control.
 /// A file added this way is not available with `getFileSource`.
 pub fn addCopyFileToSource(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void {
-    wf.output_source_files.append(wf.builder.allocator, .{
+    const b = wf.step.owner;
+    wf.output_source_files.append(b.allocator, .{
         .contents = .{ .copy = source },
         .sub_path = sub_path,
     }) catch @panic("OOM");

@@ -101,6 +103,7 @@ pub fn getFileSource(wf: *WriteFileStep, sub_path: []const u8) ?std.Build.FileSo
 
 fn make(step: *Step, prog_node: *std.Progress.Node) !void {
     _ = prog_node;
+    const b = step.owner;
     const wf = @fieldParentPtr(WriteFileStep, "step", step);
 
     // Writing to source files is kind of an extra capability of this

@@ -110,11 +113,11 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
     for (wf.output_source_files.items) |output_source_file| {
         const basename = fs.path.basename(output_source_file.sub_path);
         if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
-            var dir = try wf.builder.build_root.handle.makeOpenPath(dirname, .{});
+            var dir = try b.build_root.handle.makeOpenPath(dirname, .{});
             defer dir.close();
             try writeFile(wf, dir, output_source_file.contents, basename);
         } else {
-            try writeFile(wf, wf.builder.build_root.handle, output_source_file.contents, basename);
+            try writeFile(wf, b.build_root.handle, output_source_file.contents, basename);
         }
     }
 

@@ -125,7 +128,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
     // If, for example, a hard-coded path was used as the location to put WriteFileStep
     // files, then two WriteFileSteps executing in parallel might clobber each other.
 
-    var man = wf.builder.cache.obtain();
+    var man = b.cache.obtain();
     defer man.deinit();
 
     // Random bytes to make WriteFileStep unique. Refresh this with

@@ -140,17 +143,17 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
                 man.hash.addBytes(bytes);
             },
             .copy => |file_source| {
-                _ = try man.addFile(file_source.getPath(wf.builder), null);
+                _ = try man.addFile(file_source.getPath(b), null);
             },
         }
     }
 
-    if (man.hit() catch |err| failWithCacheError(man, err)) {
+    if (try step.cacheHit(&man)) {
         // Cache hit, skip writing file data.
         const digest = man.final();
         for (wf.files.items) |file| {
-            file.generated_file.path = try wf.builder.cache_root.join(
-                wf.builder.allocator,
+            file.generated_file.path = try b.cache_root.join(
+                b.allocator,
                 &.{ "o", &digest, file.sub_path },
             );
         }

@@ -160,7 +163,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
     const digest = man.final();
     const cache_path = "o" ++ fs.path.sep_str ++ digest;
 
-    var cache_dir = wf.builder.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
+    var cache_dir = b.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
         std.debug.print("unable to make path {s}: {s}\n", .{ cache_path, @errorName(err) });
         return err;
     };

@@ -169,15 +172,15 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
     for (wf.files.items) |file| {
         const basename = fs.path.basename(file.sub_path);
         if (fs.path.dirname(file.sub_path)) |dirname| {
-            var dir = try wf.builder.cache_root.handle.makeOpenPath(dirname, .{});
+            var dir = try b.cache_root.handle.makeOpenPath(dirname, .{});
             defer dir.close();
             try writeFile(wf, dir, file.contents, basename);
         } else {
             try writeFile(wf, cache_dir, file.contents, basename);
         }
 
-        file.generated_file.path = try wf.builder.cache_root.join(
-            wf.builder.allocator,
+        file.generated_file.path = try b.cache_root.join(
+            b.allocator,
             &.{ cache_path, file.sub_path },
         );
     }

@@ -186,32 +189,18 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
 }
 
 fn writeFile(wf: *WriteFileStep, dir: fs.Dir, contents: Contents, basename: []const u8) !void {
+    const b = wf.step.owner;
     // TODO after landing concurrency PR, improve error reporting here
     switch (contents) {
         .bytes => |bytes| return dir.writeFile(basename, bytes),
         .copy => |file_source| {
-            const source_path = file_source.getPath(wf.builder);
+            const source_path = file_source.getPath(b);
             const prev_status = try fs.Dir.updateFile(fs.cwd(), source_path, dir, basename, .{});
             _ = prev_status; // TODO logging (affected by open PR regarding concurrency)
         },
     }
 }
 
-/// TODO consolidate this with the same function in RunStep?
-/// Also properly deal with concurrency (see open PR)
-fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
-    const i = man.failed_file_index orelse failWithSimpleError(err);
-    const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
-    const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
-    std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
-    std.process.exit(1);
-}
-
-fn failWithSimpleError(err: anyerror) noreturn {
-    std.debug.print("{s}\n", .{@errorName(err)});
-    std.process.exit(1);
-}
-
 const std = @import("../std.zig");
 const Step = std.Build.Step;
 const fs = std.fs;
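The same mechanical change runs through every step in this diff: the per-step `builder` field goes away, `Step.init` records the `owner`, and `make` reaches the Build object through `step.owner`. Below is a minimal sketch of a custom step written against the new API; the step name, struct, and printed message are illustrative only, not part of this commit.

const std = @import("std");
const Step = std.Build.Step;

const HelloStep = struct {
    step: Step,

    pub fn create(owner: *std.Build) *HelloStep {
        // Allocation through the owner's allocator, following the same
        // `catch @panic("OOM")` convention used elsewhere in std.Build.
        const self = owner.allocator.create(HelloStep) catch @panic("OOM");
        self.* = .{
            .step = Step.init(.{
                .id = .custom,
                .name = "hello",
                .owner = owner,
                .makeFn = make,
            }),
        };
        return self;
    }

    fn make(step: *Step, prog_node: *std.Progress.Node) !void {
        _ = prog_node;
        // The owning *std.Build is recovered from the step itself,
        // replacing the old per-step `builder` field.
        const b = step.owner;
        const self = @fieldParentPtr(HelloStep, "step", step);
        _ = self;
        std.debug.print("{s}\n", .{b.dupe("hello from the reworked Step API")});
    }
};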
src/main.zig
@@ -4419,6 +4419,8 @@ pub const usage_build =
     \\Options:
     \\  -freference-trace[=num]      How many lines of reference trace should be shown per compile error
     \\  -fno-reference-trace         Disable reference trace
+    \\  -fsummary                    Print the build summary, even on success
+    \\  -fno-summary                 Omit the build summary, even on failure
     \\  --build-file [file]          Override path to build.zig
     \\  --cache-dir [path]           Override path to local Zig cache directory
     \\  --global-cache-dir [path]    Override path to global Zig cache directory

@@ -4920,8 +4922,6 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
         };
         defer tree.deinit(gpa);
 
-        try printAstErrorsToStderr(gpa, tree, "<stdin>", color);
-        var has_ast_error = false;
         if (check_ast_flag) {
             var file: Module.File = .{
                 .status = .never_loaded,

@@ -4957,11 +4957,11 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
                 var error_bundle = try wip_errors.toOwnedBundle();
                 defer error_bundle.deinit(gpa);
                 error_bundle.renderToStdErr(ttyconf);
-                has_ast_error = true;
+                process.exit(2);
             }
-        }
-        if (tree.errors.len != 0 or has_ast_error) {
-            process.exit(1);
+        } else if (tree.errors.len != 0) {
+            try printAstErrorsToStderr(gpa, tree, "<stdin>", color);
+            process.exit(2);
         }
         const formatted = try tree.render(gpa);
         defer gpa.free(formatted);
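For the `zig fmt` stdin fix above, the important ordering is that parse errors are reported (and the process exits) before `render` is ever called on the tree. A rough standalone sketch of that ordering, using the public `std.zig.Ast` API rather than the compiler's internal helpers; the function name and error value are made up for illustration.

const std = @import("std");

fn formatStdinSource(gpa: std.mem.Allocator, source: [:0]const u8) ![]u8 {
    var tree = try std.zig.Ast.parse(gpa, source, .zig);
    defer tree.deinit(gpa);

    if (tree.errors.len != 0) {
        // The real CLI renders the errors to stderr and exits with status 2;
        // the point is that render() is never reached on a broken tree.
        return error.ParseFailed;
    }
    return tree.render(gpa);
}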
@@ -166,9 +166,7 @@ pub const CompareOutputContext = struct {
 
             const run = exe.run();
             run.addArgs(case.cli_args);
-            run.stderr_action = .ignore;
-            run.stdout_action = .ignore;
-            run.expected_term = .{ .Exited = 126 };
+            run.expectExitCode(126);
 
             self.step.dependOn(&run.step);
         },
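This is the reworked RunStep surface in use: instead of setting stdout/stderr actions and an `expected_term` value, callers register checks such as `expectExitCode`. A hedged sketch of the same API from an ordinary build.zig; the executable name, source path, argument, and step name are invented for illustration.

const std = @import("std");

pub fn build(b: *std.Build) void {
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "src/main.zig" },
    });

    const run = exe.run();
    run.addArgs(&.{"--make-it-fail"});
    // Registers a check; the step fails unless the child exits with code 126.
    run.expectExitCode(126);
    // If the binary is foreign to the build host and no emulator applies,
    // skip the checks instead of failing the build.
    run.skip_foreign_checks = true;

    const test_step = b.step("test-exit-code", "Run the exit-code check");
    test_step.dependOn(&run.step);
}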
@@ -858,10 +858,11 @@ pub const StackTracesContext = struct {
         const allocator = context.b.allocator;
         const ptr = allocator.create(RunAndCompareStep) catch unreachable;
         ptr.* = RunAndCompareStep{
-            .step = Step.init(allocator, .{
+            .step = Step.init(.{
                 .id = .custom,
                 .name = "StackTraceCompareOutputStep",
                 .makeFn = make,
+                .owner = context.b,
             }),
             .context = context,
             .exe = exe,

@@ -1121,10 +1122,7 @@ pub const StandaloneContext = struct {
             defer zig_args.resize(zig_args_base_len) catch unreachable;
 
             const run_cmd = b.addSystemCommand(zig_args.items);
-            const log_step = b.addLog("PASS {s} ({s})", .{ annotated_case_name, @tagName(optimize_mode) });
-            log_step.step.dependOn(&run_cmd.step);
-
-            self.step.dependOn(&log_step.step);
+            self.step.dependOn(&run_cmd.step);
         }
     }
 

@@ -1150,10 +1148,7 @@ pub const StandaloneContext = struct {
                 exe.linkSystemLibrary("c");
             }
 
-            const log_step = b.addLog("PASS {s}", .{annotated_case_name});
-            log_step.step.dependOn(&exe.step);
-
-            self.step.dependOn(&log_step.step);
+            self.step.dependOn(&exe.step);
         }
     }
 };

@@ -1203,9 +1198,10 @@ pub const GenHContext = struct {
         const allocator = context.b.allocator;
         const ptr = allocator.create(GenHCmpOutputStep) catch unreachable;
         ptr.* = GenHCmpOutputStep{
-            .step = Step.init(allocator, .{
+            .step = Step.init(.{
                 .id = .custom,
                 .name = "ParseCCmpOutput",
+                .owner = context.b,
                 .makeFn = make,
             }),
             .context = context,
|
|||
Loading…
Add table
Reference in a new issue