diff --git a/build.zig b/build.zig index b836037a3d..fe52457392 100644 --- a/build.zig +++ b/build.zig @@ -279,7 +279,7 @@ pub fn build(b: *std.Build) !void { const ancestor_ver = try std.SemanticVersion.parse(tagged_ancestor); if (zig_version.order(ancestor_ver) != .gt) { - std.debug.print("Zig version '{}' must be greater than tagged ancestor '{}'\n", .{ zig_version, ancestor_ver }); + std.debug.print("Zig version '{f}' must be greater than tagged ancestor '{f}'\n", .{ zig_version, ancestor_ver }); std.process.exit(1); } @@ -304,7 +304,7 @@ pub fn build(b: *std.Build) !void { if (enable_llvm) { const cmake_cfg = if (static_llvm) null else blk: { if (findConfigH(b, config_h_path_option)) |config_h_path| { - const file_contents = fs.cwd().readFileAlloc(b.allocator, config_h_path, max_config_h_bytes) catch unreachable; + const file_contents = fs.cwd().readFileAlloc(config_h_path, b.allocator, .limited(max_config_h_bytes)) catch unreachable; break :blk parseConfigH(b, file_contents); } else { std.log.warn("config.h could not be located automatically. 
Consider providing it explicitly via \"-Dconfig_h\"", .{}); @@ -912,7 +912,7 @@ fn addCxxKnownPath( return error.RequiredLibraryNotFound; const path_padded = run: { - var args = std.ArrayList([]const u8).init(b.allocator); + var args: std.ArrayList([]const u8) = .init(b.allocator); try args.append(ctx.cxx_compiler); var it = std.mem.tokenizeAny(u8, ctx.cxx_compiler_arg1, &std.ascii.whitespace); while (it.next()) |arg| try args.append(arg); @@ -1418,7 +1418,7 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath { }); var dir = b.build_root.handle.openDir("doc/langref", .{ .iterate = true }) catch |err| { - std.debug.panic("unable to open '{}doc/langref' directory: {s}", .{ + std.debug.panic("unable to open '{f}doc/langref' directory: {s}", .{ b.build_root, @errorName(err), }); }; @@ -1439,7 +1439,7 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath { // in a temporary directory "--cache-root", b.cache_root.path orelse ".", }); - cmd.addArgs(&.{ "--zig-lib-dir", b.fmt("{}", .{b.graph.zig_lib_directory}) }); + cmd.addArgs(&.{ "--zig-lib-dir", b.fmt("{f}", .{b.graph.zig_lib_directory}) }); cmd.addArgs(&.{"-i"}); cmd.addFileArg(b.path(b.fmt("doc/langref/{s}", .{entry.name}))); diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 08c35a3452..fcd27145f2 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -330,7 +330,7 @@ pub fn main() !void { } } - const stderr = std.io.getStdErr(); + const stderr: std.fs.File = .stderr(); const ttyconf = get_tty_conf(color, stderr); switch (ttyconf) { .no_color => try graph.env_map.put("NO_COLOR", "1"), @@ -365,7 +365,7 @@ pub fn main() !void { .data = buffer.items, .flags = .{ .exclusive = true }, }) catch |err| { - fatal("unable to write configuration results to '{}{s}': {s}", .{ + fatal("unable to write configuration results to '{f}{s}': {s}", .{ local_cache_directory, tmp_sub_path, @errorName(err), }); }; @@ -378,16 +378,11 @@ pub fn main() !void { 
validateSystemLibraryOptions(builder); - var stdout_writer: std.io.BufferedWriter = .{ - .buffer = &stdout_buffer, - .unbuffered_writer = std.io.getStdOut().writer(), - }; - - if (help_menu) - return usage(builder, &stdout_writer); - - if (steps_menu) - return steps(builder, &stdout_writer); + { + var stdout_bw = std.fs.File.stdout().writer().buffered(&stdio_buffer); + if (help_menu) return usage(builder, &stdout_bw); + if (steps_menu) return steps(builder, &stdout_bw); + } var run: Run = .{ .max_rss = max_rss, @@ -699,7 +694,7 @@ fn runStepNames( const ttyconf = run.ttyconf; if (run.summary != .none) { - var bw = std.debug.lockStdErr2(); + var bw = std.debug.lockStdErr2(&stdio_buffer); defer std.debug.unlockStdErr(); const total_count = success_count + failure_count + pending_count + skipped_count; @@ -1131,7 +1126,7 @@ fn workerMakeOneStep( const show_stderr = s.result_stderr.len > 0; if (show_error_msgs or show_compile_errors or show_stderr) { - var bw = std.debug.lockStdErr2(); + var bw = std.debug.lockStdErr2(&stdio_buffer); defer std.debug.unlockStdErr(); const gpa = b.allocator; @@ -1256,7 +1251,7 @@ fn steps(builder: *std.Build, bw: *std.io.BufferedWriter) !void { } } -var stdout_buffer: [256]u8 = undefined; +var stdio_buffer: [256]u8 = undefined; fn usage(b: *std.Build, bw: *std.io.BufferedWriter) !void { try bw.print( diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 442ebd65e5..fa3753ad16 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -284,7 +284,7 @@ pub fn create( .h_dir = undefined, .dest_dir = graph.env_map.get("DESTDIR"), .install_tls = .{ - .step = Step.init(.{ + .step = .init(.{ .id = TopLevelStep.base_id, .name = "install", .owner = b, @@ -292,7 +292,7 @@ pub fn create( .description = "Copy build artifacts to prefix path", }, .uninstall_tls = .{ - .step = Step.init(.{ + .step = .init(.{ .id = TopLevelStep.base_id, .name = "uninstall", .owner = b, @@ -342,7 +342,7 @@ fn createChildOnly( .graph = parent.graph, .allocator = 
allocator, .install_tls = .{ - .step = Step.init(.{ + .step = .init(.{ .id = TopLevelStep.base_id, .name = "install", .owner = child, @@ -350,7 +350,7 @@ fn createChildOnly( .description = "Copy build artifacts to prefix path", }, .uninstall_tls = .{ - .step = Step.init(.{ + .step = .init(.{ .id = TopLevelStep.base_id, .name = "uninstall", .owner = child, @@ -1525,7 +1525,7 @@ pub fn option(b: *Build, comptime T: type, name_raw: []const u8, description_raw pub fn step(b: *Build, name: []const u8, description: []const u8) *Step { const step_info = b.allocator.create(TopLevelStep) catch @panic("OOM"); step_info.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = TopLevelStep.base_id, .name = name, .owner = b, @@ -1745,7 +1745,7 @@ pub fn addUserInputOption(b: *Build, name_raw: []const u8, value_raw: []const u8 return true; }, .lazy_path, .lazy_path_list => { - log.warn("the lazy path value type isn't added from the CLI, but somehow '{s}' is a .{}", .{ name, std.zig.fmtId(@tagName(gop.value_ptr.value)) }); + log.warn("the lazy path value type isn't added from the CLI, but somehow '{s}' is a .{f}", .{ name, std.zig.fmtId(@tagName(gop.value_ptr.value)) }); return true; }, } @@ -2059,7 +2059,7 @@ pub fn runAllowFail( try Step.handleVerbose2(b, null, child.env_map, argv); try child.spawn(); - const stdout = child.stdout.?.reader().readAllAlloc(b.allocator, max_output_size) catch { + const stdout = child.stdout.?.readToEndAlloc(b.allocator, .limited(max_output_size)) catch { return error.ReadFailure; }; errdefer b.allocator.free(stdout); diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 8f59c0e6e8..1b65d8e2fb 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -333,7 +333,7 @@ pub const Manifest = struct { pub const Diagnostic = union(enum) { none, manifest_create: fs.File.OpenError, - manifest_read: fs.File.ReadError, + manifest_read: anyerror, manifest_lock: fs.File.LockError, manifest_seek: fs.File.SeekError, file_open: FileOp, @@ 
-1062,7 +1062,7 @@ pub const Manifest = struct { fn addDepFileMaybePost(self: *Manifest, dir: fs.Dir, dep_file_basename: []const u8) !void { const gpa = self.cache.gpa; - const dep_file_contents = try dir.readFileAlloc(gpa, dep_file_basename, manifest_file_size_max); + const dep_file_contents = try dir.readFileAlloc(dep_file_basename, gpa, .limited(manifest_file_size_max)); defer gpa.free(dep_file_contents); var error_buf: std.ArrayListUnmanaged(u8) = .empty; diff --git a/lib/std/Build/Cache/Directory.zig b/lib/std/Build/Cache/Directory.zig index 4de1cc18f1..a35b4fe78f 100644 --- a/lib/std/Build/Cache/Directory.zig +++ b/lib/std/Build/Cache/Directory.zig @@ -57,15 +57,13 @@ pub fn closeAndFree(self: *Directory, gpa: Allocator) void { pub fn format( self: Directory, + bw: *std.io.BufferedWriter, comptime fmt_string: []const u8, - options: fmt.FormatOptions, - writer: anytype, ) !void { - _ = options; if (fmt_string.len != 0) fmt.invalidFmtError(fmt_string, self); if (self.path) |p| { - try writer.writeAll(p); - try writer.writeAll(fs.path.sep_str); + try bw.writeAll(p); + try bw.writeAll(fs.path.sep_str); } } diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index 8822fb64be..52cffe18dc 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -142,9 +142,8 @@ pub fn toStringZ(p: Path, allocator: Allocator) Allocator.Error![:0]u8 { pub fn format( self: Path, + bw: *std.io.BufferedWriter, comptime fmt_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, ) !void { if (fmt_string.len == 1) { // Quote-escape the string. 
@@ -155,33 +154,33 @@ pub fn format( else => @compileError("unsupported format string: " ++ fmt_string), }; if (self.root_dir.path) |p| { - try stringEscape(p, f, options, writer); - if (self.sub_path.len > 0) try stringEscape(fs.path.sep_str, f, options, writer); + try stringEscape(p, bw, f); + if (self.sub_path.len > 0) try stringEscape(fs.path.sep_str, bw, f); } if (self.sub_path.len > 0) { - try stringEscape(self.sub_path, f, options, writer); + try stringEscape(self.sub_path, bw, f); } return; } if (fmt_string.len > 0) std.fmt.invalidFmtError(fmt_string, self); if (std.fs.path.isAbsolute(self.sub_path)) { - try writer.writeAll(self.sub_path); + try bw.writeAll(self.sub_path); return; } if (self.root_dir.path) |p| { - try writer.writeAll(p); + try bw.writeAll(p); if (self.sub_path.len > 0) { - try writer.writeAll(fs.path.sep_str); - try writer.writeAll(self.sub_path); + try bw.writeAll(fs.path.sep_str); + try bw.writeAll(self.sub_path); } return; } if (self.sub_path.len > 0) { - try writer.writeAll(self.sub_path); + try bw.writeAll(self.sub_path); return; } - try writer.writeByte('.'); + try bw.writeByte('.'); } pub fn eql(self: Path, other: Path) bool { diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig index d0baa61d18..bc73e7fa6d 100644 --- a/lib/std/Build/Fuzz/WebServer.zig +++ b/lib/std/Build/Fuzz/WebServer.zig @@ -169,8 +169,8 @@ fn serveFile( // The desired API is actually sendfile, which will require enhancing std.http.Server. // We load the file with every request so that the user can make changes to the file // and refresh the HTML page without restarting this server. 
- const file_contents = ws.zig_lib_directory.handle.readFileAlloc(gpa, name, 10 * 1024 * 1024) catch |err| { - log.err("failed to read '{}{s}': {s}", .{ ws.zig_lib_directory, name, @errorName(err) }); + const file_contents = ws.zig_lib_directory.handle.readFileAlloc(name, gpa, .limited(10 * 1024 * 1024)) catch |err| { + log.err("failed to read '{f}{s}': {s}", .{ ws.zig_lib_directory, name, @errorName(err) }); return error.AlreadyReported; }; defer gpa.free(file_contents); @@ -206,7 +206,7 @@ fn serveWasm( }); // std.http.Server does not have a sendfile API yet. const bin_path = try wasm_base_path.join(arena, bin_name); - const file_contents = try bin_path.root_dir.handle.readFileAlloc(gpa, bin_path.sub_path, 10 * 1024 * 1024); + const file_contents = try bin_path.root_dir.handle.readFileAlloc(bin_path.sub_path, gpa, .limited(10 * 1024 * 1024)); defer gpa.free(file_contents); try request.respond(file_contents, .{ .extra_headers = &.{ @@ -251,10 +251,10 @@ fn buildWasmBinary( "-fsingle-threaded", // "--dep", "Walk", // "--dep", "html_render", // - try std.fmt.allocPrint(arena, "-Mroot={}", .{main_src_path}), // - try std.fmt.allocPrint(arena, "-MWalk={}", .{walk_src_path}), // + try std.fmt.allocPrint(arena, "-Mroot={f}", .{main_src_path}), // + try std.fmt.allocPrint(arena, "-MWalk={f}", .{walk_src_path}), // "--dep", "Walk", // - try std.fmt.allocPrint(arena, "-Mhtml_render={}", .{html_render_src_path}), // + try std.fmt.allocPrint(arena, "-Mhtml_render={f}", .{html_render_src_path}), // "--listen=-", }); @@ -280,13 +280,10 @@ fn buildWasmBinary( const stdout = poller.fifo(.stdout); poll: while (true) { - while (stdout.readableLength() < @sizeOf(Header)) { - if (!(try poller.poll())) break :poll; - } - const header = stdout.reader().readStruct(Header) catch unreachable; - while (stdout.readableLength() < header.bytes_len) { - if (!(try poller.poll())) break :poll; - } + while (stdout.readableLength() < @sizeOf(Header)) if (!try poller.poll()) break :poll; + var 
header: Header = undefined; + assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header)); + while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll; const body = stdout.readableSliceOfLen(header.bytes_len); switch (header.tag) { @@ -527,7 +524,7 @@ fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void { for (deduped_paths) |joined_path| { var file = joined_path.root_dir.handle.openFile(joined_path.sub_path, .{}) catch |err| { - log.err("failed to open {}: {s}", .{ joined_path, @errorName(err) }); + log.err("failed to open {f}: {s}", .{ joined_path, @errorName(err) }); continue; }; defer file.close(); @@ -605,7 +602,7 @@ fn prepareTables( const rebuilt_exe_path = run_step.rebuilt_executable.?; var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| { - log.err("step '{s}': failed to load debug information for '{}': {s}", .{ + log.err("step '{s}': failed to load debug information for '{f}': {s}", .{ run_step.step.name, rebuilt_exe_path, @errorName(err), }); return error.AlreadyReported; @@ -617,7 +614,7 @@ fn prepareTables( .sub_path = "v/" ++ std.fmt.hex(coverage_id), }; var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| { - log.err("step '{s}': failed to load coverage file '{}': {s}", .{ + log.err("step '{s}': failed to load coverage file '{f}': {s}", .{ run_step.step.name, coverage_file_path, @errorName(err), }); return error.AlreadyReported; @@ -625,7 +622,7 @@ fn prepareTables( defer coverage_file.close(); const file_size = coverage_file.getEndPos() catch |err| { - log.err("unable to check len of coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) }); + log.err("unable to check len of coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) }); return error.AlreadyReported; }; @@ -637,7 +634,7 @@ fn prepareTables( coverage_file.handle, 0, ) catch |err| { - log.err("failed to map 
coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) }); + log.err("failed to map coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) }); return error.AlreadyReported; }; gop.value_ptr.mapped_memory = mapped_memory; diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 4d5d8e93e5..a0f8da9ea3 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -516,13 +516,10 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path { const stdout = zp.poller.fifo(.stdout); poll: while (true) { - while (stdout.readableLength() < @sizeOf(Header)) { - if (!(try zp.poller.poll())) break :poll; - } - const header = stdout.reader().readStruct(Header) catch unreachable; - while (stdout.readableLength() < header.bytes_len) { - if (!(try zp.poller.poll())) break :poll; - } + while (stdout.readableLength() < @sizeOf(Header)) if (!try zp.poller.poll()) break :poll; + var header: Header = undefined; + assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header)); + while (stdout.readableLength() < header.bytes_len) if (!try zp.poller.poll()) break :poll; const body = stdout.readableSliceOfLen(header.bytes_len); switch (header.tag) { diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index 699e6d2e9d..d05925e60a 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -28,7 +28,7 @@ pub fn create( ) *CheckFile { const check_file = owner.allocator.create(CheckFile) catch @panic("OOM"); check_file.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = "CheckFile", .owner = owner, @@ -53,7 +53,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { try step.singleUnchangingWatchInput(check_file.source); const src_path = check_file.source.getPath2(b, step); - const contents = fs.cwd().readFileAlloc(b.allocator, src_path, check_file.max_bytes) catch |err| { + const contents = fs.cwd().readFileAlloc(src_path, b.allocator, 
.limited(check_file.max_bytes)) catch |err| { return step.fail("unable to read '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index ba3614b948..5aa61a9d3b 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -28,14 +28,14 @@ pub fn create( const gpa = owner.allocator; const check_object = gpa.create(CheckObject) catch @panic("OOM"); check_object.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = "CheckObject", .owner = owner, .makeFn = make, }), .source = source.dupe(owner), - .checks = std.ArrayList(Check).init(gpa), + .checks = .init(gpa), .obj_format = obj_format, }; check_object.source.addStepDependencies(&check_object.step); @@ -74,13 +74,13 @@ const Action = struct { b: *std.Build, step: *Step, haystack: []const u8, - global_vars: anytype, + global_vars: *std.StringHashMap(u64), ) !bool { assert(act.tag == .extract); const hay = mem.trim(u8, haystack, " "); const phrase = mem.trim(u8, act.phrase.resolve(b, step), " "); - var candidate_vars = std.ArrayList(struct { name: []const u8, value: u64 }).init(b.allocator); + var candidate_vars: std.ArrayList(struct { name: []const u8, value: u64 }) = .init(b.allocator); var hay_it = mem.tokenizeScalar(u8, hay, ' '); var needle_it = mem.tokenizeScalar(u8, phrase, ' '); @@ -153,11 +153,11 @@ const Action = struct { /// Will return true if the `phrase` is correctly parsed into an RPN program and /// its reduced, computed value compares using `op` with the expected value, either /// a literal or another extracted variable. 
- fn computeCmp(act: Action, b: *std.Build, step: *Step, global_vars: anytype) !bool { + fn computeCmp(act: Action, b: *std.Build, step: *Step, global_vars: std.StringHashMap(u64)) !bool { const gpa = step.owner.allocator; const phrase = act.phrase.resolve(b, step); - var op_stack = std.ArrayList(enum { add, sub, mod, mul }).init(gpa); - var values = std.ArrayList(u64).init(gpa); + var op_stack: std.ArrayList(enum { add, sub, mod, mul }) = .init(gpa); + var values: std.ArrayList(u64) = .init(gpa); var it = mem.tokenizeScalar(u8, phrase, ' '); while (it.next()) |next| { @@ -230,17 +230,15 @@ const ComputeCompareExpected = struct { }, pub fn format( - value: @This(), + value: ComputeCompareExpected, + bw: *std.io.BufferedWriter, comptime fmt: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) !void { + ) anyerror!void { if (fmt.len != 0) std.fmt.invalidFmtError(fmt, value); - _ = options; - try writer.print("{s} ", .{@tagName(value.op)}); + try bw.print("{s} ", .{@tagName(value.op)}); switch (value.value) { - .variable => |name| try writer.writeAll(name), - .literal => |x| try writer.print("{x}", .{x}), + .variable => |name| try bw.writeAll(name), + .literal => |x| try bw.print("{x}", .{x}), } } }; @@ -566,15 +564,15 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { const src_path = check_object.source.getPath3(b, step); const contents = src_path.root_dir.handle.readFileAllocOptions( - gpa, src_path.sub_path, - check_object.max_bytes, + gpa, + .limited(check_object.max_bytes), null, .of(u64), null, - ) catch |err| return step.fail("unable to read '{'}': {s}", .{ src_path, @errorName(err) }); + ) catch |err| return step.fail("unable to read '{f'}': {s}", .{ src_path, @errorName(err) }); - var vars = std.StringHashMap(u64).init(gpa); + var vars: std.StringHashMap(u64) = .init(gpa); for (check_object.checks.items) |chk| { if (chk.kind == .compute_compare) { assert(chk.actions.items.len == 1); @@ -588,7 +586,7 @@ fn make(step: *Step, 
make_options: Step.MakeOptions) !void { return step.fail( \\ \\========= comparison failed for action: =========== - \\{s} {} + \\{s} {f} \\=================================================== , .{ act.phrase.resolve(b, step), act.expected.? }); } @@ -621,15 +619,13 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { fn formatMessageString( ctx: Ctx, + bw: *std.io.BufferedWriter, comptime unused_fmt_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, ) !void { _ = unused_fmt_string; - _ = options; switch (ctx.kind) { - .dump_section => try writer.print("{s}", .{std.fmt.fmtSliceEscapeLower(ctx.msg)}), - else => try writer.writeAll(ctx.msg), + .dump_section => try bw.print("{f}", .{std.fmt.fmtSliceEscapeLower(ctx.msg)}), + else => try bw.writeAll(ctx.msg), } } }.fmtMessageString; @@ -644,11 +640,11 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { return step.fail( \\ \\========= expected to find: ========================== - \\{s} + \\{f} \\========= but parsed file does not contain it: ======= - \\{s} + \\{f} \\========= file path: ================================= - \\{} + \\{f} , .{ fmtMessageString(chk.kind, act.phrase.resolve(b, step)), fmtMessageString(chk.kind, output), @@ -664,11 +660,11 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { return step.fail( \\ \\========= expected to find: ========================== - \\*{s}* + \\*{f}* \\========= but parsed file does not contain it: ======= - \\{s} + \\{f} \\========= file path: ================================= - \\{} + \\{f} , .{ fmtMessageString(chk.kind, act.phrase.resolve(b, step)), fmtMessageString(chk.kind, output), @@ -683,11 +679,11 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { return step.fail( \\ \\========= expected not to find: =================== - \\{s} + \\{f} \\========= but parsed file does contain it: ======== - \\{s} + \\{f} \\========= file path: ============================== - \\{} + \\{f} , .{ 
fmtMessageString(chk.kind, act.phrase.resolve(b, step)), fmtMessageString(chk.kind, output), @@ -703,13 +699,13 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { return step.fail( \\ \\========= expected to find and extract: ============== - \\{s} + \\{f} \\========= but parsed file does not contain it: ======= - \\{s} + \\{f} \\========= file path: ============================== - \\{} + \\{f} , .{ - act.phrase.resolve(b, step), + fmtMessageString(chk.kind, act.phrase.resolve(b, step)), fmtMessageString(chk.kind, output), src_path, }); @@ -762,14 +758,14 @@ const MachODumper = struct { }, .SYMTAB => { const lc = cmd.cast(macho.symtab_command).?; - const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(ctx.data.ptr + lc.symoff))[0..lc.nsyms]; + const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(ctx.data[lc.symoff..].ptr))[0..lc.nsyms]; const strtab = ctx.data[lc.stroff..][0..lc.strsize]; try ctx.symtab.appendUnalignedSlice(ctx.gpa, symtab); try ctx.strtab.appendSlice(ctx.gpa, strtab); }, .DYSYMTAB => { const lc = cmd.cast(macho.dysymtab_command).?; - const indexes = @as([*]align(1) const u32, @ptrCast(ctx.data.ptr + lc.indirectsymoff))[0..lc.nindirectsyms]; + const indexes = @as([*]align(1) const u32, @ptrCast(ctx.data[lc.indirectsymoff..].ptr))[0..lc.nindirectsyms]; try ctx.indsymtab.appendUnalignedSlice(ctx.gpa, indexes); }, .LOAD_DYLIB, @@ -787,7 +783,7 @@ const MachODumper = struct { fn getString(ctx: ObjectContext, off: u32) [:0]const u8 { assert(off < ctx.strtab.items.len); - return mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.strtab.items.ptr + off)), 0); + return mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.strtab.items[off..].ptr)), 0); } fn getLoadCommandIterator(ctx: ObjectContext) macho.LoadCommandIterator { @@ -1232,7 +1228,7 @@ const MachODumper = struct { } fn dumpRebaseInfo(ctx: ObjectContext, data: []const u8, bw: *std.io.BufferedWriter) !void { - var rebases = std.ArrayList(u64).init(ctx.gpa); + var rebases: 
std.ArrayList(u64) = .init(ctx.gpa); defer rebases.deinit(); try ctx.parseRebaseInfo(data, &rebases); mem.sort(u64, rebases.items, {}, std.sort.asc(u64)); @@ -1242,14 +1238,13 @@ const MachODumper = struct { } fn parseRebaseInfo(ctx: ObjectContext, data: []const u8, rebases: *std.ArrayList(u64)) !void { - var stream: std.io.FixedBufferStream = .{ .buffer = data }; - var creader = std.io.countingReader(stream.reader()); - const reader = creader.reader(); + var br: std.io.BufferedReader = undefined; + br.initFixed(data); var seg_id: ?u8 = null; var offset: u64 = 0; while (true) { - const byte = reader.readByte() catch break; + const byte = br.takeByte() catch break; const opc = byte & macho.REBASE_OPCODE_MASK; const imm = byte & macho.REBASE_IMMEDIATE_MASK; switch (opc) { @@ -1257,17 +1252,17 @@ const MachODumper = struct { macho.REBASE_OPCODE_SET_TYPE_IMM => {}, macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB => { seg_id = imm; - offset = try std.leb.readUleb128(u64, reader); + offset = try br.takeLeb128(u64); }, macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED => { offset += imm * @sizeOf(u64); }, macho.REBASE_OPCODE_ADD_ADDR_ULEB => { - const addend = try std.leb.readUleb128(u64, reader); + const addend = try br.takeLeb128(u64); offset += addend; }, macho.REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB => { - const addend = try std.leb.readUleb128(u64, reader); + const addend = try br.takeLeb128(u64); const seg = ctx.segments.items[seg_id.?]; const addr = seg.vmaddr + offset; try rebases.append(addr); @@ -1284,11 +1279,11 @@ const MachODumper = struct { ntimes = imm; }, macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES => { - ntimes = try std.leb.readUleb128(u64, reader); + ntimes = try br.takeLeb128(u64); }, macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB => { - ntimes = try std.leb.readUleb128(u64, reader); - skip = try std.leb.readUleb128(u64, reader); + ntimes = try br.takeLeb128(u64); + skip = try br.takeLeb128(u64); }, else => unreachable, } @@ -1331,7 +1326,7 @@ const 
MachODumper = struct { }; fn dumpBindInfo(ctx: ObjectContext, data: []const u8, bw: *std.io.BufferedWriter) !void { - var bindings = std.ArrayList(Binding).init(ctx.gpa); + var bindings: std.ArrayList(Binding) = .init(ctx.gpa); defer { for (bindings.items) |*b| { b.deinit(ctx.gpa); @@ -1354,9 +1349,8 @@ const MachODumper = struct { } fn parseBindInfo(ctx: ObjectContext, data: []const u8, bindings: *std.ArrayList(Binding)) !void { - var stream: std.io.FixedBufferStream = .{ .buffer = data }; - var creader = std.io.countingReader(stream.reader()); - const reader = creader.reader(); + var br: std.io.BufferedReader = undefined; + br.initFixed(data); var seg_id: ?u8 = null; var tag: Binding.Tag = .self; @@ -1364,11 +1358,10 @@ const MachODumper = struct { var offset: u64 = 0; var addend: i64 = 0; - var name_buf = std.ArrayList(u8).init(ctx.gpa); + var name_buf: std.ArrayList(u8) = .init(ctx.gpa); defer name_buf.deinit(); - while (true) { - const byte = reader.readByte() catch break; + while (br.takeByte()) |byte| { const opc = byte & macho.BIND_OPCODE_MASK; const imm = byte & macho.BIND_IMMEDIATE_MASK; switch (opc) { @@ -1389,7 +1382,7 @@ const MachODumper = struct { }, macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB => { seg_id = imm; - offset = try std.leb.readUleb128(u64, reader); + offset = try br.takeLeb128(u64); }, macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM => { name_buf.clearRetainingCapacity(); @@ -1398,10 +1391,10 @@ const MachODumper = struct { try name_buf.append(0); }, macho.BIND_OPCODE_SET_ADDEND_SLEB => { - addend = try std.leb.readIleb128(i64, reader); + addend = try br.takeLeb128(i64); }, macho.BIND_OPCODE_ADD_ADDR_ULEB => { - const x = try std.leb.readUleb128(u64, reader); + const x = try br.takeLeb128(u64); offset = @intCast(@as(i64, @intCast(offset)) + @as(i64, @bitCast(x))); }, macho.BIND_OPCODE_DO_BIND, @@ -1416,14 +1409,14 @@ const MachODumper = struct { switch (opc) { macho.BIND_OPCODE_DO_BIND => {}, macho.BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB => { 
- add_addr = try std.leb.readUleb128(u64, reader); + add_addr = try br.takeLeb128(u64); }, macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED => { add_addr = imm * @sizeOf(u64); }, macho.BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB => { - count = try std.leb.readUleb128(u64, reader); - skip = try std.leb.readUleb128(u64, reader); + count = try br.takeLeb128(u64); + skip = try br.takeLeb128(u64); }, else => unreachable, } @@ -1444,7 +1437,7 @@ const MachODumper = struct { }, else => break, } - } + } else |_| {} } fn dumpExportsTrie(ctx: ObjectContext, data: []const u8, bw: *std.io.BufferedWriter) !void { @@ -1453,9 +1446,10 @@ const MachODumper = struct { var arena = std.heap.ArenaAllocator.init(ctx.gpa); defer arena.deinit(); - var exports = std.ArrayList(Export).init(arena.allocator()); - var it = TrieIterator{ .data = data }; - try parseTrieNode(arena.allocator(), &it, "", &exports); + var exports: std.ArrayList(Export) = .init(arena.allocator()); + var br: std.io.BufferedReader = undefined; + br.initFixed(data); + try parseTrieNode(arena.allocator(), &br, "", &exports); mem.sort(Export, exports.items, {}, Export.lessThan); @@ -1484,46 +1478,6 @@ const MachODumper = struct { } } - const TrieIterator = struct { - data: []const u8, - pos: usize = 0, - - fn getStream(it: *TrieIterator) std.io.FixedBufferStream { - return .{ .buffer = it.data[it.pos..] 
}; - } - - fn readUleb128(it: *TrieIterator) !u64 { - var stream = it.getStream(); - var creader = std.io.countingReader(stream.reader()); - const reader = creader.reader(); - const value = try std.leb.readUleb128(u64, reader); - it.pos += creader.bytes_read; - return value; - } - - fn readString(it: *TrieIterator) ![:0]const u8 { - var stream = it.getStream(); - const reader = stream.reader(); - - var count: usize = 0; - while (true) : (count += 1) { - const byte = try reader.readByte(); - if (byte == 0) break; - } - - const str = @as([*:0]const u8, @ptrCast(it.data.ptr + it.pos))[0..count :0]; - it.pos += count + 1; - return str; - } - - fn readByte(it: *TrieIterator) !u8 { - var stream = it.getStream(); - const value = try stream.reader().readByte(); - it.pos += 1; - return value; - } - }; - const Export = struct { name: []const u8, tag: enum { @"export", reexport, stub_resolver }, @@ -1563,17 +1517,17 @@ const MachODumper = struct { fn parseTrieNode( arena: Allocator, - it: *TrieIterator, + br: *std.io.BufferedReader, prefix: []const u8, exports: *std.ArrayList(Export), ) !void { - const size = try it.readUleb128(); + const size = try br.takeLeb128(u64); if (size > 0) { - const flags = try it.readUleb128(); + const flags = try br.takeLeb128(u64); switch (flags) { macho.EXPORT_SYMBOL_FLAGS_REEXPORT => { - const ord = try it.readUleb128(); - const name = try arena.dupe(u8, try it.readString()); + const ord = try br.takeLeb128(u64); + const name = try br.takeDelimiterConclusive(0); try exports.append(.{ .name = if (name.len > 0) name else prefix, .tag = .reexport, @@ -1581,8 +1535,8 @@ const MachODumper = struct { }); }, macho.EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER => { - const stub_offset = try it.readUleb128(); - const resolver_offset = try it.readUleb128(); + const stub_offset = try br.takeLeb128(u64); + const resolver_offset = try br.takeLeb128(u64); try exports.append(.{ .name = prefix, .tag = .stub_resolver, @@ -1593,7 +1547,7 @@ const MachODumper = struct { 
}); }, else => { - const vmoff = try it.readUleb128(); + const vmoff = try br.takeLeb128(u64); try exports.append(.{ .name = prefix, .tag = .@"export", @@ -1612,15 +1566,15 @@ const MachODumper = struct { } } - const nedges = try it.readByte(); + const nedges = try br.takeByte(); for (0..nedges) |_| { - const label = try it.readString(); - const off = try it.readUleb128(); + const label = try br.takeDelimiterConclusive(0); + const off = try br.takeLeb128(u64); const prefix_label = try std.fmt.allocPrint(arena, "{s}{s}", .{ prefix, label }); - const curr = it.pos; - it.pos = off; - try parseTrieNode(arena, it, prefix_label, exports); - it.pos = curr; + const seek = br.seek; + br.seek = off; + try parseTrieNode(arena, br, prefix_label, exports); + br.seek = seek; } } @@ -1640,8 +1594,10 @@ const MachODumper = struct { var ctx = ObjectContext{ .gpa = gpa, .data = bytes, .header = hdr }; try ctx.parse(); - var output: std.io.AllocatingWriter = undefined; - const bw = output.init(gpa); + var aw: std.io.AllocatingWriter = undefined; + aw.init(gpa); + defer aw.deinit(); + const bw = &aw.buffered_writer; switch (check.kind) { .headers => { @@ -1717,7 +1673,7 @@ const MachODumper = struct { }, .dump_section => { - const name = mem.sliceTo(@as([*:0]const u8, @ptrCast(check.data.items.ptr + check.payload.dump_section)), 0); + const name = mem.sliceTo(@as([*:0]const u8, @ptrCast(check.data.items[check.payload.dump_section..].ptr)), 0); const sep_index = mem.indexOfScalar(u8, name, ',') orelse return step.fail("invalid section name: {s}", .{name}); const segname = name[0..sep_index]; @@ -1730,7 +1686,7 @@ const MachODumper = struct { else => return step.fail("invalid check kind for MachO file format: {s}", .{@tagName(check.kind)}), } - return output.toOwnedSlice(); + return aw.toOwnedSlice(); } }; @@ -1749,153 +1705,133 @@ const ElfDumper = struct { fn parseAndDumpArchive(step: *Step, check: Check, bytes: []const u8) ![]const u8 { const gpa = step.owner.allocator; - var stream: 
std.io.FixedBufferStream = .{ .buffer = bytes }; - const reader = stream.reader(); + var br: std.io.BufferedReader = undefined; + br.initFixed(bytes); - const magic = try reader.readBytesNoEof(elf.ARMAG.len); - if (!mem.eql(u8, &magic, elf.ARMAG)) { - return error.InvalidArchiveMagicNumber; - } + if (!mem.eql(u8, try br.takeArray(elf.ARMAG.len), elf.ARMAG)) return error.InvalidArchiveMagicNumber; - var ctx = ArchiveContext{ + var ctx: ArchiveContext = .{ .gpa = gpa, .data = bytes, - .strtab = &[0]u8{}, + .symtab = &.{}, + .strtab = &.{}, + .objects = .empty, }; - defer { - for (ctx.objects.items) |*object| { - gpa.free(object.name); - } - ctx.objects.deinit(gpa); - } + defer ctx.deinit(); - while (true) { - if (stream.pos >= ctx.data.len) break; - if (!mem.isAligned(stream.pos, 2)) stream.pos += 1; - - const hdr = try reader.readStruct(elf.ar_hdr); + while (br.seek < bytes.len) { + const hdr_seek = std.mem.alignForward(usize, br.seek, 2); + br.seek = hdr_seek; + const hdr = try br.takeStruct(elf.ar_hdr); if (!mem.eql(u8, &hdr.ar_fmag, elf.ARFMAG)) return error.InvalidArchiveHeaderMagicNumber; - const size = try hdr.size(); - defer { - _ = stream.seekBy(size) catch {}; - } + const data = try br.take(try hdr.size()); if (hdr.isSymtab()) { - try ctx.parseSymtab(ctx.data[stream.pos..][0..size], .p32); + try ctx.parseSymtab(data, .p32); continue; } if (hdr.isSymtab64()) { - try ctx.parseSymtab(ctx.data[stream.pos..][0..size], .p64); + try ctx.parseSymtab(data, .p64); continue; } if (hdr.isStrtab()) { - ctx.strtab = ctx.data[stream.pos..][0..size]; + ctx.strtab = data; continue; } if (hdr.isSymdef() or hdr.isSymdefSorted()) continue; - const name = if (hdr.name()) |name| - try gpa.dupe(u8, name) - else if (try hdr.nameOffset()) |off| - try gpa.dupe(u8, ctx.getString(off)) - else - unreachable; - - try ctx.objects.append(gpa, .{ .name = name, .off = stream.pos, .len = size }); + const name = hdr.name() orelse ctx.getString((try hdr.nameOffset()).?); + try 
ctx.objects.putNoClobber(gpa, hdr_seek, .{ + .name = name, + .data = data, + }); } - var output: std.io.AllocatingWriter = undefined; - const writer = output.init(gpa); + var aw: std.io.AllocatingWriter = undefined; + aw.init(gpa); + defer aw.deinit(); + const bw = &aw.buffered_writer; switch (check.kind) { - .archive_symtab => if (ctx.symtab.items.len > 0) { - try ctx.dumpSymtab(writer); + .archive_symtab => if (ctx.symtab.len > 0) { + try ctx.dumpSymtab(bw); } else return step.fail("no archive symbol table found", .{}), - else => if (ctx.objects.items.len > 0) { - try ctx.dumpObjects(step, check, writer); + else => if (ctx.objects.count() > 0) { + try ctx.dumpObjects(step, check, bw); } else return step.fail("empty archive", .{}), } - return output.toOwnedSlice(); + return aw.toOwnedSlice(); } const ArchiveContext = struct { gpa: Allocator, data: []const u8, - symtab: std.ArrayListUnmanaged(ArSymtabEntry) = .empty, + symtab: []ArSymtabEntry, strtab: []const u8, - objects: std.ArrayListUnmanaged(struct { name: []const u8, off: usize, len: usize }) = .empty, + objects: std.AutoArrayHashMapUnmanaged(usize, struct { name: []const u8, data: []const u8 }), - fn parseSymtab(ctx: *ArchiveContext, raw: []const u8, ptr_width: enum { p32, p64 }) !void { - var stream: std.io.FixedBufferStream = .{ .buffer = raw }; - const reader = stream.reader(); + fn deinit(ctx: *ArchiveContext) void { + ctx.gpa.free(ctx.symtab); + ctx.objects.deinit(ctx.gpa); + } + + fn parseSymtab(ctx: *ArchiveContext, data: []const u8, ptr_width: enum { p32, p64 }) !void { + var br: std.io.BufferedReader = undefined; + br.initFixed(data); const num = switch (ptr_width) { - .p32 => try reader.readInt(u32, .big), - .p64 => try reader.readInt(u64, .big), + .p32 => try br.takeInt(u32, .big), + .p64 => try br.takeInt(u64, .big), }; const ptr_size: usize = switch (ptr_width) { .p32 => @sizeOf(u32), .p64 => @sizeOf(u64), }; - const strtab_off = (num + 1) * ptr_size; - const strtab_len = raw.len - strtab_off; - 
const strtab = raw[strtab_off..][0..strtab_len]; + try br.discard(num * ptr_size); + const strtab = try br.peekAll(0); - try ctx.symtab.ensureTotalCapacityPrecise(ctx.gpa, num); + assert(ctx.symtab.len == 0); + ctx.symtab = try ctx.gpa.alloc(ArSymtabEntry, num); var stroff: usize = 0; - for (0..num) |_| { + for (ctx.symtab) |*entry| { const off = switch (ptr_width) { - .p32 => try reader.readInt(u32, .big), - .p64 => try reader.readInt(u64, .big), + .p32 => try br.takeInt(u32, .big), + .p64 => try br.takeInt(u64, .big), }; - const name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + stroff)), 0); + const name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab[stroff..].ptr)), 0); stroff += name.len + 1; - ctx.symtab.appendAssumeCapacity(.{ .off = off, .name = name }); + entry.* = .{ .off = off, .name = name }; } } fn dumpSymtab(ctx: ArchiveContext, bw: *std.io.BufferedWriter) !void { - var files = std.AutoHashMap(usize, []const u8).init(ctx.gpa); - defer files.deinit(); - try files.ensureUnusedCapacity(@intCast(ctx.objects.items.len)); - - for (ctx.objects.items) |object| { - files.putAssumeCapacityNoClobber(object.off - @sizeOf(elf.ar_hdr), object.name); - } - - var symbols = std.AutoArrayHashMap(usize, std.ArrayList([]const u8)).init(ctx.gpa); + var symbols: std.AutoArrayHashMap(usize, std.ArrayList([]const u8)) = .init(ctx.gpa); defer { - for (symbols.values()) |*value| { - value.deinit(); - } + for (symbols.values()) |*value| value.deinit(); symbols.deinit(); } - for (ctx.symtab.items) |entry| { + for (ctx.symtab) |entry| { const gop = try symbols.getOrPut(@intCast(entry.off)); - if (!gop.found_existing) { - gop.value_ptr.* = std.ArrayList([]const u8).init(ctx.gpa); - } + if (!gop.found_existing) gop.value_ptr.* = .init(ctx.gpa); try gop.value_ptr.append(entry.name); } try bw.print("{s}\n", .{archive_symtab_label}); for (symbols.keys(), symbols.values()) |off, values| { - try bw.print("in object {s}\n", .{files.get(off).?}); - for (values.items) |value| { 
- try bw.print("{s}\n", .{value}); - } + try bw.print("in object {s}\n", .{ctx.objects.get(off).?.name}); + for (values.items) |value| try bw.print("{s}\n", .{value}); } } fn dumpObjects(ctx: ArchiveContext, step: *Step, check: Check, bw: *std.io.BufferedWriter) !void { - for (ctx.objects.items) |object| { + for (ctx.objects.values()) |object| { try bw.print("object {s}\n", .{object.name}); - const output = try parseAndDumpObject(step, check, ctx.data[object.off..][0..object.len]); + const output = try parseAndDumpObject(step, check, object.data); defer ctx.gpa.free(output); try bw.print("{s}\n", .{output}); } @@ -1903,7 +1839,7 @@ const ElfDumper = struct { fn getString(ctx: ArchiveContext, off: u32) []const u8 { assert(off < ctx.strtab.len); - const name = mem.sliceTo(@as([*:'\n']const u8, @ptrCast(ctx.strtab.ptr + off)), 0); + const name = mem.sliceTo(@as([*:'\n']const u8, @ptrCast(ctx.strtab[off..].ptr)), 0); return name[0 .. name.len - 1]; } @@ -1915,24 +1851,24 @@ const ElfDumper = struct { fn parseAndDumpObject(step: *Step, check: Check, bytes: []const u8) ![]const u8 { const gpa = step.owner.allocator; - var stream: std.io.FixedBufferStream = .{ .buffer = bytes }; - const reader = stream.reader(); + var br: std.io.BufferedReader = undefined; + br.initFixed(bytes); - const hdr = try reader.readStruct(elf.Elf64_Ehdr); - if (!mem.eql(u8, hdr.e_ident[0..4], "\x7fELF")) { - return error.InvalidMagicNumber; - } + const hdr = try br.takeStruct(elf.Elf64_Ehdr); + if (!mem.eql(u8, hdr.e_ident[0..4], "\x7fELF")) return error.InvalidMagicNumber; - const shdrs = @as([*]align(1) const elf.Elf64_Shdr, @ptrCast(bytes.ptr + hdr.e_shoff))[0..hdr.e_shnum]; - const phdrs = @as([*]align(1) const elf.Elf64_Phdr, @ptrCast(bytes.ptr + hdr.e_phoff))[0..hdr.e_phnum]; + const shdrs = @as([*]align(1) const elf.Elf64_Shdr, @ptrCast(bytes[hdr.e_shoff..].ptr))[0..hdr.e_shnum]; + const phdrs = @as([*]align(1) const elf.Elf64_Phdr, @ptrCast(bytes[hdr.e_phoff..].ptr))[0..hdr.e_phnum]; - 
var ctx = ObjectContext{ + var ctx: ObjectContext = .{ .gpa = gpa, .data = bytes, .hdr = hdr, .shdrs = shdrs, .phdrs = phdrs, .shstrtab = undefined, + .symtab = .{}, + .dysymtab = .{}, }; ctx.shstrtab = ctx.getSectionContents(ctx.hdr.e_shstrndx); @@ -1963,8 +1899,10 @@ const ElfDumper = struct { else => {}, }; - var output: std.io.AllocatingWriter = undefined; - const bw = output.init(gpa); + var aw: std.io.AllocatingWriter = undefined; + aw.init(gpa); + defer aw.deinit(); + const bw = &aw.buffered_writer; switch (check.kind) { .headers => { @@ -1986,7 +1924,7 @@ const ElfDumper = struct { } else return step.fail("no .dynamic section found", .{}), .dump_section => { - const name = mem.sliceTo(@as([*:0]const u8, @ptrCast(check.data.items.ptr + check.payload.dump_section)), 0); + const name = mem.sliceTo(@as([*:0]const u8, @ptrCast(check.data.items[check.payload.dump_section..].ptr)), 0); const shndx = ctx.getSectionByName(name) orelse return step.fail("no '{s}' section found", .{name}); try ctx.dumpSection(shndx, bw); }, @@ -1994,18 +1932,18 @@ const ElfDumper = struct { else => return step.fail("invalid check kind for ELF file format: {s}", .{@tagName(check.kind)}), } - return output.toOwnedSlice(); + return aw.toOwnedSlice(); } const ObjectContext = struct { gpa: Allocator, data: []const u8, - hdr: elf.Elf64_Ehdr, + hdr: *align(1) const elf.Elf64_Ehdr, shdrs: []align(1) const elf.Elf64_Shdr, phdrs: []align(1) const elf.Elf64_Phdr, shstrtab: []const u8, - symtab: Symtab = .{}, - dysymtab: Symtab = .{}, + symtab: Symtab, + dysymtab: Symtab, fn dumpHeader(ctx: ObjectContext, bw: *std.io.BufferedWriter) !void { try bw.writeAll("header\n"); @@ -2020,7 +1958,7 @@ const ElfDumper = struct { for (ctx.phdrs, 0..) 
|phdr, phndx| { try bw.print("phdr {d}\n", .{phndx}); - try bw.print("type {s}\n", .{fmtPhType(phdr.p_type)}); + try bw.print("type {f}\n", .{fmtPhType(phdr.p_type)}); try bw.print("vaddr {x}\n", .{phdr.p_vaddr}); try bw.print("paddr {x}\n", .{phdr.p_paddr}); try bw.print("offset {x}\n", .{phdr.p_offset}); @@ -2060,7 +1998,7 @@ const ElfDumper = struct { for (ctx.shdrs, 0..) |shdr, shndx| { try bw.print("shdr {d}\n", .{shndx}); try bw.print("name {s}\n", .{ctx.getSectionName(shndx)}); - try bw.print("type {s}\n", .{fmtShType(shdr.sh_type)}); + try bw.print("type {f}\n", .{fmtShType(shdr.sh_type)}); try bw.print("addr {x}\n", .{shdr.sh_addr}); try bw.print("offset {x}\n", .{shdr.sh_offset}); try bw.print("size {x}\n", .{shdr.sh_size}); @@ -2329,8 +2267,8 @@ const ElfDumper = struct { }; fn getString(strtab: []const u8, off: u32) []const u8 { - assert(off < strtab.len); - return mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + off)), 0); + const str = strtab[off..]; + return str[0..std.mem.indexOfScalar(u8, str, 0).?]; } fn fmtShType(sh_type: u32) std.fmt.Formatter(formatShType) { @@ -2339,12 +2277,10 @@ const ElfDumper = struct { fn formatShType( sh_type: u32, - comptime unused_fmt_string: []const u8, - options: std.fmt.FormatOptions, bw: *std.io.BufferedWriter, + comptime unused_fmt_string: []const u8, ) !void { _ = unused_fmt_string; - _ = options; const name = switch (sh_type) { elf.SHT_NULL => "NULL", elf.SHT_PROGBITS => "PROGBITS", @@ -2386,12 +2322,10 @@ const ElfDumper = struct { fn formatPhType( ph_type: u32, - comptime unused_fmt_string: []const u8, - options: std.fmt.FormatOptions, bw: *std.io.BufferedWriter, + comptime unused_fmt_string: []const u8, ) !void { _ = unused_fmt_string; - _ = options; const p_type = switch (ph_type) { elf.PT_NULL => "NULL", elf.PT_LOAD => "LOAD", @@ -2420,49 +2354,41 @@ const WasmDumper = struct { fn parseAndDump(step: *Step, check: Check, bytes: []const u8) ![]const u8 { const gpa = step.owner.allocator; - var fbs: 
std.io.FixedBufferStream = .{ .buffer = bytes }; - const reader = fbs.reader(); + var br: std.io.BufferedReader = undefined; + br.initFixed(bytes); - const buf = try reader.readBytesNoEof(8); - if (!mem.eql(u8, buf[0..4], &std.wasm.magic)) { - return error.InvalidMagicByte; - } - if (!mem.eql(u8, buf[4..], &std.wasm.version)) { - return error.UnsupportedWasmVersion; - } + const buf = try br.takeArray(8); + if (!mem.eql(u8, buf[0..4], &std.wasm.magic)) return error.InvalidMagicByte; + if (!mem.eql(u8, buf[4..8], &std.wasm.version)) return error.UnsupportedWasmVersion; - var output: std.io.AllocatingWriter = undefined; - const bw = output.init(gpa); - defer output.deinit(); - parseAndDumpInner(step, check, bytes, &fbs, bw) catch |err| switch (err) { + var aw: std.io.AllocatingWriter = undefined; + aw.init(gpa); + defer aw.deinit(); + const bw = &aw.buffered_writer; + + parseAndDumpInner(step, check, &br, bw) catch |err| switch (err) { error.EndOfStream => try bw.writeAll("\n"), else => |e| return e, }; - return output.toOwnedSlice(); + return aw.toOwnedSlice(); } fn parseAndDumpInner( step: *Step, check: Check, - bytes: []const u8, - fbs: *std.io.FixedBufferStream, + br: *std.io.BufferedReader, bw: *std.io.BufferedWriter, ) !void { - const reader = fbs.reader(); - + var section_br: std.io.BufferedReader = undefined; switch (check.kind) { - .headers => { - while (reader.readByte()) |current_byte| { - const section = std.enums.fromInt(std.wasm.Section, current_byte) orelse { - return step.fail("Found invalid section id '{d}'", .{current_byte}); - }; - - const section_length = try std.leb.readUleb128(u32, reader); - try parseAndDumpSection(step, section, bytes[fbs.pos..][0..section_length], bw); - fbs.pos += section_length; - } else |_| {} // reached end of stream + .headers => while (br.takeEnum(std.wasm.Section, .little)) |section| { + section_br.initFixed(try br.take(try br.takeLeb128(u32))); + try parseAndDumpSection(step, section, §ion_br, bw); + } else |err| 
switch (err) { + error.InvalidEnumTag => return step.fail("invalid section id", .{}), + error.EndOfStream => {}, + else => |e| return e, }, - else => return step.fail("invalid check kind for Wasm file format: {s}", .{@tagName(check.kind)}), } } @@ -2470,16 +2396,13 @@ const WasmDumper = struct { fn parseAndDumpSection( step: *Step, section: std.wasm.Section, - data: []const u8, + br: *std.io.BufferedReader, bw: *std.io.BufferedWriter, ) !void { - var fbs: std.io.FixedBufferStream = .{ .buffer = data }; - const reader = fbs.reader(); - try bw.print( \\Section {s} \\size {d} - , .{ @tagName(section), data.len }); + , .{ @tagName(section), br.storageBuffer().len }); switch (section) { .type, @@ -2493,74 +2416,65 @@ const WasmDumper = struct { .code, .data, => { - const entries = try std.leb.readUleb128(u32, reader); + const entries = try br.takeLeb128(u32); try bw.print("\nentries {d}\n", .{entries}); - try parseSection(step, section, data[fbs.pos..], entries, bw); + try parseSection(step, section, br, entries, bw); }, .custom => { - const name_length = try std.leb.readUleb128(u32, reader); - const name = data[fbs.pos..][0..name_length]; - fbs.pos += name_length; + const name = try br.take(try br.takeLeb128(u32)); try bw.print("\nname {s}\n", .{name}); if (mem.eql(u8, name, "name")) { - try parseDumpNames(step, reader, bw, data); + try parseDumpNames(step, br, bw); } else if (mem.eql(u8, name, "producers")) { - try parseDumpProducers(reader, bw, data); + try parseDumpProducers(br, bw); } else if (mem.eql(u8, name, "target_features")) { - try parseDumpFeatures(reader, bw, data); + try parseDumpFeatures(br, bw); } // TODO: Implement parsing and dumping other custom sections (such as relocations) }, .start => { - const start = try std.leb.readUleb128(u32, reader); + const start = try br.takeLeb128(u32); try bw.print("\nstart {d}\n", .{start}); }, .data_count => { - const count = try std.leb.readUleb128(u32, reader); + const count = try br.takeLeb128(u32); try 
bw.print("\ncount {d}\n", .{count}); }, else => {}, // skip unknown sections } } - fn parseSection(step: *Step, section: std.wasm.Section, data: []const u8, entries: u32, bw: *std.io.BufferedWriter) !void { - var fbs: std.io.FixedBufferStream = .{ .buffer = data }; - const reader = fbs.reader(); - + fn parseSection(step: *Step, section: std.wasm.Section, br: *std.io.BufferedReader, entries: u32, bw: *std.io.BufferedWriter) !void { switch (section) { .type => { var i: u32 = 0; while (i < entries) : (i += 1) { - const func_type = try reader.readByte(); + const func_type = try br.takeByte(); if (func_type != std.wasm.function_type) { return step.fail("expected function type, found byte '{d}'", .{func_type}); } - const params = try std.leb.readUleb128(u32, reader); + const params = try br.takeLeb128(u32); try bw.print("params {d}\n", .{params}); var index: u32 = 0; while (index < params) : (index += 1) { - _ = try parseDumpType(step, std.wasm.Valtype, reader, bw); + _ = try parseDumpType(step, std.wasm.Valtype, br, bw); } else index = 0; - const returns = try std.leb.readUleb128(u32, reader); + const returns = try br.takeLeb128(u32); try bw.print("returns {d}\n", .{returns}); while (index < returns) : (index += 1) { - _ = try parseDumpType(step, std.wasm.Valtype, reader, bw); + _ = try parseDumpType(step, std.wasm.Valtype, br, bw); } } }, .import => { var i: u32 = 0; while (i < entries) : (i += 1) { - const module_name_len = try std.leb.readUleb128(u32, reader); - const module_name = data[fbs.pos..][0..module_name_len]; - fbs.pos += module_name_len; - const name_len = try std.leb.readUleb128(u32, reader); - const name = data[fbs.pos..][0..name_len]; - fbs.pos += name_len; - - const kind = std.enums.fromInt(std.wasm.ExternalKind, try reader.readByte()) orelse { - return step.fail("invalid import kind", .{}); + const module_name = try br.take(try br.takeLeb128(u32)); + const name = try br.take(try br.takeLeb128(u32)); + const kind = br.takeEnum(std.wasm.ExternalKind, 
.little) catch |err| switch (err) { + error.InvalidEnumTag => return step.fail("invalid import kind", .{}), + else => |e| return e, }; try bw.print( @@ -2570,19 +2484,15 @@ const WasmDumper = struct { , .{ module_name, name, @tagName(kind) }); try bw.writeByte('\n'); switch (kind) { - .function => { - try bw.print("index {d}\n", .{try std.leb.readUleb128(u32, reader)}); - }, - .memory => { - try parseDumpLimits(reader, bw); - }, + .function => try bw.print("index {d}\n", .{try br.takeLeb128(u32)}), + .memory => try parseDumpLimits(br, bw), .global => { - _ = try parseDumpType(step, std.wasm.Valtype, reader, bw); - try bw.print("mutable {}\n", .{0x01 == try std.leb.readUleb128(u32, reader)}); + _ = try parseDumpType(step, std.wasm.Valtype, br, bw); + try bw.print("mutable {}\n", .{0x01 == try br.takeLeb128(u32)}); }, .table => { - _ = try parseDumpType(step, std.wasm.RefType, reader, bw); - try parseDumpLimits(reader, bw); + _ = try parseDumpType(step, std.wasm.RefType, br, bw); + try parseDumpLimits(br, bw); }, } } @@ -2590,41 +2500,39 @@ const WasmDumper = struct { .function => { var i: u32 = 0; while (i < entries) : (i += 1) { - try bw.print("index {d}\n", .{try std.leb.readUleb128(u32, reader)}); + try bw.print("index {d}\n", .{try br.takeLeb128(u32)}); } }, .table => { var i: u32 = 0; while (i < entries) : (i += 1) { - _ = try parseDumpType(step, std.wasm.RefType, reader, bw); - try parseDumpLimits(reader, bw); + _ = try parseDumpType(step, std.wasm.RefType, br, bw); + try parseDumpLimits(br, bw); } }, .memory => { var i: u32 = 0; while (i < entries) : (i += 1) { - try parseDumpLimits(reader, bw); + try parseDumpLimits(br, bw); } }, .global => { var i: u32 = 0; while (i < entries) : (i += 1) { - _ = try parseDumpType(step, std.wasm.Valtype, reader, bw); - try bw.print("mutable {}\n", .{0x01 == try std.leb.readUleb128(u1, reader)}); - try parseDumpInit(step, reader, bw); + _ = try parseDumpType(step, std.wasm.Valtype, br, bw); + try bw.print("mutable {}\n", 
.{0x01 == try br.takeLeb128(u1)}); + try parseDumpInit(step, br, bw); } }, .@"export" => { var i: u32 = 0; while (i < entries) : (i += 1) { - const name_len = try std.leb.readUleb128(u32, reader); - const name = data[fbs.pos..][0..name_len]; - fbs.pos += name_len; - const kind_byte = try std.leb.readUleb128(u8, reader); - const kind = std.enums.fromInt(std.wasm.ExternalKind, kind_byte) orelse { - return step.fail("invalid export kind value '{d}'", .{kind_byte}); + const name = try br.take(try br.takeLeb128(u32)); + const kind = br.takeEnum(std.wasm.ExternalKind, .little) catch |err| switch (err) { + error.InvalidEnumTag => return step.fail("invalid export kind value", .{}), + else => |e| return e, }; - const index = try std.leb.readUleb128(u32, reader); + const index = try br.takeLeb128(u32); try bw.print( \\name {s} \\kind {s} @@ -2636,14 +2544,14 @@ const WasmDumper = struct { .element => { var i: u32 = 0; while (i < entries) : (i += 1) { - try bw.print("table index {d}\n", .{try std.leb.readUleb128(u32, reader)}); - try parseDumpInit(step, reader, bw); + try bw.print("table index {d}\n", .{try br.takeLeb128(u32)}); + try parseDumpInit(step, br, bw); - const function_indexes = try std.leb.readUleb128(u32, reader); + const function_indexes = try br.takeLeb128(u32); var function_index: u32 = 0; try bw.print("indexes {d}\n", .{function_indexes}); while (function_index < function_indexes) : (function_index += 1) { - try bw.print("index {d}\n", .{try std.leb.readUleb128(u32, reader)}); + try bw.print("index {d}\n", .{try br.takeLeb128(u32)}); } } }, @@ -2651,101 +2559,95 @@ const WasmDumper = struct { .data => { var i: u32 = 0; while (i < entries) : (i += 1) { - const flags = try std.leb.readUleb128(u32, reader); - const index = if (flags & 0x02 != 0) - try std.leb.readUleb128(u32, reader) - else - 0; + const flags: packed struct(u32) { + passive: bool, + memidx: bool, + unused: u30, + } = @bitCast(try br.takeLeb128(u32)); + const index = if (flags.memidx) try 
br.takeLeb128(u32) else 0; try bw.print("memory index 0x{x}\n", .{index}); - if (flags == 0) { - try parseDumpInit(step, reader, bw); - } - - const size = try std.leb.readUleb128(u32, reader); + if (!flags.passive) try parseDumpInit(step, br, bw); + const size = try br.takeLeb128(u32); try bw.print("size {d}\n", .{size}); - try reader.skipBytes(size, .{}); // we do not care about the content of the segments + try br.discard(size); // we do not care about the content of the segments } }, else => unreachable, } } - fn parseDumpType(step: *Step, comptime E: type, reader: anytype, bw: *std.io.BufferedWriter) !E { - const byte = try reader.readByte(); - const tag = std.enums.fromInt(E, byte) orelse { - return step.fail("invalid wasm type value '{d}'", .{byte}); + fn parseDumpType(step: *Step, comptime E: type, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !E { + const tag = br.takeEnum(E, .little) catch |err| switch (err) { + error.InvalidEnumTag => return step.fail("invalid wasm type value", .{}), + else => |e| return e, }; try bw.print("type {s}\n", .{@tagName(tag)}); return tag; } - fn parseDumpLimits(reader: anytype, bw: *std.io.BufferedWriter) !void { - const flags = try std.leb.readUleb128(u8, reader); - const min = try std.leb.readUleb128(u32, reader); + fn parseDumpLimits(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { + const flags = try br.takeLeb128(u8); + const min = try br.takeLeb128(u32); try bw.print("min {x}\n", .{min}); - if (flags != 0) { - try bw.print("max {x}\n", .{try std.leb.readUleb128(u32, reader)}); - } + if (flags != 0) try bw.print("max {x}\n", .{try br.takeLeb128(u32)}); } - fn parseDumpInit(step: *Step, reader: anytype, bw: *std.io.BufferedWriter) !void { - const byte = try reader.readByte(); - const opcode = std.enums.fromInt(std.wasm.Opcode, byte) orelse { - return step.fail("invalid wasm opcode '{d}'", .{byte}); + fn parseDumpInit(step: *Step, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { + 
const opcode = br.takeEnum(std.wasm.Opcode, .little) catch |err| switch (err) { + error.InvalidEnumTag => return step.fail("invalid wasm opcode", .{}), + else => |e| return e, }; switch (opcode) { - .i32_const => try bw.print("i32.const {x}\n", .{try std.leb.readIleb128(i32, reader)}), - .i64_const => try bw.print("i64.const {x}\n", .{try std.leb.readIleb128(i64, reader)}), - .f32_const => try bw.print("f32.const {x}\n", .{@as(f32, @bitCast(try reader.readInt(u32, .little)))}), - .f64_const => try bw.print("f64.const {x}\n", .{@as(f64, @bitCast(try reader.readInt(u64, .little)))}), - .global_get => try bw.print("global.get {x}\n", .{try std.leb.readUleb128(u32, reader)}), + .i32_const => try bw.print("i32.const {x}\n", .{try br.takeLeb128(i32)}), + .i64_const => try bw.print("i64.const {x}\n", .{try br.takeLeb128(i64)}), + .f32_const => try bw.print("f32.const {x}\n", .{@as(f32, @bitCast(try br.takeInt(u32, .little)))}), + .f64_const => try bw.print("f64.const {x}\n", .{@as(f64, @bitCast(try br.takeInt(u64, .little)))}), + .global_get => try bw.print("global.get {x}\n", .{try br.takeLeb128(u32)}), else => unreachable, } - const end_opcode = try std.leb.readUleb128(u8, reader); + const end_opcode = try br.takeLeb128(u8); if (end_opcode != @intFromEnum(std.wasm.Opcode.end)) { return step.fail("expected 'end' opcode in init expression", .{}); } } /// https://webassembly.github.io/spec/core/appendix/custom.html - fn parseDumpNames(step: *Step, reader: anytype, bw: *std.io.BufferedWriter, data: []const u8) !void { - while (reader.context.pos < data.len) { - switch (try parseDumpType(step, std.wasm.NameSubsection, reader, bw)) { + fn parseDumpNames(step: *Step, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { + var subsection_br: std.io.BufferedReader = undefined; + while (br.seek < br.storageBuffer().len) { + switch (try parseDumpType(step, std.wasm.NameSubsection, br, bw)) { // The module name subsection ... 
consists of a single name // that is assigned to the module itself. .module => { - const size = try std.leb.readUleb128(u32, reader); - const name_len = try std.leb.readUleb128(u32, reader); - if (size != name_len + 1) return error.BadSubsectionSize; - if (reader.context.pos + name_len > data.len) return error.UnexpectedEndOfStream; - try bw.print("name {s}\n", .{data[reader.context.pos..][0..name_len]}); - reader.context.pos += name_len; + subsection_br.initFixed(try br.take(try br.takeLeb128(u32))); + const name = try subsection_br.take(try subsection_br.takeLeb128(u32)); + try bw.print( + \\name {s} + \\ + , .{name}); + if (subsection_br.seek != subsection_br.storageBuffer().len) return error.BadSubsectionSize; }, // The function name subsection ... consists of a name map // assigning function names to function indices. .function, .global, .data_segment => { - const size = try std.leb.readUleb128(u32, reader); - const entries = try std.leb.readUleb128(u32, reader); + subsection_br.initFixed(try br.take(try br.takeLeb128(u32))); + const entries = try br.takeLeb128(u32); try bw.print( - \\size {d} \\names {d} \\ - , .{ size, entries }); + , .{entries}); for (0..entries) |_| { - const index = try std.leb.readUleb128(u32, reader); - const name_len = try std.leb.readUleb128(u32, reader); - if (reader.context.pos + name_len > data.len) return error.UnexpectedEndOfStream; - const name = data[reader.context.pos..][0..name_len]; - reader.context.pos += name.len; - + const index = try br.takeLeb128(u32); + const name = try br.take(try br.takeLeb128(u32)); try bw.print( \\index {d} \\name {s} \\ , .{ index, name }); } + if (subsection_br.seek != subsection_br.storageBuffer().len) return error.BadSubsectionSize; }, // The local name subsection ... 
consists of an indirect name @@ -2760,52 +2662,49 @@ const WasmDumper = struct { } } - fn parseDumpProducers(reader: anytype, bw: *std.io.BufferedWriter, data: []const u8) !void { - const field_count = try std.leb.readUleb128(u32, reader); - try bw.print("fields {d}\n", .{field_count}); + fn parseDumpProducers(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { + const field_count = try br.takeLeb128(u32); + try bw.print( + \\fields {d} + \\ + , .{field_count}); var current_field: u32 = 0; while (current_field < field_count) : (current_field += 1) { - const field_name_length = try std.leb.readUleb128(u32, reader); - const field_name = data[reader.context.pos..][0..field_name_length]; - reader.context.pos += field_name_length; - - const value_count = try std.leb.readUleb128(u32, reader); + const field_name = try br.take(try br.takeLeb128(u32)); + const value_count = try br.takeLeb128(u32); try bw.print( \\field_name {s} \\values {d} + \\ , .{ field_name, value_count }); - try bw.writeByte('\n'); var current_value: u32 = 0; while (current_value < value_count) : (current_value += 1) { - const value_length = try std.leb.readUleb128(u32, reader); - const value = data[reader.context.pos..][0..value_length]; - reader.context.pos += value_length; - - const version_length = try std.leb.readUleb128(u32, reader); - const version = data[reader.context.pos..][0..version_length]; - reader.context.pos += version_length; - + const value = try br.take(try br.takeLeb128(u32)); + const version = try br.take(try br.takeLeb128(u32)); try bw.print( \\value_name {s} \\version {s} + \\ , .{ value, version }); - try bw.writeByte('\n'); } } } - fn parseDumpFeatures(reader: anytype, bw: *std.io.BufferedWriter, data: []const u8) !void { - const feature_count = try std.leb.readUleb128(u32, reader); - try bw.print("features {d}\n", .{feature_count}); + fn parseDumpFeatures(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { + const feature_count = try br.takeLeb128(u32); 
+ try bw.print( + \\features {d} + \\ + , .{feature_count}); var index: u32 = 0; while (index < feature_count) : (index += 1) { - const prefix_byte = try std.leb.readUleb128(u8, reader); - const name_length = try std.leb.readUleb128(u32, reader); - const feature_name = data[reader.context.pos..][0..name_length]; - reader.context.pos += name_length; - - try bw.print("{c} {s}\n", .{ prefix_byte, feature_name }); + const prefix_byte = try br.takeLeb128(u8); + const feature_name = try br.take(try br.takeLeb128(u32)); + try bw.print( + \\{c} {s} + \\ + , .{ prefix_byte, feature_name }); } } }; diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 3705b3ebe2..b5bdc5856d 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -409,7 +409,7 @@ pub fn create(owner: *std.Build, options: Options) *Compile { .linkage = options.linkage, .kind = options.kind, .name = name, - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = step_name, .owner = owner, @@ -1542,7 +1542,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { if (compile.kind == .lib and compile.linkage != null and compile.linkage.? 
== .dynamic) { if (compile.version) |version| { try zig_args.append("--version"); - try zig_args.append(b.fmt("{}", .{version})); + try zig_args.append(b.fmt("{f}", .{version})); } if (compile.rootModuleTarget().os.tag.isDarwin()) { @@ -1704,7 +1704,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { const opt_zig_lib_dir = if (compile.zig_lib_dir) |dir| dir.getPath2(b, step) else if (b.graph.zig_lib_directory.path) |_| - b.fmt("{}", .{b.graph.zig_lib_directory}) + b.fmt("{f}", .{b.graph.zig_lib_directory}) else null; @@ -1830,7 +1830,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { // Update generated files if (maybe_output_dir) |output_dir| { if (compile.emit_directory) |lp| { - lp.path = b.fmt("{}", .{output_dir}); + lp.path = b.fmt("{f}", .{output_dir}); } // zig fmt: off @@ -1970,13 +1970,13 @@ fn checkCompileErrors(compile: *Compile) !void { const actual_errors = ae: { var aw: std.io.AllocatingWriter = undefined; - const bw = aw.init(arena); + aw.init(arena); defer aw.deinit(); try actual_eb.renderToWriter(.{ .ttyconf = .no_color, .include_reference_trace = false, .include_source_line = false, - }, bw); + }, &aw.buffered_writer); break :ae try aw.toOwnedSlice(); }; diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 5254c16235..e1ffb6ae1d 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -87,7 +87,7 @@ pub fn create(owner: *std.Build, options: Options) *ConfigHeader { owner.fmt("configure {s} header to {s}", .{ @tagName(options.style), include_path }); config_header.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = name, .owner = owner, @@ -95,7 +95,7 @@ pub fn create(owner: *std.Build, options: Options) *ConfigHeader { .first_ret_addr = options.first_ret_addr orelse @returnAddress(), }), .style = options.style, - .values = std.StringArrayHashMap(Value).init(owner.allocator), + .values = .init(owner.allocator), .max_bytes = 
options.max_bytes, .include_path = include_path, @@ -195,8 +195,10 @@ fn make(step: *Step, options: Step.MakeOptions) !void { man.hash.addBytes(config_header.include_path); man.hash.addOptionalBytes(config_header.include_guard_override); - var output = std.ArrayList(u8).init(gpa); - defer output.deinit(); + var aw: std.io.AllocatingWriter = undefined; + aw.init(gpa); + defer aw.deinit(); + const bw = &aw.buffered_writer; const header_text = "This file was generated by ConfigHeader using the Zig Build System."; const c_generated_line = "/* " ++ header_text ++ " */\n"; @@ -204,40 +206,41 @@ fn make(step: *Step, options: Step.MakeOptions) !void { switch (config_header.style) { .autoconf_undef, .autoconf, .autoconf_at => |file_source| { - try output.appendSlice(c_generated_line); + try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = std.fs.cwd().readFileAlloc(arena, src_path, config_header.max_bytes) catch |err| { + const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read autoconf input file '{s}': {s}", .{ src_path, @errorName(err), }); }; switch (config_header.style) { - .autoconf_undef, .autoconf => try render_autoconf_undef(step, contents, &output, config_header.values, src_path), - .autoconf_at => try render_autoconf_at(step, contents, &output, config_header.values, src_path), + .autoconf_undef, .autoconf => try render_autoconf_undef(step, contents, bw, config_header.values, src_path), + .autoconf_at => try render_autoconf_at(step, contents, &aw, config_header.values, src_path), else => unreachable, } }, .cmake => |file_source| { - try output.appendSlice(c_generated_line); + try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = std.fs.cwd().readFileAlloc(arena, src_path, config_header.max_bytes) catch |err| { + const contents = std.fs.cwd().readFileAlloc(src_path, arena, 
.limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read cmake input file '{s}': {s}", .{ src_path, @errorName(err), }); }; - try render_cmake(step, contents, &output, config_header.values, src_path); + try render_cmake(step, contents, bw, config_header.values, src_path); }, .blank => { - try output.appendSlice(c_generated_line); - try render_blank(&output, config_header.values, config_header.include_path, config_header.include_guard_override); + try bw.writeAll(c_generated_line); + try render_blank(gpa, bw, config_header.values, config_header.include_path, config_header.include_guard_override); }, .nasm => { - try output.appendSlice(asm_generated_line); - try render_nasm(&output, config_header.values); + try bw.writeAll(asm_generated_line); + try render_nasm(bw, config_header.values); }, } - man.hash.addBytes(output.items); + const output = aw.getWritten(); + man.hash.addBytes(output); if (try step.cacheHit(&man)) { const digest = man.final(); @@ -256,13 +259,13 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const sub_path_dirname = std.fs.path.dirname(sub_path).?; b.cache_root.handle.makePath(sub_path_dirname) catch |err| { - return step.fail("unable to make path '{}{s}': {s}", .{ + return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, sub_path_dirname, @errorName(err), }); }; - b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = output.items }) catch |err| { - return step.fail("unable to write file '{}{s}': {s}", .{ + b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = output }) catch |err| { + return step.fail("unable to write file '{f}{s}': {s}", .{ b.cache_root, sub_path, @errorName(err), }); }; @@ -274,7 +277,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { fn render_autoconf_undef( step: *Step, contents: []const u8, - output: *std.ArrayList(u8), + bw: *std.io.BufferedWriter, values: std.StringArrayHashMap(Value), src_path: []const u8, ) !void { @@ -289,15 +292,15 @@ fn 
render_autoconf_undef( var line_it = std.mem.splitScalar(u8, contents, '\n'); while (line_it.next()) |line| : (line_index += 1) { if (!std.mem.startsWith(u8, line, "#")) { - try output.appendSlice(line); - try output.appendSlice("\n"); + try bw.writeAll(line); + try bw.writeByte('\n'); continue; } var it = std.mem.tokenizeAny(u8, line[1..], " \t\r"); const undef = it.next().?; if (!std.mem.eql(u8, undef, "undef")) { - try output.appendSlice(line); - try output.appendSlice("\n"); + try bw.writeAll(line); + try bw.writeByte('\n'); continue; } const name = it.next().?; @@ -309,7 +312,7 @@ fn render_autoconf_undef( continue; }; is_used.set(index); - try renderValueC(output, name, values.values()[index]); + try renderValueC(bw, name, values.values()[index]); } var unused_value_it = is_used.iterator(.{ .kind = .unset }); @@ -326,12 +329,13 @@ fn render_autoconf_undef( fn render_autoconf_at( step: *Step, contents: []const u8, - output: *std.ArrayList(u8), + aw: *std.io.AllocatingWriter, values: std.StringArrayHashMap(Value), src_path: []const u8, ) !void { const build = step.owner; const allocator = build.allocator; + const bw = &aw.buffered_writer; const used = allocator.alloc(bool, values.count()) catch @panic("OOM"); for (used) |*u| u.* = false; @@ -343,11 +347,11 @@ fn render_autoconf_at( while (line_it.next()) |line| : (line_index += 1) { const last_line = line_it.index == line_it.buffer.len; - const old_len = output.items.len; - expand_variables_autoconf_at(output, line, values, used) catch |err| switch (err) { + const old_len = aw.getWritten().len; + expand_variables_autoconf_at(bw, line, values, used) catch |err| switch (err) { error.MissingValue => { - const name = output.items[old_len..]; - defer output.shrinkRetainingCapacity(old_len); + const name = aw.getWritten()[old_len..]; + defer aw.shrinkRetainingCapacity(old_len); try step.addError("{s}:{d}: error: unspecified config header value: '{s}'", .{ src_path, line_index + 1, name, }); @@ -362,9 +366,7 @@ fn 
render_autoconf_at( continue; }, }; - if (!last_line) { - try output.append('\n'); - } + if (!last_line) try bw.writeByte('\n'); } for (values.unmanaged.entries.slice().items(.key), used) |name, u| { @@ -374,15 +376,13 @@ fn render_autoconf_at( } } - if (any_errors) { - return error.MakeFailed; - } + if (any_errors) return error.MakeFailed; } fn render_cmake( step: *Step, contents: []const u8, - output: *std.ArrayList(u8), + bw: *std.io.BufferedWriter, values: std.StringArrayHashMap(Value), src_path: []const u8, ) !void { @@ -417,10 +417,8 @@ fn render_cmake( defer allocator.free(line); if (!std.mem.startsWith(u8, line, "#")) { - try output.appendSlice(line); - if (!last_line) { - try output.appendSlice("\n"); - } + try bw.writeAll(line); + if (!last_line) try bw.writeByte('\n'); continue; } var it = std.mem.tokenizeAny(u8, line[1..], " \t\r"); @@ -428,10 +426,8 @@ fn render_cmake( if (!std.mem.eql(u8, cmakedefine, "cmakedefine") and !std.mem.eql(u8, cmakedefine, "cmakedefine01")) { - try output.appendSlice(line); - if (!last_line) { - try output.appendSlice("\n"); - } + try bw.writeAll(line); + if (!last_line) try bw.writeByte('\n'); continue; } @@ -502,7 +498,7 @@ fn render_cmake( value = Value{ .ident = it.rest() }; } - try renderValueC(output, name, value); + try renderValueC(bw, name, value); } if (any_errors) { @@ -511,13 +507,14 @@ fn render_cmake( } fn render_blank( - output: *std.ArrayList(u8), + gpa: std.mem.Allocator, + bw: *std.io.BufferedWriter, defines: std.StringArrayHashMap(Value), include_path: []const u8, include_guard_override: ?[]const u8, ) !void { const include_guard_name = include_guard_override orelse blk: { - const name = try output.allocator.dupe(u8, include_path); + const name = try gpa.dupe(u8, include_path); for (name) |*byte| { switch (byte.*) { 'a'...'z' => byte.* = byte.* - 'a' + 'A', @@ -527,92 +524,53 @@ fn render_blank( } break :blk name; }; + defer if (include_guard_override == null) gpa.free(include_guard_name); - try 
output.appendSlice("#ifndef "); - try output.appendSlice(include_guard_name); - try output.appendSlice("\n#define "); - try output.appendSlice(include_guard_name); - try output.appendSlice("\n"); + try bw.print( + \\#ifndef {[0]s} + \\#define {[0]s} + \\ + , .{include_guard_name}); const values = defines.values(); - for (defines.keys(), 0..) |name, i| { - try renderValueC(output, name, values[i]); - } + for (defines.keys(), 0..) |name, i| try renderValueC(bw, name, values[i]); - try output.appendSlice("#endif /* "); - try output.appendSlice(include_guard_name); - try output.appendSlice(" */\n"); + try bw.print( + \\#endif /* {s} */ + \\ + , .{include_guard_name}); } -fn render_nasm(output: *std.ArrayList(u8), defines: std.StringArrayHashMap(Value)) !void { - const values = defines.values(); - for (defines.keys(), 0..) |name, i| { - try renderValueNasm(output, name, values[i]); - } +fn render_nasm(bw: *std.io.BufferedWriter, defines: std.StringArrayHashMap(Value)) !void { + for (defines.keys(), defines.values()) |name, value| try renderValueNasm(bw, name, value); } -fn renderValueC(output: *std.ArrayList(u8), name: []const u8, value: Value) !void { +fn renderValueC(bw: *std.io.BufferedWriter, name: []const u8, value: Value) !void { switch (value) { - .undef => { - try output.appendSlice("/* #undef "); - try output.appendSlice(name); - try output.appendSlice(" */\n"); - }, - .defined => { - try output.appendSlice("#define "); - try output.appendSlice(name); - try output.appendSlice("\n"); - }, - .boolean => |b| { - try output.appendSlice("#define "); - try output.appendSlice(name); - try output.appendSlice(if (b) " 1\n" else " 0\n"); - }, - .int => |i| { - try output.print("#define {s} {d}\n", .{ name, i }); - }, - .ident => |ident| { - try output.print("#define {s} {s}\n", .{ name, ident }); - }, - .string => |string| { - // TODO: use C-specific escaping instead of zig string literals - try output.print("#define {s} \"{}\"\n", .{ name, std.zig.fmtEscapes(string) }); 
- }, + .undef => try bw.print("/* #undef {s} */\n", .{name}), + .defined => try bw.print("#define {s}\n", .{name}), + .boolean => |b| try bw.print("#define {s} {c}\n", .{ name, @as(u8, '0') + @intFromBool(b) }), + .int => |i| try bw.print("#define {s} {d}\n", .{ name, i }), + .ident => |ident| try bw.print("#define {s} {s}\n", .{ name, ident }), + // TODO: use C-specific escaping instead of zig string literals + .string => |string| try bw.print("#define {s} \"{f}\"\n", .{ name, std.zig.fmtEscapes(string) }), } } -fn renderValueNasm(output: *std.ArrayList(u8), name: []const u8, value: Value) !void { +fn renderValueNasm(bw: *std.io.BufferedWriter, name: []const u8, value: Value) !void { switch (value) { - .undef => { - try output.appendSlice("; %undef "); - try output.appendSlice(name); - try output.appendSlice("\n"); - }, - .defined => { - try output.appendSlice("%define "); - try output.appendSlice(name); - try output.appendSlice("\n"); - }, - .boolean => |b| { - try output.appendSlice("%define "); - try output.appendSlice(name); - try output.appendSlice(if (b) " 1\n" else " 0\n"); - }, - .int => |i| { - try output.print("%define {s} {d}\n", .{ name, i }); - }, - .ident => |ident| { - try output.print("%define {s} {s}\n", .{ name, ident }); - }, - .string => |string| { - // TODO: use nasm-specific escaping instead of zig string literals - try output.print("%define {s} \"{}\"\n", .{ name, std.zig.fmtEscapes(string) }); - }, + .undef => try bw.print("; %undef {s}\n", .{name}), + .defined => try bw.print("%define {s}\n", .{name}), + .boolean => |b| try bw.print("%define {s} {c}\n", .{ name, @as(u8, '0') + @intFromBool(b) }), + .int => |i| try bw.print("%define {s} {d}\n", .{ name, i }), + .ident => |ident| try bw.print("%define {s} {s}\n", .{ name, ident }), + // TODO: use nasm-specific escaping instead of zig string literals + .string => |string| try bw.print("%define {s} \"{f}\"\n", .{ name, std.zig.fmtEscapes(string) }), } } fn expand_variables_autoconf_at( - 
output: *std.ArrayList(u8), + bw: *std.io.BufferedWriter, contents: []const u8, values: std.StringArrayHashMap(Value), used: []bool, @@ -637,23 +595,17 @@ fn expand_variables_autoconf_at( const key = contents[curr + 1 .. close_pos]; const index = values.getIndex(key) orelse { // Report the missing key to the caller. - try output.appendSlice(key); + try bw.writeAll(key); return error.MissingValue; }; const value = values.unmanaged.entries.slice().items(.value)[index]; used[index] = true; - try output.appendSlice(contents[source_offset..curr]); + try bw.writeAll(contents[source_offset..curr]); switch (value) { .undef, .defined => {}, - .boolean => |b| { - try output.append(if (b) '1' else '0'); - }, - .int => |i| { - try output.writer().print("{d}", .{i}); - }, - .ident, .string => |s| { - try output.appendSlice(s); - }, + .boolean => |b| try bw.writeByte(@as(u8, '0') + @intFromBool(b)), + .int => |i| try bw.print("{d}", .{i}), + .ident, .string => |s| try bw.writeAll(s), } curr = close_pos; @@ -661,7 +613,7 @@ fn expand_variables_autoconf_at( } } - try output.appendSlice(contents[source_offset..]); + try bw.writeAll(contents[source_offset..]); } fn expand_variables_cmake( @@ -669,7 +621,7 @@ fn expand_variables_cmake( contents: []const u8, values: std.StringArrayHashMap(Value), ) ![]const u8 { - var result = std.ArrayList(u8).init(allocator); + var result: std.ArrayList(u8) = .init(allocator); errdefer result.deinit(); const valid_varname_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789/_.+-"; @@ -681,7 +633,7 @@ fn expand_variables_cmake( source: usize, target: usize, }; - var var_stack = std.ArrayList(Position).init(allocator); + var var_stack: std.ArrayList(Position) = .init(allocator); defer var_stack.deinit(); loop: while (curr < contents.len) : (curr += 1) { switch (contents[curr]) { @@ -801,7 +753,7 @@ fn testReplaceVariablesAutoconfAt( expected: []const u8, values: std.StringArrayHashMap(Value), ) !void { - var output = 
std.ArrayList(u8).init(allocator); + var output: std.ArrayList(u8) = .init(allocator); defer output.deinit(); const used = try allocator.alloc(bool, values.count()); @@ -828,7 +780,7 @@ fn testReplaceVariablesCMake( test "expand_variables_autoconf_at simple cases" { const allocator = std.testing.allocator; - var values = std.StringArrayHashMap(Value).init(allocator); + var values: std.StringArrayHashMap(Value) = .init(allocator); defer values.deinit(); // empty strings are preserved @@ -924,7 +876,7 @@ test "expand_variables_autoconf_at simple cases" { test "expand_variables_autoconf_at edge cases" { const allocator = std.testing.allocator; - var values = std.StringArrayHashMap(Value).init(allocator); + var values: std.StringArrayHashMap(Value) = .init(allocator); defer values.deinit(); // @-vars resolved only when they wrap valid characters, otherwise considered literals @@ -940,7 +892,7 @@ test "expand_variables_autoconf_at edge cases" { test "expand_variables_cmake simple cases" { const allocator = std.testing.allocator; - var values = std.StringArrayHashMap(Value).init(allocator); + var values: std.StringArrayHashMap(Value) = .init(allocator); defer values.deinit(); try values.putNoClobber("undef", .undef); @@ -1028,7 +980,7 @@ test "expand_variables_cmake simple cases" { test "expand_variables_cmake edge cases" { const allocator = std.testing.allocator; - var values = std.StringArrayHashMap(Value).init(allocator); + var values: std.StringArrayHashMap(Value) = .init(allocator); defer values.deinit(); // special symbols @@ -1089,7 +1041,7 @@ test "expand_variables_cmake edge cases" { test "expand_variables_cmake escaped characters" { const allocator = std.testing.allocator; - var values = std.StringArrayHashMap(Value).init(allocator); + var values: std.StringArrayHashMap(Value) = .init(allocator); defer values.deinit(); try values.putNoClobber("string", Value{ .string = "text" }); diff --git a/lib/std/Build/Step/Fail.zig b/lib/std/Build/Step/Fail.zig index 
9236c2ac7b..2394340148 100644 --- a/lib/std/Build/Step/Fail.zig +++ b/lib/std/Build/Step/Fail.zig @@ -12,7 +12,7 @@ pub fn create(owner: *std.Build, error_msg: []const u8) *Fail { const fail = owner.allocator.create(Fail) catch @panic("OOM"); fail.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = "fail", .owner = owner, diff --git a/lib/std/Build/Step/Fmt.zig b/lib/std/Build/Step/Fmt.zig index a364dfa6f4..f3595bcb79 100644 --- a/lib/std/Build/Step/Fmt.zig +++ b/lib/std/Build/Step/Fmt.zig @@ -23,7 +23,7 @@ pub fn create(owner: *std.Build, options: Options) *Fmt { const fmt = owner.allocator.create(Fmt) catch @panic("OOM"); const name = if (options.check) "zig fmt --check" else "zig fmt"; fmt.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = name, .owner = owner, diff --git a/lib/std/Build/Step/InstallArtifact.zig b/lib/std/Build/Step/InstallArtifact.zig index 6a5b834cae..d0131baa8d 100644 --- a/lib/std/Build/Step/InstallArtifact.zig +++ b/lib/std/Build/Step/InstallArtifact.zig @@ -63,7 +63,7 @@ pub fn create(owner: *std.Build, artifact: *Step.Compile, options: Options) *Ins .override => |o| o, }; install_artifact.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = owner.fmt("install {s}", .{artifact.name}), .owner = owner, @@ -164,7 +164,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const full_h_prefix = b.getInstallPath(h_dir, dir.dest_rel_path); var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| { - return step.fail("unable to open source directory '{}': {s}", .{ + return step.fail("unable to open source directory '{f}': {s}", .{ src_dir_path, @errorName(err), }); }; diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig index ece1184d8f..8f9fa61704 100644 --- a/lib/std/Build/Step/InstallDir.zig +++ b/lib/std/Build/Step/InstallDir.zig @@ -43,7 +43,7 @@ pub const Options = struct { pub fn 
create(owner: *std.Build, options: Options) *InstallDir { const install_dir = owner.allocator.create(InstallDir) catch @panic("OOM"); install_dir.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = owner.fmt("install {s}/", .{options.source_dir.getDisplayName()}), .owner = owner, @@ -65,7 +65,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const src_dir_path = install_dir.options.source_dir.getPath3(b, step); const need_derived_inputs = try step.addDirectoryWatchInput(install_dir.options.source_dir); var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| { - return step.fail("unable to open source directory '{}': {s}", .{ + return step.fail("unable to open source directory '{f}': {s}", .{ src_dir_path, @errorName(err), }); }; diff --git a/lib/std/Build/Step/InstallFile.zig b/lib/std/Build/Step/InstallFile.zig index 10adb4754d..833adc6908 100644 --- a/lib/std/Build/Step/InstallFile.zig +++ b/lib/std/Build/Step/InstallFile.zig @@ -21,7 +21,7 @@ pub fn create( assert(dest_rel_path.len != 0); const install_file = owner.allocator.create(InstallFile) catch @panic("OOM"); install_file.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = owner.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }), .owner = owner, diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index 74f871d2fc..5893a1585b 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -111,8 +111,8 @@ pub fn create( options: Options, ) *ObjCopy { const objcopy = owner.allocator.create(ObjCopy) catch @panic("OOM"); - objcopy.* = ObjCopy{ - .step = Step.init(.{ + objcopy.* = .{ + .step = .init(.{ .id = base_id, .name = owner.fmt("objcopy {s}", .{input_file.getDisplayName()}), .owner = owner, diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 6162da9e0d..0ad07130b0 100644 --- 
a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -19,7 +19,7 @@ encountered_types: std.StringHashMapUnmanaged(void), pub fn create(owner: *std.Build) *Options { const options = owner.allocator.create(Options) catch @panic("OOM"); options.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = "options", .owner = owner, @@ -79,15 +79,15 @@ fn printType( std.zig.fmtId(some), std.zig.fmtEscapes(value), }); } else { - try out.print(gpa, "\"{}\",", .{std.zig.fmtEscapes(value)}); + try out.print(gpa, "\"{f}\",", .{std.zig.fmtEscapes(value)}); } return out.appendSlice(gpa, "\n"); }, [:0]const u8 => { if (name) |some| { - try out.print(gpa, "pub const {}: [:0]const u8 = \"{}\";", .{ std.zig.fmtId(some), std.zig.fmtEscapes(value) }); + try out.print(gpa, "pub const {f}: [:0]const u8 = \"{f}\";", .{ std.zig.fmtId(some), std.zig.fmtEscapes(value) }); } else { - try out.print(gpa, "\"{}\",", .{std.zig.fmtEscapes(value)}); + try out.print(gpa, "\"{f}\",", .{std.zig.fmtEscapes(value)}); } return out.appendSlice(gpa, "\n"); }, @@ -97,7 +97,7 @@ fn printType( } if (value) |payload| { - try out.print(gpa, "\"{}\"", .{std.zig.fmtEscapes(payload)}); + try out.print(gpa, "\"{f}\"", .{std.zig.fmtEscapes(payload)}); } else { try out.appendSlice(gpa, "null"); } @@ -115,7 +115,7 @@ fn printType( } if (value) |payload| { - try out.print(gpa, "\"{}\"", .{std.zig.fmtEscapes(payload)}); + try out.print(gpa, "\"{f}\"", .{std.zig.fmtEscapes(payload)}); } else { try out.appendSlice(gpa, "null"); } @@ -129,7 +129,7 @@ fn printType( }, std.SemanticVersion => { if (name) |some| { - try out.print(gpa, "pub const {}: @import(\"std\").SemanticVersion = ", .{std.zig.fmtId(some)}); + try out.print(gpa, "pub const {f}: @import(\"std\").SemanticVersion = ", .{std.zig.fmtId(some)}); } try out.appendSlice(gpa, ".{\n"); @@ -142,11 +142,11 @@ fn printType( if (value.pre) |some| { try out.appendNTimes(gpa, ' ', indent); - try out.print(gpa, " .pre = \"{}\",\n", 
.{std.zig.fmtEscapes(some)}); + try out.print(gpa, " .pre = \"{f}\",\n", .{std.zig.fmtEscapes(some)}); } if (value.build) |some| { try out.appendNTimes(gpa, ' ', indent); - try out.print(gpa, " .build = \"{}\",\n", .{std.zig.fmtEscapes(some)}); + try out.print(gpa, " .build = \"{f}\",\n", .{std.zig.fmtEscapes(some)}); } if (name != null) { @@ -233,7 +233,7 @@ fn printType( .null, => { if (name) |some| { - try out.print(gpa, "pub const {}: {s} = {any};\n", .{ std.zig.fmtId(some), @typeName(T), value }); + try out.print(gpa, "pub const {f}: {s} = {any};\n", .{ std.zig.fmtId(some), @typeName(T), value }); } else { try out.print(gpa, "{any},\n", .{value}); } @@ -243,7 +243,7 @@ fn printType( try printEnum(options, out, T, info, indent); if (name) |some| { - try out.print(gpa, "pub const {}: {} = .{p_};\n", .{ + try out.print(gpa, "pub const {f}: {f} = .{fp_};\n", .{ std.zig.fmtId(some), std.zig.fmtId(@typeName(T)), std.zig.fmtId(@tagName(value)), @@ -255,7 +255,7 @@ fn printType( try printStruct(options, out, T, info, indent); if (name) |some| { - try out.print(gpa, "pub const {}: {} = ", .{ + try out.print(gpa, "pub const {f}: {f} = ", .{ std.zig.fmtId(some), std.zig.fmtId(@typeName(T)), }); @@ -291,7 +291,7 @@ fn printEnum( if (gop.found_existing) return; try out.appendNTimes(gpa, ' ', indent); - try out.print(gpa, "pub const {} = enum ({s}) {{\n", .{ std.zig.fmtId(@typeName(T)), @typeName(val.tag_type) }); + try out.print(gpa, "pub const {f} = enum ({s}) {{\n", .{ std.zig.fmtId(@typeName(T)), @typeName(val.tag_type) }); inline for (val.fields) |field| { try out.appendNTimes(gpa, ' ', indent); @@ -464,7 +464,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { error.FileNotFound => { const sub_dirname = fs.path.dirname(sub_path).?; b.cache_root.handle.makePath(sub_dirname) catch |e| { - return step.fail("unable to make path '{}{s}': {s}", .{ + return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, sub_dirname, @errorName(e), }); }; @@ 
-476,13 +476,13 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { const tmp_sub_path_dirname = fs.path.dirname(tmp_sub_path).?; b.cache_root.handle.makePath(tmp_sub_path_dirname) catch |err| { - return step.fail("unable to make temporary directory '{}{s}': {s}", .{ + return step.fail("unable to make temporary directory '{f}{s}': {s}", .{ b.cache_root, tmp_sub_path_dirname, @errorName(err), }); }; b.cache_root.handle.writeFile(.{ .sub_path = tmp_sub_path, .data = options.contents.items }) catch |err| { - return step.fail("unable to write options to '{}{s}': {s}", .{ + return step.fail("unable to write options to '{f}{s}': {s}", .{ b.cache_root, tmp_sub_path, @errorName(err), }); }; @@ -491,7 +491,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { error.PathAlreadyExists => { // Other process beat us to it. Clean up the temp file. b.cache_root.handle.deleteFile(tmp_sub_path) catch |e| { - try step.addError("warning: unable to delete temp file '{}{s}': {s}", .{ + try step.addError("warning: unable to delete temp file '{f}{s}': {s}", .{ b.cache_root, tmp_sub_path, @errorName(e), }); }; @@ -499,7 +499,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { return; }, else => { - return step.fail("unable to rename options from '{}{s}' to '{}{s}': {s}", .{ + return step.fail("unable to rename options from '{f}{s}' to '{f}{s}': {s}", .{ b.cache_root, tmp_sub_path, b.cache_root, sub_path, @errorName(err), @@ -507,7 +507,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { }, }; }, - else => |e| return step.fail("unable to access options file '{}{s}': {s}", .{ + else => |e| return step.fail("unable to access options file '{f}{s}': {s}", .{ b.cache_root, sub_path, @errorName(e), }), } diff --git a/lib/std/Build/Step/RemoveDir.zig b/lib/std/Build/Step/RemoveDir.zig index e2d4c02abc..a0913b0325 100644 --- a/lib/std/Build/Step/RemoveDir.zig +++ b/lib/std/Build/Step/RemoveDir.zig @@ -12,7 +12,7 @@ doomed_path: LazyPath, pub fn 
create(owner: *std.Build, doomed_path: LazyPath) *RemoveDir { const remove_dir = owner.allocator.create(RemoveDir) catch @panic("OOM"); remove_dir.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = owner.fmt("RemoveDir {s}", .{doomed_path.getDisplayName()}), .owner = owner, diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 4b8dabc14e..e84cdac58e 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -169,7 +169,7 @@ pub const Output = struct { pub fn create(owner: *std.Build, name: []const u8) *Run { const run = owner.allocator.create(Run) catch @panic("OOM"); run.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = name, .owner = owner, @@ -832,7 +832,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { else => unreachable, }; b.cache_root.handle.makePath(output_sub_dir_path) catch |err| { - return step.fail("unable to make path '{}{s}': {s}", .{ + return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, output_sub_dir_path, @errorName(err), }); }; @@ -864,7 +864,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { else => unreachable, }; b.cache_root.handle.makePath(output_sub_dir_path) catch |err| { - return step.fail("unable to make path '{}{s}': {s}", .{ + return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, output_sub_dir_path, @errorName(err), }); }; @@ -903,21 +903,21 @@ fn make(step: *Step, options: Step.MakeOptions) !void { b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |err| { if (err == error.PathAlreadyExists) { b.cache_root.handle.deleteTree(o_sub_path) catch |del_err| { - return step.fail("unable to remove dir '{}'{s}: {s}", .{ + return step.fail("unable to remove dir '{f}'{s}: {s}", .{ b.cache_root, tmp_dir_path, @errorName(del_err), }); }; b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |retry_err| { - return step.fail("unable to rename dir '{}{s}' to '{}{s}': {s}", .{ + return 
step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{ b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, @errorName(retry_err), }); }; } else { - return step.fail("unable to rename dir '{}{s}' to '{}{s}': {s}", .{ + return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{ b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, @errorName(err), @@ -964,7 +964,7 @@ pub fn rerunInFuzzMode( .artifact => |pa| { const artifact = pa.artifact; const file_path: []const u8 = p: { - if (artifact == run.producer.?) break :p b.fmt("{}", .{run.rebuilt_executable.?}); + if (artifact == run.producer.?) break :p b.fmt("{f}", .{run.rebuilt_executable.?}); break :p artifact.installed_path orelse artifact.generated_bin.?.path.?; }; try argv_list.append(arena, b.fmt("{s}{s}", .{ @@ -1013,20 +1013,16 @@ fn populateGeneratedPaths( fn formatTerm( term: ?std.process.Child.Term, + bw: *std.io.BufferedWriter, comptime fmt: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, ) !void { _ = fmt; - _ = options; if (term) |t| switch (t) { - .Exited => |code| try writer.print("exited with code {}", .{code}), - .Signal => |sig| try writer.print("terminated with signal {}", .{sig}), - .Stopped => |sig| try writer.print("stopped with signal {}", .{sig}), - .Unknown => |code| try writer.print("terminated for unknown reason with code {}", .{code}), - } else { - try writer.writeAll("exited with any code"); - } + .Exited => |code| try bw.print("exited with code {}", .{code}), + .Signal => |sig| try bw.print("terminated with signal {}", .{sig}), + .Stopped => |sig| try bw.print("stopped with signal {}", .{sig}), + .Unknown => |code| try bw.print("terminated for unknown reason with code {}", .{code}), + } else try bw.writeAll("exited with any code"); } fn fmtTerm(term: ?std.process.Child.Term) std.fmt.Formatter(formatTerm) { return .{ .data = term }; @@ -1262,12 +1258,12 @@ fn runCommand( const sub_path = b.pathJoin(&output_components); const sub_path_dirname = 
fs.path.dirname(sub_path).?; b.cache_root.handle.makePath(sub_path_dirname) catch |err| { - return step.fail("unable to make path '{}{s}': {s}", .{ + return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, sub_path_dirname, @errorName(err), }); }; b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = stream.bytes.? }) catch |err| { - return step.fail("unable to write file '{}{s}': {s}", .{ + return step.fail("unable to write file '{f}{s}': {s}", .{ b.cache_root, sub_path, @errorName(err), }); }; @@ -1346,7 +1342,7 @@ fn runCommand( }, .expect_term => |expected_term| { if (!termMatches(expected_term, result.term)) { - return step.fail("the following command {} (expected {}):\n{s}", .{ + return step.fail("the following command {f} (expected {f}):\n{s}", .{ fmtTerm(result.term), fmtTerm(expected_term), try Step.allocPrintCmd(arena, cwd, final_argv), @@ -1366,7 +1362,7 @@ fn runCommand( }; const expected_term: std.process.Child.Term = .{ .Exited = 0 }; if (!termMatches(expected_term, result.term)) { - return step.fail("{s}the following command {} (expected {}):\n{s}", .{ + return step.fail("{s}the following command {f} (expected {f}):\n{s}", .{ prefix, fmtTerm(result.term), fmtTerm(expected_term), @@ -1535,13 +1531,10 @@ fn evalZigTest( defer if (sub_prog_node) |n| n.end(); const any_write_failed = first_write_failed or poll: while (true) { - while (stdout.readableLength() < @sizeOf(Header)) { - if (!(try poller.poll())) break :poll false; - } - const header = stdout.reader().readStruct(Header) catch unreachable; - while (stdout.readableLength() < header.bytes_len) { - if (!(try poller.poll())) break :poll false; - } + while (stdout.readableLength() < @sizeOf(Header)) if (!try poller.poll()) break :poll false; + var header: Header = undefined; + assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header)); + while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll false; const body = 
stdout.readableSliceOfLen(header.bytes_len); switch (header.tag) { @@ -1797,10 +1790,10 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult { stdout_bytes = try poller.fifo(.stdout).toOwnedSlice(); stderr_bytes = try poller.fifo(.stderr).toOwnedSlice(); } else { - stdout_bytes = try stdout.reader().readAllAlloc(arena, run.max_stdio_size); + stdout_bytes = try stdout.reader().readAlloc(arena, run.max_stdio_size); } } else if (child.stderr) |stderr| { - stderr_bytes = try stderr.reader().readAllAlloc(arena, run.max_stdio_size); + stderr_bytes = try stderr.reader().readAlloc(arena, run.max_stdio_size); } if (stderr_bytes) |bytes| if (bytes.len > 0) { diff --git a/lib/std/Build/Step/TranslateC.zig b/lib/std/Build/Step/TranslateC.zig index 2592719f80..8c14b55c7d 100644 --- a/lib/std/Build/Step/TranslateC.zig +++ b/lib/std/Build/Step/TranslateC.zig @@ -31,7 +31,7 @@ pub fn create(owner: *std.Build, options: Options) *TranslateC { const translate_c = owner.allocator.create(TranslateC) catch @panic("OOM"); const source = options.root_source_file.dupe(owner); translate_c.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = "translate-c", .owner = owner, diff --git a/lib/std/Build/Step/UpdateSourceFiles.zig b/lib/std/Build/Step/UpdateSourceFiles.zig index d4a9565083..03c16c71db 100644 --- a/lib/std/Build/Step/UpdateSourceFiles.zig +++ b/lib/std/Build/Step/UpdateSourceFiles.zig @@ -27,7 +27,7 @@ pub const Contents = union(enum) { pub fn create(owner: *std.Build) *UpdateSourceFiles { const usf = owner.allocator.create(UpdateSourceFiles) catch @panic("OOM"); usf.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = "UpdateSourceFiles", .owner = owner, @@ -76,7 +76,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { for (usf.output_source_files.items) |output_source_file| { if (fs.path.dirname(output_source_file.sub_path)) |dirname| { b.build_root.handle.makePath(dirname) catch |err| { - return step.fail("unable to 
make path '{}{s}': {s}", .{ + return step.fail("unable to make path '{f}{s}': {s}", .{ b.build_root, dirname, @errorName(err), }); }; @@ -84,7 +84,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { switch (output_source_file.contents) { .bytes => |bytes| { b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| { - return step.fail("unable to write file '{}{s}': {s}", .{ + return step.fail("unable to write file '{f}{s}': {s}", .{ b.build_root, output_source_file.sub_path, @errorName(err), }); }; @@ -101,7 +101,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { output_source_file.sub_path, .{}, ) catch |err| { - return step.fail("unable to update file from '{s}' to '{}{s}': {s}", .{ + return step.fail("unable to update file from '{s}' to '{f}{s}': {s}", .{ source_path, b.build_root, output_source_file.sub_path, @errorName(err), }); }; diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 29fba1c871..132df87d15 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -67,7 +67,7 @@ pub const Contents = union(enum) { pub fn create(owner: *std.Build) *WriteFile { const write_file = owner.allocator.create(WriteFile) catch @panic("OOM"); write_file.* = .{ - .step = Step.init(.{ + .step = .init(.{ .id = base_id, .name = "WriteFile", .owner = owner, @@ -217,7 +217,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const src_dir_path = dir.source.getPath3(b, step); var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| { - return step.fail("unable to open source directory '{}': {s}", .{ + return step.fail("unable to open source directory '{f}': {s}", .{ src_dir_path, @errorName(err), }); }; @@ -258,7 +258,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { write_file.generated_directory.path = try b.cache_root.join(arena, &.{ "o", &digest }); var cache_dir = 
b.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| { - return step.fail("unable to make path '{}{s}': {s}", .{ + return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, cache_path, @errorName(err), }); }; @@ -269,7 +269,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { for (write_file.files.items) |file| { if (fs.path.dirname(file.sub_path)) |dirname| { cache_dir.makePath(dirname) catch |err| { - return step.fail("unable to make path '{}{s}{c}{s}': {s}", .{ + return step.fail("unable to make path '{f}{s}{c}{s}': {s}", .{ b.cache_root, cache_path, fs.path.sep, dirname, @errorName(err), }); }; @@ -277,7 +277,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { switch (file.contents) { .bytes => |bytes| { cache_dir.writeFile(.{ .sub_path = file.sub_path, .data = bytes }) catch |err| { - return step.fail("unable to write file '{}{s}{c}{s}': {s}", .{ + return step.fail("unable to write file '{f}{s}{c}{s}': {s}", .{ b.cache_root, cache_path, fs.path.sep, file.sub_path, @errorName(err), }); }; @@ -291,7 +291,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { file.sub_path, .{}, ) catch |err| { - return step.fail("unable to update file from '{s}' to '{}{s}{c}{s}': {s}", .{ + return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {s}", .{ source_path, b.cache_root, cache_path, @@ -315,7 +315,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { if (dest_dirname.len != 0) { cache_dir.makePath(dest_dirname) catch |err| { - return step.fail("unable to make path '{}{s}{c}{s}': {s}", .{ + return step.fail("unable to make path '{f}{s}{c}{s}': {s}", .{ b.cache_root, cache_path, fs.path.sep, dest_dirname, @errorName(err), }); }; @@ -338,7 +338,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { dest_path, .{}, ) catch |err| { - return step.fail("unable to update file from '{}' to '{}{s}{c}{s}': {s}", .{ + return step.fail("unable to update file from '{f}' to '{f}{s}{c}{s}': {s}", .{ 
src_entry_path, b.cache_root, cache_path, fs.path.sep, dest_path, @errorName(err), }); }; diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index c48f6a0532..95aa6f5280 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -211,7 +211,7 @@ const Os = switch (builtin.os.tag) { .ADD = true, .ONLYDIR = true, }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { - fatal("unable to watch {}: {s}", .{ path, @errorName(err) }); + fatal("unable to watch {f}: {s}", .{ path, @errorName(err) }); }; } break :rs &dh_gop.value_ptr.reaction_set; @@ -265,7 +265,7 @@ const Os = switch (builtin.os.tag) { .ONLYDIR = true, }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) { error.FileNotFound => {}, // Expected, harmless. - else => |e| std.log.warn("unable to unwatch '{}': {s}", .{ path, @errorName(e) }), + else => |e| std.log.warn("unable to unwatch '{f}': {s}", .{ path, @errorName(e) }), }; w.dir_table.swapRemoveAt(i); diff --git a/lib/std/SemanticVersion.zig b/lib/std/SemanticVersion.zig index 7cb3888e54..82764b0f82 100644 --- a/lib/std/SemanticVersion.zig +++ b/lib/std/SemanticVersion.zig @@ -152,15 +152,13 @@ fn parseNum(text: []const u8) error{ InvalidVersion, Overflow }!usize { pub fn format( self: Version, + bw: *std.io.BufferedWriter, comptime fmt: []const u8, - options: std.fmt.FormatOptions, - out_stream: anytype, ) !void { - _ = options; if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self); - try std.fmt.format(out_stream, "{d}.{d}.{d}", .{ self.major, self.minor, self.patch }); - if (self.pre) |pre| try std.fmt.format(out_stream, "-{s}", .{pre}); - if (self.build) |build| try std.fmt.format(out_stream, "+{s}", .{build}); + try bw.print("{d}.{d}.{d}", .{ self.major, self.minor, self.patch }); + if (self.pre) |pre| try bw.print("-{s}", .{pre}); + if (self.build) |build| try bw.print("+{s}", .{build}); } const expect = std.testing.expect; diff --git a/lib/std/Target/Query.zig 
b/lib/std/Target/Query.zig index 55d3cf5527..2d3b0f4436 100644 --- a/lib/std/Target/Query.zig +++ b/lib/std/Target/Query.zig @@ -423,7 +423,7 @@ pub fn zigTriple(self: Query, gpa: Allocator) Allocator.Error![]u8 { try formatVersion(v, gpa, &result); }, .windows => |v| { - try result.print(gpa, "{s}", .{v}); + try result.print(gpa, "{d}", .{v}); }, } } @@ -437,7 +437,7 @@ pub fn zigTriple(self: Query, gpa: Allocator) Allocator.Error![]u8 { .windows => |v| { // This is counting on a custom format() function defined on `WindowsVersion` // to add a prefix '.' and make there be a total of three dots. - try result.print(gpa, "..{s}", .{v}); + try result.print(gpa, "..{d}", .{v}); }, } } diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig index 457c36ffec..e1a6a31b25 100644 --- a/lib/std/fifo.zig +++ b/lib/std/fifo.zig @@ -38,8 +38,6 @@ pub fn LinearFifo( count: usize, const Self = @This(); - pub const Reader = std.io.Reader(*Self, error{}, readFn); - pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite); // Type of Self argument for slice operations. 
// If buffer is inline (Static) then we need to ensure we haven't @@ -236,8 +234,31 @@ pub fn LinearFifo( return self.read(dest); } - pub fn reader(self: *Self) Reader { - return .{ .context = self }; + pub fn reader(self: *Self) std.io.Reader { + return .{ + .context = self, + .vtable = &.{ + .read = &reader_read, + .readv = &reader_readv, + }, + }; + } + fn reader_read( + ctx: ?*anyopaque, + bw: *std.io.BufferedWriter, + limit: std.io.Reader.Limit, + ) anyerror!std.io.Reader.Status { + const fifo: *Self = @alignCast(@ptrCast(ctx)); + _ = fifo; + _ = bw; + _ = limit; + @panic("TODO"); + } + fn reader_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status { + const fifo: *Self = @alignCast(@ptrCast(ctx)); + _ = fifo; + _ = data; + @panic("TODO"); } /// Returns number of items available in fifo @@ -326,8 +347,38 @@ pub fn LinearFifo( return bytes.len; } - pub fn writer(self: *Self) Writer { - return .{ .context = self }; + pub fn writer(fifo: *Self) std.io.Writer { + return .{ + .context = fifo, + .vtable = &.{ + .writeSplat = writer_writeSplat, + .writeFile = writer_writeFile, + }, + }; + } + fn writer_writeSplat(ctx: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize { + const fifo: *Self = @alignCast(@ptrCast(ctx)); + _ = fifo; + _ = data; + _ = splat; + @panic("TODO"); + } + fn writer_writeFile( + ctx: ?*anyopaque, + file: std.fs.File, + offset: std.io.Writer.Offset, + limit: std.io.Writer.Limit, + headers_and_trailers: []const []const u8, + headers_len: usize, + ) anyerror!usize { + const fifo: *Self = @alignCast(@ptrCast(ctx)); + _ = fifo; + _ = file; + _ = offset; + _ = limit; + _ = headers_and_trailers; + _ = headers_len; + @panic("TODO"); } /// Make `count` items available before the current read location diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 39cd730eab..54fec30cfb 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -451,12 +451,10 @@ fn SliceEscape(comptime case: Case) type { return struct { pub fn 
format( bytes: []const u8, + bw: *std.io.BufferedWriter, comptime fmt: []const u8, - options: std.fmt.Options, - writer: anytype, ) !void { _ = fmt; - _ = options; var buf: [4]u8 = undefined; buf[0] = '\\'; @@ -464,11 +462,11 @@ fn SliceEscape(comptime case: Case) type { for (bytes) |c| { if (std.ascii.isPrint(c)) { - try writer.writeByte(c); + try bw.writeByte(c); } else { buf[2] = charset[c >> 4]; buf[3] = charset[c & 15]; - try writer.writeAll(&buf); + try bw.writeAll(&buf); } } } @@ -535,11 +533,10 @@ pub fn Formatter(comptime formatFn: anytype) type { data: Data, pub fn format( self: @This(), - comptime fmt: []const u8, - options: std.fmt.Options, writer: *std.io.BufferedWriter, + comptime fmt: []const u8, ) anyerror!void { - try formatFn(self.data, fmt, options, writer); + try formatFn(self.data, writer, fmt); } }; } diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig index 2826f72ef6..e61e5d515a 100644 --- a/lib/std/fs/Dir.zig +++ b/lib/std/fs/Dir.zig @@ -1979,10 +1979,45 @@ pub fn readFileAlloc( /// * `error.FileTooBig` is returned. limit: std.io.Reader.Limit, ) (File.OpenError || File.ReadAllocError)![]u8 { - var buffer: std.ArrayListUnmanaged(u8) = .empty; + return dir.readFileAllocOptions(file_path, gpa, limit, null, .of(u8), null); +} + +/// Reads all the bytes from the named file. On success, caller owns returned +/// buffer. +pub fn readFileAllocOptions( + dir: Dir, + /// On Windows, should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/). + /// On WASI, should be encoded as valid UTF-8. + /// On other platforms, an opaque sequence of bytes with no particular encoding. + file_path: []const u8, + /// Used to allocate the result. + gpa: mem.Allocator, + /// If exceeded: + /// * The array list's length is increased by exactly one byte past `limit`. + /// * The file seek position is advanced by exactly one byte past `limit`. + /// * `error.FileTooBig` is returned. 
+ limit: std.io.Reader.Limit, + /// If specified, the initial buffer size is calculated using this value, + /// otherwise the effective file size is used instead. + size_hint: ?usize, + comptime alignment: std.mem.Alignment, + comptime optional_sentinel: ?u8, +) (File.OpenError || File.ReadAllocError)!(if (optional_sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) { + var buffer: std.ArrayListAlignedUnmanaged(u8, alignment) = .empty; defer buffer.deinit(gpa); - try readFileIntoArrayList(dir, file_path, gpa, limit, null, &buffer); - return buffer.toOwnedSlice(gpa); + try readFileIntoArrayList( + dir, + file_path, + gpa, + limit, + if (size_hint) |sh| sh +| 1 else null, + alignment, + &buffer, + ); + return if (optional_sentinel) |sentinel| + buffer.toOwnedSliceSentinel(gpa, sentinel) + else + buffer.toOwnedSlice(gpa); } /// Reads all the bytes from the named file, appending them into the provided @@ -2004,7 +2039,7 @@ pub fn readFileIntoArrayList( /// otherwise the effective file size is used instead. 
size_hint: ?usize, comptime alignment: ?std.mem.Alignment, - list: *std.ArrayListAligned(u8, alignment), + list: *std.ArrayListAlignedUnmanaged(u8, alignment), ) (File.OpenError || File.ReadAllocError)!void { var file = try dir.openFile(file_path, .{}); defer file.close(); diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig index 471bf9c96d..3c712fa145 100644 --- a/lib/std/fs/File.zig +++ b/lib/std/fs/File.zig @@ -1169,7 +1169,7 @@ pub fn readIntoArrayList( gpa: Allocator, limit: std.io.Reader.Limit, comptime alignment: ?std.mem.Alignment, - list: *std.ArrayListAligned(u8, alignment), + list: *std.ArrayListAlignedUnmanaged(u8, alignment), ) ReadAllocError!void { var remaining = limit; while (true) { @@ -1676,7 +1676,7 @@ fn streamReadVec(context: ?*anyopaque, data: []const []u8) anyerror!std.io.Reade return .{ .len = @intCast(n), .end = n == 0 }; } -fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize { +pub fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize { const handle = opaqueToHandle(context); var splat_buffer: [256]u8 = undefined; if (is_windows) { @@ -1716,7 +1716,7 @@ fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anye return std.posix.writev(handle, iovecs[0..len]); } -fn writeFile( +pub fn writeFile( context: ?*anyopaque, in_file: std.fs.File, in_offset: std.io.Writer.Offset, @@ -1727,8 +1727,8 @@ fn writeFile( const out_fd = opaqueToHandle(context); const in_fd = in_file.handle; const len_int = switch (in_limit) { - .zero => return writeSplat(context, headers_and_trailers, 1), - .none => 0, + .nothing => return writeSplat(context, headers_and_trailers, 1), + .unlimited => 0, else => in_limit.toInt().?, }; if (native_os == .linux) sf: { diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig index d81295b833..91208f56d9 100644 --- a/lib/std/http/Server.zig +++ b/lib/std/http/Server.zig @@ -593,8 +593,46 @@ pub const Request = struct { 
HttpHeadersOversize, }; + fn contentLengthReader_read( + ctx: ?*anyopaque, + bw: *std.io.BufferedWriter, + limit: std.io.Reader.Limit, + ) anyerror!std.io.Reader.Status { + const request: *Request = @alignCast(@ptrCast(ctx)); + _ = request; + _ = bw; + _ = limit; + @panic("TODO"); + } + + fn contentLengthReader_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status { + const request: *Request = @alignCast(@ptrCast(ctx)); + _ = request; + _ = data; + @panic("TODO"); + } + + fn chunkedReader_read( + ctx: ?*anyopaque, + bw: *std.io.BufferedWriter, + limit: std.io.Reader.Limit, + ) anyerror!std.io.Reader.Status { + const request: *Request = @alignCast(@ptrCast(ctx)); + _ = request; + _ = bw; + _ = limit; + @panic("TODO"); + } + + fn chunkedReader_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status { + const request: *Request = @alignCast(@ptrCast(ctx)); + _ = request; + _ = data; + @panic("TODO"); + } + fn read_cl(context: *const anyopaque, buffer: []u8) ReadError!usize { - const request: *Request = @constCast(@alignCast(@ptrCast(context))); + const request: *Request = @alignCast(@ptrCast(context)); const s = request.server; const remaining_content_length = &request.reader_state.remaining_content_length; @@ -622,7 +660,7 @@ pub const Request = struct { } fn read_chunked(context: *const anyopaque, buffer: []u8) ReadError!usize { - const request: *Request = @constCast(@alignCast(@ptrCast(context))); + const request: *Request = @alignCast(@ptrCast(context)); const s = request.server; const cp = &request.reader_state.chunk_parser; @@ -724,7 +762,7 @@ pub const Request = struct { /// request's expect field to `null`. /// /// Asserts that this function is only called once. 
- pub fn reader(request: *Request) ReaderError!std.io.AnyReader { + pub fn reader(request: *Request) ReaderError!std.io.Reader { const s = request.server; assert(s.state == .received_head); s.state = .receiving_body; @@ -747,8 +785,11 @@ pub const Request = struct { .chunked => { request.reader_state = .{ .chunk_parser = http.ChunkParser.init }; return .{ - .readFn = read_chunked, .context = request, + .vtable = &.{ + .read = &chunkedReader_read, + .readv = &chunkedReader_readv, + }, }; }, .none => { @@ -756,8 +797,11 @@ pub const Request = struct { .remaining_content_length = request.head.content_length orelse 0, }; return .{ - .readFn = read_cl, .context = request, + .vtable = &.{ + .read = &contentLengthReader_read, + .readv = &contentLengthReader_readv, + }, }; }, } @@ -779,7 +823,7 @@ pub const Request = struct { if (keep_alive and request.head.keep_alive) switch (s.state) { .received_head => { const r = request.reader() catch return false; - _ = r.discard() catch return false; + _ = r.discardUntilEnd() catch return false; assert(s.state == .ready); return true; }, @@ -868,30 +912,30 @@ pub const Response = struct { } } - fn cl_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize { + fn cl_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize { _ = splat; return cl_write(context, data[0]); // TODO: try to send all the data } fn cl_writeFile( - context: *anyopaque, + context: ?*anyopaque, file: std.fs.File, - offset: u64, - len: std.io.Writer.FileLen, + offset: std.io.Writer.Offset, + limit: std.io.Writer.Limit, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { _ = context; _ = file; _ = offset; - _ = len; + _ = limit; _ = headers_and_trailers; _ = headers_len; return error.Unimplemented; } - fn cl_write(context: *anyopaque, bytes: []const u8) anyerror!usize { - const r: *Response = @constCast(@alignCast(@ptrCast(context))); + fn cl_write(context: ?*anyopaque, 
bytes: []const u8) anyerror!usize { + const r: *Response = @alignCast(@ptrCast(context)); var trash: u64 = std.math.maxInt(u64); const len = switch (r.transfer_encoding) { @@ -935,30 +979,30 @@ pub const Response = struct { return bytes.len; } - fn chunked_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize { + fn chunked_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize { _ = splat; return chunked_write(context, data[0]); // TODO: try to send all the data } fn chunked_writeFile( - context: *anyopaque, + context: ?*anyopaque, file: std.fs.File, - offset: u64, - len: std.io.Writer.FileLen, + offset: std.io.Writer.Offset, + limit: std.io.Writer.Limit, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { _ = context; _ = file; _ = offset; - _ = len; + _ = limit; _ = headers_and_trailers; _ = headers_len; return error.Unimplemented; // TODO lower to a call to writeFile on the output } - fn chunked_write(context: *anyopaque, bytes: []const u8) anyerror!usize { - const r: *Response = @constCast(@alignCast(@ptrCast(context))); + fn chunked_write(context: ?*anyopaque, bytes: []const u8) anyerror!usize { + const r: *Response = @alignCast(@ptrCast(context)); assert(r.transfer_encoding == .chunked); if (r.elide_body) diff --git a/lib/std/http/WebSocket.zig b/lib/std/http/WebSocket.zig index 8d900bc1c7..e02d47a714 100644 --- a/lib/std/http/WebSocket.zig +++ b/lib/std/http/WebSocket.zig @@ -57,8 +57,8 @@ pub fn init( ws.* = .{ .key = key, - .recv_fifo = std.fifo.LinearFifo(u8, .Slice).init(recv_buffer), - .reader = try request.reader(), + .recv_fifo = .init(recv_buffer), + .reader = undefined, .response = request.respondStreaming(.{ .send_buffer = send_buffer, .respond_options = .{ @@ -74,6 +74,7 @@ pub fn init( .request = request, .outstanding_len = 0, }; + ws.reader.init(try request.reader(), &.{}); return true; } diff --git a/lib/std/io/AllocatingWriter.zig 
b/lib/std/io/AllocatingWriter.zig index 415d40a2f6..78479f07d1 100644 --- a/lib/std/io/AllocatingWriter.zig +++ b/lib/std/io/AllocatingWriter.zig @@ -28,12 +28,12 @@ const vtable: std.io.Writer.VTable = .{ /// Sets the `AllocatingWriter` to an empty state. pub fn init(aw: *AllocatingWriter, allocator: std.mem.Allocator) void { - initOwnedSlice(aw, allocator, &.{}); + aw.initOwnedSlice(allocator, &.{}); } pub fn initCapacity(aw: *AllocatingWriter, allocator: std.mem.Allocator, capacity: usize) error{OutOfMemory}!void { const initial_buffer = try allocator.alloc(u8, capacity); - initOwnedSlice(aw, allocator, initial_buffer); + aw.initOwnedSlice(allocator, initial_buffer); } pub fn initOwnedSlice(aw: *AllocatingWriter, allocator: std.mem.Allocator, slice: []u8) void { @@ -119,11 +119,15 @@ pub fn getWritten(aw: *AllocatingWriter) []u8 { return written; } -pub fn clearRetainingCapacity(aw: *AllocatingWriter) void { +pub fn shrinkRetainingCapacity(aw: *AllocatingWriter, new_len: usize) void { const bw = &aw.buffered_writer; - bw.buffer = aw.written.ptr[0 .. aw.written.len + bw.buffer.len]; + bw.buffer = aw.written.ptr[new_len .. 
aw.written.len + bw.buffer.len]; bw.end = 0; - aw.written.len = 0; + aw.written.len = new_len; +} + +pub fn clearRetainingCapacity(aw: *AllocatingWriter) void { + aw.shrinkRetainingCapacity(0); } fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize { @@ -161,7 +165,7 @@ fn writeFile( context: ?*anyopaque, file: std.fs.File, offset: std.io.Writer.Offset, - len: std.io.Writer.FileLen, + limit: std.io.Writer.Limit, headers_and_trailers_full: []const []const u8, headers_len_full: usize, ) anyerror!usize { @@ -177,7 +181,7 @@ fn writeFile( } else .{ headers_and_trailers_full, headers_len_full }; const trailers = headers_and_trailers[headers_len..]; const pos = offset.toInt() orelse @panic("TODO treat file as stream"); - if (len == .entire_file) { + const limit_int = limit.toInt() orelse { var new_capacity: usize = list.capacity + std.atomic.cache_line; for (headers_and_trailers) |bytes| new_capacity += bytes.len; try list.ensureTotalCapacity(gpa, new_capacity); @@ -193,12 +197,12 @@ fn writeFile( } list.items.len += n; return list.items.len - start_len; - } - var new_capacity: usize = list.capacity + len.int(); + }; + var new_capacity: usize = list.capacity + limit_int; for (headers_and_trailers) |bytes| new_capacity += bytes.len; try list.ensureTotalCapacity(gpa, new_capacity); for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes); - const dest = list.items.ptr[list.items.len..][0..len.int()]; + const dest = list.items.ptr[list.items.len..][0..limit_int]; const n = try file.pread(dest, pos); list.items.len += n; if (n < dest.len) { diff --git a/lib/std/io/BufferedReader.zig b/lib/std/io/BufferedReader.zig index b2df42b454..ef1634659e 100644 --- a/lib/std/io/BufferedReader.zig +++ b/lib/std/io/BufferedReader.zig @@ -253,18 +253,17 @@ pub fn discardUpTo(br: *BufferedReader, n: usize) anyerror!usize { const proposed_seek = br.seek + remaining; if (proposed_seek <= storage.end) { br.seek = 
proposed_seek; - return; + return n; } remaining -= (storage.end - br.seek); storage.end = 0; br.seek = 0; - const result = try br.unbuffered_reader.read(&storage, .none); - result.write_err catch unreachable; - try result.read_err; + const result = try br.unbuffered_reader.read(storage, .unlimited); assert(result.len == storage.end); if (remaining <= storage.end) continue; if (result.end) return n - remaining; } + return n; } /// Reads the stream until the end, ignoring all the data. @@ -302,7 +301,7 @@ pub fn read(br: *BufferedReader, buffer: []u8) anyerror!void { br.seek = 0; var i: usize = in_buffer.len; while (true) { - const status = try br.unbuffered_reader.read(storage, .none); + const status = try br.unbuffered_reader.read(storage, .unlimited); const next_i = i + storage.end; if (next_i >= buffer.len) { const remaining = buffer[i..]; @@ -389,7 +388,7 @@ pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 /// * `peekDelimiterConclusive` pub fn takeDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 { const result = try peekDelimiterConclusive(br, delimiter); - toss(result.len); + br.toss(result.len); return result; } @@ -407,7 +406,7 @@ pub fn peekDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 storage.end = i; br.seek = 0; while (i < storage.buffer.len) { - const status = try br.unbuffered_reader.read(storage, .none); + const status = try br.unbuffered_reader.read(storage, .unlimited); if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| { return storage.buffer[0 .. 
end + 1]; } @@ -505,7 +504,7 @@ pub fn fill(br: *BufferedReader, n: usize) anyerror!void { storage.end = remainder.len; br.seek = 0; while (true) { - const status = try br.unbuffered_reader.read(storage, .none); + const status = try br.unbuffered_reader.read(storage, .unlimited); if (n <= storage.end) return; if (status.end) return error.EndOfStream; } @@ -589,7 +588,7 @@ fn takeMultipleOf7Leb128(br: *BufferedReader, comptime Result: type) anyerror!Re const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try br.peekAll(1)); for (buffer, 1..) |byte, len| { if (remaining_bits > 0) { - result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) | @shrExact(result, 7); + result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) | if (result_info.bits > 7) @shrExact(result, 7) else 0; remaining_bits -= 7; } else if (fits) fits = switch (result_info.signedness) { .signed => @as(i7, @bitCast(byte.bits)) == @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))), diff --git a/lib/std/io/BufferedWriter.zig b/lib/std/io/BufferedWriter.zig index e5c898b596..114b6491a6 100644 --- a/lib/std/io/BufferedWriter.zig +++ b/lib/std/io/BufferedWriter.zig @@ -503,7 +503,7 @@ pub const WriteFileOptions = struct { offset: Writer.Offset = .none, /// If the size of the source file is known, it is likely that passing the /// size here will save one syscall. - limit: Writer.Limit = .none, + limit: Writer.Limit = .unlimited, /// Headers and trailers must be passed together so that in case `len` is /// zero, they can be forwarded directly to `Writer.VTable.writev`. 
/// @@ -518,55 +518,58 @@ pub const WriteFileOptions = struct { pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOptions) anyerror!void { const headers_and_trailers = options.headers_and_trailers; const headers = headers_and_trailers[0..options.headers_len]; - if (options.limit == .zero) return writevAll(bw, headers_and_trailers); - if (options.limit == .none) { - // When reading the whole file, we cannot include the trailers in the - // call that reads from the file handle, because we have no way to - // determine whether a partial write is past the end of the file or - // not. - var i: usize = 0; - var offset = options.offset; - while (true) { - var n = try writeFile(bw, file, offset, .entire_file, headers[i..], headers.len - i); - while (i < headers.len and n >= headers[i].len) { - n -= headers[i].len; - i += 1; - } - if (i < headers.len) { - headers[i] = headers[i][n..]; - continue; - } - if (n == 0) break; - offset += n; - } - } else { - var len = options.limit.toInt().?; - var i: usize = 0; - var offset = options.offset; - while (true) { - var n = try writeFile(bw, file, offset, .init(len), headers_and_trailers[i..], headers.len - i); - while (i < headers.len and n >= headers[i].len) { - n -= headers[i].len; - i += 1; - } - if (i < headers.len) { - headers[i] = headers[i][n..]; - continue; - } - if (n >= len) { - n -= len; - if (i >= headers_and_trailers.len) return; - while (n >= headers_and_trailers[i].len) { - n -= headers_and_trailers[i].len; + switch (options.limit) { + .nothing => return writevAll(bw, headers_and_trailers), + .unlimited => { + // When reading the whole file, we cannot include the trailers in the + // call that reads from the file handle, because we have no way to + // determine whether a partial write is past the end of the file or + // not. 
+ var i: usize = 0; + var offset = options.offset; + while (true) { + var n = try writeFile(bw, file, offset, .unlimited, headers[i..], headers.len - i); + while (i < headers.len and n >= headers[i].len) { + n -= headers[i].len; i += 1; - if (i >= headers_and_trailers.len) return; } - headers_and_trailers[i] = headers_and_trailers[i][n..]; - return writevAll(bw, headers_and_trailers[i..]); + if (i < headers.len) { + headers[i] = headers[i][n..]; + continue; + } + if (n == 0) break; + offset = offset.advance(n); } - offset += n; - len -= n; - } + }, + else => { + var len = options.limit.toInt().?; + var i: usize = 0; + var offset = options.offset; + while (true) { + var n = try writeFile(bw, file, offset, .limited(len), headers_and_trailers[i..], headers.len - i); + while (i < headers.len and n >= headers[i].len) { + n -= headers[i].len; + i += 1; + } + if (i < headers.len) { + headers[i] = headers[i][n..]; + continue; + } + if (n >= len) { + n -= len; + if (i >= headers_and_trailers.len) return; + while (n >= headers_and_trailers[i].len) { + n -= headers_and_trailers[i].len; + i += 1; + if (i >= headers_and_trailers.len) return; + } + headers_and_trailers[i] = headers_and_trailers[i][n..]; + return writevAll(bw, headers_and_trailers[i..]); + } + offset = offset.advance(n); + len -= n; + } + }, } } @@ -717,9 +720,9 @@ pub fn printValue( } } - try bw.writeByteCount('('); + try bw.writeByte('('); try printValue(bw, actual_fmt, options, @intFromEnum(value), max_depth); - try bw.writeByteCount(')'); + try bw.writeByte(')'); }, .@"union" => |info| { if (actual_fmt.len != 0) invalidFmtError(fmt, value); diff --git a/lib/std/io/Reader.zig b/lib/std/io/Reader.zig index c562f5b8d5..4d580821ec 100644 --- a/lib/std/io/Reader.zig +++ b/lib/std/io/Reader.zig @@ -48,12 +48,12 @@ pub const Status = packed struct(usize) { }; pub const Limit = enum(usize) { - zero = 0, - none = std.math.maxInt(usize), + nothing = 0, + unlimited = std.math.maxInt(usize), _, - /// 
`std.math.maxInt(usize)` is interpreted to mean "no limit". - pub fn init(n: usize) Limit { + /// `std.math.maxInt(usize)` is interpreted to mean `.unlimited`. + pub fn limited(n: usize) Limit { return @enumFromInt(n); } @@ -66,7 +66,10 @@ pub const Limit = enum(usize) { } pub fn toInt(l: Limit) ?usize { - return if (l == .none) null else @intFromEnum(l); + return switch (l) { + else => @intFromEnum(l), + .unlimited => null, + }; } /// Reduces a slice to account for the limit, leaving room for one extra @@ -84,7 +87,7 @@ pub const Limit = enum(usize) { /// Return a new limit reduced by `amount` or return `null` indicating /// limit would be exceeded. pub fn subtract(l: Limit, amount: usize) ?Limit { - if (l == .none) return .{ .next = .none }; + if (l == .unlimited) return .unlimited; if (amount > @intFromEnum(l)) return null; return @enumFromInt(@intFromEnum(l) - amount); } @@ -103,7 +106,7 @@ pub fn readAll(r: Reader, w: *std.io.BufferedWriter) anyerror!usize { const readFn = r.vtable.read; var offset: usize = 0; while (true) { - const status = try readFn(r.context, w, .none); + const status = try readFn(r.context, w, .unlimited); offset += status.len; if (status.end) return offset; } @@ -119,21 +122,21 @@ pub fn readAlloc(r: Reader, gpa: std.mem.Allocator, max_size: usize) anyerror![] const readFn = r.vtable.read; var aw: std.io.AllocatingWriter = undefined; errdefer aw.deinit(); - const bw = aw.init(gpa); + aw.init(gpa); var remaining = max_size; while (remaining > 0) { - const status = try readFn(r.context, bw, .init(remaining)); + const status = try readFn(r.context, &aw.buffered_writer, .limited(remaining)); if (status.end) break; remaining -= status.len; } - return aw.toOwnedSlice(gpa); + return aw.toOwnedSlice(); } /// Reads the stream until the end, ignoring all the data. /// Returns the number of bytes discarded. 
pub fn discardUntilEnd(r: Reader) anyerror!usize { - var bw = std.io.null_writer.unbuffered(); - return readAll(r, &bw); + var bw = std.io.Writer.null.unbuffered(); + return r.readAll(&bw); } test "readAlloc when the backing reader provides one byte at a time" { diff --git a/lib/std/io/Writer.zig b/lib/std/io/Writer.zig index 6d4f3fe171..d8ec926554 100644 --- a/lib/std/io/Writer.zig +++ b/lib/std/io/Writer.zig @@ -60,6 +60,13 @@ pub const Offset = enum(u64) { pub fn toInt(o: Offset) ?u64 { return if (o == .none) null else @intFromEnum(o); } + + pub fn advance(o: Offset, amount: u64) Offset { + return switch (o) { + .none => .none, + else => .init(@intFromEnum(o) + amount), + }; + } }; pub fn writev(w: Writer, data: []const []const u8) anyerror!usize { @@ -106,7 +113,7 @@ pub fn buffered(w: Writer, buffer: []u8) std.io.BufferedWriter { } pub fn unbuffered(w: Writer) std.io.BufferedWriter { - return buffered(w, &.{}); + return w.buffered(&.{}); } /// A `Writer` that discards all data. diff --git a/lib/std/net.zig b/lib/std/net.zig index 24a07db179..e01df2b417 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1853,7 +1853,7 @@ pub const Stream = struct { }, else => &.{ .writeSplat = posix_writeSplat, - .writeFile = std.fs.File.writer_writeFile, + .writeFile = std.fs.File.writeFile, }, }, }; @@ -1960,7 +1960,7 @@ pub const Stream = struct { return n; } - fn posix_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize { + fn posix_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize { const sock_fd = opaqueToHandle(context); comptime assert(native_os != .windows); var splat_buffer: [256]u8 = undefined; @@ -2029,7 +2029,7 @@ pub const Stream = struct { const max_buffers_len = 8; - fn handleToOpaque(handle: Handle) *anyopaque { + fn handleToOpaque(handle: Handle) ?*anyopaque { return switch (@typeInfo(Handle)) { .pointer => @ptrCast(handle), .int => @ptrFromInt(@as(u32, @bitCast(handle))), @@ -2037,7 
+2037,7 @@ pub const Stream = struct { }; } - fn opaqueToHandle(userdata: *anyopaque) Handle { + fn opaqueToHandle(userdata: ?*anyopaque) Handle { return switch (@typeInfo(Handle)) { .pointer => @ptrCast(userdata), .int => @intCast(@intFromPtr(userdata)), diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index b3224525da..e24006faf8 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -1004,13 +1004,17 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn { fn writeIntFd(fd: i32, value: ErrInt) !void { const file: File = .{ .handle = fd }; - var bw = file.writer().unbuffered(); - bw.writeInt(u64, @intCast(value), .little) catch return error.SystemResources; + var buffer: [8]u8 = undefined; + std.mem.writeInt(u64, &buffer, @intCast(value), .little); + file.writeAll(&buffer) catch return error.SystemResources; } fn readIntFd(fd: i32) !ErrInt { const file: File = .{ .handle = fd }; - return @intCast(file.reader().readInt(u64, .little) catch return error.SystemResources); + var buffer: [8]u8 = undefined; + const n = file.readAll(&buffer) catch return error.SystemResources; + if (n != buffer.len) return error.SystemResources; + return @intCast(std.mem.readInt(u64, &buffer, .little)); } const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8); diff --git a/lib/std/tar/Writer.zig b/lib/std/tar/Writer.zig index c8ad4a78be..964be0f013 100644 --- a/lib/std/tar/Writer.zig +++ b/lib/std/tar/Writer.zig @@ -44,7 +44,7 @@ pub fn writeFile(self: *Self, sub_path: []const u8, file: std.fs.File) !void { try header.setMtime(mtime); try header.write(self.underlying_writer); - try self.underlying_writer.writeFileAll(file, .{ .len = .init(stat.size) }); + try self.underlying_writer.writeFileAll(file, .{ .limit = .limited(stat.size) }); try self.writePadding(stat.size); } diff --git a/lib/std/zig.zig b/lib/std/zig.zig index 4401b5fc80..04fb6e4d19 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -414,9 +414,8 @@ test fmtId { 
/// Print the string as a Zig identifier, escaping it with `@""` syntax if needed. fn formatId( bytes: []const u8, + bw: *std.io.BufferedWriter, comptime fmt: []const u8, - options: std.fmt.FormatOptions, - writer: *std.io.BufferedWriter, ) !void { const allow_primitive, const allow_underscore = comptime parse_fmt: { var allow_primitive = false; @@ -442,11 +441,11 @@ fn formatId( (allow_primitive or !std.zig.isPrimitive(bytes)) and (allow_underscore or !isUnderscore(bytes))) { - return writer.writeAll(bytes); + return bw.writeAll(bytes); } - try writer.writeAll("@\""); - try stringEscape(bytes, "", options, writer); - try writer.writeByte('"'); + try bw.writeAll("@\""); + try stringEscape(bytes, bw, ""); + try bw.writeByte('"'); } /// Return a Formatter for Zig Escapes of a double quoted string. @@ -473,11 +472,9 @@ test fmtEscapes { /// Format `{'}` treats contents as a single-quoted string. pub fn stringEscape( bytes: []const u8, - comptime f: []const u8, - options: std.fmt.FormatOptions, bw: *std.io.BufferedWriter, + comptime f: []const u8, ) !void { - _ = options; for (bytes) |byte| switch (byte) { '\n' => try bw.writeAll("\\n"), '\r' => try bw.writeAll("\\r"), diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index 1ecfb881af..9b47fb86d5 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -190,7 +190,7 @@ fn renderErrorMessageToWriter( ) anyerror!void { const ttyconf = options.ttyconf; const err_msg = eb.getErrorMessage(err_msg_index); - const prefix_start = bw.bytes_written; + const prefix_start = bw.count; if (err_msg.src_loc != .none) { const src = eb.extraData(SourceLocation, @intFromEnum(err_msg.src_loc)); try bw.splatByteAll(' ', indent); @@ -205,7 +205,7 @@ fn renderErrorMessageToWriter( try bw.writeAll(": "); // This is the length of the part before the error message: // e.g. 
"file.zig:4:5: error: " - const prefix_len = bw.bytes_written - prefix_start; + const prefix_len = bw.count - prefix_start; try ttyconf.setColor(bw, .reset); try ttyconf.setColor(bw, .bold); if (err_msg.count == 1) { diff --git a/test/src/Cases.zig b/test/src/Cases.zig index d8fe74c93c..d2f784a5b0 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -378,7 +378,7 @@ fn addFromDirInner( current_file.* = filename; const max_file_size = 10 * 1024 * 1024; - const src = try iterable_dir.readFileAllocOptions(ctx.arena, filename, max_file_size, null, .@"1", 0); + const src = try iterable_dir.readFileAllocOptions(filename, ctx.arena, .limited(max_file_size), null, .@"1", 0); // Parse the manifest var manifest = try TestManifest.parse(ctx.arena, src); diff --git a/test/standalone/run_output_caching/build.zig b/test/standalone/run_output_caching/build.zig index 98f5a63dd5..8d600708c1 100644 --- a/test/standalone/run_output_caching/build.zig +++ b/test/standalone/run_output_caching/build.zig @@ -75,7 +75,7 @@ const CheckOutputCaching = struct { pub fn init(owner: *std.Build, expect_caching: bool, output_paths: []const std.Build.LazyPath) *CheckOutputCaching { const check = owner.allocator.create(CheckOutputCaching) catch @panic("OOM"); check.* = .{ - .step = std.Build.Step.init(.{ + .step = .init(.{ .id = .custom, .name = "check output caching", .owner = owner, @@ -112,7 +112,7 @@ const CheckPathEquality = struct { pub fn init(owner: *std.Build, expected_equality: bool, output_paths: []const std.Build.LazyPath) *CheckPathEquality { const check = owner.allocator.create(CheckPathEquality) catch @panic("OOM"); check.* = .{ - .step = std.Build.Step.init(.{ + .step = .init(.{ .id = .custom, .name = "check output path equality", .owner = owner, diff --git a/test/tests.zig b/test/tests.zig index 0362233b3c..c9414081e3 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -2711,7 +2711,7 @@ pub fn addIncrementalTests(b: *std.Build, test_step: *Step) !void { 
run.addArg(b.graph.zig_exe); run.addFileArg(b.path("test/incremental/").path(b, entry.path)); - run.addArgs(&.{ "--zig-lib-dir", b.fmt("{}", .{b.graph.zig_lib_directory}) }); + run.addArgs(&.{ "--zig-lib-dir", b.fmt("{f}", .{b.graph.zig_lib_directory}) }); run.addCheck(.{ .expect_term = .{ .Exited = 0 } });