Merge pull request #25077 from ziglang/GenericReader

std.Io: delete GenericReader, AnyReader, FixedBufferStream; and related API breakage
Andrew Kelley 2025-08-30 12:43:52 -07:00 committed by GitHub
commit b7104231af
187 changed files with 1391 additions and 2148 deletions
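The migration pattern is the same throughout the diff below: `std.io.fixedBufferStream` and its `reader()`/`writer()` wrappers are replaced by `std.Io.Reader.fixed`/`std.Io.Writer.fixed`, and functions that took a `GenericReader` or `AnyReader` now take a `*std.Io.Reader`. A minimal before/after sketch, using only calls that appear in the diff (identifiers like `buf` are illustrative):

```zig
const std = @import("std");

test "fixed reader and writer replace FixedBufferStream" {
    var buf: [32]u8 = undefined;

    // Before (deleted): var fbs = std.io.fixedBufferStream(&buf);
    //                   const w = fbs.writer(); ... fbs.getWritten()
    var w: std.Io.Writer = .fixed(&buf);
    try w.print("{d}", .{42});
    try std.testing.expectEqualStrings("42", w.buffered());

    // Before (deleted): std.io.fixedBufferStream(bytes).reader()
    var r: std.Io.Reader = .fixed("zig");
    try std.testing.expectEqual(@as(u8, 'z'), try r.takeByte());
}
```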

View file

@ -304,7 +304,7 @@ pub fn build(b: *std.Build) !void {
if (enable_llvm) {
const cmake_cfg = if (static_llvm) null else blk: {
if (findConfigH(b, config_h_path_option)) |config_h_path| {
const file_contents = fs.cwd().readFileAlloc(b.allocator, config_h_path, max_config_h_bytes) catch unreachable;
const file_contents = fs.cwd().readFileAlloc(config_h_path, b.allocator, .limited(max_config_h_bytes)) catch unreachable;
break :blk parseConfigH(b, file_contents);
} else {
std.log.warn("config.h could not be located automatically. Consider providing it explicitly via \"-Dconfig_h\"", .{});

View file

@ -117,8 +117,7 @@ pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 {
var node_index: u16 = 0;
var count: u16 = index;
var fbs = std.io.fixedBufferStream(buf);
const w = fbs.writer();
var w: std.Io.Writer = .fixed(buf);
while (true) {
var sibling_index = dafsa[node_index].child_index;
@ -140,7 +139,7 @@ pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 {
if (count == 0) break;
}
return fbs.getWritten();
return w.buffered();
}
const Node = packed struct(u32) {

View file

@ -1308,13 +1308,7 @@ fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin
return error.FileNotFound;
}
const file = try comp.cwd.openFile(path, .{});
defer file.close();
const contents = file.readToEndAlloc(comp.gpa, std.math.maxInt(u32)) catch |err| switch (err) {
error.FileTooBig => return error.StreamTooLong,
else => |e| return e,
};
const contents = try comp.cwd.readFileAlloc(path, comp.gpa, .limited(std.math.maxInt(u32)));
errdefer comp.gpa.free(contents);
return comp.addSourceFromOwnedBuffer(contents, path, kind);
@ -1433,19 +1427,7 @@ fn getFileContents(comp: *Compilation, path: []const u8, limit: ?u32) ![]const u
return error.FileNotFound;
}
const file = try comp.cwd.openFile(path, .{});
defer file.close();
var buf = std.array_list.Managed(u8).init(comp.gpa);
defer buf.deinit();
const max = limit orelse std.math.maxInt(u32);
file.deprecatedReader().readAllArrayList(&buf, max) catch |e| switch (e) {
error.StreamTooLong => if (limit == null) return e,
else => return e,
};
return buf.toOwnedSlice();
return comp.cwd.readFileAlloc(path, comp.gpa, .limited(limit orelse std.math.maxInt(u32)));
}
pub fn findEmbed(
@ -1645,8 +1627,8 @@ test "addSourceFromReader" {
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
var buf_reader = std.io.fixedBufferStream(str);
const source = try comp.addSourceFromReader(buf_reader.reader(), "path", .user);
var buf_reader: std.Io.Reader = .fixed(str);
const source = try comp.addSourceFromReader(&buf_reader, "path", .user);
try std.testing.expectEqualStrings(expected, source.buf);
try std.testing.expectEqual(warning_count, @as(u32, @intCast(comp.diagnostics.list.items.len)));
@ -1727,8 +1709,8 @@ test "ignore BOM at beginning of file" {
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
defer comp.deinit();
var buf_reader = std.io.fixedBufferStream(buf);
const source = try comp.addSourceFromReader(buf_reader.reader(), "file.c", .user);
var buf_reader: std.Io.Reader = .fixed(buf);
const source = try comp.addSourceFromReader(&buf_reader, "file.c", .user);
const expected_output = if (mem.startsWith(u8, buf, BOM)) buf[BOM.len..] else buf;
try std.testing.expectEqualStrings(expected_output, source.buf);
}
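The `readFileAlloc` changes above show the new argument order (path, then allocator, then limit) and the explicit `std.Io.Limit`. A hedged sketch of a migrated call site (the path is illustrative):

```zig
const std = @import("std");

// Old: fs.cwd().readFileAlloc(gpa, path, max_bytes)
// New: path first, then allocator, then an explicit Io.Limit.
fn readSource(gpa: std.mem.Allocator, path: []const u8) ![]u8 {
    return std.fs.cwd().readFileAlloc(path, gpa, .limited(std.math.maxInt(u32)));
}
```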

View file

@ -322,14 +322,14 @@ pub fn addExtra(
return error.FatalError;
}
pub fn render(comp: *Compilation, config: std.io.tty.Config) void {
pub fn render(comp: *Compilation, config: std.Io.tty.Config) void {
if (comp.diagnostics.list.items.len == 0) return;
var buffer: [1000]u8 = undefined;
var m = defaultMsgWriter(config, &buffer);
defer m.deinit();
renderMessages(comp, &m);
}
pub fn defaultMsgWriter(config: std.io.tty.Config, buffer: []u8) MsgWriter {
pub fn defaultMsgWriter(config: std.Io.tty.Config, buffer: []u8) MsgWriter {
return MsgWriter.init(config, buffer);
}
@ -451,7 +451,7 @@ pub fn renderMessage(comp: *Compilation, m: anytype, msg: Message) void {
},
.normalized => {
const f = struct {
pub fn f(bytes: []const u8, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn f(bytes: []const u8, writer: *std.Io.Writer) std.Io.Writer.Error!void {
var it: std.unicode.Utf8Iterator = .{
.bytes = bytes,
.i = 0,
@ -526,10 +526,10 @@ fn tagKind(d: *Diagnostics, tag: Tag, langopts: LangOpts) Kind {
}
const MsgWriter = struct {
writer: *std.io.Writer,
config: std.io.tty.Config,
writer: *std.Io.Writer,
config: std.Io.tty.Config,
fn init(config: std.io.tty.Config, buffer: []u8) MsgWriter {
fn init(config: std.Io.tty.Config, buffer: []u8) MsgWriter {
return .{
.writer = std.debug.lockStderrWriter(buffer),
.config = config,
@ -549,7 +549,7 @@ const MsgWriter = struct {
m.writer.writeAll(msg) catch {};
}
fn setColor(m: *MsgWriter, color: std.io.tty.Color) void {
fn setColor(m: *MsgWriter, color: std.Io.tty.Color) void {
m.config.setColor(m.writer, color) catch {};
}
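The `MsgWriter` change above reflects the new locked-stderr API: `std.debug.lockStderrWriter` hands back a `*std.Io.Writer` backed by a caller-supplied buffer. A small sketch of the same pattern (the message text and color choice are illustrative):

```zig
const std = @import("std");

fn printWarning(config: std.Io.tty.Config, msg: []const u8) void {
    var buffer: [256]u8 = undefined;
    const w = std.debug.lockStderrWriter(&buffer);
    // Unlocking releases stderr for other threads.
    defer std.debug.unlockStderrWriter();
    config.setColor(w, .yellow) catch {};
    w.writeAll(msg) catch {};
    config.setColor(w, .reset) catch {};
}
```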

View file

@ -519,8 +519,8 @@ fn option(arg: []const u8, name: []const u8) ?[]const u8 {
fn addSource(d: *Driver, path: []const u8) !Source {
if (mem.eql(u8, "-", path)) {
const stdin = std.fs.File.stdin().deprecatedReader();
const input = try stdin.readAllAlloc(d.comp.gpa, std.math.maxInt(u32));
var stdin_reader: std.fs.File.Reader = .initStreaming(.stdin(), &.{});
const input = try stdin_reader.interface.allocRemaining(d.comp.gpa, .limited(std.math.maxInt(u32)));
defer d.comp.gpa.free(input);
return d.comp.addSourceFromBuffer("<stdin>", input);
}
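Reading all of stdin now goes through `std.fs.File.Reader` in streaming mode rather than `deprecatedReader().readAllAlloc`. A minimal sketch of the pattern used above (an empty buffer makes the reader unbuffered):

```zig
const std = @import("std");

fn readAllStdin(gpa: std.mem.Allocator) ![]u8 {
    var stdin_reader: std.fs.File.Reader = .initStreaming(.stdin(), &.{});
    // allocRemaining reads until end of stream, or fails with
    // error.StreamTooLong if the limit is exceeded.
    return stdin_reader.interface.allocRemaining(gpa, .limited(std.math.maxInt(u32)));
}
```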
@ -544,7 +544,7 @@ pub fn renderErrors(d: *Driver) void {
Diagnostics.render(d.comp, d.detectConfig(std.fs.File.stderr()));
}
pub fn detectConfig(d: *Driver, file: std.fs.File) std.io.tty.Config {
pub fn detectConfig(d: *Driver, file: std.fs.File) std.Io.tty.Config {
if (d.color == true) return .escape_codes;
if (d.color == false) return .no_color;

View file

@ -800,7 +800,7 @@ pub fn nodeLoc(tree: *const Tree, node: NodeIndex) ?Source.Location {
return tree.tokens.items(.loc)[@intFromEnum(tok_i)];
}
pub fn dump(tree: *const Tree, config: std.io.tty.Config, writer: anytype) !void {
pub fn dump(tree: *const Tree, config: std.Io.tty.Config, writer: anytype) !void {
const mapper = tree.comp.string_interner.getFastTypeMapper(tree.comp.gpa) catch tree.comp.string_interner.getSlowTypeMapper();
defer mapper.deinit(tree.comp.gpa);
@ -855,17 +855,17 @@ fn dumpNode(
node: NodeIndex,
level: u32,
mapper: StringInterner.TypeMapper,
config: std.io.tty.Config,
config: std.Io.tty.Config,
w: anytype,
) !void {
const delta = 2;
const half = delta / 2;
const TYPE = std.io.tty.Color.bright_magenta;
const TAG = std.io.tty.Color.bright_cyan;
const IMPLICIT = std.io.tty.Color.bright_blue;
const NAME = std.io.tty.Color.bright_red;
const LITERAL = std.io.tty.Color.bright_green;
const ATTRIBUTE = std.io.tty.Color.bright_yellow;
const TYPE = std.Io.tty.Color.bright_magenta;
const TAG = std.Io.tty.Color.bright_cyan;
const IMPLICIT = std.Io.tty.Color.bright_blue;
const NAME = std.Io.tty.Color.bright_red;
const LITERAL = std.Io.tty.Color.bright_green;
const ATTRIBUTE = std.Io.tty.Color.bright_yellow;
std.debug.assert(node != .none);
const tag = tree.nodes.items(.tag)[@intFromEnum(node)];

View file

@ -578,8 +578,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
// 64 bytes is assumed to be large enough to hold any target triple; increase if necessary
std.debug.assert(buf.len >= 64);
var stream = std.io.fixedBufferStream(buf);
const writer = stream.writer();
var writer: std.Io.Writer = .fixed(buf);
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
@ -719,7 +718,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
.ohoseabi => "ohoseabi",
};
writer.writeAll(llvm_abi) catch unreachable;
return stream.getWritten();
return writer.buffered();
}
test "alignment functions - smoke test" {

View file

@ -374,21 +374,21 @@ pub fn deinit(ir: *Ir, gpa: std.mem.Allocator) void {
ir.* = undefined;
}
const TYPE = std.io.tty.Color.bright_magenta;
const INST = std.io.tty.Color.bright_cyan;
const REF = std.io.tty.Color.bright_blue;
const LITERAL = std.io.tty.Color.bright_green;
const ATTRIBUTE = std.io.tty.Color.bright_yellow;
const TYPE = std.Io.tty.Color.bright_magenta;
const INST = std.Io.tty.Color.bright_cyan;
const REF = std.Io.tty.Color.bright_blue;
const LITERAL = std.Io.tty.Color.bright_green;
const ATTRIBUTE = std.Io.tty.Color.bright_yellow;
const RefMap = std.AutoArrayHashMap(Ref, void);
pub fn dump(ir: *const Ir, gpa: Allocator, config: std.io.tty.Config, w: anytype) !void {
pub fn dump(ir: *const Ir, gpa: Allocator, config: std.Io.tty.Config, w: anytype) !void {
for (ir.decls.keys(), ir.decls.values()) |name, *decl| {
try ir.dumpDecl(decl, gpa, name, config, w);
}
}
fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8, config: std.io.tty.Config, w: anytype) !void {
fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8, config: std.Io.tty.Config, w: anytype) !void {
const tags = decl.instructions.items(.tag);
const data = decl.instructions.items(.data);
@ -609,7 +609,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
try w.writeAll("}\n\n");
}
fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.io.tty.Config, w: anytype) !void {
fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: anytype) !void {
const ty = ir.interner.get(ty_ref);
try config.setColor(w, TYPE);
switch (ty) {
@ -639,7 +639,7 @@ fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.io.tty.Config, w: anytype
}
}
fn writeValue(ir: Ir, val: Interner.Ref, config: std.io.tty.Config, w: anytype) !void {
fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: anytype) !void {
try config.setColor(w, LITERAL);
const key = ir.interner.get(val);
switch (key) {
@ -655,7 +655,7 @@ fn writeValue(ir: Ir, val: Interner.Ref, config: std.io.tty.Config, w: anytype)
}
}
fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.io.tty.Config, w: anytype) !void {
fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void {
assert(ref != .none);
const index = @intFromEnum(ref);
const ty_ref = decl.instructions.items(.ty)[index];
@ -678,7 +678,7 @@ fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.i
try w.print(" %{d}", .{ref_index});
}
fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.io.tty.Config, w: anytype) !void {
fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void {
try ref_map.put(ref, {});
try w.writeAll(" ");
try ir.writeRef(decl, ref_map, ref, config, w);
@ -687,7 +687,7 @@ fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: st
try config.setColor(w, INST);
}
fn writeLabel(decl: *const Decl, label_map: *RefMap, ref: Ref, config: std.io.tty.Config, w: anytype) !void {
fn writeLabel(decl: *const Decl, label_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void {
assert(ref != .none);
const index = @intFromEnum(ref);
const label = decl.instructions.items(.data)[index].label;

View file

@ -1783,7 +1783,7 @@ fn renderErrorsAndExit(comp: *aro.Compilation) noreturn {
defer std.process.exit(1);
var buffer: [1000]u8 = undefined;
var writer = aro.Diagnostics.defaultMsgWriter(std.io.tty.detectConfig(std.fs.File.stderr()), &buffer);
var writer = aro.Diagnostics.defaultMsgWriter(std.Io.tty.detectConfig(std.fs.File.stderr()), &buffer);
defer writer.deinit(); // writer deinit must run *before* exit so that stderr is flushed
var saw_error = false;

View file

@ -1,7 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const io = std.io;
const fmt = std.fmt;
const mem = std.mem;
const process = std.process;
@ -11,8 +10,9 @@ const Watch = std.Build.Watch;
const WebServer = std.Build.WebServer;
const Allocator = std.mem.Allocator;
const fatal = std.process.fatal;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const runner = @This();
const tty = std.Io.tty;
pub const root = @import("@build");
pub const dependencies = @import("@dependencies");
@ -576,7 +576,7 @@ const Run = struct {
claimed_rss: usize,
summary: Summary,
ttyconf: std.io.tty.Config,
ttyconf: tty.Config,
stderr: File,
fn cleanExit(run: Run) void {
@ -819,7 +819,7 @@ const PrintNode = struct {
last: bool = false,
};
fn printPrefix(node: *PrintNode, stderr: *Writer, ttyconf: std.io.tty.Config) !void {
fn printPrefix(node: *PrintNode, stderr: *Writer, ttyconf: tty.Config) !void {
const parent = node.parent orelse return;
if (parent.parent == null) return;
try printPrefix(parent, stderr, ttyconf);
@ -833,7 +833,7 @@ fn printPrefix(node: *PrintNode, stderr: *Writer, ttyconf: std.io.tty.Config) !v
}
}
fn printChildNodePrefix(stderr: *Writer, ttyconf: std.io.tty.Config) !void {
fn printChildNodePrefix(stderr: *Writer, ttyconf: tty.Config) !void {
try stderr.writeAll(switch (ttyconf) {
.no_color, .windows_api => "+- ",
.escape_codes => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", //
@ -843,7 +843,7 @@ fn printChildNodePrefix(stderr: *Writer, ttyconf: std.io.tty.Config) !void {
fn printStepStatus(
s: *Step,
stderr: *Writer,
ttyconf: std.io.tty.Config,
ttyconf: tty.Config,
run: *const Run,
) !void {
switch (s.state) {
@ -923,7 +923,7 @@ fn printStepStatus(
fn printStepFailure(
s: *Step,
stderr: *Writer,
ttyconf: std.io.tty.Config,
ttyconf: tty.Config,
) !void {
if (s.result_error_bundle.errorMessageCount() > 0) {
try ttyconf.setColor(stderr, .red);
@ -977,7 +977,7 @@ fn printTreeStep(
s: *Step,
run: *const Run,
stderr: *Writer,
ttyconf: std.io.tty.Config,
ttyconf: tty.Config,
parent_node: *PrintNode,
step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
) !void {
@ -1494,9 +1494,9 @@ fn uncleanExit() error{UncleanExit} {
const Color = std.zig.Color;
const Summary = enum { all, new, failures, none };
fn get_tty_conf(color: Color, stderr: File) std.io.tty.Config {
fn get_tty_conf(color: Color, stderr: File) tty.Config {
return switch (color) {
.auto => std.io.tty.detectConfig(stderr),
.auto => tty.detectConfig(stderr),
.on => .escape_codes,
.off => .no_color,
};

View file

@ -1,6 +1,5 @@
const std = @import("std");
const mem = std.mem;
const io = std.io;
const LibCInstallation = std.zig.LibCInstallation;
const usage_libc =

View file

@ -381,7 +381,7 @@ fn transformationsToFixups(
}
}
var other_source: std.io.Writer.Allocating = .init(gpa);
var other_source: std.Io.Writer.Allocating = .init(gpa);
defer other_source.deinit();
try other_source.writer.writeAll("struct {\n");
try other_file_ast.render(gpa, &other_source.writer, inlined_fixups);
@ -398,10 +398,9 @@ fn transformationsToFixups(
fn parse(gpa: Allocator, file_path: []const u8) !Ast {
const source_code = std.fs.cwd().readFileAllocOptions(
gpa,
file_path,
std.math.maxInt(u32),
null,
gpa,
.limited(std.math.maxInt(u32)),
.fromByteUnits(1),
0,
) catch |err| {

View file

@ -22,7 +22,7 @@ pub const Tree = struct {
return @alignCast(@fieldParentPtr("base", self.node));
}
pub fn dump(self: *Tree, writer: *std.io.Writer) !void {
pub fn dump(self: *Tree, writer: *std.Io.Writer) !void {
try self.node.dump(self, writer, 0);
}
};
@ -726,9 +726,9 @@ pub const Node = struct {
pub fn dump(
node: *const Node,
tree: *const Tree,
writer: *std.io.Writer,
writer: *std.Io.Writer,
indent: usize,
) std.io.Writer.Error!void {
) std.Io.Writer.Error!void {
try writer.splatByteAll(' ', indent);
try writer.writeAll(@tagName(node.id));
switch (node.id) {

View file

@ -124,13 +124,13 @@ pub const Diagnostics = struct {
try self.errors.append(self.allocator, error_details);
}
pub fn renderToStdErr(self: *Diagnostics, args: []const []const u8, config: std.io.tty.Config) void {
pub fn renderToStdErr(self: *Diagnostics, args: []const []const u8, config: std.Io.tty.Config) void {
const stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
self.renderToWriter(args, stderr, config) catch return;
}
pub fn renderToWriter(self: *Diagnostics, args: []const []const u8, writer: *std.io.Writer, config: std.io.tty.Config) !void {
pub fn renderToWriter(self: *Diagnostics, args: []const []const u8, writer: *std.Io.Writer, config: std.Io.tty.Config) !void {
for (self.errors.items) |err_details| {
try renderErrorMessage(writer, config, err_details, args);
}
@ -1343,7 +1343,7 @@ test parsePercent {
try std.testing.expectError(error.InvalidFormat, parsePercent("~1"));
}
pub fn renderErrorMessage(writer: *std.io.Writer, config: std.io.tty.Config, err_details: Diagnostics.ErrorDetails, args: []const []const u8) !void {
pub fn renderErrorMessage(writer: *std.Io.Writer, config: std.Io.tty.Config, err_details: Diagnostics.ErrorDetails, args: []const []const u8) !void {
try config.setColor(writer, .dim);
try writer.writeAll("<cli>");
try config.setColor(writer, .reset);
@ -1470,7 +1470,7 @@ fn testParseOutput(args: []const []const u8, expected_output: []const u8) !?Opti
var diagnostics = Diagnostics.init(std.testing.allocator);
defer diagnostics.deinit();
var output: std.io.Writer.Allocating = .init(std.testing.allocator);
var output: std.Io.Writer.Allocating = .init(std.testing.allocator);
defer output.deinit();
var options = parse(std.testing.allocator, args, &diagnostics) catch |err| switch (err) {

View file

@ -61,7 +61,7 @@ pub const Diagnostics = struct {
return @intCast(index);
}
pub fn renderToStdErr(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, tty_config: std.io.tty.Config, source_mappings: ?SourceMappings) void {
pub fn renderToStdErr(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, tty_config: std.Io.tty.Config, source_mappings: ?SourceMappings) void {
const stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
for (self.errors.items) |err_details| {
@ -70,7 +70,7 @@ pub const Diagnostics = struct {
}
pub fn renderToStdErrDetectTTY(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, source_mappings: ?SourceMappings) void {
const tty_config = std.io.tty.detectConfig(std.fs.File.stderr());
const tty_config = std.Io.tty.detectConfig(std.fs.File.stderr());
return self.renderToStdErr(cwd, source, tty_config, source_mappings);
}
@ -409,7 +409,7 @@ pub const ErrorDetails = struct {
failed_to_open_cwd,
};
fn formatToken(ctx: TokenFormatContext, writer: *std.io.Writer) std.io.Writer.Error!void {
fn formatToken(ctx: TokenFormatContext, writer: *std.Io.Writer) std.Io.Writer.Error!void {
switch (ctx.token.id) {
.eof => return writer.writeAll(ctx.token.id.nameForErrorDisplay()),
else => {},
@ -894,7 +894,7 @@ fn cellCount(code_page: SupportedCodePage, source: []const u8, start_index: usiz
const truncated_str = "<...truncated...>";
pub fn renderErrorMessage(writer: *std.io.Writer, tty_config: std.io.tty.Config, cwd: std.fs.Dir, err_details: ErrorDetails, source: []const u8, strings: []const []const u8, source_mappings: ?SourceMappings) !void {
pub fn renderErrorMessage(writer: *std.Io.Writer, tty_config: std.Io.tty.Config, cwd: std.fs.Dir, err_details: ErrorDetails, source: []const u8, strings: []const []const u8, source_mappings: ?SourceMappings) !void {
if (err_details.type == .hint) return;
const source_line_start = err_details.token.getLineStartForErrorDisplay(source);

View file

@ -24,7 +24,7 @@ pub fn main() !void {
const arena = arena_state.allocator();
const stderr = std.fs.File.stderr();
const stderr_config = std.io.tty.detectConfig(stderr);
const stderr_config = std.Io.tty.detectConfig(stderr);
const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);
@ -164,13 +164,14 @@ pub fn main() !void {
} else {
switch (options.input_source) {
.stdio => |file| {
break :full_input file.readToEndAlloc(allocator, std.math.maxInt(usize)) catch |err| {
var file_reader = file.reader(&.{});
break :full_input file_reader.interface.allocRemaining(allocator, .unlimited) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read input from stdin: {s}", .{@errorName(err)});
std.process.exit(1);
};
},
.filename => |input_filename| {
break :full_input std.fs.cwd().readFileAlloc(allocator, input_filename, std.math.maxInt(usize)) catch |err| {
break :full_input std.fs.cwd().readFileAlloc(input_filename, allocator, .unlimited) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) });
std.process.exit(1);
};
@ -462,7 +463,10 @@ const IoStream = struct {
pub fn readAll(self: Source, allocator: std.mem.Allocator) !Data {
return switch (self) {
inline .file, .stdio => |file| .{
.bytes = try file.readToEndAlloc(allocator, std.math.maxInt(usize)),
.bytes = b: {
var file_reader = file.reader(&.{});
break :b try file_reader.interface.allocRemaining(allocator, .unlimited);
},
.needs_free = true,
},
.memory => |list| .{ .bytes = list.items, .needs_free = false },
@ -621,7 +625,7 @@ const SourceMappings = @import("source_mapping.zig").SourceMappings;
const ErrorHandler = union(enum) {
server: std.zig.Server,
tty: std.io.tty.Config,
tty: std.Io.tty.Config,
pub fn emitCliDiagnostics(
self: *ErrorHandler,
@ -984,7 +988,7 @@ const MsgWriter = struct {
m.buf.appendSlice(msg) catch {};
}
pub fn setColor(m: *MsgWriter, color: std.io.tty.Color) void {
pub fn setColor(m: *MsgWriter, color: std.Io.tty.Color) void {
_ = m;
_ = color;
}

View file

@ -164,7 +164,7 @@ pub const Language = packed struct(u16) {
return @bitCast(self);
}
pub fn format(language: Language, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(language: Language, w: *std.Io.Writer) std.Io.Writer.Error!void {
const language_id = language.asInt();
const language_name = language_name: {
if (std.enums.fromInt(lang.LanguageId, language_id)) |lang_enum_val| {
@ -439,7 +439,7 @@ pub const NameOrOrdinal = union(enum) {
}
}
pub fn format(self: NameOrOrdinal, w: *std.io.Writer) !void {
pub fn format(self: NameOrOrdinal, w: *std.Io.Writer) !void {
switch (self) {
.name => |name| {
try w.print("{f}", .{std.unicode.fmtUtf16Le(name)});
@ -450,7 +450,7 @@ pub const NameOrOrdinal = union(enum) {
}
}
fn formatResourceType(self: NameOrOrdinal, w: *std.io.Writer) std.io.Writer.Error!void {
fn formatResourceType(self: NameOrOrdinal, w: *std.Io.Writer) std.Io.Writer.Error!void {
switch (self) {
.name => |name| {
try w.print("{f}", .{std.unicode.fmtUtf16Le(name)});

View file

@ -1,7 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
/// Like std.io.FixedBufferStream but does no bounds checking
pub const UncheckedSliceWriter = struct {
const Self = @This();
@ -86,7 +85,7 @@ pub const ErrorMessageType = enum { err, warning, note };
/// Used for generic colored errors/warnings/notes, more context-specific error messages
/// are handled elsewhere.
pub fn renderErrorMessage(writer: *std.io.Writer, config: std.io.tty.Config, msg_type: ErrorMessageType, comptime format: []const u8, args: anytype) !void {
pub fn renderErrorMessage(writer: *std.Io.Writer, config: std.Io.tty.Config, msg_type: ErrorMessageType, comptime format: []const u8, args: anytype) !void {
switch (msg_type) {
.err => {
try config.setColor(writer, .bold);

View file

@ -1,7 +1,6 @@
const builtin = @import("builtin");
const std = @import("std");
const mem = std.mem;
const io = std.io;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Cache = std.Build.Cache;
@ -174,7 +173,7 @@ fn serveDocsFile(
// The desired API is actually sendfile, which will require enhancing std.http.Server.
// We load the file with every request so that the user can make changes to the file
// and refresh the HTML page without restarting this server.
const file_contents = try context.lib_dir.readFileAlloc(gpa, name, 10 * 1024 * 1024);
const file_contents = try context.lib_dir.readFileAlloc(name, gpa, .limited(10 * 1024 * 1024));
defer gpa.free(file_contents);
try request.respond(file_contents, .{
.extra_headers = &.{
@ -264,7 +263,7 @@ fn serveWasm(
});
// std.http.Server does not have a sendfile API yet.
const bin_path = try wasm_base_path.join(arena, bin_name);
const file_contents = try bin_path.root_dir.handle.readFileAlloc(gpa, bin_path.sub_path, 10 * 1024 * 1024);
const file_contents = try bin_path.root_dir.handle.readFileAlloc(bin_path.sub_path, gpa, .limited(10 * 1024 * 1024));
defer gpa.free(file_contents);
try request.respond(file_contents, .{
.extra_headers = &.{
@ -318,7 +317,7 @@ fn buildWasmBinary(
child.stderr_behavior = .Pipe;
try child.spawn();
var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});

View file

@ -220,7 +220,7 @@ const Fuzzer = struct {
const i = f.corpus.items.len;
var buf: [30]u8 = undefined;
const input_sub_path = std.fmt.bufPrint(&buf, "{d}", .{i}) catch unreachable;
const input = f.corpus_directory.handle.readFileAlloc(gpa, input_sub_path, 1 << 31) catch |err| switch (err) {
const input = f.corpus_directory.handle.readFileAlloc(input_sub_path, gpa, .limited(1 << 31)) catch |err| switch (err) {
error.FileNotFound => {
// Make this one the next input.
const input_file = f.corpus_directory.handle.createFile(input_sub_path, .{

View file

@ -1,6 +1,5 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const io = std.io;
const fs = std.fs;
const mem = std.mem;
const debug = std.debug;
@ -1830,7 +1829,8 @@ pub fn runAllowFail(
try Step.handleVerbose2(b, null, child.env_map, argv);
try child.spawn();
const stdout = child.stdout.?.deprecatedReader().readAllAlloc(b.allocator, max_output_size) catch {
var stdout_reader = child.stdout.?.readerStreaming(&.{});
const stdout = stdout_reader.interface.allocRemaining(b.allocator, .limited(max_output_size)) catch {
return error.ReadFailure;
};
errdefer b.allocator.free(stdout);
@ -2540,7 +2540,7 @@ fn dumpBadDirnameHelp(
try w.print(msg, args);
const tty_config = std.io.tty.detectConfig(.stderr());
const tty_config = std.Io.tty.detectConfig(.stderr());
if (fail_step) |s| {
tty_config.setColor(w, .red) catch {};
@ -2566,8 +2566,8 @@ fn dumpBadDirnameHelp(
/// In this function the stderr mutex has already been locked.
pub fn dumpBadGetPathHelp(
s: *Step,
w: *std.io.Writer,
tty_config: std.io.tty.Config,
w: *std.Io.Writer,
tty_config: std.Io.tty.Config,
src_builder: *Build,
asking_step: ?*Step,
) anyerror!void {

View file

@ -286,7 +286,7 @@ pub const HashHelper = struct {
pub fn binToHex(bin_digest: BinDigest) HexDigest {
var out_digest: HexDigest = undefined;
var w: std.io.Writer = .fixed(&out_digest);
var w: std.Io.Writer = .fixed(&out_digest);
w.printHex(&bin_digest, .lower) catch unreachable;
return out_digest;
}
@ -664,7 +664,7 @@ pub const Manifest = struct {
const input_file_count = self.files.entries.len;
var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
var manifest_reader = self.manifest_file.?.reader(&tiny_buffer); // Reads positionally from zero.
const limit: std.io.Limit = .limited(manifest_file_size_max);
const limit: std.Io.Limit = .limited(manifest_file_size_max);
const file_contents = manifest_reader.interface.allocRemaining(gpa, limit) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.StreamTooLong => return error.OutOfMemory,
@ -1056,7 +1056,7 @@ pub const Manifest = struct {
fn addDepFileMaybePost(self: *Manifest, dir: fs.Dir, dep_file_basename: []const u8) !void {
const gpa = self.cache.gpa;
const dep_file_contents = try dir.readFileAlloc(gpa, dep_file_basename, manifest_file_size_max);
const dep_file_contents = try dir.readFileAlloc(dep_file_basename, gpa, .limited(manifest_file_size_max));
defer gpa.free(dep_file_contents);
var error_buf: std.ArrayListUnmanaged(u8) = .empty;
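The manifest read above pairs `std.Io.Limit` with `allocRemaining` on a positional `std.fs.File.Reader`; per the comment in the diff, the 1-byte buffer is what lets `allocRemaining` distinguish hitting the limit from end of stream. A condensed sketch:

```zig
const std = @import("std");

fn readLimited(gpa: std.mem.Allocator, file: std.fs.File, max: usize) ![]u8 {
    var tiny_buffer: [1]u8 = undefined;
    var file_reader = file.reader(&tiny_buffer); // reads positionally from zero
    const limit: std.Io.Limit = .limited(max);
    return file_reader.interface.allocRemaining(gpa, limit);
}
```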

View file

@ -56,7 +56,7 @@ pub fn closeAndFree(self: *Directory, gpa: Allocator) void {
self.* = undefined;
}
pub fn format(self: Directory, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: Directory, writer: *std.Io.Writer) std.Io.Writer.Error!void {
if (self.path) |p| {
try writer.writeAll(p);
try writer.writeAll(fs.path.sep_str);

View file

@ -151,7 +151,7 @@ pub fn fmtEscapeString(path: Path) std.fmt.Formatter(Path, formatEscapeString) {
return .{ .data = path };
}
pub fn formatEscapeString(path: Path, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn formatEscapeString(path: Path, writer: *std.Io.Writer) std.Io.Writer.Error!void {
if (path.root_dir.path) |p| {
try std.zig.stringEscape(p, writer);
if (path.sub_path.len > 0) try std.zig.stringEscape(fs.path.sep_str, writer);
@ -167,7 +167,7 @@ pub fn fmtEscapeChar(path: Path) std.fmt.Formatter(Path, formatEscapeChar) {
}
/// Deprecated, use double quoted escape to print paths.
pub fn formatEscapeChar(path: Path, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn formatEscapeChar(path: Path, writer: *std.Io.Writer) std.Io.Writer.Error!void {
if (path.root_dir.path) |p| {
for (p) |byte| try std.zig.charEscape(byte, writer);
if (path.sub_path.len > 0) try writer.writeByte(fs.path.sep);
@ -177,7 +177,7 @@ pub fn formatEscapeChar(path: Path, writer: *std.io.Writer) std.io.Writer.Error!
}
}
pub fn format(self: Path, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: Path, writer: *std.Io.Writer) std.Io.Writer.Error!void {
if (std.fs.path.isAbsolute(self.sub_path)) {
try writer.writeAll(self.sub_path);
return;

View file

@ -127,7 +127,7 @@ pub fn deinit(fuzz: *Fuzz) void {
gpa.free(fuzz.run_steps);
}
fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) void {
fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, ttyconf: std.Io.tty.Config, parent_prog_node: std.Progress.Node) void {
rebuildTestsWorkerRunFallible(run, gpa, ttyconf, parent_prog_node) catch |err| {
const compile = run.producer.?;
log.err("step '{s}': failed to rebuild in fuzz mode: {s}", .{
@ -136,7 +136,7 @@ fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, ttyconf: std.io.tty.Con
};
}
fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) !void {
fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: std.Io.tty.Config, parent_prog_node: std.Progress.Node) !void {
const compile = run.producer.?;
const prog_node = parent_prog_node.start(compile.step.name, 0);
defer prog_node.end();

View file

@ -53,7 +53,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
try step.singleUnchangingWatchInput(check_file.source);
const src_path = check_file.source.getPath2(b, step);
const contents = fs.cwd().readFileAlloc(b.allocator, src_path, check_file.max_bytes) catch |err| {
const contents = fs.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| {
return step.fail("unable to read '{s}': {s}", .{
src_path, @errorName(err),
});

View file

@ -6,7 +6,7 @@ const macho = std.macho;
const math = std.math;
const mem = std.mem;
const testing = std.testing;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const CheckObject = @This();
@ -553,14 +553,13 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
const src_path = check_object.source.getPath3(b, step);
const contents = src_path.root_dir.handle.readFileAllocOptions(
gpa,
src_path.sub_path,
check_object.max_bytes,
null,
gpa,
.limited(check_object.max_bytes),
.of(u64),
null,
) catch |err| return step.fail("unable to read '{f}': {s}", .{
std.fmt.alt(src_path, .formatEscapeChar), @errorName(err),
) catch |err| return step.fail("unable to read '{f}': {t}", .{
std.fmt.alt(src_path, .formatEscapeChar), err,
});
var vars: std.StringHashMap(u64) = .init(gpa);
@ -1462,7 +1461,7 @@ const MachODumper = struct {
const TrieIterator = struct {
stream: std.Io.Reader,
fn readUleb128(it: *TrieIterator) !u64 {
fn takeLeb128(it: *TrieIterator) !u64 {
return it.stream.takeLeb128(u64);
}
@ -1470,7 +1469,7 @@ const MachODumper = struct {
return it.stream.takeSentinel(0);
}
fn readByte(it: *TrieIterator) !u8 {
fn takeByte(it: *TrieIterator) !u8 {
return it.stream.takeByte();
}
};
@ -1518,12 +1517,12 @@ const MachODumper = struct {
prefix: []const u8,
exports: *std.array_list.Managed(Export),
) !void {
const size = try it.readUleb128();
const size = try it.takeLeb128();
if (size > 0) {
const flags = try it.readUleb128();
const flags = try it.takeLeb128();
switch (flags) {
macho.EXPORT_SYMBOL_FLAGS_REEXPORT => {
const ord = try it.readUleb128();
const ord = try it.takeLeb128();
const name = try arena.dupe(u8, try it.readString());
try exports.append(.{
.name = if (name.len > 0) name else prefix,
@ -1532,8 +1531,8 @@ const MachODumper = struct {
});
},
macho.EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER => {
const stub_offset = try it.readUleb128();
const resolver_offset = try it.readUleb128();
const stub_offset = try it.takeLeb128();
const resolver_offset = try it.takeLeb128();
try exports.append(.{
.name = prefix,
.tag = .stub_resolver,
@ -1544,7 +1543,7 @@ const MachODumper = struct {
});
},
else => {
const vmoff = try it.readUleb128();
const vmoff = try it.takeLeb128();
try exports.append(.{
.name = prefix,
.tag = .@"export",
@ -1563,10 +1562,10 @@ const MachODumper = struct {
}
}
const nedges = try it.readByte();
const nedges = try it.takeByte();
for (0..nedges) |_| {
const label = try it.readString();
const off = try it.readUleb128();
const off = try it.takeLeb128();
const prefix_label = try std.fmt.allocPrint(arena, "{s}{s}", .{ prefix, label });
const curr = it.stream.seek;
it.stream.seek = off;
@ -1701,11 +1700,10 @@ const ElfDumper = struct {
fn parseAndDumpArchive(step: *Step, check: Check, bytes: []const u8) ![]const u8 {
const gpa = step.owner.allocator;
var stream = std.io.fixedBufferStream(bytes);
const reader = stream.reader();
var reader: std.Io.Reader = .fixed(bytes);
const magic = try reader.readBytesNoEof(elf.ARMAG.len);
if (!mem.eql(u8, &magic, elf.ARMAG)) {
const magic = try reader.takeArray(elf.ARMAG.len);
if (!mem.eql(u8, magic, elf.ARMAG)) {
return error.InvalidArchiveMagicNumber;
}
@ -1722,28 +1720,26 @@ const ElfDumper = struct {
}
while (true) {
if (stream.pos >= ctx.data.len) break;
if (!mem.isAligned(stream.pos, 2)) stream.pos += 1;
if (reader.seek >= ctx.data.len) break;
if (!mem.isAligned(reader.seek, 2)) reader.seek += 1;
const hdr = try reader.readStruct(elf.ar_hdr);
const hdr = try reader.takeStruct(elf.ar_hdr, .little);
if (!mem.eql(u8, &hdr.ar_fmag, elf.ARFMAG)) return error.InvalidArchiveHeaderMagicNumber;
const size = try hdr.size();
defer {
_ = stream.seekBy(size) catch {};
}
defer reader.seek += size;
if (hdr.isSymtab()) {
try ctx.parseSymtab(ctx.data[stream.pos..][0..size], .p32);
try ctx.parseSymtab(ctx.data[reader.seek..][0..size], .p32);
continue;
}
if (hdr.isSymtab64()) {
try ctx.parseSymtab(ctx.data[stream.pos..][0..size], .p64);
try ctx.parseSymtab(ctx.data[reader.seek..][0..size], .p64);
continue;
}
if (hdr.isStrtab()) {
ctx.strtab = ctx.data[stream.pos..][0..size];
ctx.strtab = ctx.data[reader.seek..][0..size];
continue;
}
if (hdr.isSymdef() or hdr.isSymdefSorted()) continue;
@ -1755,7 +1751,7 @@ const ElfDumper = struct {
else
unreachable;
try ctx.objects.append(gpa, .{ .name = name, .off = stream.pos, .len = size });
try ctx.objects.append(gpa, .{ .name = name, .off = reader.seek, .len = size });
}
var output: std.Io.Writer.Allocating = .init(gpa);
@ -1783,11 +1779,10 @@ const ElfDumper = struct {
objects: std.ArrayListUnmanaged(struct { name: []const u8, off: usize, len: usize }) = .empty,
fn parseSymtab(ctx: *ArchiveContext, raw: []const u8, ptr_width: enum { p32, p64 }) !void {
var stream = std.io.fixedBufferStream(raw);
const reader = stream.reader();
var reader: std.Io.Reader = .fixed(raw);
const num = switch (ptr_width) {
.p32 => try reader.readInt(u32, .big),
.p64 => try reader.readInt(u64, .big),
.p32 => try reader.takeInt(u32, .big),
.p64 => try reader.takeInt(u64, .big),
};
const ptr_size: usize = switch (ptr_width) {
.p32 => @sizeOf(u32),
@ -1802,8 +1797,8 @@ const ElfDumper = struct {
var stroff: usize = 0;
for (0..num) |_| {
const off = switch (ptr_width) {
.p32 => try reader.readInt(u32, .big),
.p64 => try reader.readInt(u64, .big),
.p32 => try reader.takeInt(u32, .big),
.p64 => try reader.takeInt(u64, .big),
};
const name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + stroff)), 0);
stroff += name.len + 1;
@ -1868,10 +1863,9 @@ const ElfDumper = struct {
fn parseAndDumpObject(step: *Step, check: Check, bytes: []const u8) ![]const u8 {
const gpa = step.owner.allocator;
var stream = std.io.fixedBufferStream(bytes);
const reader = stream.reader();
var reader: std.Io.Reader = .fixed(bytes);
const hdr = try reader.readStruct(elf.Elf64_Ehdr);
const hdr = try reader.takeStruct(elf.Elf64_Ehdr, .little);
if (!mem.eql(u8, hdr.e_ident[0..4], "\x7fELF")) {
return error.InvalidMagicNumber;
}
@ -2360,10 +2354,9 @@ const WasmDumper = struct {
fn parseAndDump(step: *Step, check: Check, bytes: []const u8) ![]const u8 {
const gpa = step.owner.allocator;
var fbs = std.io.fixedBufferStream(bytes);
const reader = fbs.reader();
var reader: std.Io.Reader = .fixed(bytes);
const buf = try reader.readBytesNoEof(8);
const buf = try reader.takeArray(8);
if (!mem.eql(u8, buf[0..4], &std.wasm.magic)) {
return error.InvalidMagicByte;
}
@ -2373,7 +2366,7 @@ const WasmDumper = struct {
var output: std.Io.Writer.Allocating = .init(gpa);
defer output.deinit();
parseAndDumpInner(step, check, bytes, &fbs, &output.writer) catch |err| switch (err) {
parseAndDumpInner(step, check, bytes, &reader, &output.writer) catch |err| switch (err) {
error.EndOfStream => try output.writer.writeAll("\n<UnexpectedEndOfStream>"),
else => |e| return e,
};
@ -2384,21 +2377,19 @@ const WasmDumper = struct {
step: *Step,
check: Check,
bytes: []const u8,
fbs: *std.io.FixedBufferStream([]const u8),
reader: *std.Io.Reader,
writer: *std.Io.Writer,
) !void {
const reader = fbs.reader();
switch (check.kind) {
.headers => {
while (reader.readByte()) |current_byte| {
while (reader.takeByte()) |current_byte| {
const section = std.enums.fromInt(std.wasm.Section, current_byte) orelse {
return step.fail("Found invalid section id '{d}'", .{current_byte});
};
const section_length = try std.leb.readUleb128(u32, reader);
try parseAndDumpSection(step, section, bytes[fbs.pos..][0..section_length], writer);
fbs.pos += section_length;
const section_length = try reader.takeLeb128(u32);
try parseAndDumpSection(step, section, bytes[reader.seek..][0..section_length], writer);
reader.seek += section_length;
} else |_| {} // reached end of stream
},
@ -2410,10 +2401,9 @@ const WasmDumper = struct {
step: *Step,
section: std.wasm.Section,
data: []const u8,
writer: anytype,
writer: *std.Io.Writer,
) !void {
var fbs = std.io.fixedBufferStream(data);
const reader = fbs.reader();
var reader: std.Io.Reader = .fixed(data);
try writer.print(
\\Section {s}
@ -2432,31 +2422,31 @@ const WasmDumper = struct {
.code,
.data,
=> {
const entries = try std.leb.readUleb128(u32, reader);
const entries = try reader.takeLeb128(u32);
try writer.print("\nentries {d}\n", .{entries});
try parseSection(step, section, data[fbs.pos..], entries, writer);
try parseSection(step, section, data[reader.seek..], entries, writer);
},
.custom => {
const name_length = try std.leb.readUleb128(u32, reader);
const name = data[fbs.pos..][0..name_length];
fbs.pos += name_length;
const name_length = try reader.takeLeb128(u32);
const name = data[reader.seek..][0..name_length];
reader.seek += name_length;
try writer.print("\nname {s}\n", .{name});
if (mem.eql(u8, name, "name")) {
try parseDumpNames(step, reader, writer, data);
try parseDumpNames(step, &reader, writer, data);
} else if (mem.eql(u8, name, "producers")) {
try parseDumpProducers(reader, writer, data);
try parseDumpProducers(&reader, writer, data);
} else if (mem.eql(u8, name, "target_features")) {
try parseDumpFeatures(reader, writer, data);
try parseDumpFeatures(&reader, writer, data);
}
// TODO: Implement parsing and dumping other custom sections (such as relocations)
},
.start => {
const start = try std.leb.readUleb128(u32, reader);
const start = try reader.takeLeb128(u32);
try writer.print("\nstart {d}\n", .{start});
},
.data_count => {
const count = try std.leb.readUleb128(u32, reader);
const count = try reader.takeLeb128(u32);
try writer.print("\ncount {d}\n", .{count});
},
else => {}, // skip unknown sections
@ -2464,41 +2454,40 @@ const WasmDumper = struct {
}
fn parseSection(step: *Step, section: std.wasm.Section, data: []const u8, entries: u32, writer: anytype) !void {
var fbs = std.io.fixedBufferStream(data);
const reader = fbs.reader();
var reader: std.Io.Reader = .fixed(data);
switch (section) {
.type => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
const func_type = try reader.readByte();
const func_type = try reader.takeByte();
if (func_type != std.wasm.function_type) {
return step.fail("expected function type, found byte '{d}'", .{func_type});
}
const params = try std.leb.readUleb128(u32, reader);
const params = try reader.takeLeb128(u32);
try writer.print("params {d}\n", .{params});
var index: u32 = 0;
while (index < params) : (index += 1) {
_ = try parseDumpType(step, std.wasm.Valtype, reader, writer);
_ = try parseDumpType(step, std.wasm.Valtype, &reader, writer);
} else index = 0;
const returns = try std.leb.readUleb128(u32, reader);
const returns = try reader.takeLeb128(u32);
try writer.print("returns {d}\n", .{returns});
while (index < returns) : (index += 1) {
_ = try parseDumpType(step, std.wasm.Valtype, reader, writer);
_ = try parseDumpType(step, std.wasm.Valtype, &reader, writer);
}
}
},
.import => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
const module_name_len = try std.leb.readUleb128(u32, reader);
const module_name = data[fbs.pos..][0..module_name_len];
fbs.pos += module_name_len;
const name_len = try std.leb.readUleb128(u32, reader);
const name = data[fbs.pos..][0..name_len];
fbs.pos += name_len;
const module_name_len = try reader.takeLeb128(u32);
const module_name = data[reader.seek..][0..module_name_len];
reader.seek += module_name_len;
const name_len = try reader.takeLeb128(u32);
const name = data[reader.seek..][0..name_len];
reader.seek += name_len;
const kind = std.enums.fromInt(std.wasm.ExternalKind, try reader.readByte()) orelse {
const kind = std.enums.fromInt(std.wasm.ExternalKind, try reader.takeByte()) orelse {
return step.fail("invalid import kind", .{});
};
@ -2510,18 +2499,18 @@ const WasmDumper = struct {
try writer.writeByte('\n');
switch (kind) {
.function => {
try writer.print("index {d}\n", .{try std.leb.readUleb128(u32, reader)});
try writer.print("index {d}\n", .{try reader.takeLeb128(u32)});
},
.memory => {
try parseDumpLimits(reader, writer);
try parseDumpLimits(&reader, writer);
},
.global => {
_ = try parseDumpType(step, std.wasm.Valtype, reader, writer);
try writer.print("mutable {}\n", .{0x01 == try std.leb.readUleb128(u32, reader)});
_ = try parseDumpType(step, std.wasm.Valtype, &reader, writer);
try writer.print("mutable {}\n", .{0x01 == try reader.takeLeb128(u32)});
},
.table => {
_ = try parseDumpType(step, std.wasm.RefType, reader, writer);
try parseDumpLimits(reader, writer);
_ = try parseDumpType(step, std.wasm.RefType, &reader, writer);
try parseDumpLimits(&reader, writer);
},
}
}
@ -2529,41 +2518,41 @@ const WasmDumper = struct {
.function => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
try writer.print("index {d}\n", .{try std.leb.readUleb128(u32, reader)});
try writer.print("index {d}\n", .{try reader.takeLeb128(u32)});
}
},
.table => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
_ = try parseDumpType(step, std.wasm.RefType, reader, writer);
try parseDumpLimits(reader, writer);
_ = try parseDumpType(step, std.wasm.RefType, &reader, writer);
try parseDumpLimits(&reader, writer);
}
},
.memory => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
try parseDumpLimits(reader, writer);
try parseDumpLimits(&reader, writer);
}
},
.global => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
_ = try parseDumpType(step, std.wasm.Valtype, reader, writer);
try writer.print("mutable {}\n", .{0x01 == try std.leb.readUleb128(u1, reader)});
try parseDumpInit(step, reader, writer);
_ = try parseDumpType(step, std.wasm.Valtype, &reader, writer);
try writer.print("mutable {}\n", .{0x01 == try reader.takeLeb128(u1)});
try parseDumpInit(step, &reader, writer);
}
},
.@"export" => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
const name_len = try std.leb.readUleb128(u32, reader);
const name = data[fbs.pos..][0..name_len];
fbs.pos += name_len;
const kind_byte = try std.leb.readUleb128(u8, reader);
const name_len = try reader.takeLeb128(u32);
const name = data[reader.seek..][0..name_len];
reader.seek += name_len;
const kind_byte = try reader.takeLeb128(u8);
const kind = std.enums.fromInt(std.wasm.ExternalKind, kind_byte) orelse {
return step.fail("invalid export kind value '{d}'", .{kind_byte});
};
const index = try std.leb.readUleb128(u32, reader);
const index = try reader.takeLeb128(u32);
try writer.print(
\\name {s}
\\kind {s}
@ -2575,14 +2564,14 @@ const WasmDumper = struct {
.element => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
try writer.print("table index {d}\n", .{try std.leb.readUleb128(u32, reader)});
try parseDumpInit(step, reader, writer);
try writer.print("table index {d}\n", .{try reader.takeLeb128(u32)});
try parseDumpInit(step, &reader, writer);
const function_indexes = try std.leb.readUleb128(u32, reader);
const function_indexes = try reader.takeLeb128(u32);
var function_index: u32 = 0;
try writer.print("indexes {d}\n", .{function_indexes});
while (function_index < function_indexes) : (function_index += 1) {
try writer.print("index {d}\n", .{try std.leb.readUleb128(u32, reader)});
try writer.print("index {d}\n", .{try reader.takeLeb128(u32)});
}
}
},
@ -2590,27 +2579,27 @@ const WasmDumper = struct {
.data => {
var i: u32 = 0;
while (i < entries) : (i += 1) {
const flags = try std.leb.readUleb128(u32, reader);
const flags = try reader.takeLeb128(u32);
const index = if (flags & 0x02 != 0)
try std.leb.readUleb128(u32, reader)
try reader.takeLeb128(u32)
else
0;
try writer.print("memory index 0x{x}\n", .{index});
if (flags == 0) {
try parseDumpInit(step, reader, writer);
try parseDumpInit(step, &reader, writer);
}
const size = try std.leb.readUleb128(u32, reader);
const size = try reader.takeLeb128(u32);
try writer.print("size {d}\n", .{size});
try reader.skipBytes(size, .{}); // we do not care about the content of the segments
try reader.discardAll(size); // we do not care about the content of the segments
}
},
else => unreachable,
}
}
fn parseDumpType(step: *Step, comptime E: type, reader: anytype, writer: anytype) !E {
const byte = try reader.readByte();
fn parseDumpType(step: *Step, comptime E: type, reader: *std.Io.Reader, writer: *std.Io.Writer) !E {
const byte = try reader.takeByte();
const tag = std.enums.fromInt(E, byte) orelse {
return step.fail("invalid wasm type value '{d}'", .{byte});
};
@ -2619,65 +2608,65 @@ const WasmDumper = struct {
}
fn parseDumpLimits(reader: anytype, writer: anytype) !void {
const flags = try std.leb.readUleb128(u8, reader);
const min = try std.leb.readUleb128(u32, reader);
const flags = try reader.takeLeb128(u8);
const min = try reader.takeLeb128(u32);
try writer.print("min {x}\n", .{min});
if (flags != 0) {
try writer.print("max {x}\n", .{try std.leb.readUleb128(u32, reader)});
try writer.print("max {x}\n", .{try reader.takeLeb128(u32)});
}
}
fn parseDumpInit(step: *Step, reader: anytype, writer: anytype) !void {
const byte = try reader.readByte();
fn parseDumpInit(step: *Step, reader: *std.Io.Reader, writer: *std.Io.Writer) !void {
const byte = try reader.takeByte();
const opcode = std.enums.fromInt(std.wasm.Opcode, byte) orelse {
return step.fail("invalid wasm opcode '{d}'", .{byte});
};
switch (opcode) {
.i32_const => try writer.print("i32.const {x}\n", .{try std.leb.readIleb128(i32, reader)}),
.i64_const => try writer.print("i64.const {x}\n", .{try std.leb.readIleb128(i64, reader)}),
.f32_const => try writer.print("f32.const {x}\n", .{@as(f32, @bitCast(try reader.readInt(u32, .little)))}),
.f64_const => try writer.print("f64.const {x}\n", .{@as(f64, @bitCast(try reader.readInt(u64, .little)))}),
.global_get => try writer.print("global.get {x}\n", .{try std.leb.readUleb128(u32, reader)}),
.i32_const => try writer.print("i32.const {x}\n", .{try reader.takeLeb128(i32)}),
.i64_const => try writer.print("i64.const {x}\n", .{try reader.takeLeb128(i64)}),
.f32_const => try writer.print("f32.const {x}\n", .{@as(f32, @bitCast(try reader.takeInt(u32, .little)))}),
.f64_const => try writer.print("f64.const {x}\n", .{@as(f64, @bitCast(try reader.takeInt(u64, .little)))}),
.global_get => try writer.print("global.get {x}\n", .{try reader.takeLeb128(u32)}),
else => unreachable,
}
const end_opcode = try std.leb.readUleb128(u8, reader);
const end_opcode = try reader.takeLeb128(u8);
if (end_opcode != @intFromEnum(std.wasm.Opcode.end)) {
return step.fail("expected 'end' opcode in init expression", .{});
}
}
/// https://webassembly.github.io/spec/core/appendix/custom.html
fn parseDumpNames(step: *Step, reader: anytype, writer: anytype, data: []const u8) !void {
while (reader.context.pos < data.len) {
fn parseDumpNames(step: *Step, reader: *std.Io.Reader, writer: *std.Io.Writer, data: []const u8) !void {
while (reader.seek < data.len) {
switch (try parseDumpType(step, std.wasm.NameSubsection, reader, writer)) {
// The module name subsection ... consists of a single name
// that is assigned to the module itself.
.module => {
const size = try std.leb.readUleb128(u32, reader);
const name_len = try std.leb.readUleb128(u32, reader);
const size = try reader.takeLeb128(u32);
const name_len = try reader.takeLeb128(u32);
if (size != name_len + 1) return error.BadSubsectionSize;
if (reader.context.pos + name_len > data.len) return error.UnexpectedEndOfStream;
try writer.print("name {s}\n", .{data[reader.context.pos..][0..name_len]});
reader.context.pos += name_len;
if (reader.seek + name_len > data.len) return error.UnexpectedEndOfStream;
try writer.print("name {s}\n", .{data[reader.seek..][0..name_len]});
reader.seek += name_len;
},
// The function name subsection ... consists of a name map
// assigning function names to function indices.
.function, .global, .data_segment => {
const size = try std.leb.readUleb128(u32, reader);
const entries = try std.leb.readUleb128(u32, reader);
const size = try reader.takeLeb128(u32);
const entries = try reader.takeLeb128(u32);
try writer.print(
\\size {d}
\\names {d}
\\
, .{ size, entries });
for (0..entries) |_| {
const index = try std.leb.readUleb128(u32, reader);
const name_len = try std.leb.readUleb128(u32, reader);
if (reader.context.pos + name_len > data.len) return error.UnexpectedEndOfStream;
const name = data[reader.context.pos..][0..name_len];
reader.context.pos += name.len;
const index = try reader.takeLeb128(u32);
const name_len = try reader.takeLeb128(u32);
if (reader.seek + name_len > data.len) return error.UnexpectedEndOfStream;
const name = data[reader.seek..][0..name_len];
reader.seek += name.len;
try writer.print(
\\index {d}
@ -2699,16 +2688,16 @@ const WasmDumper = struct {
}
}
fn parseDumpProducers(reader: anytype, writer: anytype, data: []const u8) !void {
const field_count = try std.leb.readUleb128(u32, reader);
fn parseDumpProducers(reader: *std.Io.Reader, writer: *std.Io.Writer, data: []const u8) !void {
const field_count = try reader.takeLeb128(u32);
try writer.print("fields {d}\n", .{field_count});
var current_field: u32 = 0;
while (current_field < field_count) : (current_field += 1) {
const field_name_length = try std.leb.readUleb128(u32, reader);
const field_name = data[reader.context.pos..][0..field_name_length];
reader.context.pos += field_name_length;
const field_name_length = try reader.takeLeb128(u32);
const field_name = data[reader.seek..][0..field_name_length];
reader.seek += field_name_length;
const value_count = try std.leb.readUleb128(u32, reader);
const value_count = try reader.takeLeb128(u32);
try writer.print(
\\field_name {s}
\\values {d}
@ -2716,13 +2705,13 @@ const WasmDumper = struct {
try writer.writeByte('\n');
var current_value: u32 = 0;
while (current_value < value_count) : (current_value += 1) {
const value_length = try std.leb.readUleb128(u32, reader);
const value = data[reader.context.pos..][0..value_length];
reader.context.pos += value_length;
const value_length = try reader.takeLeb128(u32);
const value = data[reader.seek..][0..value_length];
reader.seek += value_length;
const version_length = try std.leb.readUleb128(u32, reader);
const version = data[reader.context.pos..][0..version_length];
reader.context.pos += version_length;
const version_length = try reader.takeLeb128(u32);
const version = data[reader.seek..][0..version_length];
reader.seek += version_length;
try writer.print(
\\value_name {s}
@ -2733,16 +2722,16 @@ const WasmDumper = struct {
}
}
fn parseDumpFeatures(reader: anytype, writer: anytype, data: []const u8) !void {
const feature_count = try std.leb.readUleb128(u32, reader);
fn parseDumpFeatures(reader: *std.Io.Reader, writer: *std.Io.Writer, data: []const u8) !void {
const feature_count = try reader.takeLeb128(u32);
try writer.print("features {d}\n", .{feature_count});
var index: u32 = 0;
while (index < feature_count) : (index += 1) {
const prefix_byte = try std.leb.readUleb128(u8, reader);
const name_length = try std.leb.readUleb128(u32, reader);
const feature_name = data[reader.context.pos..][0..name_length];
reader.context.pos += name_length;
const prefix_byte = try reader.takeLeb128(u8);
const name_length = try reader.takeLeb128(u32);
const feature_name = data[reader.seek..][0..name_length];
reader.seek += name_length;
try writer.print("{c} {s}\n", .{ prefix_byte, feature_name });
}
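The dumper rewrites above replace `std.leb.readUleb128(T, reader)` with the `Reader.takeLeb128(T)` method, and `FixedBufferStream`'s `pos` with the reader's plain `seek` field. A small sketch of both (the byte sequence is the standard ULEB128 example, not taken from this diff):

```zig
const std = @import("std");

test "LEB128 decoding with a fixed Reader" {
    // 0xE5 0x8E 0x26 is the standard ULEB128 encoding of 624485.
    var r: std.Io.Reader = .fixed(&.{ 0xE5, 0x8E, 0x26 });
    try std.testing.expectEqual(@as(u64, 624485), try r.takeLeb128(u64));
    // The cursor is a plain field, replacing FixedBufferStream's `pos`.
    try std.testing.expectEqual(@as(usize, 3), r.seek);
}
```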

View file

@ -2021,7 +2021,7 @@ fn checkCompileErrors(compile: *Compile) !void {
const arena = compile.step.owner.allocator;
const actual_errors = ae: {
var aw: std.io.Writer.Allocating = .init(arena);
var aw: std.Io.Writer.Allocating = .init(arena);
defer aw.deinit();
try actual_eb.renderToWriter(.{
.ttyconf = .no_color,

View file

@ -2,7 +2,7 @@ const std = @import("std");
const ConfigHeader = @This();
const Step = std.Build.Step;
const Allocator = std.mem.Allocator;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
pub const Style = union(enum) {
/// A configure format supported by autotools that uses `#undef foo` to
@ -196,7 +196,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
man.hash.addBytes(config_header.include_path);
man.hash.addOptionalBytes(config_header.include_guard_override);
var aw: std.io.Writer.Allocating = .init(gpa);
var aw: Writer.Allocating = .init(gpa);
defer aw.deinit();
const bw = &aw.writer;
@ -208,7 +208,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
.autoconf_undef, .autoconf_at => |file_source| {
try bw.writeAll(c_generated_line);
const src_path = file_source.getPath2(b, step);
const contents = std.fs.cwd().readFileAlloc(arena, src_path, config_header.max_bytes) catch |err| {
const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| {
return step.fail("unable to read autoconf input file '{s}': {s}", .{
src_path, @errorName(err),
});
@ -222,7 +222,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
.cmake => |file_source| {
try bw.writeAll(c_generated_line);
const src_path = file_source.getPath2(b, step);
const contents = std.fs.cwd().readFileAlloc(arena, src_path, config_header.max_bytes) catch |err| {
const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| {
return step.fail("unable to read cmake input file '{s}': {s}", .{
src_path, @errorName(err),
});
@ -329,7 +329,7 @@ fn render_autoconf_undef(
fn render_autoconf_at(
step: *Step,
contents: []const u8,
aw: *std.io.Writer.Allocating,
aw: *Writer.Allocating,
values: std.StringArrayHashMap(Value),
src_path: []const u8,
) !void {
@ -753,7 +753,7 @@ fn testReplaceVariablesAutoconfAt(
expected: []const u8,
values: std.StringArrayHashMap(Value),
) !void {
var aw: std.io.Writer.Allocating = .init(allocator);
var aw: Writer.Allocating = .init(allocator);
defer aw.deinit();
const used = try allocator.alloc(bool, values.count());
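`std.Io.Writer.Allocating` replaces ArrayList-backed writers throughout this commit: it grows an allocated buffer behind an ordinary `*std.Io.Writer`. A small sketch of its lifecycle, assuming the Allocating writer retains everything written in its buffer so that `buffered()` exposes the accumulated output:

```zig
const std = @import("std");

test "Writer.Allocating grows a buffer behind a *std.Io.Writer" {
    var aw: std.Io.Writer.Allocating = .init(std.testing.allocator);
    defer aw.deinit();
    try aw.writer.print("{s} {d}", .{ "entries", 3 });
    // Assumption: Allocating keeps all written bytes buffered.
    try std.testing.expectEqualStrings("entries 3", aw.writer.buffered());
}
```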

View file

@ -9,7 +9,6 @@ const InstallDir = std.Build.InstallDir;
const Step = std.Build.Step;
const elf = std.elf;
const fs = std.fs;
const io = std.io;
const sort = std.sort;
pub const base_id: Step.Id = .objcopy;

View file

@ -3,7 +3,7 @@ thread_pool: *std.Thread.Pool,
graph: *const Build.Graph,
all_steps: []const *Build.Step,
listen_address: std.net.Address,
ttyconf: std.io.tty.Config,
ttyconf: std.Io.tty.Config,
root_prog_node: std.Progress.Node,
watch: bool,
@ -53,7 +53,7 @@ pub const Options = struct {
thread_pool: *std.Thread.Pool,
graph: *const std.Build.Graph,
all_steps: []const *Build.Step,
ttyconf: std.io.tty.Config,
ttyconf: std.Io.tty.Config,
root_prog_node: std.Progress.Node,
watch: bool,
listen_address: std.net.Address,
@ -446,7 +446,7 @@ pub fn serveFile(
// The desired API is actually sendfile, which will require enhancing http.Server.
// We load the file with every request so that the user can make changes to the file
// and refresh the HTML page without restarting this server.
const file_contents = path.root_dir.handle.readFileAlloc(gpa, path.sub_path, 10 * 1024 * 1024) catch |err| {
const file_contents = path.root_dir.handle.readFileAlloc(path.sub_path, gpa, .limited(10 * 1024 * 1024)) catch |err| {
log.err("failed to read '{f}': {s}", .{ path, @errorName(err) });
return error.AlreadyReported;
};
@ -557,7 +557,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
child.stderr_behavior = .Pipe;
try child.spawn();
var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});

View file
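The `readFileAlloc` call sites in this commit all follow the same mechanical rewrite: the path now comes first, the allocator second, and the old bare byte-count argument becomes a `std.Io.Limit`. A hedged sketch of the new shape (the file name is hypothetical):

    const std = @import("std");

    fn loadConfig(gpa: std.mem.Allocator) ![]u8 {
        // New argument order: path, allocator, then a Limit instead of a max byte count.
        return std.fs.cwd().readFileAlloc("config.json", gpa, .limited(1024 * 1024));
    }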

@ -82,202 +82,6 @@ pub const Limit = enum(usize) {
pub const Reader = @import("Io/Reader.zig");
pub const Writer = @import("Io/Writer.zig");
/// Deprecated in favor of `Reader`.
pub fn GenericReader(
comptime Context: type,
comptime ReadError: type,
/// Returns the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
comptime readFn: fn (context: Context, buffer: []u8) ReadError!usize,
) type {
return struct {
context: Context,
pub const Error = ReadError;
pub const NoEofError = ReadError || error{
EndOfStream,
};
pub inline fn read(self: Self, buffer: []u8) Error!usize {
return readFn(self.context, buffer);
}
pub inline fn readAll(self: Self, buffer: []u8) Error!usize {
return @errorCast(self.any().readAll(buffer));
}
pub inline fn readAtLeast(self: Self, buffer: []u8, len: usize) Error!usize {
return @errorCast(self.any().readAtLeast(buffer, len));
}
pub inline fn readNoEof(self: Self, buf: []u8) NoEofError!void {
return @errorCast(self.any().readNoEof(buf));
}
pub inline fn readAllArrayList(
self: Self,
array_list: *std.array_list.Managed(u8),
max_append_size: usize,
) (error{StreamTooLong} || Allocator.Error || Error)!void {
return @errorCast(self.any().readAllArrayList(array_list, max_append_size));
}
pub inline fn readAllArrayListAligned(
self: Self,
comptime alignment: ?Alignment,
array_list: *std.array_list.AlignedManaged(u8, alignment),
max_append_size: usize,
) (error{StreamTooLong} || Allocator.Error || Error)!void {
return @errorCast(self.any().readAllArrayListAligned(
alignment,
array_list,
max_append_size,
));
}
pub inline fn readAllAlloc(
self: Self,
allocator: Allocator,
max_size: usize,
) (Error || Allocator.Error || error{StreamTooLong})![]u8 {
return @errorCast(self.any().readAllAlloc(allocator, max_size));
}
pub inline fn streamUntilDelimiter(
self: Self,
writer: anytype,
delimiter: u8,
optional_max_size: ?usize,
) (NoEofError || error{StreamTooLong} || @TypeOf(writer).Error)!void {
return @errorCast(self.any().streamUntilDelimiter(
writer,
delimiter,
optional_max_size,
));
}
pub inline fn skipUntilDelimiterOrEof(self: Self, delimiter: u8) Error!void {
return @errorCast(self.any().skipUntilDelimiterOrEof(delimiter));
}
pub inline fn readByte(self: Self) NoEofError!u8 {
return @errorCast(self.any().readByte());
}
pub inline fn readByteSigned(self: Self) NoEofError!i8 {
return @errorCast(self.any().readByteSigned());
}
pub inline fn readBytesNoEof(
self: Self,
comptime num_bytes: usize,
) NoEofError![num_bytes]u8 {
return @errorCast(self.any().readBytesNoEof(num_bytes));
}
pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) NoEofError!T {
return @errorCast(self.any().readInt(T, endian));
}
pub inline fn readVarInt(
self: Self,
comptime ReturnType: type,
endian: std.builtin.Endian,
size: usize,
) NoEofError!ReturnType {
return @errorCast(self.any().readVarInt(ReturnType, endian, size));
}
pub const SkipBytesOptions = AnyReader.SkipBytesOptions;
pub inline fn skipBytes(
self: Self,
num_bytes: u64,
comptime options: SkipBytesOptions,
) NoEofError!void {
return @errorCast(self.any().skipBytes(num_bytes, options));
}
pub inline fn isBytes(self: Self, slice: []const u8) NoEofError!bool {
return @errorCast(self.any().isBytes(slice));
}
pub inline fn readStruct(self: Self, comptime T: type) NoEofError!T {
return @errorCast(self.any().readStruct(T));
}
pub inline fn readStructEndian(self: Self, comptime T: type, endian: std.builtin.Endian) NoEofError!T {
return @errorCast(self.any().readStructEndian(T, endian));
}
pub const ReadEnumError = NoEofError || error{
/// An integer was read, but it did not match any of the tags in the supplied enum.
InvalidValue,
};
pub inline fn readEnum(
self: Self,
comptime Enum: type,
endian: std.builtin.Endian,
) ReadEnumError!Enum {
return @errorCast(self.any().readEnum(Enum, endian));
}
pub inline fn any(self: *const Self) AnyReader {
return .{
.context = @ptrCast(&self.context),
.readFn = typeErasedReadFn,
};
}
const Self = @This();
fn typeErasedReadFn(context: *const anyopaque, buffer: []u8) anyerror!usize {
const ptr: *const Context = @ptrCast(@alignCast(context));
return readFn(ptr.*, buffer);
}
/// Helper for bridging to the new `Reader` API while upgrading.
pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
return .{
.derp_reader = self.*,
.new_interface = .{
.buffer = buffer,
.vtable = &.{ .stream = Adapter.stream },
.seek = 0,
.end = 0,
},
};
}
pub const Adapter = struct {
derp_reader: Self,
new_interface: Reader,
err: ?Error = null,
fn stream(r: *Reader, w: *Writer, limit: Limit) Reader.StreamError!usize {
const a: *@This() = @alignCast(@fieldParentPtr("new_interface", r));
const buf = limit.slice(try w.writableSliceGreedy(1));
const n = a.derp_reader.read(buf) catch |err| {
a.err = err;
return error.ReadFailed;
};
if (n == 0) return error.EndOfStream;
w.advance(n);
return n;
}
};
};
}
/// Deprecated in favor of `Reader`.
pub const AnyReader = @import("Io/DeprecatedReader.zig");
/// Deprecated in favor of `Reader`.
pub const FixedBufferStream = @import("Io/fixed_buffer_stream.zig").FixedBufferStream;
/// Deprecated in favor of `Reader`.
pub const fixedBufferStream = @import("Io/fixed_buffer_stream.zig").fixedBufferStream;
pub const tty = @import("Io/tty.zig");
pub fn poll(
@ -746,7 +550,6 @@ pub fn PollFiles(comptime StreamEnum: type) type {
test {
_ = Reader;
_ = Writer;
_ = FixedBufferStream;
_ = tty;
_ = @import("Io/test.zig");
}
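With `GenericReader` gone, stream-consuming functions take a concrete `*std.Io.Reader` instead of `anytype`, so a single non-generic body serves every stream source. A sketch of the post-deletion style (`countLines` is illustrative, not from the commit):

    const std = @import("std");

    fn countLines(r: *std.Io.Reader) !usize {
        var n: usize = 0;
        while (true) {
            const byte = r.takeByte() catch |err| switch (err) {
                error.EndOfStream => return n, // end of stream terminates the count
                else => |e| return e,
            };
            if (byte == '\n') n += 1;
        }
    }

    test "countLines over a fixed reader" {
        var r: std.Io.Reader = .fixed("a\nb\n");
        try std.testing.expectEqual(@as(usize, 2), try countLines(&r));
    }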

View file

@ -1,292 +0,0 @@
context: *const anyopaque,
readFn: *const fn (context: *const anyopaque, buffer: []u8) anyerror!usize,
pub const Error = anyerror;
/// Returns the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
pub fn read(self: Self, buffer: []u8) anyerror!usize {
return self.readFn(self.context, buffer);
}
/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
pub fn readAll(self: Self, buffer: []u8) anyerror!usize {
return readAtLeast(self, buffer, buffer.len);
}
/// Returns the number of bytes read, calling the underlying read
/// function the minimal number of times until the buffer has at least
/// `len` bytes filled. If the number read is less than `len` it means
/// the stream reached the end. Reaching the end of the stream is not
/// an error condition.
pub fn readAtLeast(self: Self, buffer: []u8, len: usize) anyerror!usize {
assert(len <= buffer.len);
var index: usize = 0;
while (index < len) {
const amt = try self.read(buffer[index..]);
if (amt == 0) break;
index += amt;
}
return index;
}
/// If the number read would be smaller than `buf.len`, `error.EndOfStream` is returned instead.
pub fn readNoEof(self: Self, buf: []u8) anyerror!void {
const amt_read = try self.readAll(buf);
if (amt_read < buf.len) return error.EndOfStream;
}
/// Appends to the `std.array_list.Managed` contents by reading from the stream
/// until end of stream is found.
/// If the number of bytes appended would exceed `max_append_size`,
/// `error.StreamTooLong` is returned
/// and the `std.array_list.Managed` has exactly `max_append_size` bytes appended.
pub fn readAllArrayList(
self: Self,
array_list: *std.array_list.Managed(u8),
max_append_size: usize,
) anyerror!void {
return self.readAllArrayListAligned(null, array_list, max_append_size);
}
pub fn readAllArrayListAligned(
self: Self,
comptime alignment: ?Alignment,
array_list: *std.array_list.AlignedManaged(u8, alignment),
max_append_size: usize,
) anyerror!void {
try array_list.ensureTotalCapacity(@min(max_append_size, 4096));
const original_len = array_list.items.len;
var start_index: usize = original_len;
while (true) {
array_list.expandToCapacity();
const dest_slice = array_list.items[start_index..];
const bytes_read = try self.readAll(dest_slice);
start_index += bytes_read;
if (start_index - original_len > max_append_size) {
array_list.shrinkAndFree(original_len + max_append_size);
return error.StreamTooLong;
}
if (bytes_read != dest_slice.len) {
array_list.shrinkAndFree(start_index);
return;
}
// This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
try array_list.ensureTotalCapacity(start_index + 1);
}
}
/// Allocates enough memory to hold all the contents of the stream. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyerror![]u8 {
var array_list = std.array_list.Managed(u8).init(allocator);
defer array_list.deinit();
try self.readAllArrayList(&array_list, max_size);
return try array_list.toOwnedSlice();
}
/// Appends to the `writer` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
/// If `optional_max_size` is not null and the number of bytes written exceeds `optional_max_size`,
/// returns `error.StreamTooLong` and finishes appending.
/// If `optional_max_size` is null, appending is unbounded.
pub fn streamUntilDelimiter(
self: Self,
writer: anytype,
delimiter: u8,
optional_max_size: ?usize,
) anyerror!void {
if (optional_max_size) |max_size| {
for (0..max_size) |_| {
const byte: u8 = try self.readByte();
if (byte == delimiter) return;
try writer.writeByte(byte);
}
return error.StreamTooLong;
} else {
while (true) {
const byte: u8 = try self.readByte();
if (byte == delimiter) return;
try writer.writeByte(byte);
}
// Cannot throw `error.StreamTooLong` since there is no boundary.
}
}
/// Reads from the stream until specified byte is found, discarding all data,
/// including the delimiter.
/// If end-of-stream is found, this function succeeds.
pub fn skipUntilDelimiterOrEof(self: Self, delimiter: u8) anyerror!void {
while (true) {
const byte = self.readByte() catch |err| switch (err) {
error.EndOfStream => return,
else => |e| return e,
};
if (byte == delimiter) return;
}
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
pub fn readByte(self: Self) anyerror!u8 {
var result: [1]u8 = undefined;
const amt_read = try self.read(result[0..]);
if (amt_read < 1) return error.EndOfStream;
return result[0];
}
/// Same as `readByte` except the returned byte is signed.
pub fn readByteSigned(self: Self) anyerror!i8 {
return @as(i8, @bitCast(try self.readByte()));
}
/// Reads exactly `num_bytes` bytes and returns as an array.
/// `num_bytes` must be comptime-known
pub fn readBytesNoEof(self: Self, comptime num_bytes: usize) anyerror![num_bytes]u8 {
var bytes: [num_bytes]u8 = undefined;
try self.readNoEof(&bytes);
return bytes;
}
pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
const bytes = try self.readBytesNoEof(@divExact(@typeInfo(T).int.bits, 8));
return mem.readInt(T, &bytes, endian);
}
pub fn readVarInt(
self: Self,
comptime ReturnType: type,
endian: std.builtin.Endian,
size: usize,
) anyerror!ReturnType {
assert(size <= @sizeOf(ReturnType));
var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
const bytes = bytes_buf[0..size];
try self.readNoEof(bytes);
return mem.readVarInt(ReturnType, bytes, endian);
}
/// Optional parameters for `skipBytes`
pub const SkipBytesOptions = struct {
buf_size: usize = 512,
};
// `num_bytes` is a `u64` to match `off_t`
/// Reads `num_bytes` bytes from the stream and discards them
pub fn skipBytes(self: Self, num_bytes: u64, comptime options: SkipBytesOptions) anyerror!void {
var buf: [options.buf_size]u8 = undefined;
var remaining = num_bytes;
while (remaining > 0) {
const amt = @min(remaining, options.buf_size);
try self.readNoEof(buf[0..amt]);
remaining -= amt;
}
}
/// Reads `slice.len` bytes from the stream and returns if they are the same as the passed slice
pub fn isBytes(self: Self, slice: []const u8) anyerror!bool {
var i: usize = 0;
var matches = true;
while (i < slice.len) : (i += 1) {
if (slice[i] != try self.readByte()) {
matches = false;
}
}
return matches;
}
pub fn readStruct(self: Self, comptime T: type) anyerror!T {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).@"struct".layout != .auto);
var res: [1]T = undefined;
try self.readNoEof(mem.sliceAsBytes(res[0..]));
return res[0];
}
pub fn readStructEndian(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
var res = try self.readStruct(T);
if (native_endian != endian) {
mem.byteSwapAllFields(T, &res);
}
return res;
}
/// Reads an integer with the same size as the given enum's tag type. If the integer matches
/// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns `error.InvalidValue`.
/// TODO optimization taking advantage of most fields being in order
pub fn readEnum(self: Self, comptime Enum: type, endian: std.builtin.Endian) anyerror!Enum {
const E = error{
/// An integer was read, but it did not match any of the tags in the supplied enum.
InvalidValue,
};
const type_info = @typeInfo(Enum).@"enum";
const tag = try self.readInt(type_info.tag_type, endian);
inline for (std.meta.fields(Enum)) |field| {
if (tag == field.value) {
return @field(Enum, field.name);
}
}
return E.InvalidValue;
}
/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discard(self: Self) anyerror!u64 {
var trash: [4096]u8 = undefined;
var index: u64 = 0;
while (true) {
const n = try self.read(&trash);
if (n == 0) return index;
index += n;
}
}
/// Helper for bridging to the new `Reader` API while upgrading.
pub fn adaptToNewApi(self: *const Self, buffer: []u8) Adapter {
return .{
.derp_reader = self.*,
.new_interface = .{
.buffer = buffer,
.vtable = &.{ .stream = Adapter.stream },
.seek = 0,
.end = 0,
},
};
}
pub const Adapter = struct {
derp_reader: Self,
new_interface: std.io.Reader,
err: ?Error = null,
fn stream(r: *std.io.Reader, w: *std.io.Writer, limit: std.io.Limit) std.io.Reader.StreamError!usize {
const a: *@This() = @alignCast(@fieldParentPtr("new_interface", r));
const buf = limit.slice(try w.writableSliceGreedy(1));
const n = a.derp_reader.read(buf) catch |err| {
a.err = err;
return error.ReadFailed;
};
if (n == 0) return error.EndOfStream;
w.advance(n);
return n;
}
};
const std = @import("../std.zig");
const Self = @This();
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();
const Alignment = std.mem.Alignment;

View file

@ -4,12 +4,12 @@ const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();
const std = @import("../std.zig");
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const Limit = std.Io.Limit;
const assert = std.debug.assert;
const testing = std.testing;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const Limit = std.io.Limit;
pub const Limited = @import("Reader/Limited.zig");
@ -292,6 +292,23 @@ pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocErro
return buffer.toOwnedSlice(gpa);
}
pub fn allocRemainingAlignedSentinel(
r: *Reader,
gpa: Allocator,
limit: Limit,
comptime alignment: std.mem.Alignment,
comptime sentinel: ?u8,
) LimitedAllocError!(if (sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
var buffer: std.array_list.Aligned(u8, alignment) = .empty;
defer buffer.deinit(gpa);
try appendRemainingAligned(r, gpa, alignment, &buffer, limit);
if (sentinel) |s| {
return buffer.toOwnedSliceSentinel(gpa, s);
} else {
return buffer.toOwnedSlice(gpa);
}
}
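A hedged usage sketch of the new `allocRemainingAlignedSentinel`, which drains the rest of a stream into a freshly allocated, sentinel-terminated slice:

    const std = @import("std");

    test "allocRemainingAlignedSentinel sketch" {
        const gpa = std.testing.allocator;
        var r: std.Io.Reader = .fixed("hello");
        // Read everything left in the stream into an allocated NUL-terminated slice.
        const text = try r.allocRemainingAlignedSentinel(gpa, .unlimited, .of(u8), 0);
        defer gpa.free(text);
        try std.testing.expectEqualStrings("hello", text);
    }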
/// Transfers all bytes from the current position to the end of the stream, up
/// to `limit`, appending them to `list`.
///
@ -308,15 +325,30 @@ pub fn appendRemaining(
list: *ArrayList(u8),
limit: Limit,
) LimitedAllocError!void {
var a: std.Io.Writer.Allocating = .initOwnedSlice(gpa, list.allocatedSlice());
a.writer.end = list.items.len;
list.* = .empty;
defer {
list.* = .{
.items = a.writer.buffer[0..a.writer.end],
.capacity = a.writer.buffer.len,
};
}
return appendRemainingAligned(r, gpa, .of(u8), list, limit);
}
/// Transfers all bytes from the current position to the end of the stream, up
/// to `limit`, appending them to `list`.
///
/// If `limit` is reached or exceeded, `error.StreamTooLong` is returned
/// instead. In such case, the next byte that would be read will be the first
/// one to exceed `limit`, and all preceding bytes have been appended to
/// `list`.
///
/// See also:
/// * `appendRemaining`
/// * `allocRemainingAligned`
pub fn appendRemainingAligned(
r: *Reader,
gpa: Allocator,
comptime alignment: std.mem.Alignment,
list: *std.array_list.Aligned(u8, alignment),
limit: Limit,
) LimitedAllocError!void {
var a = std.Io.Writer.Allocating.fromArrayListAligned(gpa, alignment, list);
defer list.* = a.toArrayListAligned(alignment);
var remaining = limit;
while (remaining.nonzero()) {
const n = stream(r, &a.writer, remaining) catch |err| switch (err) {
@ -1584,7 +1616,7 @@ test readVec {
test "expected error.EndOfStream" {
// Unit test inspired by https://github.com/ziglang/zig/issues/17733
var buffer: [3]u8 = undefined;
var r: std.io.Reader = .fixed(&buffer);
var r: std.Io.Reader = .fixed(&buffer);
r.end = 0; // capacity 3, but empty
try std.testing.expectError(error.EndOfStream, r.takeEnum(enum(u8) { a, b }, .little));
try std.testing.expectError(error.EndOfStream, r.take(3));
@ -1639,15 +1671,6 @@ fn failingDiscard(r: *Reader, limit: Limit) Error!usize {
return error.ReadFailed;
}
pub fn adaptToOldInterface(r: *Reader) std.Io.AnyReader {
return .{ .context = r, .readFn = derpRead };
}
fn derpRead(context: *const anyopaque, buffer: []u8) anyerror!usize {
const r: *Reader = @ptrCast(@alignCast(@constCast(context)));
return r.readSliceShort(buffer);
}
test "readAlloc when the backing reader provides one byte at a time" {
const str = "This is a test";
var tiny_buffer: [1]u8 = undefined;
@ -1870,6 +1893,94 @@ pub fn writableVector(r: *Reader, buffer: [][]u8, data: []const []u8) Error!stru
return .{ i, n };
}
test "deserialize signed LEB128" {
// Truncated
try testing.expectError(error.EndOfStream, testLeb128(i64, "\x80"));
// Overflow
try testing.expectError(error.Overflow, testLeb128(i8, "\x80\x80\x40"));
try testing.expectError(error.Overflow, testLeb128(i16, "\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, testLeb128(i32, "\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, testLeb128(i8, "\xff\x7e"));
try testing.expectError(error.Overflow, testLeb128(i32, "\x80\x80\x80\x80\x08"));
try testing.expectError(error.Overflow, testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01"));
// Decode SLEB128
try testing.expect((try testLeb128(i64, "\x00")) == 0);
try testing.expect((try testLeb128(i64, "\x01")) == 1);
try testing.expect((try testLeb128(i64, "\x3f")) == 63);
try testing.expect((try testLeb128(i64, "\x40")) == -64);
try testing.expect((try testLeb128(i64, "\x41")) == -63);
try testing.expect((try testLeb128(i64, "\x7f")) == -1);
try testing.expect((try testLeb128(i64, "\x80\x01")) == 128);
try testing.expect((try testLeb128(i64, "\x81\x01")) == 129);
try testing.expect((try testLeb128(i64, "\xff\x7e")) == -129);
try testing.expect((try testLeb128(i64, "\x80\x7f")) == -128);
try testing.expect((try testLeb128(i64, "\x81\x7f")) == -127);
try testing.expect((try testLeb128(i64, "\xc0\x00")) == 64);
try testing.expect((try testLeb128(i64, "\xc7\x9f\x7f")) == -12345);
try testing.expect((try testLeb128(i8, "\xff\x7f")) == -1);
try testing.expect((try testLeb128(i16, "\xff\xff\x7f")) == -1);
try testing.expect((try testLeb128(i32, "\xff\xff\xff\xff\x7f")) == -1);
try testing.expect((try testLeb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000);
try testing.expect((try testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000)))));
try testing.expect((try testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000);
try testing.expect((try testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000);
// Decode unnormalized SLEB128 with extra padding bytes.
try testing.expect((try testLeb128(i64, "\x80\x00")) == 0);
try testing.expect((try testLeb128(i64, "\x80\x80\x00")) == 0);
try testing.expect((try testLeb128(i64, "\xff\x00")) == 0x7f);
try testing.expect((try testLeb128(i64, "\xff\x80\x00")) == 0x7f);
try testing.expect((try testLeb128(i64, "\x80\x81\x00")) == 0x80);
try testing.expect((try testLeb128(i64, "\x80\x81\x80\x00")) == 0x80);
}
test "deserialize unsigned LEB128" {
// Truncated
try testing.expectError(error.EndOfStream, testLeb128(u64, "\x80"));
try testing.expectError(error.EndOfStream, testLeb128(u16, "\x80\x80\x84"));
try testing.expectError(error.EndOfStream, testLeb128(u32, "\x80\x80\x80\x80\x90"));
// Overflow
try testing.expectError(error.Overflow, testLeb128(u8, "\x80\x02"));
try testing.expectError(error.Overflow, testLeb128(u8, "\x80\x80\x40"));
try testing.expectError(error.Overflow, testLeb128(u16, "\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, testLeb128(u32, "\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, testLeb128(u64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40"));
// Decode ULEB128
try testing.expect((try testLeb128(u64, "\x00")) == 0);
try testing.expect((try testLeb128(u64, "\x01")) == 1);
try testing.expect((try testLeb128(u64, "\x3f")) == 63);
try testing.expect((try testLeb128(u64, "\x40")) == 64);
try testing.expect((try testLeb128(u64, "\x7f")) == 0x7f);
try testing.expect((try testLeb128(u64, "\x80\x01")) == 0x80);
try testing.expect((try testLeb128(u64, "\x81\x01")) == 0x81);
try testing.expect((try testLeb128(u64, "\x90\x01")) == 0x90);
try testing.expect((try testLeb128(u64, "\xff\x01")) == 0xff);
try testing.expect((try testLeb128(u64, "\x80\x02")) == 0x100);
try testing.expect((try testLeb128(u64, "\x81\x02")) == 0x101);
try testing.expect((try testLeb128(u64, "\x80\xc1\x80\x80\x10")) == 4294975616);
try testing.expect((try testLeb128(u64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01")) == 0x8000000000000000);
// Decode ULEB128 with extra padding bytes
try testing.expect((try testLeb128(u64, "\x80\x00")) == 0);
try testing.expect((try testLeb128(u64, "\x80\x80\x00")) == 0);
try testing.expect((try testLeb128(u64, "\xff\x00")) == 0x7f);
try testing.expect((try testLeb128(u64, "\xff\x80\x00")) == 0x7f);
try testing.expect((try testLeb128(u64, "\x80\x81\x00")) == 0x80);
try testing.expect((try testLeb128(u64, "\x80\x81\x80\x00")) == 0x80);
}
fn testLeb128(comptime T: type, encoded: []const u8) !T {
var reader: std.Io.Reader = .fixed(encoded);
const result = try reader.takeLeb128(T);
try testing.expect(reader.seek == reader.end);
return result;
}
test {
_ = Limited;
}

View file

@ -1,9 +1,9 @@
const Limited = @This();
const std = @import("../../std.zig");
const Reader = std.io.Reader;
const Writer = std.io.Writer;
const Limit = std.io.Limit;
const Reader = std.Io.Reader;
const Writer = std.Io.Writer;
const Limit = std.Io.Limit;
unlimited: *Reader,
remaining: Limit,

View file

@ -2531,13 +2531,14 @@ pub fn Hashing(comptime Hasher: type) type {
/// Maintains `Writer` state such that it writes to the unused capacity of an
/// array list, filling it up completely before making a call through the
/// vtable, causing a resize. Consequently, the same, optimized, non-generic
/// machine code that uses `std.Io.Reader`, such as formatted printing, takes
/// machine code that uses `Writer`, such as formatted printing, takes
/// the hot paths when using this API.
///
/// When using this API, it is not necessary to call `flush`.
pub const Allocating = struct {
allocator: Allocator,
writer: Writer,
alignment: std.mem.Alignment,
pub fn init(allocator: Allocator) Allocating {
return .{
@ -2546,6 +2547,7 @@ pub const Allocating = struct {
.buffer = &.{},
.vtable = &vtable,
},
.alignment = .of(u8),
};
}
@ -2553,24 +2555,47 @@ pub const Allocating = struct {
return .{
.allocator = allocator,
.writer = .{
.buffer = try allocator.alloc(u8, capacity),
.buffer = if (capacity == 0)
&.{}
else
(allocator.rawAlloc(capacity, .of(u8), @returnAddress()) orelse
return error.OutOfMemory)[0..capacity],
.vtable = &vtable,
},
.alignment = .of(u8),
};
}
pub fn initOwnedSlice(allocator: Allocator, slice: []u8) Allocating {
return initOwnedSliceAligned(allocator, .of(u8), slice);
}
pub fn initOwnedSliceAligned(
allocator: Allocator,
comptime alignment: std.mem.Alignment,
slice: []align(alignment.toByteUnits()) u8,
) Allocating {
return .{
.allocator = allocator,
.writer = .{
.buffer = slice,
.vtable = &vtable,
},
.alignment = alignment,
};
}
/// Replaces `array_list` with empty, taking ownership of the memory.
pub fn fromArrayList(allocator: Allocator, array_list: *ArrayList(u8)) Allocating {
return fromArrayListAligned(allocator, .of(u8), array_list);
}
/// Replaces `array_list` with empty, taking ownership of the memory.
pub fn fromArrayListAligned(
allocator: Allocator,
comptime alignment: std.mem.Alignment,
array_list: *std.array_list.Aligned(u8, alignment),
) Allocating {
defer array_list.* = .empty;
return .{
.allocator = allocator,
@ -2579,6 +2604,7 @@ pub const Allocating = struct {
.buffer = array_list.allocatedSlice(),
.end = array_list.items.len,
},
.alignment = alignment,
};
}
@ -2590,16 +2616,27 @@ pub const Allocating = struct {
};
pub fn deinit(a: *Allocating) void {
a.allocator.free(a.writer.buffer);
if (a.writer.buffer.len == 0) return;
a.allocator.rawFree(a.writer.buffer, a.alignment, @returnAddress());
a.* = undefined;
}
/// Returns an array list that takes ownership of the allocated memory.
/// Resets the `Allocating` to an empty state.
pub fn toArrayList(a: *Allocating) ArrayList(u8) {
return toArrayListAligned(a, .of(u8));
}
/// Returns an array list that takes ownership of the allocated memory.
/// Resets the `Allocating` to an empty state.
pub fn toArrayListAligned(
a: *Allocating,
comptime alignment: std.mem.Alignment,
) std.array_list.Aligned(u8, alignment) {
assert(a.alignment == alignment); // Required for Allocator correctness.
const w = &a.writer;
const result: ArrayList(u8) = .{
.items = w.buffer[0..w.end],
const result: std.array_list.Aligned(u8, alignment) = .{
.items = @alignCast(w.buffer[0..w.end]),
.capacity = w.buffer.len,
};
w.buffer = &.{};
@ -2608,28 +2645,74 @@ pub const Allocating = struct {
}
pub fn ensureUnusedCapacity(a: *Allocating, additional_count: usize) Allocator.Error!void {
var list = a.toArrayList();
defer a.setArrayList(list);
return list.ensureUnusedCapacity(a.allocator, additional_count);
const new_capacity = std.math.add(usize, a.writer.end, additional_count) catch return error.OutOfMemory;
return ensureTotalCapacity(a, new_capacity);
}
pub fn ensureTotalCapacity(a: *Allocating, new_capacity: usize) Allocator.Error!void {
var list = a.toArrayList();
defer a.setArrayList(list);
return list.ensureTotalCapacity(a.allocator, new_capacity);
// Prevents growing unnecessarily, since better_capacity will be larger.
if (a.writer.buffer.len >= new_capacity) return;
const better_capacity = ArrayList(u8).growCapacity(a.writer.buffer.len, new_capacity);
return ensureTotalCapacityPrecise(a, better_capacity);
}
pub fn toOwnedSlice(a: *Allocating) error{OutOfMemory}![]u8 {
var list = a.toArrayList();
defer a.setArrayList(list);
return list.toOwnedSlice(a.allocator);
pub fn ensureTotalCapacityPrecise(a: *Allocating, new_capacity: usize) Allocator.Error!void {
const old_memory = a.writer.buffer;
if (old_memory.len >= new_capacity) return;
assert(new_capacity != 0);
const alignment = a.alignment;
if (old_memory.len > 0) {
if (a.allocator.rawRemap(old_memory, alignment, new_capacity, @returnAddress())) |new| {
a.writer.buffer = new[0..new_capacity];
return;
}
}
const new_memory = (a.allocator.rawAlloc(new_capacity, alignment, @returnAddress()) orelse
return error.OutOfMemory)[0..new_capacity];
const saved = old_memory[0..a.writer.end];
@memcpy(new_memory[0..saved.len], saved);
if (old_memory.len != 0) a.allocator.rawFree(old_memory, alignment, @returnAddress());
a.writer.buffer = new_memory;
}
pub fn toOwnedSliceSentinel(a: *Allocating, comptime sentinel: u8) error{OutOfMemory}![:sentinel]u8 {
const gpa = a.allocator;
var list = @This().toArrayList(a);
defer a.setArrayList(list);
return list.toOwnedSliceSentinel(gpa, sentinel);
pub fn toOwnedSlice(a: *Allocating) Allocator.Error![]u8 {
const old_memory = a.writer.buffer;
const alignment = a.alignment;
const buffered_len = a.writer.end;
if (old_memory.len > 0) {
if (buffered_len == 0) {
a.allocator.rawFree(old_memory, alignment, @returnAddress());
a.writer.buffer = &.{};
a.writer.end = 0;
return old_memory[0..0];
} else if (a.allocator.rawRemap(old_memory, alignment, buffered_len, @returnAddress())) |new| {
a.writer.buffer = &.{};
a.writer.end = 0;
return new[0..buffered_len];
}
}
if (buffered_len == 0)
return a.writer.buffer[0..0];
const new_memory = (a.allocator.rawAlloc(buffered_len, alignment, @returnAddress()) orelse
return error.OutOfMemory)[0..buffered_len];
@memcpy(new_memory, old_memory[0..buffered_len]);
if (old_memory.len != 0) a.allocator.rawFree(old_memory, alignment, @returnAddress());
a.writer.buffer = &.{};
a.writer.end = 0;
return new_memory;
}
pub fn toOwnedSliceSentinel(a: *Allocating, comptime sentinel: u8) Allocator.Error![:sentinel]u8 {
// This addition can never overflow because `a.writer.buffer` can never occupy the whole address space.
try ensureTotalCapacityPrecise(a, a.writer.end + 1);
a.writer.buffer[a.writer.end] = sentinel;
a.writer.end += 1;
errdefer a.writer.end -= 1;
const result = try toOwnedSlice(a);
return result[0 .. result.len - 1 :sentinel];
}
pub fn written(a: *Allocating) []u8 {
@ -2646,57 +2729,50 @@ pub const Allocating = struct {
fn drain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
const a: *Allocating = @fieldParentPtr("writer", w);
const gpa = a.allocator;
const pattern = data[data.len - 1];
const splat_len = pattern.len * splat;
var list = a.toArrayList();
defer setArrayList(a, list);
const start_len = list.items.len;
const start_len = a.writer.end;
assert(data.len != 0);
for (data) |bytes| {
list.ensureUnusedCapacity(gpa, bytes.len + splat_len + 1) catch return error.WriteFailed;
list.appendSliceAssumeCapacity(bytes);
a.ensureUnusedCapacity(bytes.len + splat_len + 1) catch return error.WriteFailed;
@memcpy(a.writer.buffer[a.writer.end..][0..bytes.len], bytes);
a.writer.end += bytes.len;
}
if (splat == 0) {
list.items.len -= pattern.len;
a.writer.end -= pattern.len;
} else switch (pattern.len) {
0 => {},
1 => list.appendNTimesAssumeCapacity(pattern[0], splat - 1),
else => for (0..splat - 1) |_| list.appendSliceAssumeCapacity(pattern),
1 => {
@memset(a.writer.buffer[a.writer.end..][0 .. splat - 1], pattern[0]);
a.writer.end += splat - 1;
},
else => for (0..splat - 1) |_| {
@memcpy(a.writer.buffer[a.writer.end..][0..pattern.len], pattern);
a.writer.end += pattern.len;
},
}
return list.items.len - start_len;
return a.writer.end - start_len;
}
fn sendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
if (File.Handle == void) return error.Unimplemented;
if (limit == .nothing) return 0;
const a: *Allocating = @fieldParentPtr("writer", w);
const gpa = a.allocator;
var list = a.toArrayList();
defer setArrayList(a, list);
const pos = file_reader.logicalPos();
const additional = if (file_reader.getSize()) |size| size - pos else |_| std.atomic.cache_line;
if (additional == 0) return error.EndOfStream;
list.ensureUnusedCapacity(gpa, limit.minInt64(additional)) catch return error.WriteFailed;
const dest = limit.slice(list.unusedCapacitySlice());
a.ensureUnusedCapacity(limit.minInt64(additional)) catch return error.WriteFailed;
const dest = limit.slice(a.writer.buffer[a.writer.end..]);
const n = try file_reader.read(dest);
list.items.len += n;
a.writer.end += n;
return n;
}
fn growingRebase(w: *Writer, preserve: usize, minimum_len: usize) Error!void {
const a: *Allocating = @fieldParentPtr("writer", w);
const gpa = a.allocator;
var list = a.toArrayList();
defer setArrayList(a, list);
const total = std.math.add(usize, preserve, minimum_len) catch return error.WriteFailed;
list.ensureTotalCapacity(gpa, total) catch return error.WriteFailed;
list.ensureUnusedCapacity(gpa, minimum_len) catch return error.WriteFailed;
}
fn setArrayList(a: *Allocating, list: ArrayList(u8)) void {
a.writer.buffer = list.allocatedSlice();
a.writer.end = list.items.len;
a.ensureTotalCapacity(total) catch return error.WriteFailed;
a.ensureUnusedCapacity(minimum_len) catch return error.WriteFailed;
}
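`Allocating` now manages its buffer with raw allocator calls (tracking the buffer's alignment in a new field) instead of round-tripping through an `ArrayList` on every drain. From the caller's side the API is unchanged; a minimal usage sketch:

    const std = @import("std");

    test "Allocating collects formatted output" {
        var aw: std.Io.Writer.Allocating = .init(std.testing.allocator);
        defer aw.deinit();
        try aw.writer.print("{d} + {d} = {d}", .{ 2, 2, 4 });
        try std.testing.expectEqualStrings("2 + 2 = 4", aw.written());
    }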
test Allocating {

View file

@ -1,114 +0,0 @@
const std = @import("../std.zig");
const io = std.io;
const testing = std.testing;
const mem = std.mem;
const assert = std.debug.assert;
/// Deprecated in favor of `std.Io.Reader.fixed` and `std.Io.Writer.fixed`.
pub fn FixedBufferStream(comptime Buffer: type) type {
return struct {
/// `Buffer` is either a `[]u8` or `[]const u8`.
buffer: Buffer,
pos: usize,
pub const ReadError = error{};
pub const WriteError = error{NoSpaceLeft};
pub const SeekError = error{};
pub const GetSeekPosError = error{};
pub const Reader = io.GenericReader(*Self, ReadError, read);
const Self = @This();
pub fn reader(self: *Self) Reader {
return .{ .context = self };
}
pub fn read(self: *Self, dest: []u8) ReadError!usize {
const size = @min(dest.len, self.buffer.len - self.pos);
const end = self.pos + size;
@memcpy(dest[0..size], self.buffer[self.pos..end]);
self.pos = end;
return size;
}
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
self.pos = @min(std.math.lossyCast(usize, pos), self.buffer.len);
}
pub fn seekBy(self: *Self, amt: i64) SeekError!void {
if (amt < 0) {
const abs_amt = @abs(amt);
const abs_amt_usize = std.math.cast(usize, abs_amt) orelse std.math.maxInt(usize);
if (abs_amt_usize > self.pos) {
self.pos = 0;
} else {
self.pos -= abs_amt_usize;
}
} else {
const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize);
const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize);
self.pos = @min(self.buffer.len, new_pos);
}
}
pub fn getEndPos(self: *Self) GetSeekPosError!u64 {
return self.buffer.len;
}
pub fn getPos(self: *Self) GetSeekPosError!u64 {
return self.pos;
}
pub fn reset(self: *Self) void {
self.pos = 0;
}
};
}
pub fn fixedBufferStream(buffer: anytype) FixedBufferStream(Slice(@TypeOf(buffer))) {
return .{ .buffer = buffer, .pos = 0 };
}
fn Slice(comptime T: type) type {
switch (@typeInfo(T)) {
.pointer => |ptr_info| {
var new_ptr_info = ptr_info;
switch (ptr_info.size) {
.slice => {},
.one => switch (@typeInfo(ptr_info.child)) {
.array => |info| new_ptr_info.child = info.child,
else => @compileError("invalid type given to fixedBufferStream"),
},
else => @compileError("invalid type given to fixedBufferStream"),
}
new_ptr_info.size = .slice;
return @Type(.{ .pointer = new_ptr_info });
},
else => @compileError("invalid type given to fixedBufferStream"),
}
}
test "input" {
const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7 };
var fbs = fixedBufferStream(&bytes);
var dest: [4]u8 = undefined;
var read = try fbs.reader().read(&dest);
try testing.expect(read == 4);
try testing.expect(mem.eql(u8, dest[0..4], bytes[0..4]));
read = try fbs.reader().read(&dest);
try testing.expect(read == 3);
try testing.expect(mem.eql(u8, dest[0..3], bytes[4..7]));
read = try fbs.reader().read(&dest);
try testing.expect(read == 0);
try fbs.seekTo((try fbs.getEndPos()) + 1);
read = try fbs.reader().read(&dest);
try testing.expect(read == 0);
}
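The deleted test above ports directly to `std.Io.Reader.fixed`, with `readSliceShort` playing the role of the old short-read `read`. A sketch of the equivalent test under the new API:

    const std = @import("std");

    test "fixed reader replaces fixedBufferStream input" {
        const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7 };
        var r: std.Io.Reader = .fixed(&bytes);
        var dest: [4]u8 = undefined;
        // Short reads signal the end of the fixed buffer instead of erroring.
        try std.testing.expectEqual(@as(usize, 4), try r.readSliceShort(&dest));
        try std.testing.expectEqual(@as(usize, 3), try r.readSliceShort(&dest));
        try std.testing.expectEqual(@as(usize, 0), try r.readSliceShort(&dest));
    }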

View file

@ -1,5 +1,4 @@
const std = @import("std");
const io = std.io;
const DefaultPrng = std.Random.DefaultPrng;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
@ -122,24 +121,3 @@ test "updateTimes" {
try expect(stat_new.atime < stat_old.atime);
try expect(stat_new.mtime < stat_old.mtime);
}
test "GenericReader methods can return error.EndOfStream" {
// https://github.com/ziglang/zig/issues/17733
var fbs = std.io.fixedBufferStream("");
try std.testing.expectError(
error.EndOfStream,
fbs.reader().readEnum(enum(u8) { a, b }, .little),
);
try std.testing.expectError(
error.EndOfStream,
fbs.reader().isBytes("foo"),
);
}
test "Adapted DeprecatedReader EndOfStream" {
var fbs: io.FixedBufferStream([]const u8) = .{ .buffer = &.{}, .pos = 0 };
const reader = fbs.reader();
var buf: [1]u8 = undefined;
var adapted = reader.adaptToNewApi(&buf);
try std.testing.expectError(error.EndOfStream, adapted.new_interface.takeByte());
}

View file

@ -76,9 +76,9 @@ pub const Config = union(enum) {
reset_attributes: u16,
};
pub const SetColorError = std.os.windows.SetConsoleTextAttributeError || std.io.Writer.Error;
pub const SetColorError = std.os.windows.SetConsoleTextAttributeError || std.Io.Writer.Error;
pub fn setColor(conf: Config, w: *std.io.Writer, color: Color) SetColorError!void {
pub fn setColor(conf: Config, w: *std.Io.Writer, color: Color) SetColorError!void {
nosuspend switch (conf) {
.no_color => return,
.escape_codes => {

View file
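`tty.Config.setColor` now takes the concrete `*std.Io.Writer` rather than a generic writer. A hedged sketch of a caller (the function name is illustrative):

    const std = @import("std");

    fn printError(w: *std.Io.Writer, conf: std.Io.tty.Config, msg: []const u8) !void {
        try conf.setColor(w, .red);
        try w.writeAll(msg);
        try conf.setColor(w, .reset);
    }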

@ -9,7 +9,7 @@ const Progress = @This();
const posix = std.posix;
const is_big_endian = builtin.cpu.arch.endian() == .big;
const is_windows = builtin.os.tag == .windows;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
/// `null` if the current node (and its children) should
/// not print on update()

View file

@ -150,7 +150,7 @@ fn parseNum(text: []const u8) error{ InvalidVersion, Overflow }!usize {
};
}
pub fn format(self: Version, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: Version, w: *std.Io.Writer) std.Io.Writer.Error!void {
try w.print("{d}.{d}.{d}", .{ self.major, self.minor, self.patch });
if (self.pre) |pre| try w.print("-{s}", .{pre});
if (self.build) |build| try w.print("+{s}", .{build});

View file

@ -311,7 +311,7 @@ pub const Os = struct {
/// This function is defined to serialize a Zig source code representation of this
/// type, that, when parsed, will deserialize into the same data.
pub fn format(wv: WindowsVersion, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(wv: WindowsVersion, w: *std.Io.Writer) std.Io.Writer.Error!void {
if (std.enums.tagName(WindowsVersion, wv)) |name| {
var vecs: [2][]const u8 = .{ ".", name };
return w.writeVecAll(&vecs);

View file

@ -280,8 +280,10 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
const file = try std.fs.cwd().openFile(path, .{});
defer file.close();
const data_len = try file.deprecatedReader().readAll(buffer_ptr[0 .. max_name_len + 1]);
var file_reader = file.readerStreaming(&.{});
const data_len = file_reader.interface.readSliceShort(buffer_ptr[0 .. max_name_len + 1]) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
};
return if (data_len >= 1) buffer[0 .. data_len - 1] else null;
},
.windows => {

View file
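`File.deprecatedReader()` gives way to `readerStreaming`, whose `interface` field is a `std.Io.Reader`; an `error.ReadFailed` is resolved through the file reader's `err` field, as the hunk above does. A hedged sketch of the pattern (the function name and the Linux-specific path are illustrative):

    const std = @import("std");

    fn readComm(buffer: []u8) ![]u8 {
        const file = try std.fs.cwd().openFile("/proc/self/comm", .{});
        defer file.close();
        var file_reader = file.readerStreaming(&.{});
        // readSliceShort reports how many bytes were actually available.
        const n = file_reader.interface.readSliceShort(buffer) catch |err| switch (err) {
            error.ReadFailed => return file_reader.err.?,
        };
        return buffer[0..n];
    }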

@ -405,6 +405,7 @@ pub fn AlignedManaged(comptime T: type, comptime alignment: ?mem.Alignment) type
return;
}
// Prevents growing unnecessarily, since better_capacity will be larger.
if (self.capacity >= new_capacity) return;
const better_capacity = Aligned(T, alignment).growCapacity(self.capacity, new_capacity);
@ -664,9 +665,10 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSliceSentinel(self: *Self, gpa: Allocator, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) {
// This addition can never overflow because `self.items` can never occupy the whole address space
// This addition can never overflow because `self.items` can never occupy the whole address space.
try self.ensureTotalCapacityPrecise(gpa, self.items.len + 1);
self.appendAssumeCapacity(sentinel);
errdefer self.items.len -= 1;
const result = try self.toOwnedSlice(gpa);
return result[0 .. result.len - 1 :sentinel];
}
@ -1038,14 +1040,14 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
pub fn printAssumeCapacity(self: *Self, comptime fmt: []const u8, args: anytype) void {
comptime assert(T == u8);
var w: std.io.Writer = .fixed(self.unusedCapacitySlice());
var w: std.Io.Writer = .fixed(self.unusedCapacitySlice());
w.print(fmt, args) catch unreachable;
self.items.len += w.end;
}
pub fn printBounded(self: *Self, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
comptime assert(T == u8);
var w: std.io.Writer = .fixed(self.unusedCapacitySlice());
var w: std.Io.Writer = .fixed(self.unusedCapacitySlice());
w.print(fmt, args) catch return error.OutOfMemory;
self.items.len += w.end;
}
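`printBounded` formats into the list's unused capacity through a fixed `Writer` and fails cleanly when the result would not fit. A small usage sketch:

    const std = @import("std");

    test "printBounded writes into reserved capacity" {
        const gpa = std.testing.allocator;
        var list: std.ArrayList(u8) = .empty;
        defer list.deinit(gpa);
        try list.ensureTotalCapacity(gpa, 16);
        // Fits within the reserved capacity, so no allocation happens here.
        try list.printBounded("{s}-{d}", .{ "id", 7 });
        try std.testing.expectEqualStrings("id-7", list.items);
    }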
@ -1361,7 +1363,7 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
/// Called when memory growth is necessary. Returns a capacity larger than
/// minimum that grows super-linearly.
fn growCapacity(current: usize, minimum: usize) usize {
pub fn growCapacity(current: usize, minimum: usize) usize {
var new = current;
while (true) {
new +|= new / 2 + init_capacity;

View file

@ -444,7 +444,7 @@ pub const HexEscape = struct {
pub const upper_charset = "0123456789ABCDEF";
pub const lower_charset = "0123456789abcdef";
pub fn format(se: HexEscape, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(se: HexEscape, w: *std.Io.Writer) std.Io.Writer.Error!void {
const charset = se.charset;
var buf: [4]u8 = undefined;

View file

@ -38,7 +38,7 @@ pub const StackTrace = struct {
index: usize,
instruction_addresses: []usize,
pub fn format(self: StackTrace, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: StackTrace, writer: *std.Io.Writer) std.Io.Writer.Error!void {
// TODO: re-evaluate whether to use format() methods at all.
// Until then, avoid an error when using GeneralPurposeAllocator with WebAssembly
// where it tries to call detectTTYConfig here.
@ -47,7 +47,7 @@ pub const StackTrace = struct {
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
return writer.print("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{@errorName(err)});
};
const tty_config = std.io.tty.detectConfig(std.fs.File.stderr());
const tty_config = std.Io.tty.detectConfig(std.fs.File.stderr());
try writer.writeAll("\n");
std.debug.writeStackTrace(self, writer, debug_info, tty_config) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});

View file

@ -1087,14 +1087,11 @@ pub const Coff = struct {
const pe_pointer_offset = 0x3C;
const pe_magic = "PE\x00\x00";
var stream = std.io.fixedBufferStream(data);
const reader = stream.reader();
try stream.seekTo(pe_pointer_offset);
const coff_header_offset = try reader.readInt(u32, .little);
try stream.seekTo(coff_header_offset);
var buf: [4]u8 = undefined;
try reader.readNoEof(&buf);
const is_image = mem.eql(u8, pe_magic, &buf);
var reader: std.Io.Reader = .fixed(data);
reader.seek = pe_pointer_offset;
const coff_header_offset = try reader.takeInt(u32, .little);
reader.seek = coff_header_offset;
const is_image = mem.eql(u8, pe_magic, try reader.takeArray(4));
var coff = @This(){
.data = data,
@ -1123,16 +1120,15 @@ pub const Coff = struct {
if (@intFromEnum(DirectoryEntry.DEBUG) >= data_dirs.len) return null;
const debug_dir = data_dirs[@intFromEnum(DirectoryEntry.DEBUG)];
var stream = std.io.fixedBufferStream(self.data);
const reader = stream.reader();
var reader: std.Io.Reader = .fixed(self.data);
if (self.is_loaded) {
try stream.seekTo(debug_dir.virtual_address);
reader.seek = debug_dir.virtual_address;
} else {
// Find what section the debug_dir is in, in order to convert the RVA to a file offset
for (self.getSectionHeaders()) |*sect| {
if (debug_dir.virtual_address >= sect.virtual_address and debug_dir.virtual_address < sect.virtual_address + sect.virtual_size) {
try stream.seekTo(sect.pointer_to_raw_data + (debug_dir.virtual_address - sect.virtual_address));
reader.seek = sect.pointer_to_raw_data + (debug_dir.virtual_address - sect.virtual_address);
break;
}
} else return error.InvalidDebugDirectory;
@ -1143,24 +1139,23 @@ pub const Coff = struct {
const debug_dir_entry_count = debug_dir.size / @sizeOf(DebugDirectoryEntry);
var i: u32 = 0;
while (i < debug_dir_entry_count) : (i += 1) {
const debug_dir_entry = try reader.readStruct(DebugDirectoryEntry);
const debug_dir_entry = try reader.takeStruct(DebugDirectoryEntry, .little);
if (debug_dir_entry.type == .CODEVIEW) {
const dir_offset = if (self.is_loaded) debug_dir_entry.address_of_raw_data else debug_dir_entry.pointer_to_raw_data;
try stream.seekTo(dir_offset);
reader.seek = dir_offset;
break;
}
} else return null;
var cv_signature: [4]u8 = undefined; // CodeView signature
try reader.readNoEof(cv_signature[0..]);
const code_view_signature = try reader.takeArray(4);
// 'RSDS' indicates PDB70 format, used by lld.
if (!mem.eql(u8, &cv_signature, "RSDS"))
if (!mem.eql(u8, code_view_signature, "RSDS"))
return error.InvalidPEMagic;
try reader.readNoEof(self.guid[0..]);
self.age = try reader.readInt(u32, .little);
try reader.readSliceAll(self.guid[0..]);
self.age = try reader.takeInt(u32, .little);
// Finally read the null-terminated string.
const start = reader.context.pos;
const start = reader.seek;
const len = std.mem.indexOfScalar(u8, self.data[start..], 0) orelse return null;
return self.data[start .. start + len];
}
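The COFF parser now seeks by assigning `reader.seek` directly, which is valid because a `.fixed` reader is fully buffered. A condensed sketch of the same random-access pattern:

    const std = @import("std");

    test "random access in a fixed reader" {
        const data = "\xaa\xbb\x2a\x00\x00\x00";
        var r: std.Io.Reader = .fixed(data);
        r.seek = 2; // jump straight to the little-endian u32 payload
        try std.testing.expectEqual(@as(u32, 42), try r.takeInt(u32, .little));
    }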

View file

@ -1,9 +1,8 @@
//! Accepts list of tokens, decides what is best block type to write. What block
//! type will provide best compression. Writes header and body of the block.
const std = @import("std");
const io = std.io;
const assert = std.debug.assert;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const BlockWriter = @This();
const flate = @import("../flate.zig");

View file

@ -1,10 +1,10 @@
const Decompress = @This();
const std = @import("std");
const assert = std.debug.assert;
const Reader = std.io.Reader;
const Limit = std.io.Limit;
const Reader = std.Io.Reader;
const Limit = std.Io.Limit;
const zstd = @import("../zstd.zig");
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
input: *Reader,
reader: Reader,

View file

@ -11,42 +11,50 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
cb.bytes.clearRetainingCapacity();
cb.map.clearRetainingCapacity();
const keychainPaths = [2][]const u8{
const keychain_paths = [2][]const u8{
"/System/Library/Keychains/SystemRootCertificates.keychain",
"/Library/Keychains/System.keychain",
};
for (keychainPaths) |keychainPath| {
const file = try fs.openFileAbsolute(keychainPath, .{});
defer file.close();
const bytes = try file.readToEndAlloc(gpa, std.math.maxInt(u32));
for (keychain_paths) |keychain_path| {
const bytes = std.fs.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) {
error.StreamTooLong => return error.FileTooBig,
else => |e| return e,
};
defer gpa.free(bytes);
var stream = std.io.fixedBufferStream(bytes);
const reader = stream.reader();
var reader: std.Io.Reader = .fixed(bytes);
scanReader(cb, gpa, &reader) catch |err| switch (err) {
error.ReadFailed => unreachable, // prebuffered
else => |e| return e,
};
}
const db_header = try reader.readStructEndian(ApplDbHeader, .big);
cb.bytes.shrinkAndFree(gpa, cb.bytes.items.len);
}
fn scanReader(cb: *Bundle, gpa: Allocator, reader: *std.Io.Reader) !void {
const db_header = try reader.takeStruct(ApplDbHeader, .big);
assert(mem.eql(u8, &db_header.signature, "kych"));
try stream.seekTo(db_header.schema_offset);
reader.seek = db_header.schema_offset;
const db_schema = try reader.readStructEndian(ApplDbSchema, .big);
const db_schema = try reader.takeStruct(ApplDbSchema, .big);
var table_list = try gpa.alloc(u32, db_schema.table_count);
defer gpa.free(table_list);
var table_idx: u32 = 0;
while (table_idx < table_list.len) : (table_idx += 1) {
table_list[table_idx] = try reader.readInt(u32, .big);
table_list[table_idx] = try reader.takeInt(u32, .big);
}
const now_sec = std.time.timestamp();
for (table_list) |table_offset| {
try stream.seekTo(db_header.schema_offset + table_offset);
reader.seek = db_header.schema_offset + table_offset;
const table_header = try reader.readStructEndian(TableHeader, .big);
const table_header = try reader.takeStruct(TableHeader, .big);
if (@as(std.c.DB_RECORDTYPE, @enumFromInt(table_header.table_id)) != .X509_CERTIFICATE) {
continue;
@ -57,7 +65,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
var record_idx: u32 = 0;
while (record_idx < record_list.len) : (record_idx += 1) {
record_list[record_idx] = try reader.readInt(u32, .big);
record_list[record_idx] = try reader.takeInt(u32, .big);
}
for (record_list) |record_offset| {
@ -65,22 +73,19 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
// An offset that is not 4-byte-aligned is invalid.
if (record_offset == 0 or record_offset % 4 != 0) continue;
try stream.seekTo(db_header.schema_offset + table_offset + record_offset);
reader.seek = db_header.schema_offset + table_offset + record_offset;
const cert_header = try reader.readStructEndian(X509CertHeader, .big);
const cert_header = try reader.takeStruct(X509CertHeader, .big);
if (cert_header.cert_size == 0) continue;
const cert_start = @as(u32, @intCast(cb.bytes.items.len));
const cert_start: u32 = @intCast(cb.bytes.items.len);
const dest_buf = try cb.bytes.addManyAsSlice(gpa, cert_header.cert_size);
try reader.readNoEof(dest_buf);
try reader.readSliceAll(dest_buf);
try cb.parseCert(gpa, cert_start, now_sec);
}
}
}
cb.bytes.shrinkAndFree(gpa, cb.bytes.items.len);
}
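`takeStruct` replaces `readStructEndian`, byte-swapping the fields when the requested endianness differs from the host. A minimal sketch:

    const std = @import("std");

    test "takeStruct decodes a big-endian header" {
        const Header = extern struct { magic: u32, count: u32 };
        const bytes = [_]u8{ 0, 0, 0, 1, 0, 0, 0, 2 };
        var r: std.Io.Reader = .fixed(&bytes);
        const h = try r.takeStruct(Header, .big);
        try std.testing.expectEqual(@as(u32, 1), h.magic);
        try std.testing.expectEqual(@as(u32, 2), h.count);
    }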
const ApplDbHeader = extern struct {

View file

@ -69,15 +69,15 @@ pub const Tag = struct {
return .{ .number = number, .constructed = constructed, .class = .universal };
}
pub fn decode(reader: anytype) !Tag {
const tag1: FirstTag = @bitCast(try reader.readByte());
pub fn decode(reader: *std.Io.Reader) !Tag {
const tag1: FirstTag = @bitCast(try reader.takeByte());
var number: u14 = tag1.number;
if (tag1.number == 15) {
const tag2: NextTag = @bitCast(try reader.readByte());
const tag2: NextTag = @bitCast(try reader.takeByte());
number = tag2.number;
if (tag2.continues) {
const tag3: NextTag = @bitCast(try reader.readByte());
const tag3: NextTag = @bitCast(try reader.takeByte());
number = (number << 7) + tag3.number;
if (tag3.continues) return error.InvalidLength;
}
@ -90,7 +90,7 @@ pub const Tag = struct {
};
}
pub fn encode(self: Tag, writer: anytype) @TypeOf(writer).Error!void {
pub fn encode(self: Tag, writer: *std.Io.Writer) @TypeOf(writer).Error!void {
var tag1 = FirstTag{
.number = undefined,
.constructed = self.constructed,
@ -98,8 +98,7 @@ pub const Tag = struct {
};
var buffer: [3]u8 = undefined;
var stream = std.io.fixedBufferStream(&buffer);
var writer2 = stream.writer();
var writer2: std.Io.Writer = .fixed(&buffer);
switch (@intFromEnum(self.number)) {
0...std.math.maxInt(u5) => |n| {
@ -122,7 +121,7 @@ pub const Tag = struct {
},
}
_ = try writer.write(stream.getWritten());
_ = try writer.write(writer2.buffered());
}
const FirstTag = packed struct(u8) { number: u5, constructed: bool, class: Tag.Class };
@ -161,8 +160,8 @@ pub const Tag = struct {
test Tag {
const buf = [_]u8{0xa3};
var stream = std.io.fixedBufferStream(&buf);
const t = Tag.decode(stream.reader());
var reader: std.Io.Reader = .fixed(&buf);
const t = Tag.decode(&reader);
try std.testing.expectEqual(Tag.init(@enumFromInt(3), true, .context_specific), t);
}
@ -191,11 +190,10 @@ pub const Element = struct {
/// - Ensures length is within `bytes`
/// - Ensures length is less than `std.math.maxInt(Index)`
pub fn decode(bytes: []const u8, index: Index) DecodeError!Element {
var stream = std.io.fixedBufferStream(bytes[index..]);
var reader = stream.reader();
var reader: std.Io.Reader = .fixed(bytes[index..]);
const tag = try Tag.decode(reader);
const size_or_len_size = try reader.readByte();
const tag = try Tag.decode(&reader);
const size_or_len_size = try reader.takeByte();
var start = index + 2;
var end = start + size_or_len_size;
@ -208,7 +206,7 @@ pub const Element = struct {
start += len_size;
if (len_size > @sizeOf(Index)) return error.InvalidLength;
const len = try reader.readVarInt(Index, .big, len_size);
const len = try reader.takeVarInt(Index, .big, len_size);
if (len < 128) return error.InvalidLength; // should have used short form
end = std.math.add(Index, start, len) catch return error.InvalidLength;

View file
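`takeVarInt` keeps the old `readVarInt` semantics used for the long-form DER length above: it reads `size` bytes and widens them into the requested integer type. Sketch:

    const std = @import("std");

    test "takeVarInt reads a 3-byte big-endian length" {
        const bytes = [_]u8{ 0x01, 0x02, 0x03 };
        var r: std.Io.Reader = .fixed(&bytes);
        try std.testing.expectEqual(@as(u32, 0x010203), try r.takeVarInt(u32, .big, 3));
    }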

@ -4,7 +4,7 @@
//! organizations, or policy documents.
encoded: []const u8,
pub const InitError = std.fmt.ParseIntError || error{MissingPrefix} || std.io.FixedBufferStream(u8).WriteError;
pub const InitError = std.fmt.ParseIntError || error{MissingPrefix} || std.Io.Writer.Error;
pub fn fromDot(dot_notation: []const u8, out: []u8) InitError!Oid {
var split = std.mem.splitScalar(u8, dot_notation, '.');
@ -14,8 +14,7 @@ pub fn fromDot(dot_notation: []const u8, out: []u8) InitError!Oid {
const first = try std.fmt.parseInt(u8, first_str, 10);
const second = try std.fmt.parseInt(u8, second_str, 10);
var stream = std.io.fixedBufferStream(out);
var writer = stream.writer();
var writer: std.Io.Writer = .fixed(out);
try writer.writeByte(first * 40 + second);
@ -37,7 +36,7 @@ pub fn fromDot(dot_notation: []const u8, out: []u8) InitError!Oid {
i += 1;
}
return .{ .encoded = stream.getWritten() };
return .{ .encoded = writer.buffered() };
}
test fromDot {
@ -80,9 +79,9 @@ test toDot {
var buf: [256]u8 = undefined;
for (test_cases) |t| {
var stream = std.io.fixedBufferStream(&buf);
try toDot(Oid{ .encoded = t.encoded }, stream.writer());
try std.testing.expectEqualStrings(t.dot_notation, stream.getWritten());
var stream: std.Io.Writer = .fixed(&buf);
try toDot(Oid{ .encoded = t.encoded }, &stream);
try std.testing.expectEqualStrings(t.dot_notation, stream.written());
}
}
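The OID helpers above show the fixed-`Writer` round trip: write into a caller-provided buffer, then return `buffered()` as the encoded slice. A standalone sketch of that shape:

    const std = @import("std");

    test "fixed Writer round trip" {
        var buf: [8]u8 = undefined;
        var w: std.Io.Writer = .fixed(&buf);
        try w.writeByte(1 * 40 + 2); // the first two OID arcs pack into a single byte
        try w.writeByte(3);
        try std.testing.expectEqualSlices(u8, &.{ 42, 3 }, w.buffered());
    }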

View file

@ -2,7 +2,6 @@ const builtin = @import("builtin");
const std = @import("std");
const crypto = std.crypto;
const fmt = std.fmt;
const io = std.io;
const mem = std.mem;
const sha3 = crypto.hash.sha3;
const testing = std.testing;
@ -135,8 +134,7 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
/// The maximum length of the DER encoding is der_encoded_length_max.
/// The function returns a slice, that can be shorter than der_encoded_length_max.
pub fn toDer(sig: Signature, buf: *[der_encoded_length_max]u8) []u8 {
var fb = io.fixedBufferStream(buf);
const w = fb.writer();
var w: std.Io.Writer = .fixed(buf);
const r_len = @as(u8, @intCast(sig.r.len + (sig.r[0] >> 7)));
const s_len = @as(u8, @intCast(sig.s.len + (sig.s[0] >> 7)));
const seq_len = @as(u8, @intCast(2 + r_len + 2 + s_len));
@ -151,24 +149,23 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
w.writeByte(0x00) catch unreachable;
}
w.writeAll(&sig.s) catch unreachable;
return fb.getWritten();
return w.buffered();
}
// Read a DER-encoded integer.
fn readDerInt(out: []u8, reader: anytype) EncodingError!void {
var buf: [2]u8 = undefined;
_ = reader.readNoEof(&buf) catch return error.InvalidEncoding;
fn readDerInt(out: []u8, reader: *std.Io.Reader) EncodingError!void {
const buf = reader.takeArray(2) catch return error.InvalidEncoding;
if (buf[0] != 0x02) return error.InvalidEncoding;
var expected_len = @as(usize, buf[1]);
var expected_len: usize = buf[1];
if (expected_len == 0 or expected_len > 1 + out.len) return error.InvalidEncoding;
var has_top_bit = false;
if (expected_len == 1 + out.len) {
if ((reader.readByte() catch return error.InvalidEncoding) != 0) return error.InvalidEncoding;
if ((reader.takeByte() catch return error.InvalidEncoding) != 0) return error.InvalidEncoding;
expected_len -= 1;
has_top_bit = true;
}
const out_slice = out[out.len - expected_len ..];
reader.readNoEof(out_slice) catch return error.InvalidEncoding;
reader.readSliceAll(out_slice) catch return error.InvalidEncoding;
if (@intFromBool(has_top_bit) != out[0] >> 7) return error.InvalidEncoding;
}
@ -176,16 +173,14 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
/// Returns InvalidEncoding if the DER encoding is invalid.
pub fn fromDer(der: []const u8) EncodingError!Signature {
var sig: Signature = mem.zeroInit(Signature, .{});
var fb = io.fixedBufferStream(der);
const reader = fb.reader();
var buf: [2]u8 = undefined;
_ = reader.readNoEof(&buf) catch return error.InvalidEncoding;
var reader: std.Io.Reader = .fixed(der);
const buf = reader.takeArray(2) catch return error.InvalidEncoding;
if (buf[0] != 0x30 or @as(usize, buf[1]) + 2 != der.len) {
return error.InvalidEncoding;
}
try readDerInt(&sig.r, reader);
try readDerInt(&sig.s, reader);
if (fb.getPos() catch unreachable != der.len) return error.InvalidEncoding;
try readDerInt(&sig.r, &reader);
try readDerInt(&sig.s, &reader);
if (reader.seek != der.len) return error.InvalidEncoding;
return sig;
}
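
The reader side mirrors this: a `std.Io.Reader` over a fixed slice replaces the `FixedBufferStream`, `takeArray`/`takeByte` replace `readNoEof`/`readByte`, and the final position check reads the `seek` field directly instead of calling `getPos()`. A hedged sketch of the same shape on a made-up length-prefixed format (nothing here is from the diff itself):

```zig
const std = @import("std");

// Illustrative only: a 2-byte big-endian length prefix followed by a payload.
fn readPrefixed(bytes: []const u8) ![]const u8 {
    var r: std.Io.Reader = .fixed(bytes);
    const len = try r.takeInt(u16, .big);
    const body = try r.take(len); // borrows from the fixed buffer, no copy
    if (r.seek != bytes.len) return error.TrailingBytes; // old: fbs.getPos() != bytes.len
    return body;
}
```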

View file

@ -2,7 +2,6 @@
const std = @import("std");
const fmt = std.fmt;
const io = std.io;
const mem = std.mem;
const meta = std.meta;
const Writer = std.Io.Writer;

View file

@ -5,7 +5,6 @@
const std = @import("std");
const crypto = std.crypto;
const fmt = std.fmt;
const io = std.io;
const math = std.math;
const mem = std.mem;
const meta = std.meta;

View file

@ -655,7 +655,7 @@ pub const Decoder = struct {
}
/// Use this function to increase `their_end`.
pub fn readAtLeast(d: *Decoder, stream: *std.io.Reader, their_amt: usize) !void {
pub fn readAtLeast(d: *Decoder, stream: *std.Io.Reader, their_amt: usize) !void {
assert(!d.disable_reads);
const existing_amt = d.cap - d.idx;
d.their_end = d.idx + their_amt;
@ -672,7 +672,7 @@ pub const Decoder = struct {
/// Same as `readAtLeast` but also increases `our_end` by exactly `our_amt`.
/// Use when `our_amt` is calculated by us, not by them.
pub fn readAtLeastOurAmt(d: *Decoder, stream: *std.io.Reader, our_amt: usize) !void {
pub fn readAtLeastOurAmt(d: *Decoder, stream: *std.Io.Reader, our_amt: usize) !void {
assert(!d.disable_reads);
try readAtLeast(d, stream, our_amt);
d.our_end = d.idx + our_amt;

View file

@ -2,7 +2,6 @@ const builtin = @import("builtin");
const std = @import("std.zig");
const math = std.math;
const mem = std.mem;
const io = std.io;
const posix = std.posix;
const fs = std.fs;
const testing = std.testing;
@ -12,7 +11,8 @@ const windows = std.os.windows;
const native_arch = builtin.cpu.arch;
const native_os = builtin.os.tag;
const native_endian = native_arch.endian();
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const tty = std.Io.tty;
pub const Dwarf = @import("debug/Dwarf.zig");
pub const Pdb = @import("debug/Pdb.zig");
@ -246,12 +246,12 @@ pub fn getSelfDebugInfo() !*SelfInfo {
pub fn dumpHex(bytes: []const u8) void {
const bw = lockStderrWriter(&.{});
defer unlockStderrWriter();
const ttyconf = std.io.tty.detectConfig(.stderr());
const ttyconf = tty.detectConfig(.stderr());
dumpHexFallible(bw, ttyconf, bytes) catch {};
}
/// Prints a hexadecimal view of the bytes, returning any error that occurs.
pub fn dumpHexFallible(bw: *Writer, ttyconf: std.io.tty.Config, bytes: []const u8) !void {
pub fn dumpHexFallible(bw: *Writer, ttyconf: tty.Config, bytes: []const u8) !void {
var chunks = mem.window(u8, bytes, 16, 16);
while (chunks.next()) |window| {
// 1. Print the address.
@ -302,7 +302,7 @@ pub fn dumpHexFallible(bw: *Writer, ttyconf: std.io.tty.Config, bytes: []const u
test dumpHexFallible {
const bytes: []const u8 = &.{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x01, 0x12, 0x13 };
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
var aw: Writer.Allocating = .init(std.testing.allocator);
defer aw.deinit();
try dumpHexFallible(&aw.writer, .no_color, bytes);
@ -342,7 +342,7 @@ pub fn dumpCurrentStackTraceToWriter(start_addr: ?usize, writer: *Writer) !void
try writer.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)});
return;
};
writeCurrentStackTrace(writer, debug_info, io.tty.detectConfig(.stderr()), start_addr) catch |err| {
writeCurrentStackTrace(writer, debug_info, tty.detectConfig(.stderr()), start_addr) catch |err| {
try writer.print("Unable to dump stack trace: {s}\n", .{@errorName(err)});
return;
};
@ -427,7 +427,7 @@ pub fn dumpStackTraceFromBase(context: *ThreadContext, stderr: *Writer) void {
stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
return;
};
const tty_config = io.tty.detectConfig(.stderr());
const tty_config = tty.detectConfig(.stderr());
if (native_os == .windows) {
// On x86_64 and aarch64, the stack will be unwound using RtlVirtualUnwind using the context
// provided by the exception handler. On x86, RtlVirtualUnwind doesn't exist. Instead, a new backtrace
@ -533,7 +533,7 @@ pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void {
stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return;
return;
};
writeStackTrace(stack_trace, stderr, debug_info, io.tty.detectConfig(.stderr())) catch |err| {
writeStackTrace(stack_trace, stderr, debug_info, tty.detectConfig(.stderr())) catch |err| {
stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return;
return;
};
@ -738,7 +738,7 @@ pub fn writeStackTrace(
stack_trace: std.builtin.StackTrace,
writer: *Writer,
debug_info: *SelfInfo,
tty_config: io.tty.Config,
tty_config: tty.Config,
) !void {
if (builtin.strip_debug_info) return error.MissingDebugInfo;
var frame_index: usize = 0;
@ -959,7 +959,7 @@ pub const StackIterator = struct {
pub fn writeCurrentStackTrace(
writer: *Writer,
debug_info: *SelfInfo,
tty_config: io.tty.Config,
tty_config: tty.Config,
start_addr: ?usize,
) !void {
if (native_os == .windows) {
@ -1047,7 +1047,7 @@ pub noinline fn walkStackWindows(addresses: []usize, existing_context: ?*const w
pub fn writeStackTraceWindows(
writer: *Writer,
debug_info: *SelfInfo,
tty_config: io.tty.Config,
tty_config: tty.Config,
context: *const windows.CONTEXT,
start_addr: ?usize,
) !void {
@ -1065,7 +1065,7 @@ pub fn writeStackTraceWindows(
}
}
fn printUnknownSource(debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: io.tty.Config) !void {
fn printUnknownSource(debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: tty.Config) !void {
const module_name = debug_info.getModuleNameForAddress(address);
return printLineInfo(
writer,
@ -1078,14 +1078,14 @@ fn printUnknownSource(debug_info: *SelfInfo, writer: *Writer, address: usize, tt
);
}
fn printLastUnwindError(it: *StackIterator, debug_info: *SelfInfo, writer: *Writer, tty_config: io.tty.Config) void {
fn printLastUnwindError(it: *StackIterator, debug_info: *SelfInfo, writer: *Writer, tty_config: tty.Config) void {
if (!have_ucontext) return;
if (it.getLastError()) |unwind_error| {
printUnwindError(debug_info, writer, unwind_error.address, unwind_error.err, tty_config) catch {};
}
}
fn printUnwindError(debug_info: *SelfInfo, writer: *Writer, address: usize, err: UnwindError, tty_config: io.tty.Config) !void {
fn printUnwindError(debug_info: *SelfInfo, writer: *Writer, address: usize, err: UnwindError, tty_config: tty.Config) !void {
const module_name = debug_info.getModuleNameForAddress(address) orelse "???";
try tty_config.setColor(writer, .dim);
if (err == error.MissingDebugInfo) {
@ -1096,7 +1096,7 @@ fn printUnwindError(debug_info: *SelfInfo, writer: *Writer, address: usize, err:
try tty_config.setColor(writer, .reset);
}
pub fn printSourceAtAddress(debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: io.tty.Config) !void {
pub fn printSourceAtAddress(debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: tty.Config) !void {
const module = debug_info.getModuleForAddress(address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, writer, address, tty_config),
else => return err,
@ -1125,7 +1125,7 @@ fn printLineInfo(
address: usize,
symbol_name: []const u8,
compile_unit_name: []const u8,
tty_config: io.tty.Config,
tty_config: tty.Config,
comptime printLineFromFile: anytype,
) !void {
nosuspend {
@ -1597,10 +1597,10 @@ test "manage resources correctly" {
// self-hosted debug info is still too buggy
if (builtin.zig_backend != .stage2_llvm) return error.SkipZigTest;
var discarding: std.io.Writer.Discarding = .init(&.{});
var discarding: Writer.Discarding = .init(&.{});
var di = try SelfInfo.open(testing.allocator);
defer di.deinit();
try printSourceAtAddress(&di, &discarding.writer, showMyTrace(), io.tty.detectConfig(.stderr()));
try printSourceAtAddress(&di, &discarding.writer, showMyTrace(), tty.detectConfig(.stderr()));
}
noinline fn showMyTrace() usize {
@ -1666,7 +1666,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
pub fn dump(t: @This()) void {
if (!enabled) return;
const tty_config = io.tty.detectConfig(.stderr());
const tty_config = tty.detectConfig(.stderr());
const stderr = lockStderrWriter(&.{});
defer unlockStderrWriter();
const end = @min(t.index, size);

View file

@ -51,15 +51,9 @@ const Opcode = enum(u8) {
pub const hi_user = 0x3f;
};
fn readBlock(stream: *std.io.FixedBufferStream([]const u8)) ![]const u8 {
const reader = stream.reader();
const block_len = try leb.readUleb128(usize, reader);
if (stream.pos + block_len > stream.buffer.len) return error.InvalidOperand;
const block = stream.buffer[stream.pos..][0..block_len];
reader.context.pos += block_len;
return block;
fn readBlock(reader: *std.Io.Reader) ![]const u8 {
const block_len = try reader.takeLeb128(usize);
return reader.take(block_len);
}
pub const Instruction = union(Opcode) {
@ -147,12 +141,11 @@ pub const Instruction = union(Opcode) {
},
pub fn read(
stream: *std.io.FixedBufferStream([]const u8),
reader: *std.Io.Reader,
addr_size_bytes: u8,
endian: std.builtin.Endian,
) !Instruction {
const reader = stream.reader();
switch (try reader.readByte()) {
switch (try reader.takeByte()) {
Opcode.lo_inline...Opcode.hi_inline => |opcode| {
const e: Opcode = @enumFromInt(opcode & 0b11000000);
const value: u6 = @intCast(opcode & 0b111111);
@ -163,7 +156,7 @@ pub const Instruction = union(Opcode) {
.offset => .{
.offset = .{
.register = value,
.offset = try leb.readUleb128(u64, reader),
.offset = try reader.takeLeb128(u64),
},
},
.restore => .{
@ -183,111 +176,111 @@ pub const Instruction = union(Opcode) {
.set_loc => .{
.set_loc = .{
.address = switch (addr_size_bytes) {
2 => try reader.readInt(u16, endian),
4 => try reader.readInt(u32, endian),
8 => try reader.readInt(u64, endian),
2 => try reader.takeInt(u16, endian),
4 => try reader.takeInt(u32, endian),
8 => try reader.takeInt(u64, endian),
else => return error.InvalidAddrSize,
},
},
},
.advance_loc1 => .{
.advance_loc1 = .{ .delta = try reader.readByte() },
.advance_loc1 = .{ .delta = try reader.takeByte() },
},
.advance_loc2 => .{
.advance_loc2 = .{ .delta = try reader.readInt(u16, endian) },
.advance_loc2 = .{ .delta = try reader.takeInt(u16, endian) },
},
.advance_loc4 => .{
.advance_loc4 = .{ .delta = try reader.readInt(u32, endian) },
.advance_loc4 = .{ .delta = try reader.takeInt(u32, endian) },
},
.offset_extended => .{
.offset_extended = .{
.register = try leb.readUleb128(u8, reader),
.offset = try leb.readUleb128(u64, reader),
.register = try reader.takeLeb128(u8),
.offset = try reader.takeLeb128(u64),
},
},
.restore_extended => .{
.restore_extended = .{
.register = try leb.readUleb128(u8, reader),
.register = try reader.takeLeb128(u8),
},
},
.undefined => .{
.undefined = .{
.register = try leb.readUleb128(u8, reader),
.register = try reader.takeLeb128(u8),
},
},
.same_value => .{
.same_value = .{
.register = try leb.readUleb128(u8, reader),
.register = try reader.takeLeb128(u8),
},
},
.register => .{
.register = .{
.register = try leb.readUleb128(u8, reader),
.target_register = try leb.readUleb128(u8, reader),
.register = try reader.takeLeb128(u8),
.target_register = try reader.takeLeb128(u8),
},
},
.remember_state => .{ .remember_state = {} },
.restore_state => .{ .restore_state = {} },
.def_cfa => .{
.def_cfa = .{
.register = try leb.readUleb128(u8, reader),
.offset = try leb.readUleb128(u64, reader),
.register = try reader.takeLeb128(u8),
.offset = try reader.takeLeb128(u64),
},
},
.def_cfa_register => .{
.def_cfa_register = .{
.register = try leb.readUleb128(u8, reader),
.register = try reader.takeLeb128(u8),
},
},
.def_cfa_offset => .{
.def_cfa_offset = .{
.offset = try leb.readUleb128(u64, reader),
.offset = try reader.takeLeb128(u64),
},
},
.def_cfa_expression => .{
.def_cfa_expression = .{
.block = try readBlock(stream),
.block = try readBlock(reader),
},
},
.expression => .{
.expression = .{
.register = try leb.readUleb128(u8, reader),
.block = try readBlock(stream),
.register = try reader.takeLeb128(u8),
.block = try readBlock(reader),
},
},
.offset_extended_sf => .{
.offset_extended_sf = .{
.register = try leb.readUleb128(u8, reader),
.offset = try leb.readIleb128(i64, reader),
.register = try reader.takeLeb128(u8),
.offset = try reader.takeLeb128(i64),
},
},
.def_cfa_sf => .{
.def_cfa_sf = .{
.register = try leb.readUleb128(u8, reader),
.offset = try leb.readIleb128(i64, reader),
.register = try reader.takeLeb128(u8),
.offset = try reader.takeLeb128(i64),
},
},
.def_cfa_offset_sf => .{
.def_cfa_offset_sf = .{
.offset = try leb.readIleb128(i64, reader),
.offset = try reader.takeLeb128(i64),
},
},
.val_offset => .{
.val_offset = .{
.register = try leb.readUleb128(u8, reader),
.offset = try leb.readUleb128(u64, reader),
.register = try reader.takeLeb128(u8),
.offset = try reader.takeLeb128(u64),
},
},
.val_offset_sf => .{
.val_offset_sf = .{
.register = try leb.readUleb128(u8, reader),
.offset = try leb.readIleb128(i64, reader),
.register = try reader.takeLeb128(u8),
.offset = try reader.takeLeb128(i64),
},
},
.val_expression => .{
.val_expression = .{
.register = try leb.readUleb128(u8, reader),
.block = try readBlock(stream),
.register = try reader.takeLeb128(u8),
.block = try readBlock(reader),
},
},
};

View file

@ -62,7 +62,7 @@ pub const Error = error{
InvalidTypeLength,
TruncatedIntegralType,
} || abi.RegBytesError || error{ EndOfStream, Overflow, OutOfMemory, DivisionByZero };
} || abi.RegBytesError || error{ EndOfStream, Overflow, OutOfMemory, DivisionByZero, ReadFailed };
/// A stack machine that can decode and run DWARF expressions.
/// Expressions can be decoded for non-native address size and endianness,
@ -178,61 +178,60 @@ pub fn StackMachine(comptime options: Options) type {
}
}
pub fn readOperand(stream: *std.io.FixedBufferStream([]const u8), opcode: u8, context: Context) !?Operand {
const reader = stream.reader();
pub fn readOperand(reader: *std.Io.Reader, opcode: u8, context: Context) !?Operand {
return switch (opcode) {
OP.addr => generic(try reader.readInt(addr_type, options.endian)),
OP.addr => generic(try reader.takeInt(addr_type, options.endian)),
OP.call_ref => switch (context.format) {
.@"32" => generic(try reader.readInt(u32, options.endian)),
.@"64" => generic(try reader.readInt(u64, options.endian)),
.@"32" => generic(try reader.takeInt(u32, options.endian)),
.@"64" => generic(try reader.takeInt(u64, options.endian)),
},
OP.const1u,
OP.pick,
=> generic(try reader.readByte()),
=> generic(try reader.takeByte()),
OP.deref_size,
OP.xderef_size,
=> .{ .type_size = try reader.readByte() },
OP.const1s => generic(try reader.readByteSigned()),
=> .{ .type_size = try reader.takeByte() },
OP.const1s => generic(try reader.takeByteSigned()),
OP.const2u,
OP.call2,
=> generic(try reader.readInt(u16, options.endian)),
OP.call4 => generic(try reader.readInt(u32, options.endian)),
OP.const2s => generic(try reader.readInt(i16, options.endian)),
=> generic(try reader.takeInt(u16, options.endian)),
OP.call4 => generic(try reader.takeInt(u32, options.endian)),
OP.const2s => generic(try reader.takeInt(i16, options.endian)),
OP.bra,
OP.skip,
=> .{ .branch_offset = try reader.readInt(i16, options.endian) },
OP.const4u => generic(try reader.readInt(u32, options.endian)),
OP.const4s => generic(try reader.readInt(i32, options.endian)),
OP.const8u => generic(try reader.readInt(u64, options.endian)),
OP.const8s => generic(try reader.readInt(i64, options.endian)),
=> .{ .branch_offset = try reader.takeInt(i16, options.endian) },
OP.const4u => generic(try reader.takeInt(u32, options.endian)),
OP.const4s => generic(try reader.takeInt(i32, options.endian)),
OP.const8u => generic(try reader.takeInt(u64, options.endian)),
OP.const8s => generic(try reader.takeInt(i64, options.endian)),
OP.constu,
OP.plus_uconst,
OP.addrx,
OP.constx,
OP.convert,
OP.reinterpret,
=> generic(try leb.readUleb128(u64, reader)),
=> generic(try reader.takeLeb128(u64)),
OP.consts,
OP.fbreg,
=> generic(try leb.readIleb128(i64, reader)),
=> generic(try reader.takeLeb128(i64)),
OP.lit0...OP.lit31 => |n| generic(n - OP.lit0),
OP.reg0...OP.reg31 => |n| .{ .register = n - OP.reg0 },
OP.breg0...OP.breg31 => |n| .{ .base_register = .{
.base_register = n - OP.breg0,
.offset = try leb.readIleb128(i64, reader),
.offset = try reader.takeLeb128(i64),
} },
OP.regx => .{ .register = try leb.readUleb128(u8, reader) },
OP.regx => .{ .register = try reader.takeLeb128(u8) },
OP.bregx => blk: {
const base_register = try leb.readUleb128(u8, reader);
const offset = try leb.readIleb128(i64, reader);
const base_register = try reader.takeLeb128(u8);
const offset = try reader.takeLeb128(i64);
break :blk .{ .base_register = .{
.base_register = base_register,
.offset = offset,
} };
},
OP.regval_type => blk: {
const register = try leb.readUleb128(u8, reader);
const type_offset = try leb.readUleb128(addr_type, reader);
const register = try reader.takeLeb128(u8);
const type_offset = try reader.takeLeb128(addr_type);
break :blk .{ .register_type = .{
.register = register,
.type_offset = type_offset,
@ -240,33 +239,27 @@ pub fn StackMachine(comptime options: Options) type {
},
OP.piece => .{
.composite_location = .{
.size = try leb.readUleb128(u8, reader),
.size = try reader.takeLeb128(u8),
.offset = 0,
},
},
OP.bit_piece => blk: {
const size = try leb.readUleb128(u8, reader);
const offset = try leb.readIleb128(i64, reader);
const size = try reader.takeLeb128(u8);
const offset = try reader.takeLeb128(i64);
break :blk .{ .composite_location = .{
.size = size,
.offset = offset,
} };
},
OP.implicit_value, OP.entry_value => blk: {
const size = try leb.readUleb128(u8, reader);
if (stream.pos + size > stream.buffer.len) return error.InvalidExpression;
const block = stream.buffer[stream.pos..][0..size];
stream.pos += size;
break :blk .{
.block = block,
};
const size = try reader.takeLeb128(u8);
const block = try reader.take(size);
break :blk .{ .block = block };
},
OP.const_type => blk: {
const type_offset = try leb.readUleb128(addr_type, reader);
const size = try reader.readByte();
if (stream.pos + size > stream.buffer.len) return error.InvalidExpression;
const value_bytes = stream.buffer[stream.pos..][0..size];
stream.pos += size;
const type_offset = try reader.takeLeb128(addr_type);
const size = try reader.takeByte();
const value_bytes = try reader.take(size);
break :blk .{ .const_type = .{
.type_offset = type_offset,
.value_bytes = value_bytes,
@ -276,8 +269,8 @@ pub fn StackMachine(comptime options: Options) type {
OP.xderef_type,
=> .{
.deref_type = .{
.size = try reader.readByte(),
.type_offset = try leb.readUleb128(addr_type, reader),
.size = try reader.takeByte(),
.type_offset = try reader.takeLeb128(addr_type),
},
},
OP.lo_user...OP.hi_user => return error.UnimplementedUserOpcode,
@ -293,7 +286,7 @@ pub fn StackMachine(comptime options: Options) type {
initial_value: ?usize,
) Error!?Value {
if (initial_value) |i| try self.stack.append(allocator, .{ .generic = i });
var stream = std.io.fixedBufferStream(expression);
var stream: std.Io.Reader = .fixed(expression);
while (try self.step(&stream, allocator, context)) {}
if (self.stack.items.len == 0) return null;
return self.stack.items[self.stack.items.len - 1];
@ -302,14 +295,14 @@ pub fn StackMachine(comptime options: Options) type {
/// Reads an opcode and its operands from `stream`, then executes it
pub fn step(
self: *Self,
stream: *std.io.FixedBufferStream([]const u8),
stream: *std.Io.Reader,
allocator: std.mem.Allocator,
context: Context,
) Error!bool {
if (@sizeOf(usize) != @sizeOf(addr_type) or options.endian != native_endian)
@compileError("Execution of non-native address sizes / endianness is not supported");
const opcode = try stream.reader().readByte();
const opcode = try stream.takeByte();
if (options.call_frame_context and !isOpcodeValidInCFA(opcode)) return error.InvalidCFAOpcode;
const operand = try readOperand(stream, opcode, context);
switch (opcode) {
@ -663,11 +656,11 @@ pub fn StackMachine(comptime options: Options) type {
if (condition) {
const new_pos = std.math.cast(
usize,
try std.math.add(isize, @as(isize, @intCast(stream.pos)), branch_offset),
try std.math.add(isize, @as(isize, @intCast(stream.seek)), branch_offset),
) orelse return error.InvalidExpression;
if (new_pos < 0 or new_pos > stream.buffer.len) return error.InvalidExpression;
stream.pos = new_pos;
stream.seek = new_pos;
}
},
OP.call2,
@ -746,7 +739,7 @@ pub fn StackMachine(comptime options: Options) type {
if (isOpcodeRegisterLocation(block[0])) {
if (context.thread_context == null) return error.IncompleteExpressionContext;
var block_stream = std.io.fixedBufferStream(block);
var block_stream: std.Io.Reader = .fixed(block);
const register = (try readOperand(&block_stream, block[0], context)).?.register;
const value = mem.readInt(usize, (try abi.regBytes(context.thread_context.?, register, context.reg_context))[0..@sizeOf(usize)], native_endian);
try self.stack.append(allocator, .{ .generic = value });
@ -769,7 +762,7 @@ pub fn StackMachine(comptime options: Options) type {
},
}
return stream.pos < stream.buffer.len;
return stream.seek < stream.buffer.len;
}
};
}
@ -858,7 +851,7 @@ pub fn Builder(comptime options: Options) type {
},
.signed => {
try writer.writeByte(OP.consts);
try leb.writeIleb128(writer, value);
try writer.writeLeb128(value);
},
},
}
@ -892,19 +885,19 @@ pub fn Builder(comptime options: Options) type {
// 2.5.1.2: Register Values
pub fn writeFbreg(writer: *Writer, offset: anytype) !void {
try writer.writeByte(OP.fbreg);
try leb.writeIleb128(writer, offset);
try writer.writeSleb128(offset);
}
pub fn writeBreg(writer: *Writer, register: u8, offset: anytype) !void {
if (register > 31) return error.InvalidRegister;
try writer.writeByte(OP.breg0 + register);
try leb.writeIleb128(writer, offset);
try writer.writeSleb128(offset);
}
pub fn writeBregx(writer: *Writer, register: anytype, offset: anytype) !void {
try writer.writeByte(OP.bregx);
try writer.writeUleb128(register);
try leb.writeIleb128(writer, offset);
try writer.writeSleb128(offset);
}
pub fn writeRegvalType(writer: *Writer, register: anytype, offset: anytype) !void {

View file

@ -2017,15 +2017,12 @@ pub const VirtualMachine = struct {
var prev_row: Row = self.current_row;
var cie_stream = std.io.fixedBufferStream(cie.initial_instructions);
var fde_stream = std.io.fixedBufferStream(fde.instructions);
var streams = [_]*std.io.FixedBufferStream([]const u8){
&cie_stream,
&fde_stream,
};
var cie_stream: std.Io.Reader = .fixed(cie.initial_instructions);
var fde_stream: std.Io.Reader = .fixed(fde.instructions);
const streams = [_]*std.Io.Reader{ &cie_stream, &fde_stream };
for (streams, 0..) |stream, i| {
while (stream.pos < stream.buffer.len) {
while (stream.seek < stream.buffer.len) {
const instruction = try std.debug.Dwarf.call_frame.Instruction.read(stream, addr_size_bytes, endian);
prev_row = try self.step(allocator, cie, i == 0, instruction);
if (pc < fde.pc_begin + self.current_row.offset) return prev_row;

View file

@ -609,7 +609,7 @@ pub const ProgramHeaderBufferIterator = struct {
}
};
fn takePhdr(reader: *std.io.Reader, elf_header: Header) !?Elf64_Phdr {
fn takePhdr(reader: *std.Io.Reader, elf_header: Header) !?Elf64_Phdr {
if (elf_header.is_64) {
const phdr = try reader.takeStruct(Elf64_Phdr, elf_header.endian);
return phdr;

View file

@ -3,7 +3,6 @@
const builtin = @import("builtin");
const std = @import("std.zig");
const io = std.io;
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
@ -12,7 +11,7 @@ const lossyCast = math.lossyCast;
const expectFmt = std.testing.expectFmt;
const testing = std.testing;
const Allocator = std.mem.Allocator;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
pub const float = @import("fmt/float.zig");

View file

@ -1977,41 +1977,59 @@ pub fn readFile(self: Dir, file_path: []const u8, buffer: []u8) ![]u8 {
return buffer[0..end_index];
}
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
/// On Windows, `file_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, `file_path` should be encoded as valid UTF-8.
/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
pub fn readFileAlloc(self: Dir, allocator: mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
return self.readFileAllocOptions(allocator, file_path, max_bytes, null, .of(u8), null);
pub const ReadFileAllocError = File.OpenError || File.ReadError || Allocator.Error || error{
/// File size reached or exceeded the provided limit.
StreamTooLong,
};
/// Reads all the bytes from the named file. On success, caller owns returned
/// buffer.
///
/// If the file size is already known, a better alternative is to initialize a
/// `File.Reader`.
///
/// If the file size cannot be obtained, an error is returned. If
/// this is a realistic possibility, a better alternative is to initialize a
/// `File.Reader` which handles this seamlessly.
pub fn readFileAlloc(
dir: Dir,
/// On Windows, should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, should be encoded as valid UTF-8.
/// On other platforms, an opaque sequence of bytes with no particular encoding.
sub_path: []const u8,
/// Used to allocate the result.
gpa: Allocator,
/// If reached or exceeded, `error.StreamTooLong` is returned instead.
limit: std.Io.Limit,
) ReadFileAllocError![]u8 {
return readFileAllocOptions(dir, sub_path, gpa, limit, .of(u8), null);
}
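
Call sites migrate by swapping the argument order and wrapping the byte cap in a `std.Io.Limit`, as the test updates later in this commit show; a hedged call-site sketch (the path and cap are illustrative):

```zig
const std = @import("std");

fn loadConfig(dir: std.fs.Dir, gpa: std.mem.Allocator) ![]u8 {
    // Before: dir.readFileAlloc(gpa, "config.json", 64 * 1024) with error.FileTooBig.
    // After: sub_path first, allocator second, and an explicit std.Io.Limit;
    // reaching or exceeding the cap now returns error.StreamTooLong.
    return dir.readFileAlloc("config.json", gpa, .limited(64 * 1024));
    // Passing .unlimited opts out of the cap entirely.
}
```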
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
/// If `size_hint` is specified the initial buffer size is calculated using
/// that value, otherwise the effective file size is used instead.
/// Allows specifying alignment and a sentinel value.
/// On Windows, `file_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, `file_path` should be encoded as valid UTF-8.
/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
/// Reads all the bytes from the named file. On success, caller owns returned
/// buffer.
///
/// If the file size is already known, a better alternative is to initialize a
/// `File.Reader`.
pub fn readFileAllocOptions(
self: Dir,
allocator: mem.Allocator,
file_path: []const u8,
max_bytes: usize,
size_hint: ?usize,
dir: Dir,
/// On Windows, should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, should be encoded as valid UTF-8.
/// On other platforms, an opaque sequence of bytes with no particular encoding.
sub_path: []const u8,
/// Used to allocate the result.
gpa: Allocator,
/// If reached or exceeded, `error.StreamTooLong` is returned instead.
limit: std.Io.Limit,
comptime alignment: std.mem.Alignment,
comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
var file = try self.openFile(file_path, .{});
comptime sentinel: ?u8,
) ReadFileAllocError!(if (sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
var file = try dir.openFile(sub_path, .{});
defer file.close();
// If the file size doesn't fit a usize it'll be certainly greater than
// `max_bytes`
const stat_size = size_hint orelse std.math.cast(usize, try file.getEndPos()) orelse
return error.FileTooBig;
return file.readToEndAllocOptions(allocator, max_bytes, stat_size, alignment, optional_sentinel);
var file_reader = file.reader(&.{});
return file_reader.interface.allocRemainingAlignedSentinel(gpa, limit, alignment, sentinel) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
error.OutOfMemory, error.StreamTooLong => |e| return e,
};
}
pub const DeleteTreeError = error{

View file

@ -7,7 +7,6 @@ const File = @This();
const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const posix = std.posix;
const io = std.io;
const math = std.math;
const assert = std.debug.assert;
const linux = std.os.linux;
@ -805,42 +804,6 @@ pub fn updateTimes(
try posix.futimens(self.handle, &times);
}
/// Deprecated in favor of `Reader`.
pub fn readToEndAlloc(self: File, allocator: Allocator, max_bytes: usize) ![]u8 {
return self.readToEndAllocOptions(allocator, max_bytes, null, .of(u8), null);
}
/// Deprecated in favor of `Reader`.
pub fn readToEndAllocOptions(
self: File,
allocator: Allocator,
max_bytes: usize,
size_hint: ?usize,
comptime alignment: Alignment,
comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
// If no size hint is provided fall back to the size=0 code path
const size = size_hint orelse 0;
// The file size returned by stat is used as hint to set the buffer
// size. If the reported size is zero, as it happens on Linux for files
// in /proc, a small buffer is allocated instead.
const initial_cap = @min((if (size > 0) size else 1024), max_bytes) + @intFromBool(optional_sentinel != null);
var array_list = try std.array_list.AlignedManaged(u8, alignment).initCapacity(allocator, initial_cap);
defer array_list.deinit();
self.deprecatedReader().readAllArrayListAligned(alignment, &array_list, max_bytes) catch |err| switch (err) {
error.StreamTooLong => return error.FileTooBig,
else => |e| return e,
};
if (optional_sentinel) |sentinel| {
return try array_list.toOwnedSliceSentinel(sentinel);
} else {
return try array_list.toOwnedSlice();
}
}
pub const ReadError = posix.ReadError;
pub const PReadError = posix.PReadError;
@ -1089,14 +1052,6 @@ pub fn copyRangeAll(in: File, in_offset: u64, out: File, out_offset: u64, len: u
return total_bytes_copied;
}
/// Deprecated in favor of `Reader`.
pub const DeprecatedReader = io.GenericReader(File, ReadError, read);
/// Deprecated in favor of `Reader`.
pub fn deprecatedReader(file: File) DeprecatedReader {
return .{ .context = file };
}
/// Memoizes key information about a file handle such as:
/// * The size from calling stat, or the error that occurred therein.
/// * The current seek position.

View file

@ -150,7 +150,7 @@ pub fn fmtJoin(paths: []const []const u8) std.fmt.Formatter([]const []const u8,
return .{ .data = paths };
}
fn formatJoin(paths: []const []const u8, w: *std.io.Writer) std.io.Writer.Error!void {
fn formatJoin(paths: []const []const u8, w: *std.Io.Writer) std.Io.Writer.Error!void {
const first_path_idx = for (paths, 0..) |p, idx| {
if (p.len != 0) break idx;
} else return;

View file

@ -676,37 +676,47 @@ test "Dir.realpath smoke test" {
}.impl);
}
test "readAllAlloc" {
test "readFileAlloc" {
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
var file = try tmp_dir.dir.createFile("test_file", .{ .read = true });
defer file.close();
const buf1 = try file.readToEndAlloc(testing.allocator, 1024);
const buf1 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024));
defer testing.allocator.free(buf1);
try testing.expectEqual(@as(usize, 0), buf1.len);
try testing.expectEqualStrings("", buf1);
const write_buf: []const u8 = "this is a test.\nthis is a test.\nthis is a test.\nthis is a test.\n";
try file.writeAll(write_buf);
try file.seekTo(0);
{
// max_bytes > file_size
const buf2 = try file.readToEndAlloc(testing.allocator, 1024);
const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024));
defer testing.allocator.free(buf2);
try testing.expectEqual(write_buf.len, buf2.len);
try testing.expectEqualStrings(write_buf, buf2);
try file.seekTo(0);
}
{
// max_bytes == file_size
const buf3 = try file.readToEndAlloc(testing.allocator, write_buf.len);
defer testing.allocator.free(buf3);
try testing.expectEqual(write_buf.len, buf3.len);
try testing.expectEqualStrings(write_buf, buf3);
try file.seekTo(0);
try testing.expectError(
error.StreamTooLong,
tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len)),
);
}
{
// max_bytes == file_size + 1
const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len + 1));
defer testing.allocator.free(buf2);
try testing.expectEqualStrings(write_buf, buf2);
}
// max_bytes < file_size
try testing.expectError(error.FileTooBig, file.readToEndAlloc(testing.allocator, write_buf.len - 1));
try testing.expectError(
error.StreamTooLong,
tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len - 1)),
);
}
test "Dir.statFile" {
@ -778,16 +788,16 @@ test "file operations on directories" {
switch (native_os) {
.dragonfly, .netbsd => {
// no error when reading a directory. See https://github.com/ziglang/zig/issues/5732
const buf = try ctx.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize));
const buf = try ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited);
testing.allocator.free(buf);
},
.wasi => {
// WASI return EBADF, which gets mapped to NotOpenForReading.
// See https://github.com/bytecodealliance/wasmtime/issues/1935
try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize)));
try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited));
},
else => {
try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize)));
try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited));
},
}
@ -1564,7 +1574,7 @@ test "copyFile" {
}
fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void {
const contents = try dir.readFileAlloc(testing.allocator, file_path, 1000);
const contents = try dir.readFileAlloc(file_path, testing.allocator, .limited(1000));
defer testing.allocator.free(contents);
try testing.expectEqualSlices(u8, data, contents);
@ -1587,7 +1597,7 @@ test "AtomicFile" {
try af.file_writer.interface.writeAll(test_content);
try af.finish();
}
const content = try ctx.dir.readFileAlloc(allocator, test_out_file, 9999);
const content = try ctx.dir.readFileAlloc(test_out_file, allocator, .limited(9999));
try testing.expectEqualStrings(test_content, content);
try ctx.dir.deleteFile(test_out_file);
@ -2004,7 +2014,7 @@ test "invalid UTF-8/WTF-8 paths" {
}
try testing.expectError(expected_err, ctx.dir.readFile(invalid_path, &[_]u8{}));
try testing.expectError(expected_err, ctx.dir.readFileAlloc(testing.allocator, invalid_path, 0));
try testing.expectError(expected_err, ctx.dir.readFileAlloc(invalid_path, testing.allocator, .limited(0)));
try testing.expectError(expected_err, ctx.dir.deleteTree(invalid_path));
try testing.expectError(expected_err, ctx.dir.deleteTreeMinStackSize(invalid_path));

View file

@ -1,7 +1,7 @@
//! JSON parsing and stringification conforming to RFC 8259. https://datatracker.ietf.org/doc/html/rfc8259
//!
//! The low-level `Scanner` API produces `Token`s from an input slice or successive slices of inputs,
//! The `Reader` API connects a `std.io.GenericReader` to a `Scanner`.
//! The `Reader` API connects a `std.Io.Reader` to a `Scanner`.
//!
//! The high-level `parseFromSlice` and `parseFromTokenSource` deserialize a JSON document into a Zig type.
//! Parse into a dynamically-typed `Value` to load any JSON value for runtime inspection.
@ -42,7 +42,7 @@ test Value {
}
test Stringify {
var out: std.io.Writer.Allocating = .init(testing.allocator);
var out: std.Io.Writer.Allocating = .init(testing.allocator);
var write_stream: Stringify = .{
.writer = &out.writer,
.options = .{ .whitespace = .indent_2 },

View file

@ -23,7 +23,7 @@ const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const BitStack = std.BitStack;
const Stringify = @This();
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const IndentationMode = enum(u1) {
object = 0,
@ -576,7 +576,7 @@ pub fn value(v: anytype, options: Options, writer: *Writer) Error!void {
}
test value {
var out: std.io.Writer.Allocating = .init(std.testing.allocator);
var out: Writer.Allocating = .init(std.testing.allocator);
const writer = &out.writer;
defer out.deinit();
@ -616,7 +616,7 @@ test value {
///
/// Caller owns returned memory.
pub fn valueAlloc(gpa: Allocator, v: anytype, options: Options) error{OutOfMemory}![]u8 {
var aw: std.io.Writer.Allocating = .init(gpa);
var aw: Writer.Allocating = .init(gpa);
defer aw.deinit();
value(v, options, &aw.writer) catch return error.OutOfMemory;
return aw.toOwnedSlice();
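
`Writer.Allocating` is the replacement for the old allocator-backed streams used across these JSON call sites: the `.writer` field is an ordinary `std.Io.Writer`, `deinit` releases the buffer, and `toOwnedSlice` hands it to the caller. A minimal sketch (the function and message are illustrative, not from the commit):

```zig
const std = @import("std");

fn greeting(gpa: std.mem.Allocator, name: []const u8) ![]u8 {
    var aw: std.Io.Writer.Allocating = .init(gpa);
    defer aw.deinit(); // a no-op once toOwnedSlice has taken the buffer
    try aw.writer.print("hello, {s}!", .{name});
    return aw.toOwnedSlice();
}
```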

View file

@ -4,7 +4,7 @@ const mem = std.mem;
const testing = std.testing;
const ArenaAllocator = std.heap.ArenaAllocator;
const Allocator = std.mem.Allocator;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const ObjectMap = @import("dynamic.zig").ObjectMap;
const Array = @import("dynamic.zig").Array;

View file

@ -2,120 +2,6 @@ const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
/// Read a single unsigned LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readUleb128(comptime T: type, reader: anytype) !T {
const U = if (@typeInfo(T).int.bits < 8) u8 else T;
const ShiftT = std.math.Log2Int(U);
const max_group = (@typeInfo(U).int.bits + 6) / 7;
var value: U = 0;
var group: ShiftT = 0;
while (group < max_group) : (group += 1) {
const byte = try reader.readByte();
const ov = @shlWithOverflow(@as(U, byte & 0x7f), group * 7);
if (ov[1] != 0) return error.Overflow;
value |= ov[0];
if (byte & 0x80 == 0) break;
} else {
return error.Overflow;
}
// only applies in the case that we extended to u8
if (U != T) {
if (value > std.math.maxInt(T)) return error.Overflow;
}
return @as(T, @truncate(value));
}
/// Read a single signed LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readIleb128(comptime T: type, reader: anytype) !T {
const S = if (@typeInfo(T).int.bits < 8) i8 else T;
const U = std.meta.Int(.unsigned, @typeInfo(S).int.bits);
const ShiftU = std.math.Log2Int(U);
const max_group = (@typeInfo(U).int.bits + 6) / 7;
var value = @as(U, 0);
var group = @as(ShiftU, 0);
while (group < max_group) : (group += 1) {
const byte = try reader.readByte();
const shift = group * 7;
const ov = @shlWithOverflow(@as(U, byte & 0x7f), shift);
if (ov[1] != 0) {
// Overflow is ok so long as the sign bit is set and this is the last byte
if (byte & 0x80 != 0) return error.Overflow;
if (@as(S, @bitCast(ov[0])) >= 0) return error.Overflow;
// and all the overflowed bits are 1
const remaining_shift = @as(u3, @intCast(@typeInfo(U).int.bits - @as(u16, shift)));
const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
} else {
// If we don't overflow and this is the last byte and the number being decoded
// is negative, check that the remaining bits are 1
if ((byte & 0x80 == 0) and (@as(S, @bitCast(ov[0])) < 0)) {
const remaining_shift = @as(u3, @intCast(@typeInfo(U).int.bits - @as(u16, shift)));
const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
}
}
value |= ov[0];
if (byte & 0x80 == 0) {
const needs_sign_ext = group + 1 < max_group;
if (byte & 0x40 != 0 and needs_sign_ext) {
const ones = @as(S, -1);
value |= @as(U, @bitCast(ones)) << (shift + 7);
}
break;
}
} else {
return error.Overflow;
}
const result = @as(S, @bitCast(value));
// Only applies if we extended to i8
if (S != T) {
if (result > std.math.maxInt(T) or result < std.math.minInt(T)) return error.Overflow;
}
return @as(T, @truncate(result));
}
/// Write a single signed integer as signed LEB128 to the given writer.
pub fn writeIleb128(writer: anytype, arg: anytype) !void {
const Arg = @TypeOf(arg);
const Int = switch (Arg) {
comptime_int => std.math.IntFittingRange(-@abs(arg), @abs(arg)),
else => Arg,
};
const Signed = if (@typeInfo(Int).int.bits < 8) i8 else Int;
const Unsigned = std.meta.Int(.unsigned, @typeInfo(Signed).int.bits);
var value: Signed = arg;
while (true) {
const unsigned: Unsigned = @bitCast(value);
const byte: u8 = @truncate(unsigned);
value >>= 6;
if (value == -1 or value == 0) {
try writer.writeByte(byte & 0x7F);
break;
} else {
value >>= 1;
try writer.writeByte(byte | 0x80);
}
}
}
/// This is an "advanced" function. It allows one to use a fixed amount of memory to store a
/// ULEB128. This defeats the entire purpose of using this data encoding; it will no longer use
/// fewer bytes to store smaller numbers. The advantage of using a fixed width is that it makes
@ -149,22 +35,26 @@ test writeUnsignedFixed {
{
var buf: [4]u8 = undefined;
writeUnsignedFixed(4, &buf, 0);
try testing.expect((try test_read_uleb128(u64, &buf)) == 0);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(0, try reader.takeLeb128(u64));
}
{
var buf: [4]u8 = undefined;
writeUnsignedFixed(4, &buf, 1);
try testing.expect((try test_read_uleb128(u64, &buf)) == 1);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(1, try reader.takeLeb128(u64));
}
{
var buf: [4]u8 = undefined;
writeUnsignedFixed(4, &buf, 1000);
try testing.expect((try test_read_uleb128(u64, &buf)) == 1000);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(1000, try reader.takeLeb128(u64));
}
{
var buf: [4]u8 = undefined;
writeUnsignedFixed(4, &buf, 10000000);
try testing.expect((try test_read_uleb128(u64, &buf)) == 10000000);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(10000000, try reader.takeLeb128(u64));
}
}
@ -193,162 +83,43 @@ test writeSignedFixed {
{
var buf: [4]u8 = undefined;
writeSignedFixed(4, &buf, 0);
try testing.expect((try test_read_ileb128(i64, &buf)) == 0);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(0, try reader.takeLeb128(i64));
}
{
var buf: [4]u8 = undefined;
writeSignedFixed(4, &buf, 1);
try testing.expect((try test_read_ileb128(i64, &buf)) == 1);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(1, try reader.takeLeb128(i64));
}
{
var buf: [4]u8 = undefined;
writeSignedFixed(4, &buf, -1);
try testing.expect((try test_read_ileb128(i64, &buf)) == -1);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(-1, try reader.takeLeb128(i64));
}
{
var buf: [4]u8 = undefined;
writeSignedFixed(4, &buf, 1000);
try testing.expect((try test_read_ileb128(i64, &buf)) == 1000);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(1000, try reader.takeLeb128(i64));
}
{
var buf: [4]u8 = undefined;
writeSignedFixed(4, &buf, -1000);
try testing.expect((try test_read_ileb128(i64, &buf)) == -1000);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(-1000, try reader.takeLeb128(i64));
}
{
var buf: [4]u8 = undefined;
writeSignedFixed(4, &buf, -10000000);
try testing.expect((try test_read_ileb128(i64, &buf)) == -10000000);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(-10000000, try reader.takeLeb128(i64));
}
{
var buf: [4]u8 = undefined;
writeSignedFixed(4, &buf, 10000000);
try testing.expect((try test_read_ileb128(i64, &buf)) == 10000000);
var reader: std.Io.Reader = .fixed(&buf);
try testing.expectEqual(10000000, try reader.takeLeb128(i64));
}
}
// tests
fn test_read_stream_ileb128(comptime T: type, encoded: []const u8) !T {
var reader = std.io.fixedBufferStream(encoded);
return try readIleb128(T, reader.reader());
}
fn test_read_stream_uleb128(comptime T: type, encoded: []const u8) !T {
var reader = std.io.fixedBufferStream(encoded);
return try readUleb128(T, reader.reader());
}
fn test_read_ileb128(comptime T: type, encoded: []const u8) !T {
var reader = std.io.fixedBufferStream(encoded);
const v1 = try readIleb128(T, reader.reader());
return v1;
}
fn test_read_uleb128(comptime T: type, encoded: []const u8) !T {
var reader = std.io.fixedBufferStream(encoded);
const v1 = try readUleb128(T, reader.reader());
return v1;
}
fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
var reader = std.io.fixedBufferStream(encoded);
var i: usize = 0;
while (i < N) : (i += 1) {
_ = try readIleb128(T, reader.reader());
}
}
fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
var reader = std.io.fixedBufferStream(encoded);
var i: usize = 0;
while (i < N) : (i += 1) {
_ = try readUleb128(T, reader.reader());
}
}
test "deserialize signed LEB128" {
// Truncated
try testing.expectError(error.EndOfStream, test_read_stream_ileb128(i64, "\x80"));
// Overflow
try testing.expectError(error.Overflow, test_read_ileb128(i8, "\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_ileb128(i16, "\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_ileb128(i32, "\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_ileb128(i8, "\xff\x7e"));
try testing.expectError(error.Overflow, test_read_ileb128(i32, "\x80\x80\x80\x80\x08"));
try testing.expectError(error.Overflow, test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01"));
// Decode SLEB128
try testing.expect((try test_read_ileb128(i64, "\x00")) == 0);
try testing.expect((try test_read_ileb128(i64, "\x01")) == 1);
try testing.expect((try test_read_ileb128(i64, "\x3f")) == 63);
try testing.expect((try test_read_ileb128(i64, "\x40")) == -64);
try testing.expect((try test_read_ileb128(i64, "\x41")) == -63);
try testing.expect((try test_read_ileb128(i64, "\x7f")) == -1);
try testing.expect((try test_read_ileb128(i64, "\x80\x01")) == 128);
try testing.expect((try test_read_ileb128(i64, "\x81\x01")) == 129);
try testing.expect((try test_read_ileb128(i64, "\xff\x7e")) == -129);
try testing.expect((try test_read_ileb128(i64, "\x80\x7f")) == -128);
try testing.expect((try test_read_ileb128(i64, "\x81\x7f")) == -127);
try testing.expect((try test_read_ileb128(i64, "\xc0\x00")) == 64);
try testing.expect((try test_read_ileb128(i64, "\xc7\x9f\x7f")) == -12345);
try testing.expect((try test_read_ileb128(i8, "\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i16, "\xff\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i32, "\xff\xff\xff\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000);
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000)))));
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000);
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000);
// Decode unnormalized SLEB128 with extra padding bytes.
try testing.expect((try test_read_ileb128(i64, "\x80\x00")) == 0);
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x00")) == 0);
try testing.expect((try test_read_ileb128(i64, "\xff\x00")) == 0x7f);
try testing.expect((try test_read_ileb128(i64, "\xff\x80\x00")) == 0x7f);
try testing.expect((try test_read_ileb128(i64, "\x80\x81\x00")) == 0x80);
try testing.expect((try test_read_ileb128(i64, "\x80\x81\x80\x00")) == 0x80);
// Decode sequence of SLEB128 values
try test_read_ileb128_seq(i64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
}
test "deserialize unsigned LEB128" {
// Truncated
try testing.expectError(error.EndOfStream, test_read_stream_uleb128(u64, "\x80"));
// Overflow
try testing.expectError(error.Overflow, test_read_uleb128(u8, "\x80\x02"));
try testing.expectError(error.Overflow, test_read_uleb128(u8, "\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_uleb128(u16, "\x80\x80\x84"));
try testing.expectError(error.Overflow, test_read_uleb128(u16, "\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_uleb128(u32, "\x80\x80\x80\x80\x90"));
try testing.expectError(error.Overflow, test_read_uleb128(u32, "\x80\x80\x80\x80\x40"));
try testing.expectError(error.Overflow, test_read_uleb128(u64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40"));
// Decode ULEB128
try testing.expect((try test_read_uleb128(u64, "\x00")) == 0);
try testing.expect((try test_read_uleb128(u64, "\x01")) == 1);
try testing.expect((try test_read_uleb128(u64, "\x3f")) == 63);
try testing.expect((try test_read_uleb128(u64, "\x40")) == 64);
try testing.expect((try test_read_uleb128(u64, "\x7f")) == 0x7f);
try testing.expect((try test_read_uleb128(u64, "\x80\x01")) == 0x80);
try testing.expect((try test_read_uleb128(u64, "\x81\x01")) == 0x81);
try testing.expect((try test_read_uleb128(u64, "\x90\x01")) == 0x90);
try testing.expect((try test_read_uleb128(u64, "\xff\x01")) == 0xff);
try testing.expect((try test_read_uleb128(u64, "\x80\x02")) == 0x100);
try testing.expect((try test_read_uleb128(u64, "\x81\x02")) == 0x101);
try testing.expect((try test_read_uleb128(u64, "\x80\xc1\x80\x80\x10")) == 4294975616);
try testing.expect((try test_read_uleb128(u64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01")) == 0x8000000000000000);
// Decode ULEB128 with extra padding bytes
try testing.expect((try test_read_uleb128(u64, "\x80\x00")) == 0);
try testing.expect((try test_read_uleb128(u64, "\x80\x80\x00")) == 0);
try testing.expect((try test_read_uleb128(u64, "\xff\x00")) == 0x7f);
try testing.expect((try test_read_uleb128(u64, "\xff\x80\x00")) == 0x7f);
try testing.expect((try test_read_uleb128(u64, "\x80\x81\x00")) == 0x80);
try testing.expect((try test_read_uleb128(u64, "\x80\x81\x80\x00")) == 0x80);
// Decode sequence of ULEB128 values
try test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00");
}
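
With the helpers above gone, LEB128 decoding lives on the reader itself: `takeLeb128` infers signed vs. unsigned from the requested type, so one call replaces both `readUleb128` and `readIleb128` (the writer side splits into `writeUleb128`/`writeSleb128`). A sketch reusing one vector from the deleted tests:

```zig
const std = @import("std");

test "takeLeb128 replaces the deleted read helpers" {
    var r: std.Io.Reader = .fixed("\xc7\x9f\x7f"); // SLEB128 encoding of -12345
    try std.testing.expectEqual(-12345, try r.takeLeb128(i64));
}
```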

View file

@ -1,7 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const io = std.io;
const mem = std.mem;
const meta = std.meta;
const testing = std.testing;

View file

@ -2029,11 +2029,11 @@ pub const Mutable = struct {
r.len = llnormalize(r.limbs[0..length]);
}
pub fn format(self: Mutable, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: Mutable, w: *std.Io.Writer) std.Io.Writer.Error!void {
return formatNumber(self, w, .{});
}
pub fn formatNumber(self: Const, w: *std.io.Writer, n: std.fmt.Number) std.io.Writer.Error!void {
pub fn formatNumber(self: Const, w: *std.Io.Writer, n: std.fmt.Number) std.Io.Writer.Error!void {
return self.toConst().formatNumber(w, n);
}
};
@ -2326,7 +2326,7 @@ pub const Const = struct {
/// this function will fail to print the string, printing "(BigInt)" instead of a number.
/// This is because the rendering algorithm requires reversing a string, which requires O(N) memory.
/// See `toString` and `toStringAlloc` for a way to print big integers without failure.
pub fn formatNumber(self: Const, w: *std.io.Writer, number: std.fmt.Number) std.io.Writer.Error!void {
pub fn formatNumber(self: Const, w: *std.Io.Writer, number: std.fmt.Number) std.Io.Writer.Error!void {
const available_len = 64;
if (self.limbs.len > available_len)
return w.writeAll("(BigInt)");
@ -2907,7 +2907,7 @@ pub const Managed = struct {
}
/// To allow `std.fmt.format` to work with `Managed`.
pub fn format(self: Managed, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: Managed, w: *std.Io.Writer) std.Io.Writer.Error!void {
return formatNumber(self, w, .{});
}
@ -2915,7 +2915,7 @@ pub const Managed = struct {
/// this function will fail to print the string, printing "(BigInt)" instead of a number.
/// This is because the rendering algorithm requires reversing a string, which requires O(N) memory.
/// See `toString` and `toStringAlloc` for a way to print big integers without failure.
pub fn formatNumber(self: Managed, w: *std.io.Writer, n: std.fmt.Number) std.io.Writer.Error!void {
pub fn formatNumber(self: Managed, w: *std.Io.Writer, n: std.fmt.Number) std.Io.Writer.Error!void {
return self.toConst().formatNumber(w, n);
}

View file

@ -106,7 +106,7 @@ pub const Guid = extern struct {
node: [6]u8,
/// Format GUID into hexadecimal lowercase xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
pub fn format(self: Guid, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: Guid, writer: *std.Io.Writer) std.Io.Writer.Error!void {
const time_low = @byteSwap(self.time_low);
const time_mid = @byteSwap(self.time_mid);
const time_high_and_version = @byteSwap(self.time_high_and_version);

View file

@ -1,6 +1,5 @@
const std = @import("std");
const uefi = std.os.uefi;
const io = std.io;
const Guid = uefi.Guid;
const Time = uefi.Time;
const Status = uefi.Status;

View file

@ -90,7 +90,7 @@ pub const MemoryType = enum(u32) {
return @truncate(as_int - vendor_start);
}
pub fn format(self: MemoryType, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: MemoryType, w: *std.Io.Writer) std.Io.Writer.Error!void {
if (self.toOem()) |oemval|
try w.print("OEM({X})", .{oemval})
else if (self.toVendor()) |vendorval|

View file

@ -8,7 +8,6 @@
//! documentation and/or contributors.
const std = @import("std.zig");
const io = std.io;
const math = std.math;
const mem = std.mem;
const coff = std.coff;

View file

@ -671,8 +671,8 @@ fn getRandomBytesDevURandom(buf: []u8) !void {
}
const file: fs.File = .{ .handle = fd };
const stream = file.deprecatedReader();
stream.readNoEof(buf) catch return error.Unexpected;
var file_reader = file.readerStreaming(&.{});
file_reader.interface.readSliceAll(buf) catch return error.Unexpected;
}
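
The pattern above is the general replacement for `deprecatedReader()`: `readerStreaming(&.{})` builds an unbuffered `File.Reader`, and its `interface` field is the `std.Io.Reader` that the stream-agnostic helpers operate on. A hedged sketch under those assumptions:

```zig
const std = @import("std");

// Illustrative only: read exactly buf.len bytes from an open file.
fn fillExactly(file: std.fs.File, buf: []u8) !void {
    var file_reader = file.readerStreaming(&.{}); // &.{} = no internal buffer
    try file_reader.interface.readSliceAll(buf); // error.EndOfStream if the file is short
}
```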
/// Causes abnormal process termination.

View file

@ -4,7 +4,6 @@ const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
const expectError = testing.expectError;
const io = std.io;
const fs = std.fs;
const mem = std.mem;
const elf = std.elf;
@ -706,12 +705,11 @@ test "mmap" {
);
defer posix.munmap(data);
var mem_stream = io.fixedBufferStream(data);
const stream = mem_stream.reader();
var stream: std.Io.Reader = .fixed(data);
var i: usize = 0;
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
try testing.expectEqual(i, try stream.readInt(u32, .little));
try testing.expectEqual(i, try stream.takeInt(u32, .little));
}
}
@ -730,12 +728,11 @@ test "mmap" {
);
defer posix.munmap(data);
var mem_stream = io.fixedBufferStream(data);
const stream = mem_stream.reader();
var stream: std.Io.Reader = .fixed(data);
var i: usize = alloc_size / 2 / @sizeOf(u32);
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
try testing.expectEqual(i, try stream.readInt(u32, .little));
try testing.expectEqual(i, try stream.takeInt(u32, .little));
}
}
}

View file

@ -1552,59 +1552,66 @@ pub fn getUserInfo(name: []const u8) !UserInfo {
pub fn posixGetUserInfo(name: []const u8) !UserInfo {
const file = try std.fs.openFileAbsolute("/etc/passwd", .{});
defer file.close();
var buffer: [4096]u8 = undefined;
var file_reader = file.reader(&buffer);
return posixGetUserInfoPasswdStream(name, &file_reader.interface) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
error.EndOfStream => return error.UserNotFound,
error.CorruptPasswordFile => return error.CorruptPasswordFile,
};
}
const reader = file.deprecatedReader();
fn posixGetUserInfoPasswdStream(name: []const u8, reader: *std.Io.Reader) !UserInfo {
const State = enum {
Start,
WaitForNextLine,
SkipPassword,
ReadUserId,
ReadGroupId,
start,
wait_for_next_line,
skip_password,
read_user_id,
read_group_id,
};
var buf: [std.heap.page_size_min]u8 = undefined;
var name_index: usize = 0;
var state = State.Start;
var uid: posix.uid_t = 0;
var gid: posix.gid_t = 0;
while (true) {
const amt_read = try reader.read(buf[0..]);
for (buf[0..amt_read]) |byte| {
switch (state) {
.Start => switch (byte) {
sw: switch (State.start) {
.start => switch (try reader.takeByte()) {
':' => {
state = if (name_index == name.len) State.SkipPassword else State.WaitForNextLine;
if (name_index == name.len) {
continue :sw .skip_password;
} else {
continue :sw .wait_for_next_line;
}
},
'\n' => return error.CorruptPasswordFile,
else => {
else => |byte| {
if (name_index == name.len or name[name_index] != byte) {
state = .WaitForNextLine;
continue :sw .wait_for_next_line;
}
name_index += 1;
continue :sw .start;
},
},
.WaitForNextLine => switch (byte) {
.wait_for_next_line => switch (try reader.takeByte()) {
'\n' => {
name_index = 0;
state = .Start;
continue :sw .start;
},
else => continue,
else => continue :sw .wait_for_next_line,
},
.SkipPassword => switch (byte) {
.skip_password => switch (try reader.takeByte()) {
'\n' => return error.CorruptPasswordFile,
':' => {
state = .ReadUserId;
continue :sw .read_user_id;
},
else => continue,
else => continue :sw .skip_password,
},
.ReadUserId => switch (byte) {
.read_user_id => switch (try reader.takeByte()) {
':' => {
state = .ReadGroupId;
continue :sw .read_group_id;
},
'\n' => return error.CorruptPasswordFile,
else => {
else => |byte| {
const digit = switch (byte) {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
@ -1619,16 +1626,15 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
if (ov[1] != 0) return error.CorruptPasswordFile;
uid = ov[0];
}
continue :sw .read_user_id;
},
},
.ReadGroupId => switch (byte) {
'\n', ':' => {
return UserInfo{
.read_group_id => switch (try reader.takeByte()) {
'\n', ':' => return .{
.uid = uid,
.gid = gid,
};
},
else => {
else => |byte| {
const digit = switch (byte) {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
@ -1643,12 +1649,11 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
if (ov[1] != 0) return error.CorruptPasswordFile;
gid = ov[0];
}
continue :sw .read_group_id;
},
},
}
}
if (amt_read < buf.len) return error.UserNotFound;
}
comptime unreachable;
}
pub fn getBaseAddress() usize {
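
Note: the rewrite above drops the manual read buffer and state variable in favor of a labeled switch that pulls one byte at a time with reader.takeByte() and jumps between states via continue :sw. A minimal sketch of the same control flow, assuming only takeByte (the parsing rule itself is hypothetical):

const std = @import("std");

// Parse a decimal number terminated by ':' straight off a reader,
// re-entering the switch for each byte with continue :sw.
fn parseDecimalUntilColon(reader: *std.Io.Reader) !u32 {
    var value: u32 = 0;
    sw: switch (try reader.takeByte()) {
        ':' => return value,
        '0'...'9' => |byte| {
            value = try std.math.add(u32, try std.math.mul(u32, value, 10), byte - '0');
            continue :sw try reader.takeByte();
        },
        else => return error.CorruptInput,
    }
}

test parseDecimalUntilColon {
    var r: std.Io.Reader = .fixed("123:rest");
    try std.testing.expectEqual(123, try parseDecimalUntilColon(&r));
}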

View file

@ -78,8 +78,6 @@ pub const hash = @import("hash.zig");
pub const hash_map = @import("hash_map.zig");
pub const heap = @import("heap.zig");
pub const http = @import("http.zig");
/// Deprecated
pub const io = Io;
pub const json = @import("json.zig");
pub const leb = @import("leb128.zig");
pub const log = @import("log.zig");

View file

@ -336,7 +336,7 @@ fn testCase(case: Case) !void {
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var br: std.io.Reader = .fixed(case.data);
var br: std.Io.Reader = .fixed(case.data);
var it: tar.Iterator = .init(&br, .{
.file_name_buffer = &file_name_buffer,
.link_name_buffer = &link_name_buffer,
@ -387,7 +387,7 @@ fn testLongNameCase(case: Case) !void {
var min_file_name_buffer: [256]u8 = undefined;
var min_link_name_buffer: [100]u8 = undefined;
var br: std.io.Reader = .fixed(case.data);
var br: std.Io.Reader = .fixed(case.data);
var iter: tar.Iterator = .init(&br, .{
.file_name_buffer = &min_file_name_buffer,
.link_name_buffer = &min_link_name_buffer,
@ -407,7 +407,7 @@ test "insufficient buffer in Header name filed" {
var min_file_name_buffer: [9]u8 = undefined;
var min_link_name_buffer: [100]u8 = undefined;
var br: std.io.Reader = .fixed(gnu_case.data);
var br: std.Io.Reader = .fixed(gnu_case.data);
var iter: tar.Iterator = .init(&br, .{
.file_name_buffer = &min_file_name_buffer,
.link_name_buffer = &min_link_name_buffer,
@ -462,7 +462,7 @@ test "should not overwrite existing file" {
// This ensures that file is not overwritten.
//
const data = @embedFile("testdata/overwrite_file.tar");
var r: std.io.Reader = .fixed(data);
var r: std.Io.Reader = .fixed(data);
// Unpack with strip_components = 1 should fail
var root = std.testing.tmpDir(.{});
@ -490,7 +490,7 @@ test "case sensitivity" {
// 18089/alacritty/Darkermatrix.yml
//
const data = @embedFile("testdata/18089.tar");
var r: std.io.Reader = .fixed(data);
var r: std.Io.Reader = .fixed(data);
var root = std.testing.tmpDir(.{});
defer root.cleanup();

View file

@ -358,7 +358,7 @@ test expectApproxEqRel {
/// This function is intended to be used only in tests. When the two slices are not
/// equal, prints diagnostics to stderr to show exactly how they are not equal (with
/// the differences highlighted in red), then returns a test failure error.
/// The colorized output is optional and controlled by the return of `std.io.tty.detectConfig()`.
/// The colorized output is optional and controlled by the return of `std.Io.tty.detectConfig()`.
/// If your inputs are UTF-8 encoded strings, consider calling `expectEqualStrings` instead.
pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const T) !void {
const diff_index: usize = diff_index: {
@ -381,7 +381,7 @@ fn failEqualSlices(
expected: []const T,
actual: []const T,
diff_index: usize,
w: *std.io.Writer,
w: *std.Io.Writer,
) !void {
try w.print("slices differ. first difference occurs at index {d} (0x{X})\n", .{ diff_index, diff_index });
@ -401,7 +401,7 @@ fn failEqualSlices(
const actual_window = actual[window_start..@min(actual.len, window_start + max_window_size)];
const actual_truncated = window_start + actual_window.len < actual.len;
const ttyconf = std.io.tty.detectConfig(.stderr());
const ttyconf = std.Io.tty.detectConfig(.stderr());
var differ = if (T == u8) BytesDiffer{
.expected = expected_window,
.actual = actual_window,
@ -467,11 +467,11 @@ fn SliceDiffer(comptime T: type) type {
start_index: usize,
expected: []const T,
actual: []const T,
ttyconf: std.io.tty.Config,
ttyconf: std.Io.tty.Config,
const Self = @This();
pub fn write(self: Self, writer: *std.io.Writer) !void {
pub fn write(self: Self, writer: *std.Io.Writer) !void {
for (self.expected, 0..) |value, i| {
const full_index = self.start_index + i;
const diff = if (i < self.actual.len) !std.meta.eql(self.actual[i], value) else true;
@ -490,9 +490,9 @@ fn SliceDiffer(comptime T: type) type {
const BytesDiffer = struct {
expected: []const u8,
actual: []const u8,
ttyconf: std.io.tty.Config,
ttyconf: std.Io.tty.Config,
pub fn write(self: BytesDiffer, writer: *std.io.Writer) !void {
pub fn write(self: BytesDiffer, writer: *std.Io.Writer) !void {
var expected_iterator = std.mem.window(u8, self.expected, 16, 16);
var row: usize = 0;
while (expected_iterator.next()) |chunk| {
@ -538,7 +538,7 @@ const BytesDiffer = struct {
}
}
fn writeDiff(self: BytesDiffer, writer: *std.io.Writer, comptime fmt: []const u8, args: anytype, diff: bool) !void {
fn writeDiff(self: BytesDiffer, writer: *std.Io.Writer, comptime fmt: []const u8, args: anytype, diff: bool) !void {
if (diff) try self.ttyconf.setColor(writer, .red);
try writer.print(fmt, args);
if (diff) try self.ttyconf.setColor(writer, .reset);

View file

@ -804,7 +804,7 @@ fn testDecode(bytes: []const u8) !u21 {
/// Ill-formed UTF-8 byte sequences are replaced by the replacement character (U+FFFD)
/// according to "U+FFFD Substitution of Maximal Subparts" from Chapter 3 of
/// the Unicode standard, and as specified by https://encoding.spec.whatwg.org/#utf-8-decoder
fn formatUtf8(utf8: []const u8, writer: *std.io.Writer) std.io.Writer.Error!void {
fn formatUtf8(utf8: []const u8, writer: *std.Io.Writer) std.Io.Writer.Error!void {
var buf: [300]u8 = undefined; // just an arbitrary size
var u8len: usize = 0;
@ -1464,7 +1464,7 @@ test calcWtf16LeLen {
/// Print the given `utf16le` string, encoded as UTF-8 bytes.
/// Unpaired surrogates are replaced by the replacement character (U+FFFD).
fn formatUtf16Le(utf16le: []const u16, writer: *std.io.Writer) std.io.Writer.Error!void {
fn formatUtf16Le(utf16le: []const u16, writer: *std.Io.Writer) std.Io.Writer.Error!void {
var buf: [300]u8 = undefined; // just an arbitrary size
var it = Utf16LeIterator.init(utf16le);
var u8len: usize = 0;

View file

@ -51,9 +51,9 @@ pub const Color = enum {
/// Assume stderr is a terminal.
on,
pub fn get_tty_conf(color: Color) std.io.tty.Config {
pub fn get_tty_conf(color: Color) std.Io.tty.Config {
return switch (color) {
.auto => std.io.tty.detectConfig(std.fs.File.stderr()),
.auto => std.Io.tty.detectConfig(std.fs.File.stderr()),
.on => .escape_codes,
.off => .no_color,
};
@ -322,7 +322,7 @@ pub const BuildId = union(enum) {
try std.testing.expectError(error.InvalidBuildIdStyle, parse("yaddaxxx"));
}
pub fn format(id: BuildId, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(id: BuildId, writer: *std.Io.Writer) std.Io.Writer.Error!void {
switch (id) {
.none, .fast, .uuid, .sha1, .md5 => {
try writer.writeAll(@tagName(id));
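
Note: std.io.tty is now spelled std.Io.tty; detectConfig and setColor are otherwise unchanged. A minimal sketch combining them with the buffered File.Writer API this commit uses elsewhere (names illustrative):

const std = @import("std");

// Print one red line to stderr when the terminal supports color.
fn printRed(msg: []const u8) !void {
    const stderr = std.fs.File.stderr();
    const ttyconf = std.Io.tty.detectConfig(stderr);
    var buf: [256]u8 = undefined;
    var w = stderr.writer(&buf);
    try ttyconf.setColor(&w.interface, .red);
    try w.interface.writeAll(msg);
    try ttyconf.setColor(&w.interface, .reset);
    try w.interface.flush();
}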

View file

@ -204,7 +204,7 @@ pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!A
/// `gpa` is used for allocating the resulting formatted source code.
/// Caller owns the returned slice of bytes, allocated with `gpa`.
pub fn renderAlloc(tree: Ast, gpa: Allocator) error{OutOfMemory}![]u8 {
var aw: std.io.Writer.Allocating = .init(gpa);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
render(tree, gpa, &aw.writer, .{}) catch |err| switch (err) {
error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
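
Note: renderAlloc above shows the replacement for ArrayList-backed writers: an allocating std.Io.Writer whose WriteFailed maps to OutOfMemory. A minimal sketch of the same shape, assuming toOwnedSlice hands back the accumulated bytes (the helper is hypothetical):

const std = @import("std");

// Build a string through an allocating writer; caller owns the result.
fn joinWithDashes(gpa: std.mem.Allocator, words: []const []const u8) ![]u8 {
    var aw: std.Io.Writer.Allocating = .init(gpa);
    defer aw.deinit();
    for (words, 0..) |word, i| {
        if (i != 0) try aw.writer.writeByte('-');
        try aw.writer.writeAll(word);
    }
    return aw.toOwnedSlice();
}

test joinWithDashes {
    const s = try joinWithDashes(std.testing.allocator, &.{ "a", "b", "c" });
    defer std.testing.allocator.free(s);
    try std.testing.expectEqualStrings("a-b-c", s);
}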

View file

@ -6,7 +6,7 @@ const meta = std.meta;
const Ast = std.zig.Ast;
const Token = std.zig.Token;
const primitives = std.zig.primitives;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const Render = @This();
@ -2169,7 +2169,7 @@ fn renderArrayInit(
const section_exprs = row_exprs[0..section_end];
var sub_expr_buffer: std.io.Writer.Allocating = .init(gpa);
var sub_expr_buffer: Writer.Allocating = .init(gpa);
defer sub_expr_buffer.deinit();
const sub_expr_buffer_starts = try gpa.alloc(usize, section_exprs.len + 1);

View file

@ -11339,7 +11339,7 @@ fn parseStrLit(
) InnerError!void {
const raw_string = bytes[offset..];
const result = r: {
var aw: std.io.Writer.Allocating = .fromArrayList(astgen.gpa, buf);
var aw: std.Io.Writer.Allocating = .fromArrayList(astgen.gpa, buf);
defer buf.* = aw.toArrayList();
break :r std.zig.string_literal.parseWrite(&aw.writer, raw_string) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
@ -13785,7 +13785,7 @@ fn lowerAstErrors(astgen: *AstGen) error{OutOfMemory}!void {
const tree = astgen.tree;
assert(tree.errors.len > 0);
var msg: std.io.Writer.Allocating = .init(gpa);
var msg: std.Io.Writer.Allocating = .init(gpa);
defer msg.deinit();
const msg_w = &msg.writer;
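
Note: parseStrLit above appends into an existing std.ArrayListUnmanaged(u8) by wrapping it temporarily: fromArrayList adopts the list's memory, and the deferred toArrayList hands ownership back even on error. A minimal sketch of that round-trip (helper name and contents hypothetical):

const std = @import("std");

// Append formatted bytes to a caller-owned list via an Allocating writer.
fn appendLine(gpa: std.mem.Allocator, list: *std.ArrayListUnmanaged(u8), n: usize) !void {
    var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, list);
    defer list.* = aw.toArrayList();
    try aw.writer.print("line {d}\n", .{n});
}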

View file

@ -11,7 +11,7 @@ const std = @import("std");
const ErrorBundle = @This();
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
string_bytes: []const u8,
/// The first thing in this array is an `ErrorMessageList`.
@ -156,7 +156,7 @@ pub fn nullTerminatedString(eb: ErrorBundle, index: String) [:0]const u8 {
}
pub const RenderOptions = struct {
ttyconf: std.io.tty.Config,
ttyconf: std.Io.tty.Config,
include_reference_trace: bool = true,
include_source_line: bool = true,
include_log_text: bool = true,
@ -190,14 +190,14 @@ fn renderErrorMessageToWriter(
err_msg_index: MessageIndex,
w: *Writer,
kind: []const u8,
color: std.io.tty.Color,
color: std.Io.tty.Color,
indent: usize,
) (Writer.Error || std.posix.UnexpectedError)!void {
const ttyconf = options.ttyconf;
const err_msg = eb.getErrorMessage(err_msg_index);
if (err_msg.src_loc != .none) {
const src = eb.extraData(SourceLocation, @intFromEnum(err_msg.src_loc));
var prefix: std.io.Writer.Discarding = .init(&.{});
var prefix: Writer.Discarding = .init(&.{});
try w.splatByteAll(' ', indent);
prefix.count += indent;
try ttyconf.setColor(w, .bold);
@ -794,9 +794,9 @@ pub const Wip = struct {
};
defer bundle.deinit(std.testing.allocator);
const ttyconf: std.io.tty.Config = .no_color;
const ttyconf: std.Io.tty.Config = .no_color;
var bundle_buf: std.io.Writer.Allocating = .init(std.testing.allocator);
var bundle_buf: Writer.Allocating = .init(std.testing.allocator);
const bundle_bw = &bundle_buf.writer;
defer bundle_buf.deinit();
try bundle.renderToWriter(.{ .ttyconf = ttyconf }, bundle_bw);
@ -812,7 +812,7 @@ pub const Wip = struct {
};
defer copy.deinit(std.testing.allocator);
var copy_buf: std.io.Writer.Allocating = .init(std.testing.allocator);
var copy_buf: Writer.Allocating = .init(std.testing.allocator);
const copy_bw = &copy_buf.writer;
defer copy_buf.deinit();
try copy.renderToWriter(.{ .ttyconf = ttyconf }, copy_bw);
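
Note: the Writer.Discarding used above measures how wide the rendered source-location prefix is: bytes are thrown away but the count field advances. A minimal sketch of that counting trick, assuming Discarding exposes its interface as a writer field like Allocating does:

const std = @import("std");

test "discarding writer counts bytes" {
    var discarding: std.Io.Writer.Discarding = .init(&.{});
    try discarding.writer.print("{d}:{d}: ", .{ 12, 3 });
    // With a zero-length buffer every byte drains, and is counted, immediately.
    try std.testing.expectEqual(6, discarding.count);
}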

View file

@ -43,7 +43,7 @@ pub fn parse(
}
}
const contents = try std.fs.cwd().readFileAlloc(allocator, libc_file, std.math.maxInt(usize));
const contents = try std.fs.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize)));
defer allocator.free(contents);
var it = std.mem.tokenizeScalar(u8, contents, '\n');
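
Note: readFileAlloc now takes the path first, then the allocator, then a std.Io.Limit rather than a bare byte count. A minimal sketch (file size cap and helper name hypothetical):

const std = @import("std");

// Read at most 64 KiB of a file; caller frees the result.
fn readSmallFile(gpa: std.mem.Allocator, path: []const u8) ![]u8 {
    return std.fs.cwd().readFileAlloc(path, gpa, .limited(64 * 1024));
}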

View file

@ -766,7 +766,7 @@ const MsvcLibDir = struct {
writer.writeByte(std.fs.path.sep) catch unreachable;
writer.writeAll("state.json") catch unreachable;
const json_contents = instances_dir.readFileAlloc(allocator, writer.buffered(), std.math.maxInt(usize)) catch continue;
const json_contents = instances_dir.readFileAlloc(writer.buffered(), allocator, .limited(std.math.maxInt(usize))) catch continue;
defer allocator.free(json_contents);
var parsed = std.json.parseFromSlice(std.json.Value, allocator, json_contents, .{}) catch continue;
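
Note: the path construction above writes into a caller-provided buffer through a fixed std.Io.Writer and then takes the written prefix with buffered(). A minimal sketch of the same idiom (names hypothetical):

const std = @import("std");

// Join a directory and file name into buf and return the written slice.
fn statePath(buf: []u8, dir: []const u8) error{WriteFailed}![]u8 {
    var w: std.Io.Writer = .fixed(buf);
    try w.writeAll(dir);
    try w.writeByte(std.fs.path.sep);
    try w.writeAll("state.json");
    return w.buffered();
}

test statePath {
    var buf: [64]u8 = undefined;
    const expected = "instances" ++ std.fs.path.sep_str ++ "state.json";
    try std.testing.expectEqualStrings(expected, try statePath(&buf, "instances"));
}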

View file

@ -9,7 +9,7 @@ const StringIndexContext = std.hash_map.StringIndexContext;
const ZonGen = @This();
const Zoir = @import("Zoir.zig");
const Ast = @import("Ast.zig");
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
gpa: Allocator,
tree: Ast,
@ -472,7 +472,7 @@ fn appendIdentStr(zg: *ZonGen, ident_token: Ast.TokenIndex) error{ OutOfMemory,
const raw_string = zg.tree.tokenSlice(ident_token)[offset..];
try zg.string_bytes.ensureUnusedCapacity(gpa, raw_string.len);
const result = r: {
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &zg.string_bytes);
var aw: Writer.Allocating = .fromArrayList(gpa, &zg.string_bytes);
defer zg.string_bytes = aw.toArrayList();
break :r std.zig.string_literal.parseWrite(&aw.writer, raw_string) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
@ -570,7 +570,7 @@ fn strLitAsString(zg: *ZonGen, str_node: Ast.Node.Index) error{ OutOfMemory, Bad
const size_hint = strLitSizeHint(zg.tree, str_node);
try string_bytes.ensureUnusedCapacity(gpa, size_hint);
const result = r: {
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &zg.string_bytes);
var aw: Writer.Allocating = .fromArrayList(gpa, &zg.string_bytes);
defer zg.string_bytes = aw.toArrayList();
break :r parseStrLit(zg.tree, str_node, &aw.writer) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
@ -885,7 +885,7 @@ fn lowerAstErrors(zg: *ZonGen) Allocator.Error!void {
const tree = zg.tree;
assert(tree.errors.len > 0);
var msg: std.io.Writer.Allocating = .init(gpa);
var msg: Writer.Allocating = .init(gpa);
defer msg.deinit();
const msg_bw = &msg.writer;

View file

@ -7,7 +7,7 @@ const builtin = @import("builtin");
const DW = std.dwarf;
const ir = @import("ir.zig");
const log = std.log.scoped(.llvm);
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
gpa: Allocator,
strip: bool,

View file

@ -1,7 +1,6 @@
const std = @import("std");
const mem = std.mem;
const print = std.debug.print;
const io = std.io;
const maxInt = std.math.maxInt;
test "zig fmt: remove extra whitespace at start and end of file with comment between" {

View file

@ -1,7 +1,7 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
const utf8Encode = std.unicode.utf8Encode;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
pub const ParseError = error{
OutOfMemory,
@ -45,7 +45,7 @@ pub const Error = union(enum) {
raw_string: []const u8,
};
fn formatMessage(self: FormatMessage, writer: *std.io.Writer) std.io.Writer.Error!void {
fn formatMessage(self: FormatMessage, writer: *Writer) Writer.Error!void {
switch (self.err) {
.invalid_escape_character => |bad_index| try writer.print(
"invalid escape character: '{c}'",
@ -358,7 +358,7 @@ pub fn parseWrite(writer: *Writer, bytes: []const u8) Writer.Error!Result {
/// Higher level API. Does not return extra info about parse errors.
/// Caller owns returned memory.
pub fn parseAlloc(allocator: std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
var aw: std.io.Writer.Allocating = .init(allocator);
var aw: Writer.Allocating = .init(allocator);
defer aw.deinit();
const result = parseWrite(&aw.writer, bytes) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,

View file

@ -195,12 +195,12 @@ pub const Decompress = struct {
};
}
fn streamStore(r: *Reader, w: *Writer, limit: std.io.Limit) Reader.StreamError!usize {
fn streamStore(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!usize {
const d: *Decompress = @fieldParentPtr("interface", r);
return d.store.read(w, limit);
}
fn streamDeflate(r: *Reader, w: *Writer, limit: std.io.Limit) Reader.StreamError!usize {
fn streamDeflate(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!usize {
const d: *Decompress = @fieldParentPtr("interface", r);
return flate.Decompress.read(&d.inflate, w, limit);
}
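
Note: the stream functions above show the vtable contract for a custom std.Io.Reader: stream moves up to limit bytes into a Writer, and the implementation recovers its container with @fieldParentPtr("interface", r). A sketch of a pass-through reader under that contract; the Reader field layout (buffer/seek/end) and the public stream entry point are assumptions based on this commit's API, and all names are illustrative:

const std = @import("std");
const Reader = std.Io.Reader;
const Writer = std.Io.Writer;

// A pass-through Reader that counts the bytes it streams.
const CountingReader = struct {
    interface: Reader,
    inner: *Reader,
    count: u64 = 0,

    fn init(inner: *Reader) CountingReader {
        return .{
            .interface = .{
                .vtable = &.{ .stream = streamFn },
                .buffer = &.{},
                .seek = 0,
                .end = 0,
            },
            .inner = inner,
        };
    }

    fn streamFn(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!usize {
        const c: *CountingReader = @fieldParentPtr("interface", r);
        const n = try c.inner.stream(w, limit);
        c.count += n;
        return n;
    }
};

test CountingReader {
    var in: Reader = .fixed("hello");
    var counting: CountingReader = .init(&in);
    var buf: [16]u8 = undefined;
    var out: Writer = .fixed(&buf);
    _ = try counting.interface.stream(&out, .unlimited);
    try std.testing.expectEqual(5, counting.count);
}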

View file

@ -119,7 +119,7 @@ const Value = extern struct {
}
}
pub fn format(value: Value, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(value: Value, writer: *std.Io.Writer) std.Io.Writer.Error!void {
// Work around x86_64 backend limitation.
if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) {
try writer.writeAll("(unknown)");

View file

@ -961,7 +961,7 @@ pub const Inst = struct {
return index.unwrap().target;
}
pub fn format(index: Index, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(index: Index, w: *std.Io.Writer) std.Io.Writer.Error!void {
try w.writeByte('%');
switch (index.unwrap()) {
.ref => {},

View file

@ -10,6 +10,7 @@ const log = std.log.scoped(.liveness);
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Log2Int = std.math.Log2Int;
const Writer = std.Io.Writer;
const Liveness = @This();
const trace = @import("../tracy.zig").trace;
@ -2037,7 +2038,7 @@ fn fmtInstSet(set: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void)) FmtIns
const FmtInstSet = struct {
set: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void),
pub fn format(val: FmtInstSet, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(val: FmtInstSet, w: *Writer) Writer.Error!void {
if (val.set.count() == 0) {
try w.writeAll("[no instructions]");
return;
@ -2057,7 +2058,7 @@ fn fmtInstList(list: []const Air.Inst.Index) FmtInstList {
const FmtInstList = struct {
list: []const Air.Inst.Index,
pub fn format(val: FmtInstList, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(val: FmtInstList, w: *Writer) Writer.Error!void {
if (val.list.len == 0) {
try w.writeAll("[no instructions]");
return;

View file

@ -9,7 +9,7 @@ const Type = @import("../Type.zig");
const Air = @import("../Air.zig");
const InternPool = @import("../InternPool.zig");
pub fn write(air: Air, stream: *std.io.Writer, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {
pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {
comptime assert(build_options.enable_debug_extensions);
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
@ -55,7 +55,7 @@ pub fn write(air: Air, stream: *std.io.Writer, pt: Zcu.PerThread, liveness: ?Air
pub fn writeInst(
air: Air,
stream: *std.io.Writer,
stream: *std.Io.Writer,
inst: Air.Inst.Index,
pt: Zcu.PerThread,
liveness: ?Air.Liveness,
@ -92,16 +92,16 @@ const Writer = struct {
indent: usize,
skip_body: bool,
const Error = std.io.Writer.Error;
const Error = std.Io.Writer.Error;
fn writeBody(w: *Writer, s: *std.io.Writer, body: []const Air.Inst.Index) Error!void {
fn writeBody(w: *Writer, s: *std.Io.Writer, body: []const Air.Inst.Index) Error!void {
for (body) |inst| {
try w.writeInst(s, inst);
try s.writeByte('\n');
}
}
fn writeInst(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeInst(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const tag = w.air.instructions.items(.tag)[@intFromEnum(inst)];
try s.splatByteAll(' ', w.indent);
try s.print("{f}{c}= {s}(", .{
@ -341,48 +341,48 @@ const Writer = struct {
try s.writeByte(')');
}
fn writeBinOp(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeBinOp(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const bin_op = w.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
try w.writeOperand(s, inst, 0, bin_op.lhs);
try s.writeAll(", ");
try w.writeOperand(s, inst, 1, bin_op.rhs);
}
fn writeUnOp(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeUnOp(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const un_op = w.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
try w.writeOperand(s, inst, 0, un_op);
}
fn writeNoOp(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeNoOp(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
_ = w;
_ = s;
_ = inst;
// no-op, no argument to write
}
fn writeType(w: *Writer, s: *std.io.Writer, ty: Type) !void {
fn writeType(w: *Writer, s: *std.Io.Writer, ty: Type) !void {
return ty.print(s, w.pt);
}
fn writeTy(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeTy(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty = w.air.instructions.items(.data)[@intFromEnum(inst)].ty;
try w.writeType(s, ty);
}
fn writeArg(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeArg(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const arg = w.air.instructions.items(.data)[@intFromEnum(inst)].arg;
try w.writeType(s, arg.ty.toType());
try s.print(", {d}", .{arg.zir_param_index});
}
fn writeTyOp(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeTyOp(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_op = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
try w.writeType(s, ty_op.ty.toType());
try s.writeAll(", ");
try w.writeOperand(s, inst, 0, ty_op.operand);
}
fn writeBlock(w: *Writer, s: *std.io.Writer, tag: Air.Inst.Tag, inst: Air.Inst.Index) Error!void {
fn writeBlock(w: *Writer, s: *std.Io.Writer, tag: Air.Inst.Tag, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
try w.writeType(s, ty_pl.ty.toType());
const body: []const Air.Inst.Index = @ptrCast(switch (tag) {
@ -423,7 +423,7 @@ const Writer = struct {
}
}
fn writeLoop(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeLoop(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.body_len]);
@ -439,7 +439,7 @@ const Writer = struct {
try s.writeAll("}");
}
fn writeAggregateInit(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeAggregateInit(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const zcu = w.pt.zcu;
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const vector_ty = ty_pl.ty.toType();
@ -455,7 +455,7 @@ const Writer = struct {
try s.writeAll("]");
}
fn writeUnionInit(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeUnionInit(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.UnionInit, ty_pl.payload).data;
@ -463,7 +463,7 @@ const Writer = struct {
try w.writeOperand(s, inst, 0, extra.init);
}
fn writeStructField(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeStructField(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.StructField, ty_pl.payload).data;
@ -471,7 +471,7 @@ const Writer = struct {
try s.print(", {d}", .{extra.field_index});
}
fn writeTyPlBin(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeTyPlBin(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const data = w.air.instructions.items(.data);
const ty_pl = data[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.Bin, ty_pl.payload).data;
@ -484,7 +484,7 @@ const Writer = struct {
try w.writeOperand(s, inst, 1, extra.rhs);
}
fn writeCmpxchg(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeCmpxchg(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
@ -498,7 +498,7 @@ const Writer = struct {
});
}
fn writeMulAdd(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeMulAdd(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
@ -509,7 +509,7 @@ const Writer = struct {
try w.writeOperand(s, inst, 2, pl_op.operand);
}
fn writeShuffleOne(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeShuffleOne(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const unwrapped = w.air.unwrapShuffleOne(w.pt.zcu, inst);
try w.writeType(s, unwrapped.result_ty);
try s.writeAll(", ");
@ -525,7 +525,7 @@ const Writer = struct {
try s.writeByte(']');
}
fn writeShuffleTwo(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeShuffleTwo(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const unwrapped = w.air.unwrapShuffleTwo(w.pt.zcu, inst);
try w.writeType(s, unwrapped.result_ty);
try s.writeAll(", ");
@ -544,7 +544,7 @@ const Writer = struct {
try s.writeByte(']');
}
fn writeSelect(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeSelect(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const zcu = w.pt.zcu;
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
@ -559,14 +559,14 @@ const Writer = struct {
try w.writeOperand(s, inst, 2, extra.rhs);
}
fn writeReduce(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeReduce(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const reduce = w.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
try w.writeOperand(s, inst, 0, reduce.operand);
try s.print(", {s}", .{@tagName(reduce.operation)});
}
fn writeCmpVector(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeCmpVector(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.VectorCmp, ty_pl.payload).data;
@ -576,7 +576,7 @@ const Writer = struct {
try w.writeOperand(s, inst, 1, extra.rhs);
}
fn writeVectorStoreElem(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeVectorStoreElem(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const data = w.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
const extra = w.air.extraData(Air.VectorCmp, data.payload).data;
@ -587,21 +587,21 @@ const Writer = struct {
try w.writeOperand(s, inst, 2, extra.rhs);
}
fn writeRuntimeNavPtr(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeRuntimeNavPtr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ip = &w.pt.zcu.intern_pool;
const ty_nav = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav;
try w.writeType(s, .fromInterned(ty_nav.ty));
try s.print(", '{f}'", .{ip.getNav(ty_nav.nav).fqn.fmt(ip)});
}
fn writeAtomicLoad(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeAtomicLoad(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const atomic_load = w.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
try w.writeOperand(s, inst, 0, atomic_load.ptr);
try s.print(", {s}", .{@tagName(atomic_load.order)});
}
fn writePrefetch(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writePrefetch(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const prefetch = w.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
try w.writeOperand(s, inst, 0, prefetch.ptr);
@ -612,7 +612,7 @@ const Writer = struct {
fn writeAtomicStore(
w: *Writer,
s: *std.io.Writer,
s: *std.Io.Writer,
inst: Air.Inst.Index,
order: std.builtin.AtomicOrder,
) Error!void {
@ -623,7 +623,7 @@ const Writer = struct {
try s.print(", {s}", .{@tagName(order)});
}
fn writeAtomicRmw(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeAtomicRmw(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.AtomicRmw, pl_op.payload).data;
@ -633,7 +633,7 @@ const Writer = struct {
try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) });
}
fn writeFieldParentPtr(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeFieldParentPtr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@ -641,7 +641,7 @@ const Writer = struct {
try s.print(", {d}", .{extra.field_index});
}
fn writeAssembly(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeAssembly(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = extra.data.flags.is_volatile;
@ -730,19 +730,19 @@ const Writer = struct {
try s.print(", \"{f}\"", .{std.zig.fmtString(asm_source)});
}
fn writeDbgStmt(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeDbgStmt(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const dbg_stmt = w.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
try s.print("{d}:{d}", .{ dbg_stmt.line + 1, dbg_stmt.column + 1 });
}
fn writeDbgVar(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeDbgVar(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
try w.writeOperand(s, inst, 0, pl_op.operand);
const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
try s.print(", \"{f}\"", .{std.zig.fmtString(name.toSlice(w.air))});
}
fn writeCall(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeCall(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Call, pl_op.payload);
const args = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra.items[extra.end..][0..extra.data.args_len]));
@ -755,19 +755,19 @@ const Writer = struct {
try s.writeAll("]");
}
fn writeBr(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeBr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const br = w.air.instructions.items(.data)[@intFromEnum(inst)].br;
try w.writeInstIndex(s, br.block_inst, false);
try s.writeAll(", ");
try w.writeOperand(s, inst, 0, br.operand);
}
fn writeRepeat(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeRepeat(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const repeat = w.air.instructions.items(.data)[@intFromEnum(inst)].repeat;
try w.writeInstIndex(s, repeat.loop_inst, false);
}
fn writeTry(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeTry(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.body_len]);
@ -801,7 +801,7 @@ const Writer = struct {
}
}
fn writeTryPtr(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeTryPtr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.TryPtr, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.body_len]);
@ -838,7 +838,7 @@ const Writer = struct {
}
}
fn writeCondBr(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeCondBr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.then_body_len]);
@ -897,7 +897,7 @@ const Writer = struct {
try s.writeAll("}");
}
fn writeSwitchBr(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeSwitchBr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const switch_br = w.air.unwrapSwitch(inst);
const liveness: Air.Liveness.SwitchBrTable = if (w.liveness) |liveness|
@ -983,25 +983,25 @@ const Writer = struct {
try s.splatByteAll(' ', old_indent);
}
fn writeWasmMemorySize(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeWasmMemorySize(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
try s.print("{d}", .{pl_op.payload});
}
fn writeWasmMemoryGrow(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeWasmMemoryGrow(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
try s.print("{d}, ", .{pl_op.payload});
try w.writeOperand(s, inst, 0, pl_op.operand);
}
fn writeWorkDimension(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
fn writeWorkDimension(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
try s.print("{d}", .{pl_op.payload});
}
fn writeOperand(
w: *Writer,
s: *std.io.Writer,
s: *std.Io.Writer,
inst: Air.Inst.Index,
op_index: usize,
operand: Air.Inst.Ref,
@ -1027,7 +1027,7 @@ const Writer = struct {
fn writeInstRef(
w: *Writer,
s: *std.io.Writer,
s: *std.Io.Writer,
operand: Air.Inst.Ref,
dies: bool,
) Error!void {
@ -1047,7 +1047,7 @@ const Writer = struct {
fn writeInstIndex(
w: *Writer,
s: *std.io.Writer,
s: *std.Io.Writer,
inst: Air.Inst.Index,
dies: bool,
) Error!void {

View file

@ -12,7 +12,7 @@ const ThreadPool = std.Thread.Pool;
const WaitGroup = std.Thread.WaitGroup;
const ErrorBundle = std.zig.ErrorBundle;
const fatal = std.process.fatal;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const Value = @import("Value.zig");
const Type = @import("Type.zig");
@ -468,7 +468,7 @@ pub const Path = struct {
const Formatter = struct {
p: Path,
comp: *Compilation,
pub fn format(f: Formatter, w: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(f: Formatter, w: *Writer) Writer.Error!void {
const root_path: []const u8 = switch (f.p.root) {
.zig_lib => f.comp.dirs.zig_lib.path orelse ".",
.global_cache => f.comp.dirs.global_cache.path orelse ".",
@ -1883,7 +1883,7 @@ pub const CreateDiagnostic = union(enum) {
sub: []const u8,
err: (fs.Dir.MakeError || fs.Dir.OpenError || fs.Dir.StatFileError),
};
pub fn format(diag: CreateDiagnostic, w: *std.Io.Writer) std.Io.Writer.Error!void {
pub fn format(diag: CreateDiagnostic, w: *Writer) Writer.Error!void {
switch (diag) {
.export_table_import_table_conflict => try w.writeAll("'--import-table' and '--export-table' cannot be used together"),
.emit_h_without_zcu => try w.writeAll("cannot emit C header with no Zig source files"),
@ -6457,7 +6457,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
// In .rc files, a " within a quoted string is escaped as ""
const fmtRcEscape = struct {
fn formatRcEscape(bytes: []const u8, writer: *std.io.Writer) std.io.Writer.Error!void {
fn formatRcEscape(bytes: []const u8, writer: *Writer) Writer.Error!void {
for (bytes) |byte| switch (byte) {
'"' => try writer.writeAll("\"\""),
'\\' => try writer.writeAll("\\\\"),
@ -6576,7 +6576,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
// Read depfile and update cache manifest
{
const dep_basename = fs.path.basename(out_dep_path);
const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(arena, dep_basename, 50 * 1024 * 1024);
const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, arena, .limited(50 * 1024 * 1024));
defer arena.free(dep_file_contents);
const value = try std.json.parseFromSliceLeaky(std.json.Value, arena, dep_file_contents, .{});

Some files were not shown because too many files have changed in this diff.