resinator: update for new Io APIs

This commit is contained in:
Andrew Kelley 2025-10-20 23:25:57 -07:00
parent 76107e9e65
commit 4174ac18e9
5 changed files with 147 additions and 94 deletions

View file

@@ -1,6 +1,12 @@
const std = @import("std");
const builtin = @import("builtin"); const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const WORD = std.os.windows.WORD;
const DWORD = std.os.windows.DWORD;
const Node = @import("ast.zig").Node; const Node = @import("ast.zig").Node;
const lex = @import("lex.zig"); const lex = @import("lex.zig");
const Parser = @import("parse.zig").Parser; const Parser = @import("parse.zig").Parser;
@@ -17,8 +23,6 @@ const res = @import("res.zig");
const ico = @import("ico.zig"); const ico = @import("ico.zig");
const ani = @import("ani.zig"); const ani = @import("ani.zig");
const bmp = @import("bmp.zig"); const bmp = @import("bmp.zig");
const WORD = std.os.windows.WORD;
const DWORD = std.os.windows.DWORD;
const utils = @import("utils.zig"); const utils = @import("utils.zig");
const NameOrOrdinal = res.NameOrOrdinal; const NameOrOrdinal = res.NameOrOrdinal;
const SupportedCodePage = @import("code_pages.zig").SupportedCodePage; const SupportedCodePage = @import("code_pages.zig").SupportedCodePage;
@@ -28,7 +32,6 @@ const windows1252 = @import("windows1252.zig");
const lang = @import("lang.zig"); const lang = @import("lang.zig");
const code_pages = @import("code_pages.zig"); const code_pages = @import("code_pages.zig");
const errors = @import("errors.zig"); const errors = @import("errors.zig");
const native_endian = builtin.cpu.arch.endian();
pub const CompileOptions = struct { pub const CompileOptions = struct {
cwd: std.fs.Dir, cwd: std.fs.Dir,
@@ -77,7 +80,7 @@ pub const Dependencies = struct {
} }
}; };
pub fn compile(allocator: Allocator, source: []const u8, writer: *std.Io.Writer, options: CompileOptions) !void { pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io.Writer, options: CompileOptions) !void {
var lexer = lex.Lexer.init(source, .{ var lexer = lex.Lexer.init(source, .{
.default_code_page = options.default_code_page, .default_code_page = options.default_code_page,
.source_mappings = options.source_mappings, .source_mappings = options.source_mappings,
@@ -166,10 +169,11 @@ pub fn compile(allocator: Allocator, source: []const u8, writer: *std.Io.Writer,
defer arena_allocator.deinit(); defer arena_allocator.deinit();
const arena = arena_allocator.allocator(); const arena = arena_allocator.allocator();
var compiler = Compiler{ var compiler: Compiler = .{
.source = source, .source = source,
.arena = arena, .arena = arena,
.allocator = allocator, .allocator = allocator,
.io = io,
.cwd = options.cwd, .cwd = options.cwd,
.diagnostics = options.diagnostics, .diagnostics = options.diagnostics,
.dependencies = options.dependencies, .dependencies = options.dependencies,
@@ -191,6 +195,7 @@ pub const Compiler = struct {
source: []const u8, source: []const u8,
arena: Allocator, arena: Allocator,
allocator: Allocator, allocator: Allocator,
io: Io,
cwd: std.fs.Dir, cwd: std.fs.Dir,
state: State = .{}, state: State = .{},
diagnostics: *Diagnostics, diagnostics: *Diagnostics,
@@ -409,7 +414,7 @@ pub const Compiler = struct {
} }
} }
var first_error: ?std.fs.File.OpenError = null; var first_error: ?(std.fs.File.OpenError || std.fs.File.StatError) = null;
for (self.search_dirs) |search_dir| { for (self.search_dirs) |search_dir| {
if (utils.openFileNotDir(search_dir.dir, path, .{})) |file| { if (utils.openFileNotDir(search_dir.dir, path, .{})) |file| {
errdefer file.close(); errdefer file.close();
@@ -496,6 +501,8 @@ pub const Compiler = struct {
} }
pub fn writeResourceExternal(self: *Compiler, node: *Node.ResourceExternal, writer: *std.Io.Writer) !void { pub fn writeResourceExternal(self: *Compiler, node: *Node.ResourceExternal, writer: *std.Io.Writer) !void {
const io = self.io;
// Init header with data size zero for now, will need to fill it in later // Init header with data size zero for now, will need to fill it in later
var header = try self.resourceHeader(node.id, node.type, .{}); var header = try self.resourceHeader(node.id, node.type, .{});
defer header.deinit(self.allocator); defer header.deinit(self.allocator);
@@ -582,7 +589,7 @@ pub const Compiler = struct {
}; };
defer file_handle.close(); defer file_handle.close();
var file_buffer: [2048]u8 = undefined; var file_buffer: [2048]u8 = undefined;
var file_reader = file_handle.reader(&file_buffer); var file_reader = file_handle.reader(io, &file_buffer);
if (maybe_predefined_type) |predefined_type| { if (maybe_predefined_type) |predefined_type| {
switch (predefined_type) { switch (predefined_type) {

View file

@@ -1,5 +1,7 @@
const std = @import("std"); const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const res = @import("res.zig"); const res = @import("res.zig");
const NameOrOrdinal = res.NameOrOrdinal; const NameOrOrdinal = res.NameOrOrdinal;
const MemoryFlags = res.MemoryFlags; const MemoryFlags = res.MemoryFlags;
@@ -169,8 +171,7 @@ pub fn parseNameOrOrdinal(allocator: Allocator, reader: *std.Io.Reader) !NameOrO
pub const CoffOptions = struct { pub const CoffOptions = struct {
target: std.coff.IMAGE.FILE.MACHINE = .AMD64, target: std.coff.IMAGE.FILE.MACHINE = .AMD64,
/// If true, zeroes will be written to all timestamp fields timestamp: i64 = 0,
reproducible: bool = true,
/// If true, the MEM_WRITE flag will not be set in the .rsrc section header /// If true, the MEM_WRITE flag will not be set in the .rsrc section header
read_only: bool = false, read_only: bool = false,
/// If non-null, a symbol with this name and storage class EXTERNAL will be added to the symbol table. /// If non-null, a symbol with this name and storage class EXTERNAL will be added to the symbol table.
@@ -188,7 +189,13 @@ pub const Diagnostics = union {
overflow_resource: usize, overflow_resource: usize,
}; };
pub fn writeCoff(allocator: Allocator, writer: *std.Io.Writer, resources: []const Resource, options: CoffOptions, diagnostics: ?*Diagnostics) !void { pub fn writeCoff(
allocator: Allocator,
writer: *std.Io.Writer,
resources: []const Resource,
options: CoffOptions,
diagnostics: ?*Diagnostics,
) !void {
var resource_tree = ResourceTree.init(allocator, options); var resource_tree = ResourceTree.init(allocator, options);
defer resource_tree.deinit(); defer resource_tree.deinit();
@@ -215,7 +222,7 @@ pub fn writeCoff(allocator: Allocator, writer: *std.Io.Writer, resources: []cons
const pointer_to_rsrc02_data = pointer_to_relocations + relocations_len; const pointer_to_rsrc02_data = pointer_to_relocations + relocations_len;
const pointer_to_symbol_table = pointer_to_rsrc02_data + lengths.rsrc02; const pointer_to_symbol_table = pointer_to_rsrc02_data + lengths.rsrc02;
const timestamp: i64 = if (options.reproducible) 0 else std.time.timestamp(); const timestamp: i64 = options.timestamp;
const size_of_optional_header = 0; const size_of_optional_header = 0;
const machine_type: std.coff.IMAGE.FILE.MACHINE = options.target; const machine_type: std.coff.IMAGE.FILE.MACHINE = options.target;
const flags = std.coff.Header.Flags{ const flags = std.coff.Header.Flags{

View file

@@ -1,5 +1,11 @@
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std"); const std = @import("std");
const Io = std.Io;
const assert = std.debug.assert; const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Token = @import("lex.zig").Token; const Token = @import("lex.zig").Token;
const SourceMappings = @import("source_mapping.zig").SourceMappings; const SourceMappings = @import("source_mapping.zig").SourceMappings;
const utils = @import("utils.zig"); const utils = @import("utils.zig");
@@ -11,19 +17,19 @@ const parse = @import("parse.zig");
const lang = @import("lang.zig"); const lang = @import("lang.zig");
const code_pages = @import("code_pages.zig"); const code_pages = @import("code_pages.zig");
const SupportedCodePage = code_pages.SupportedCodePage; const SupportedCodePage = code_pages.SupportedCodePage;
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
pub const Diagnostics = struct { pub const Diagnostics = struct {
errors: std.ArrayList(ErrorDetails) = .empty, errors: std.ArrayList(ErrorDetails) = .empty,
/// Append-only, cannot handle removing strings. /// Append-only, cannot handle removing strings.
/// Expects to own all strings within the list. /// Expects to own all strings within the list.
strings: std.ArrayList([]const u8) = .empty, strings: std.ArrayList([]const u8) = .empty,
allocator: std.mem.Allocator, allocator: Allocator,
io: Io,
pub fn init(allocator: std.mem.Allocator) Diagnostics { pub fn init(allocator: Allocator, io: Io) Diagnostics {
return .{ return .{
.allocator = allocator, .allocator = allocator,
.io = io,
}; };
} }
@@ -62,10 +68,11 @@ pub const Diagnostics = struct {
} }
pub fn renderToStdErr(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, tty_config: std.Io.tty.Config, source_mappings: ?SourceMappings) void { pub fn renderToStdErr(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, tty_config: std.Io.tty.Config, source_mappings: ?SourceMappings) void {
const io = self.io;
const stderr = std.debug.lockStderrWriter(&.{}); const stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter(); defer std.debug.unlockStderrWriter();
for (self.errors.items) |err_details| { for (self.errors.items) |err_details| {
renderErrorMessage(stderr, tty_config, cwd, err_details, source, self.strings.items, source_mappings) catch return; renderErrorMessage(io, stderr, tty_config, cwd, err_details, source, self.strings.items, source_mappings) catch return;
} }
} }
@@ -167,9 +174,9 @@ pub const ErrorDetails = struct {
filename_string_index: FilenameStringIndex, filename_string_index: FilenameStringIndex,
pub const FilenameStringIndex = std.meta.Int(.unsigned, 32 - @bitSizeOf(FileOpenErrorEnum)); pub const FilenameStringIndex = std.meta.Int(.unsigned, 32 - @bitSizeOf(FileOpenErrorEnum));
pub const FileOpenErrorEnum = std.meta.FieldEnum(std.fs.File.OpenError); pub const FileOpenErrorEnum = std.meta.FieldEnum(std.fs.File.OpenError || std.fs.File.StatError);
pub fn enumFromError(err: std.fs.File.OpenError) FileOpenErrorEnum { pub fn enumFromError(err: (std.fs.File.OpenError || std.fs.File.StatError)) FileOpenErrorEnum {
return switch (err) { return switch (err) {
inline else => |e| @field(ErrorDetails.FileOpenError.FileOpenErrorEnum, @errorName(e)), inline else => |e| @field(ErrorDetails.FileOpenError.FileOpenErrorEnum, @errorName(e)),
}; };
@@ -894,7 +901,16 @@ fn cellCount(code_page: SupportedCodePage, source: []const u8, start_index: usiz
const truncated_str = "<...truncated...>"; const truncated_str = "<...truncated...>";
pub fn renderErrorMessage(writer: *std.Io.Writer, tty_config: std.Io.tty.Config, cwd: std.fs.Dir, err_details: ErrorDetails, source: []const u8, strings: []const []const u8, source_mappings: ?SourceMappings) !void { pub fn renderErrorMessage(
io: Io,
writer: *std.Io.Writer,
tty_config: std.Io.tty.Config,
cwd: std.fs.Dir,
err_details: ErrorDetails,
source: []const u8,
strings: []const []const u8,
source_mappings: ?SourceMappings,
) !void {
if (err_details.type == .hint) return; if (err_details.type == .hint) return;
const source_line_start = err_details.token.getLineStartForErrorDisplay(source); const source_line_start = err_details.token.getLineStartForErrorDisplay(source);
@@ -989,6 +1005,7 @@ pub fn renderErrorMessage(writer: *std.Io.Writer, tty_config: std.Io.tty.Config,
var initial_lines_err: ?anyerror = null; var initial_lines_err: ?anyerror = null;
var file_reader_buf: [max_source_line_bytes * 2]u8 = undefined; var file_reader_buf: [max_source_line_bytes * 2]u8 = undefined;
var corresponding_lines: ?CorrespondingLines = CorrespondingLines.init( var corresponding_lines: ?CorrespondingLines = CorrespondingLines.init(
io,
cwd, cwd,
err_details, err_details,
source_line_for_display.line, source_line_for_display.line,
@@ -1084,6 +1101,7 @@ const CorrespondingLines = struct {
code_page: SupportedCodePage, code_page: SupportedCodePage,
pub fn init( pub fn init(
io: Io,
cwd: std.fs.Dir, cwd: std.fs.Dir,
err_details: ErrorDetails, err_details: ErrorDetails,
line_for_comparison: []const u8, line_for_comparison: []const u8,
@@ -1108,7 +1126,7 @@ const CorrespondingLines = struct {
.code_page = err_details.code_page, .code_page = err_details.code_page,
.file_reader = undefined, .file_reader = undefined,
}; };
corresponding_lines.file_reader = corresponding_lines.file.reader(file_reader_buf); corresponding_lines.file_reader = corresponding_lines.file.reader(io, file_reader_buf);
errdefer corresponding_lines.deinit(); errdefer corresponding_lines.deinit();
try corresponding_lines.writeLineFromStreamVerbatim( try corresponding_lines.writeLineFromStreamVerbatim(

View file

@@ -1,5 +1,9 @@
const std = @import("std");
const builtin = @import("builtin"); const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const removeComments = @import("comments.zig").removeComments; const removeComments = @import("comments.zig").removeComments;
const parseAndRemoveLineCommands = @import("source_mapping.zig").parseAndRemoveLineCommands; const parseAndRemoveLineCommands = @import("source_mapping.zig").parseAndRemoveLineCommands;
const compile = @import("compile.zig").compile; const compile = @import("compile.zig").compile;
@@ -16,19 +20,18 @@ const aro = @import("aro");
const compiler_util = @import("../util.zig"); const compiler_util = @import("../util.zig");
pub fn main() !void { pub fn main() !void {
var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
defer std.debug.assert(gpa.deinit() == .ok); defer std.debug.assert(debug_allocator.deinit() == .ok);
const allocator = gpa.allocator(); const gpa = debug_allocator.allocator();
var arena_state = std.heap.ArenaAllocator.init(allocator); var arena_state = std.heap.ArenaAllocator.init(gpa);
defer arena_state.deinit(); defer arena_state.deinit();
const arena = arena_state.allocator(); const arena = arena_state.allocator();
const stderr = std.fs.File.stderr(); const stderr = std.fs.File.stderr();
const stderr_config = std.Io.tty.detectConfig(stderr); const stderr_config = std.Io.tty.detectConfig(stderr);
const args = try std.process.argsAlloc(allocator); const args = try std.process.argsAlloc(arena);
defer std.process.argsFree(allocator, args);
if (args.len < 2) { if (args.len < 2) {
try renderErrorMessage(std.debug.lockStderrWriter(&.{}), stderr_config, .err, "expected zig lib dir as first argument", .{}); try renderErrorMessage(std.debug.lockStderrWriter(&.{}), stderr_config, .err, "expected zig lib dir as first argument", .{});
@@ -59,11 +62,11 @@ pub fn main() !void {
}; };
var options = options: { var options = options: {
var cli_diagnostics = cli.Diagnostics.init(allocator); var cli_diagnostics = cli.Diagnostics.init(gpa);
defer cli_diagnostics.deinit(); defer cli_diagnostics.deinit();
var options = cli.parse(allocator, cli_args, &cli_diagnostics) catch |err| switch (err) { var options = cli.parse(gpa, cli_args, &cli_diagnostics) catch |err| switch (err) {
error.ParseError => { error.ParseError => {
try error_handler.emitCliDiagnostics(allocator, cli_args, &cli_diagnostics); try error_handler.emitCliDiagnostics(gpa, cli_args, &cli_diagnostics);
std.process.exit(1); std.process.exit(1);
}, },
else => |e| return e, else => |e| return e,
@@ -84,6 +87,10 @@ pub fn main() !void {
}; };
defer options.deinit(); defer options.deinit();
var threaded: std.Io.Threaded = .init(gpa);
defer threaded.deinit();
const io = threaded.io();
if (options.print_help_and_exit) { if (options.print_help_and_exit) {
try cli.writeUsage(stdout, "zig rc"); try cli.writeUsage(stdout, "zig rc");
try stdout.flush(); try stdout.flush();
@@ -99,12 +106,13 @@ pub fn main() !void {
try stdout.flush(); try stdout.flush();
} }
var dependencies = Dependencies.init(allocator); var dependencies = Dependencies.init(gpa);
defer dependencies.deinit(); defer dependencies.deinit();
const maybe_dependencies: ?*Dependencies = if (options.depfile_path != null) &dependencies else null; const maybe_dependencies: ?*Dependencies = if (options.depfile_path != null) &dependencies else null;
var include_paths = LazyIncludePaths{ var include_paths = LazyIncludePaths{
.arena = arena, .arena = arena,
.io = io,
.auto_includes_option = options.auto_includes, .auto_includes_option = options.auto_includes,
.zig_lib_dir = zig_lib_dir, .zig_lib_dir = zig_lib_dir,
.target_machine_type = options.coff_options.target, .target_machine_type = options.coff_options.target,
@@ -112,12 +120,12 @@ pub fn main() !void {
const full_input = full_input: { const full_input = full_input: {
if (options.input_format == .rc and options.preprocess != .no) { if (options.input_format == .rc and options.preprocess != .no) {
var preprocessed_buf: std.Io.Writer.Allocating = .init(allocator); var preprocessed_buf: std.Io.Writer.Allocating = .init(gpa);
errdefer preprocessed_buf.deinit(); errdefer preprocessed_buf.deinit();
// We're going to throw away everything except the final preprocessed output anyway, // We're going to throw away everything except the final preprocessed output anyway,
// so we can use a scoped arena for everything else. // so we can use a scoped arena for everything else.
var aro_arena_state = std.heap.ArenaAllocator.init(allocator); var aro_arena_state = std.heap.ArenaAllocator.init(gpa);
defer aro_arena_state.deinit(); defer aro_arena_state.deinit();
const aro_arena = aro_arena_state.allocator(); const aro_arena = aro_arena_state.allocator();
@@ -129,12 +137,12 @@ pub fn main() !void {
.color = stderr_config, .color = stderr_config,
} } }, } } },
true => .{ .output = .{ .to_list = .{ true => .{ .output = .{ .to_list = .{
.arena = .init(allocator), .arena = .init(gpa),
} } }, } } },
}; };
defer diagnostics.deinit(); defer diagnostics.deinit();
var comp = aro.Compilation.init(aro_arena, aro_arena, &diagnostics, std.fs.cwd()); var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, std.fs.cwd());
defer comp.deinit(); defer comp.deinit();
var argv: std.ArrayList([]const u8) = .empty; var argv: std.ArrayList([]const u8) = .empty;
@@ -159,20 +167,20 @@ pub fn main() !void {
preprocess.preprocess(&comp, &preprocessed_buf.writer, argv.items, maybe_dependencies) catch |err| switch (err) { preprocess.preprocess(&comp, &preprocessed_buf.writer, argv.items, maybe_dependencies) catch |err| switch (err) {
error.GeneratedSourceError => { error.GeneratedSourceError => {
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessor setup (this is always a bug)", &comp); try error_handler.emitAroDiagnostics(gpa, "failed during preprocessor setup (this is always a bug)", &comp);
std.process.exit(1); std.process.exit(1);
}, },
// ArgError can occur if e.g. the .rc file is not found // ArgError can occur if e.g. the .rc file is not found
error.ArgError, error.PreprocessError => { error.ArgError, error.PreprocessError => {
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessing", &comp); try error_handler.emitAroDiagnostics(gpa, "failed during preprocessing", &comp);
std.process.exit(1); std.process.exit(1);
}, },
error.FileTooBig => { error.FileTooBig => {
try error_handler.emitMessage(allocator, .err, "failed during preprocessing: maximum file size exceeded", .{}); try error_handler.emitMessage(gpa, .err, "failed during preprocessing: maximum file size exceeded", .{});
std.process.exit(1); std.process.exit(1);
}, },
error.WriteFailed => { error.WriteFailed => {
try error_handler.emitMessage(allocator, .err, "failed during preprocessing: error writing the preprocessed output", .{}); try error_handler.emitMessage(gpa, .err, "failed during preprocessing: error writing the preprocessed output", .{});
std.process.exit(1); std.process.exit(1);
}, },
error.OutOfMemory => |e| return e, error.OutOfMemory => |e| return e,
@@ -182,22 +190,22 @@ pub fn main() !void {
} else { } else {
switch (options.input_source) { switch (options.input_source) {
.stdio => |file| { .stdio => |file| {
var file_reader = file.reader(&.{}); var file_reader = file.reader(io, &.{});
break :full_input file_reader.interface.allocRemaining(allocator, .unlimited) catch |err| { break :full_input file_reader.interface.allocRemaining(gpa, .unlimited) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read input from stdin: {s}", .{@errorName(err)}); try error_handler.emitMessage(gpa, .err, "unable to read input from stdin: {s}", .{@errorName(err)});
std.process.exit(1); std.process.exit(1);
}; };
}, },
.filename => |input_filename| { .filename => |input_filename| {
break :full_input std.fs.cwd().readFileAlloc(input_filename, allocator, .unlimited) catch |err| { break :full_input std.fs.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) }); try error_handler.emitMessage(gpa, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) });
std.process.exit(1); std.process.exit(1);
}; };
}, },
} }
} }
}; };
defer allocator.free(full_input); defer gpa.free(full_input);
if (options.preprocess == .only) { if (options.preprocess == .only) {
switch (options.output_source) { switch (options.output_source) {
@@ -221,55 +229,55 @@ pub fn main() !void {
} }
else if (options.input_format == .res) else if (options.input_format == .res)
IoStream.fromIoSource(options.input_source, .input) catch |err| { IoStream.fromIoSource(options.input_source, .input) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read res file path '{s}': {s}", .{ options.input_source.filename, @errorName(err) }); try error_handler.emitMessage(gpa, .err, "unable to read res file path '{s}': {s}", .{ options.input_source.filename, @errorName(err) });
std.process.exit(1); std.process.exit(1);
} }
else else
IoStream.fromIoSource(options.output_source, .output) catch |err| { IoStream.fromIoSource(options.output_source, .output) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) }); try error_handler.emitMessage(gpa, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
std.process.exit(1); std.process.exit(1);
}; };
defer res_stream.deinit(allocator); defer res_stream.deinit(gpa);
const res_data = res_data: { const res_data = res_data: {
if (options.input_format != .res) { if (options.input_format != .res) {
// Note: We still want to run this when no-preprocess is set because: // Note: We still want to run this when no-preprocess is set because:
// 1. We want to print accurate line numbers after removing multiline comments // 1. We want to print accurate line numbers after removing multiline comments
// 2. We want to be able to handle an already-preprocessed input with #line commands in it // 2. We want to be able to handle an already-preprocessed input with #line commands in it
var mapping_results = parseAndRemoveLineCommands(allocator, full_input, full_input, .{ .initial_filename = options.input_source.filename }) catch |err| switch (err) { var mapping_results = parseAndRemoveLineCommands(gpa, full_input, full_input, .{ .initial_filename = options.input_source.filename }) catch |err| switch (err) {
error.InvalidLineCommand => { error.InvalidLineCommand => {
// TODO: Maybe output the invalid line command // TODO: Maybe output the invalid line command
try error_handler.emitMessage(allocator, .err, "invalid line command in the preprocessed source", .{}); try error_handler.emitMessage(gpa, .err, "invalid line command in the preprocessed source", .{});
if (options.preprocess == .no) { if (options.preprocess == .no) {
try error_handler.emitMessage(allocator, .note, "line commands must be of the format: #line <num> \"<path>\"", .{}); try error_handler.emitMessage(gpa, .note, "line commands must be of the format: #line <num> \"<path>\"", .{});
} else { } else {
try error_handler.emitMessage(allocator, .note, "this is likely to be a bug, please report it", .{}); try error_handler.emitMessage(gpa, .note, "this is likely to be a bug, please report it", .{});
} }
std.process.exit(1); std.process.exit(1);
}, },
error.LineNumberOverflow => { error.LineNumberOverflow => {
// TODO: Better error message // TODO: Better error message
try error_handler.emitMessage(allocator, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)}); try error_handler.emitMessage(gpa, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)});
std.process.exit(1); std.process.exit(1);
}, },
error.OutOfMemory => |e| return e, error.OutOfMemory => |e| return e,
}; };
defer mapping_results.mappings.deinit(allocator); defer mapping_results.mappings.deinit(gpa);
const default_code_page = options.default_code_page orelse .windows1252; const default_code_page = options.default_code_page orelse .windows1252;
const has_disjoint_code_page = hasDisjointCodePage(mapping_results.result, &mapping_results.mappings, default_code_page); const has_disjoint_code_page = hasDisjointCodePage(mapping_results.result, &mapping_results.mappings, default_code_page);
const final_input = try removeComments(mapping_results.result, mapping_results.result, &mapping_results.mappings); const final_input = try removeComments(mapping_results.result, mapping_results.result, &mapping_results.mappings);
var diagnostics = Diagnostics.init(allocator); var diagnostics = Diagnostics.init(gpa, io);
defer diagnostics.deinit(); defer diagnostics.deinit();
var output_buffer: [4096]u8 = undefined; var output_buffer: [4096]u8 = undefined;
var res_stream_writer = res_stream.source.writer(allocator, &output_buffer); var res_stream_writer = res_stream.source.writer(gpa, &output_buffer);
defer res_stream_writer.deinit(&res_stream.source); defer res_stream_writer.deinit(&res_stream.source);
const output_buffered_stream = res_stream_writer.interface(); const output_buffered_stream = res_stream_writer.interface();
compile(allocator, final_input, output_buffered_stream, .{ compile(gpa, io, final_input, output_buffered_stream, .{
.cwd = std.fs.cwd(), .cwd = std.fs.cwd(),
.diagnostics = &diagnostics, .diagnostics = &diagnostics,
.source_mappings = &mapping_results.mappings, .source_mappings = &mapping_results.mappings,
@@ -287,7 +295,7 @@ pub fn main() !void {
.warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page, .warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page,
}) catch |err| switch (err) { }) catch |err| switch (err) {
error.ParseError, error.CompileError => { error.ParseError, error.CompileError => {
try error_handler.emitDiagnostics(allocator, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings); try error_handler.emitDiagnostics(gpa, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings);
// Delete the output file on error // Delete the output file on error
res_stream.cleanupAfterError(); res_stream.cleanupAfterError();
std.process.exit(1); std.process.exit(1);
@@ -305,7 +313,7 @@ pub fn main() !void {
// write the depfile // write the depfile
if (options.depfile_path) |depfile_path| { if (options.depfile_path) |depfile_path| {
var depfile = std.fs.cwd().createFile(depfile_path, .{}) catch |err| { var depfile = std.fs.cwd().createFile(depfile_path, .{}) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) }); try error_handler.emitMessage(gpa, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) });
std.process.exit(1); std.process.exit(1);
}; };
defer depfile.close(); defer depfile.close();
@@ -332,41 +340,41 @@ pub fn main() !void {
if (options.output_format != .coff) return; if (options.output_format != .coff) return;
break :res_data res_stream.source.readAll(allocator) catch |err| { break :res_data res_stream.source.readAll(gpa, io) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read res from '{s}': {s}", .{ res_stream.name, @errorName(err) }); try error_handler.emitMessage(gpa, .err, "unable to read res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
std.process.exit(1); std.process.exit(1);
}; };
}; };
// No need to keep the res_data around after parsing the resources from it // No need to keep the res_data around after parsing the resources from it
defer res_data.deinit(allocator); defer res_data.deinit(gpa);
std.debug.assert(options.output_format == .coff); std.debug.assert(options.output_format == .coff);
// TODO: Maybe use a buffered file reader instead of reading file into memory -> fbs // TODO: Maybe use a buffered file reader instead of reading file into memory -> fbs
var res_reader: std.Io.Reader = .fixed(res_data.bytes); var res_reader: std.Io.Reader = .fixed(res_data.bytes);
break :resources cvtres.parseRes(allocator, &res_reader, .{ .max_size = res_data.bytes.len }) catch |err| { break :resources cvtres.parseRes(gpa, &res_reader, .{ .max_size = res_data.bytes.len }) catch |err| {
// TODO: Better errors // TODO: Better errors
try error_handler.emitMessage(allocator, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) }); try error_handler.emitMessage(gpa, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
std.process.exit(1); std.process.exit(1);
}; };
}; };
defer resources.deinit(); defer resources.deinit();
var coff_stream = IoStream.fromIoSource(options.output_source, .output) catch |err| { var coff_stream = IoStream.fromIoSource(options.output_source, .output) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) }); try error_handler.emitMessage(gpa, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
std.process.exit(1); std.process.exit(1);
}; };
defer coff_stream.deinit(allocator); defer coff_stream.deinit(gpa);
var coff_output_buffer: [4096]u8 = undefined; var coff_output_buffer: [4096]u8 = undefined;
var coff_output_buffered_stream = coff_stream.source.writer(allocator, &coff_output_buffer); var coff_output_buffered_stream = coff_stream.source.writer(gpa, &coff_output_buffer);
var cvtres_diagnostics: cvtres.Diagnostics = .{ .none = {} }; var cvtres_diagnostics: cvtres.Diagnostics = .{ .none = {} };
cvtres.writeCoff(allocator, coff_output_buffered_stream.interface(), resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| { cvtres.writeCoff(gpa, coff_output_buffered_stream.interface(), resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| {
switch (err) { switch (err) {
error.DuplicateResource => { error.DuplicateResource => {
const duplicate_resource = resources.list.items[cvtres_diagnostics.duplicate_resource]; const duplicate_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
try error_handler.emitMessage(allocator, .err, "duplicate resource [id: {f}, type: {f}, language: {f}]", .{ try error_handler.emitMessage(gpa, .err, "duplicate resource [id: {f}, type: {f}, language: {f}]", .{
duplicate_resource.name_value, duplicate_resource.name_value,
fmtResourceType(duplicate_resource.type_value), fmtResourceType(duplicate_resource.type_value),
duplicate_resource.language, duplicate_resource.language,
@ -374,8 +382,8 @@ pub fn main() !void {
}, },
error.ResourceDataTooLong => { error.ResourceDataTooLong => {
const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource]; const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
try error_handler.emitMessage(allocator, .err, "resource has a data length that is too large to be written into a coff section", .{}); try error_handler.emitMessage(gpa, .err, "resource has a data length that is too large to be written into a coff section", .{});
try error_handler.emitMessage(allocator, .note, "the resource with the invalid size is [id: {f}, type: {f}, language: {f}]", .{ try error_handler.emitMessage(gpa, .note, "the resource with the invalid size is [id: {f}, type: {f}, language: {f}]", .{
overflow_resource.name_value, overflow_resource.name_value,
fmtResourceType(overflow_resource.type_value), fmtResourceType(overflow_resource.type_value),
overflow_resource.language, overflow_resource.language,
@ -383,15 +391,15 @@ pub fn main() !void {
}, },
error.TotalResourceDataTooLong => { error.TotalResourceDataTooLong => {
const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource]; const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
try error_handler.emitMessage(allocator, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{}); try error_handler.emitMessage(gpa, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{});
try error_handler.emitMessage(allocator, .note, "size overflow occurred when attempting to write this resource: [id: {f}, type: {f}, language: {f}]", .{ try error_handler.emitMessage(gpa, .note, "size overflow occurred when attempting to write this resource: [id: {f}, type: {f}, language: {f}]", .{
overflow_resource.name_value, overflow_resource.name_value,
fmtResourceType(overflow_resource.type_value), fmtResourceType(overflow_resource.type_value),
overflow_resource.language, overflow_resource.language,
}); });
}, },
else => { else => {
try error_handler.emitMessage(allocator, .err, "unable to write coff output file '{s}': {s}", .{ coff_stream.name, @errorName(err) }); try error_handler.emitMessage(gpa, .err, "unable to write coff output file '{s}': {s}", .{ coff_stream.name, @errorName(err) });
}, },
} }
// Delete the output file on error // Delete the output file on error
@ -423,7 +431,7 @@ const IoStream = struct {
}; };
} }
pub fn deinit(self: *IoStream, allocator: std.mem.Allocator) void { pub fn deinit(self: *IoStream, allocator: Allocator) void {
self.source.deinit(allocator); self.source.deinit(allocator);
} }
@ -458,7 +466,7 @@ const IoStream = struct {
} }
} }
pub fn deinit(self: *Source, allocator: std.mem.Allocator) void { pub fn deinit(self: *Source, allocator: Allocator) void {
switch (self.*) { switch (self.*) {
.file => |file| file.close(), .file => |file| file.close(),
.stdio => {}, .stdio => {},
@ -471,18 +479,18 @@ const IoStream = struct {
bytes: []const u8, bytes: []const u8,
needs_free: bool, needs_free: bool,
pub fn deinit(self: Data, allocator: std.mem.Allocator) void { pub fn deinit(self: Data, allocator: Allocator) void {
if (self.needs_free) { if (self.needs_free) {
allocator.free(self.bytes); allocator.free(self.bytes);
} }
} }
}; };
pub fn readAll(self: Source, allocator: std.mem.Allocator) !Data { pub fn readAll(self: Source, allocator: Allocator, io: Io) !Data {
return switch (self) { return switch (self) {
inline .file, .stdio => |file| .{ inline .file, .stdio => |file| .{
.bytes = b: { .bytes = b: {
var file_reader = file.reader(&.{}); var file_reader = file.reader(io, &.{});
break :b try file_reader.interface.allocRemaining(allocator, .unlimited); break :b try file_reader.interface.allocRemaining(allocator, .unlimited);
}, },
.needs_free = true, .needs_free = true,
@ -496,7 +504,7 @@ const IoStream = struct {
file: std.fs.File.Writer, file: std.fs.File.Writer,
allocating: std.Io.Writer.Allocating, allocating: std.Io.Writer.Allocating,
pub const Error = std.mem.Allocator.Error || std.fs.File.WriteError; pub const Error = Allocator.Error || std.fs.File.WriteError;
pub fn interface(this: *@This()) *std.Io.Writer { pub fn interface(this: *@This()) *std.Io.Writer {
return switch (this.*) { return switch (this.*) {
@ -514,7 +522,7 @@ const IoStream = struct {
} }
}; };
pub fn writer(source: *Source, allocator: std.mem.Allocator, buffer: []u8) Writer { pub fn writer(source: *Source, allocator: Allocator, buffer: []u8) Writer {
return switch (source.*) { return switch (source.*) {
.file, .stdio => |file| .{ .file = file.writer(buffer) }, .file, .stdio => |file| .{ .file = file.writer(buffer) },
.memory => |*list| .{ .allocating = .fromArrayList(allocator, list) }, .memory => |*list| .{ .allocating = .fromArrayList(allocator, list) },
@ -525,17 +533,20 @@ const IoStream = struct {
}; };
const LazyIncludePaths = struct { const LazyIncludePaths = struct {
arena: std.mem.Allocator, arena: Allocator,
io: Io,
auto_includes_option: cli.Options.AutoIncludes, auto_includes_option: cli.Options.AutoIncludes,
zig_lib_dir: []const u8, zig_lib_dir: []const u8,
target_machine_type: std.coff.IMAGE.FILE.MACHINE, target_machine_type: std.coff.IMAGE.FILE.MACHINE,
resolved_include_paths: ?[]const []const u8 = null, resolved_include_paths: ?[]const []const u8 = null,
pub fn get(self: *LazyIncludePaths, error_handler: *ErrorHandler) ![]const []const u8 { pub fn get(self: *LazyIncludePaths, error_handler: *ErrorHandler) ![]const []const u8 {
const io = self.io;
if (self.resolved_include_paths) |include_paths| if (self.resolved_include_paths) |include_paths|
return include_paths; return include_paths;
return getIncludePaths(self.arena, self.auto_includes_option, self.zig_lib_dir, self.target_machine_type) catch |err| switch (err) { return getIncludePaths(self.arena, io, self.auto_includes_option, self.zig_lib_dir, self.target_machine_type) catch |err| switch (err) {
error.OutOfMemory => |e| return e, error.OutOfMemory => |e| return e,
else => |e| { else => |e| {
switch (e) { switch (e) {
@ -556,7 +567,13 @@ const LazyIncludePaths = struct {
} }
}; };
fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.AutoIncludes, zig_lib_dir: []const u8, target_machine_type: std.coff.IMAGE.FILE.MACHINE) ![]const []const u8 { fn getIncludePaths(
arena: Allocator,
io: Io,
auto_includes_option: cli.Options.AutoIncludes,
zig_lib_dir: []const u8,
target_machine_type: std.coff.IMAGE.FILE.MACHINE,
) ![]const []const u8 {
if (auto_includes_option == .none) return &[_][]const u8{}; if (auto_includes_option == .none) return &[_][]const u8{};
const includes_arch: std.Target.Cpu.Arch = switch (target_machine_type) { const includes_arch: std.Target.Cpu.Arch = switch (target_machine_type) {
@ -626,7 +643,7 @@ fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.A
.cpu_arch = includes_arch, .cpu_arch = includes_arch,
.abi = .gnu, .abi = .gnu,
}; };
const target = std.zig.resolveTargetQueryOrFatal(target_query); const target = std.zig.resolveTargetQueryOrFatal(io, target_query);
const is_native_abi = target_query.isNativeAbi(); const is_native_abi = target_query.isNativeAbi();
const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch |err| switch (err) { const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch |err| switch (err) {
error.OutOfMemory => |e| return e, error.OutOfMemory => |e| return e,
@ -647,7 +664,7 @@ const ErrorHandler = union(enum) {
pub fn emitCliDiagnostics( pub fn emitCliDiagnostics(
self: *ErrorHandler, self: *ErrorHandler,
allocator: std.mem.Allocator, allocator: Allocator,
args: []const []const u8, args: []const []const u8,
diagnostics: *cli.Diagnostics, diagnostics: *cli.Diagnostics,
) !void { ) !void {
@ -666,7 +683,7 @@ const ErrorHandler = union(enum) {
pub fn emitAroDiagnostics( pub fn emitAroDiagnostics(
self: *ErrorHandler, self: *ErrorHandler,
allocator: std.mem.Allocator, allocator: Allocator,
fail_msg: []const u8, fail_msg: []const u8,
comp: *aro.Compilation, comp: *aro.Compilation,
) !void { ) !void {
@ -692,7 +709,7 @@ const ErrorHandler = union(enum) {
pub fn emitDiagnostics( pub fn emitDiagnostics(
self: *ErrorHandler, self: *ErrorHandler,
allocator: std.mem.Allocator, allocator: Allocator,
cwd: std.fs.Dir, cwd: std.fs.Dir,
source: []const u8, source: []const u8,
diagnostics: *Diagnostics, diagnostics: *Diagnostics,
@ -713,7 +730,7 @@ const ErrorHandler = union(enum) {
pub fn emitMessage( pub fn emitMessage(
self: *ErrorHandler, self: *ErrorHandler,
allocator: std.mem.Allocator, allocator: Allocator,
msg_type: @import("utils.zig").ErrorMessageType, msg_type: @import("utils.zig").ErrorMessageType,
comptime format: []const u8, comptime format: []const u8,
args: anytype, args: anytype,
@ -738,7 +755,7 @@ const ErrorHandler = union(enum) {
}; };
fn cliDiagnosticsToErrorBundle( fn cliDiagnosticsToErrorBundle(
gpa: std.mem.Allocator, gpa: Allocator,
diagnostics: *cli.Diagnostics, diagnostics: *cli.Diagnostics,
) !ErrorBundle { ) !ErrorBundle {
@branchHint(.cold); @branchHint(.cold);
@ -783,7 +800,7 @@ fn cliDiagnosticsToErrorBundle(
} }
fn diagnosticsToErrorBundle( fn diagnosticsToErrorBundle(
gpa: std.mem.Allocator, gpa: Allocator,
source: []const u8, source: []const u8,
diagnostics: *Diagnostics, diagnostics: *Diagnostics,
mappings: SourceMappings, mappings: SourceMappings,
@ -870,7 +887,7 @@ fn diagnosticsToErrorBundle(
return try bundle.toOwnedBundle(""); return try bundle.toOwnedBundle("");
} }
fn errorStringToErrorBundle(allocator: std.mem.Allocator, comptime format: []const u8, args: anytype) !ErrorBundle { fn errorStringToErrorBundle(allocator: Allocator, comptime format: []const u8, args: anytype) !ErrorBundle {
@branchHint(.cold); @branchHint(.cold);
var bundle: ErrorBundle.Wip = undefined; var bundle: ErrorBundle.Wip = undefined;
try bundle.init(allocator); try bundle.init(allocator);

View file

@ -26,7 +26,11 @@ pub const UncheckedSliceWriter = struct {
/// Cross-platform 'std.fs.Dir.openFile' wrapper that will always return IsDir if /// Cross-platform 'std.fs.Dir.openFile' wrapper that will always return IsDir if
/// a directory is attempted to be opened. /// a directory is attempted to be opened.
/// TODO: Remove once https://github.com/ziglang/zig/issues/5732 is addressed. /// TODO: Remove once https://github.com/ziglang/zig/issues/5732 is addressed.
pub fn openFileNotDir(cwd: std.fs.Dir, path: []const u8, flags: std.fs.File.OpenFlags) std.fs.File.OpenError!std.fs.File { pub fn openFileNotDir(
cwd: std.fs.Dir,
path: []const u8,
flags: std.fs.File.OpenFlags,
) (std.fs.File.OpenError || std.fs.File.StatError)!std.fs.File {
const file = try cwd.openFile(path, flags); const file = try cwd.openFile(path, flags);
errdefer file.close(); errdefer file.close();
// https://github.com/ziglang/zig/issues/5732 // https://github.com/ziglang/zig/issues/5732