compiler: update a bunch of format strings
parent d09b99d043
commit 30c2921eb8
57 changed files with 432 additions and 514 deletions
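The hunks below repeat a handful of mechanical substitutions: integer arguments move from the catch-all `{}` to an explicit `{d}`, enum tag names and byte slices to `{s}`, and values wrapped in a `std.fmt.Formatter` (the many `.fmt(...)` calls) to the new `{f}` specifier, while several `format` methods are updated to take a `*std.io.Writer`. A minimal sketch of the specifier side of the change follows; it uses made-up values rather than code from this commit, and assumes a post-rework `std.fmt` where explicit specifiers are preferred:

const std = @import("std");

pub fn main() void {
    const inst_index: u32 = 1323;
    const tag: enum { dso_local, dllexport } = .dso_local;

    // Before this kind of change, such calls leaned on the catch-all `{}`:
    //     std.debug.print("%{}: {}\n", .{ inst_index, tag });
    // After, the intent is spelled out: `{d}` for decimal integers and
    // `{s}` for the tag-name string; `{f}` (not shown here) is reserved for
    // values that carry their own `format` method / `std.fmt.Formatter`.
    std.debug.print("%{d}: {s}\n", .{ inst_index, @tagName(tag) });
}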
lib/compiler/aro/aro/Diagnostics.zig (vendored, 13 changes)

@@ -324,7 +324,8 @@ pub fn addExtra(
 pub fn render(comp: *Compilation, config: std.io.tty.Config) void {
     if (comp.diagnostics.list.items.len == 0) return;
-    var m = defaultMsgWriter(config);
+    var buffer: [1000]u8 = undefined;
+    var m = defaultMsgWriter(config, &buffer);
     defer m.deinit();
     renderMessages(comp, &m);
 }

@@ -525,12 +526,12 @@ fn tagKind(d: *Diagnostics, tag: Tag, langopts: LangOpts) Kind {
 }

 const MsgWriter = struct {
-    w: *std.fs.File.Writer,
+    writer: *std.io.Writer,
     config: std.io.tty.Config,

     fn init(config: std.io.tty.Config, buffer: []u8) MsgWriter {
         return .{
-            .w = std.debug.lockStderrWriter(buffer),
+            .writer = std.debug.lockStderrWriter(buffer),
             .config = config,
         };
     }

@@ -541,15 +542,15 @@ const MsgWriter = struct {
     }

     pub fn print(m: *MsgWriter, comptime fmt: []const u8, args: anytype) void {
-        m.w.interface.print(fmt, args) catch {};
+        m.writer.print(fmt, args) catch {};
     }

     fn write(m: *MsgWriter, msg: []const u8) void {
-        m.w.interface.writeAll(msg) catch {};
+        m.writer.writeAll(msg) catch {};
     }

     fn setColor(m: *MsgWriter, color: std.io.tty.Color) void {
-        m.config.setColor(m.w.interface, color) catch {};
+        m.config.setColor(m.writer, color) catch {};
     }

     fn location(m: *MsgWriter, path: []const u8, line: u32, col: u32) void {
lib/compiler/aro/aro/Value.zig (vendored, 3 changes)

@@ -961,7 +961,8 @@ pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w
 switch (key) {
     .null => return w.writeAll("nullptr_t"),
     .int => |repr| switch (repr) {
-        inline else => |x| return w.print("{fd}", .{x}),
+        inline .u64, .i64 => |x| return w.print("{d}", .{x}),
+        .big_int => |x| return w.print("{fd}", .{x}),
     },
     .float => |repr| switch (repr) {
         .f16 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000) / 1000}),
@@ -881,6 +881,10 @@ pub fn printValue(
         return;
     },
     .@"union" => |info| {
+        if (fmt.len == 1 and fmt[0] == 's') {
+            try w.writeAll(@tagName(value));
+            return;
+        }
         if (!is_any) {
             if (fmt.len != 0) invalidFmtError(fmt, value);
             return printValue(w, ANY, options, value, max_depth);
@@ -11305,13 +11305,7 @@ fn failWithStrLitError(
     offset: u32,
 ) InnerError {
     const raw_string = bytes[offset..];
-    return failOff(
-        astgen,
-        token,
-        @intCast(offset + err.offset()),
-        "{}",
-        .{err.fmt(raw_string)},
-    );
+    return failOff(astgen, token, @intCast(offset + err.offset()), "{f}", .{err.fmt(raw_string)});
 }

 fn failNode(
@@ -1155,8 +1155,11 @@ pub const Attribute = union(Kind) {
 const FormatData = struct {
     attribute_index: Index,
     builder: *const Builder,
-    mode: Mode,
-    const Mode = enum { default, quote, pound };
+    flags: Flags = .{},
+    const Flags = struct {
+        pound: bool = false,
+        quote: bool = false,
+    };
 };
 fn format(data: FormatData, w: *Writer) Writer.Error!void {
     const attribute = data.attribute_index.toAttribute(data.builder);

@@ -1262,11 +1265,12 @@ pub const Attribute = union(Kind) {
     try w.writeByte(')');
 },
 .alignstack => |alignment| {
-    try w.print(" {f}", .{attribute});
+    try w.print(" {s}", .{attribute});
     const alignment_bytes = alignment.toByteUnits() orelse return;
-    switch (data.mode) {
-        .pound => try w.print("({d})", .{alignment_bytes}),
-        else => try w.print("={d}", .{alignment_bytes}),
+    if (data.flags.pound) {
+        try w.print("={d}", .{alignment_bytes});
+    } else {
+        try w.print("({d})", .{alignment_bytes});
+    }
 },
 .allockind => |allockind| {

@@ -1313,7 +1317,7 @@ pub const Attribute = union(Kind) {
     vscale_range.min.toByteUnits().?,
     vscale_range.max.toByteUnits() orelse 0,
 }),
-.string => |string_attr| if (data.mode == .quote) {
+.string => |string_attr| if (data.flags.quote) {
     try w.print(" {f}", .{string_attr.kind.fmtQ(data.builder)});
     if (string_attr.value != .empty)
         try w.print("={f}", .{string_attr.value.fmtQ(data.builder)});

@@ -1595,16 +1599,18 @@ pub const Attributes = enum(u32) {
 const FormatData = struct {
     attributes: Attributes,
     builder: *const Builder,
+    flags: Flags = .{},
+    const Flags = Attribute.Index.FormatData.Flags;
 };
 fn format(data: FormatData, w: *Writer) Writer.Error!void {
     for (data.attributes.slice(data.builder)) |attribute_index| try Attribute.Index.format(.{
         .attribute_index = attribute_index,
         .builder = data.builder,
-        .mode = .default,
+        .flags = data.flags,
     }, w);
 }
-pub fn fmt(self: Attributes, builder: *const Builder) std.fmt.Formatter(FormatData, format) {
-    return .{ .data = .{ .attributes = self, .builder = builder } };
+pub fn fmt(self: Attributes, builder: *const Builder, flags: FormatData.Flags) std.fmt.Formatter(FormatData, format) {
+    return .{ .data = .{ .attributes = self, .builder = builder, .flags = flags } };
 }
 };

@@ -1808,7 +1814,8 @@ pub const Preemption = enum {
 dso_local,
 implicit_dso_local,

-pub fn format(self: Preemption, w: *Writer, comptime _: []const u8) Writer.Error!void {
+pub fn format(self: Preemption, w: *Writer, comptime f: []const u8) Writer.Error!void {
+    comptime assert(f.len == 0);
     if (self == .dso_local) try w.print(" {s}", .{@tagName(self)});
 }
 };

@@ -1826,8 +1833,8 @@ pub const Visibility = enum(u2) {
     };
 }

-pub fn format(self: Visibility, comptime format_string: []const u8, writer: *Writer) Writer.Error!void {
-    comptime assert(format_string.len == 0);
+pub fn format(self: Visibility, writer: *Writer, comptime f: []const u8) Writer.Error!void {
+    comptime assert(f.len == 0);
     if (self != .default) try writer.print(" {s}", .{@tagName(self)});
 }
 };

@@ -1837,7 +1844,8 @@ pub const DllStorageClass = enum(u2) {
 dllimport = 1,
 dllexport = 2,

-pub fn format(self: DllStorageClass, w: *Writer, comptime _: []const u8) Writer.Error!void {
+pub fn format(self: DllStorageClass, w: *Writer, comptime f: []const u8) Writer.Error!void {
+    comptime assert(f.len == 0);
     if (self != .default) try w.print(" {s}", .{@tagName(self)});
 }
 };

@@ -1863,7 +1871,8 @@ pub const UnnamedAddr = enum(u2) {
 unnamed_addr = 1,
 local_unnamed_addr = 2,

-pub fn format(self: UnnamedAddr, w: *Writer, comptime _: []const u8) Writer.Error!void {
+pub fn format(self: UnnamedAddr, w: *Writer, comptime f: []const u8) Writer.Error!void {
+    comptime assert(f.len == 0);
     if (self != .default) try w.print(" {s}", .{@tagName(self)});
 }
 };

@@ -1966,7 +1975,8 @@ pub const ExternallyInitialized = enum {
 default,
 externally_initialized,

-pub fn format(self: ExternallyInitialized, w: *Writer, comptime _: []const u8) Writer.Error!void {
+pub fn format(self: ExternallyInitialized, w: *Writer, comptime f: []const u8) Writer.Error!void {
+    comptime assert(f.len == 0);
     if (self != .default) try w.print(" {s}", .{@tagName(self)});
 }
 };

@@ -2064,7 +2074,8 @@ pub const CallConv = enum(u10) {

 pub const default = CallConv.ccc;

-pub fn format(self: CallConv, w: *Writer, comptime _: []const u8) Writer.Error!void {
+pub fn format(self: CallConv, w: *Writer, comptime f: []const u8) Writer.Error!void {
+    comptime assert(f.len == 0);
     switch (self) {
         default => {},
         .fastcc,

@@ -7958,7 +7969,8 @@ pub const Metadata = enum(u32) {
 AllCallsDescribed: bool = false,
 Unused: u2 = 0,

-pub fn format(self: DIFlags, w: *Writer, comptime _: []const u8) Writer.Error!void {
+pub fn format(self: DIFlags, w: *Writer, comptime f: []const u8) Writer.Error!void {
+    comptime assert(f.len == 0);
     var need_pipe = false;
     inline for (@typeInfo(DIFlags).@"struct".fields) |field| {
         switch (@typeInfo(field.type)) {

@@ -8015,7 +8027,8 @@ pub const Metadata = enum(u32) {
 ObjCDirect: bool = false,
 Unused: u20 = 0,

-pub fn format(self: DISPFlags, w: *Writer, comptime _: []const u8) Writer.Error!void {
+pub fn format(self: DISPFlags, w: *Writer, comptime f: []const u8) Writer.Error!void {
+    comptime assert(f.len == 0);
     var need_pipe = false;
     inline for (@typeInfo(DISPFlags).@"struct".fields) |field| {
         switch (@typeInfo(field.type)) {

@@ -9469,8 +9482,9 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
 \\
 , .{
     variable.global.fmt(self),
-    Linkage.fmtOptional(if (global.linkage == .external and
-        variable.init != .no_init) null else global.linkage),
+    Linkage.fmtOptional(
+        if (global.linkage == .external and variable.init != .no_init) null else global.linkage,
+    ),
     global.preemption,
     global.visibility,
     global.dll_storage_class,

@@ -9525,7 +9539,7 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
 if (function_attributes != .none) try w.print(
     \\; Function Attrs:{f}
     \\
-, .{function_attributes.fmt(self)});
+, .{function_attributes.fmt(self, .{})});
 try w.print(
     \\{s}{f}{f}{f}{f}{f}{f} {f} {f}(
 , .{

@@ -9535,7 +9549,7 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
     global.visibility,
     global.dll_storage_class,
     function.call_conv,
-    function.attributes.ret(self).fmt(self),
+    function.attributes.ret(self).fmt(self, .{}),
     global.type.functionReturn(self).fmt(self, .percent),
     function.global.fmt(self),
 });

@@ -9545,7 +9559,7 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
     \\{f}{f}
 , .{
     global.type.functionParameters(self)[arg].fmt(self, .percent),
-    function.attributes.param(arg, self).fmt(self),
+    function.attributes.param(arg, self).fmt(self, .{}),
 });
 if (function.instructions.len > 0)
     try w.print(" {f}", .{function.arg(@intCast(arg)).fmt(function_index, self, .{})})

@@ -9790,7 +9804,7 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
 try w.print("{s}{f}{f}{f} {f} {f}(", .{
     @tagName(tag),
     extra.data.info.call_conv,
-    extra.data.attributes.ret(self).fmt(self),
+    extra.data.attributes.ret(self).fmt(self, .{}),
     extra.data.callee.typeOf(function_index, self).pointerAddrSpace(self),
     switch (extra.data.ty.functionKind(self)) {
         .normal => ret_ty,

@@ -9804,7 +9818,7 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
 defer metadata_formatter.need_comma = undefined;
 try w.print("{f}{f}{f}", .{
     arg.typeOf(function_index, self).fmt(self, .percent),
-    extra.data.attributes.param(arg_index, self).fmt(self),
+    extra.data.attributes.param(arg_index, self).fmt(self, .{}),
     try metadata_formatter.fmtLocal(" ", arg, function_index),
 });
 }

@@ -10074,9 +10088,9 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
 if (need_newline) try w.writeByte('\n') else need_newline = true;
 for (0.., attribute_groups.keys()) |attribute_group_index, attribute_group|
     try w.print(
-        \\attributes #{d} = {{{f#"} }}
+        \\attributes #{d} = {{{f} }}
         \\
-    , .{ attribute_group_index, attribute_group.fmt(self) });
+    , .{ attribute_group_index, attribute_group.fmt(self, .{ .pound = true, .quote = true }) });
 }

 if (self.metadata_named.count() > 0) {
@@ -1323,7 +1323,7 @@ fn analyzeOperands(
 const mask = @as(Bpi, 1) << @as(OperandInt, @intCast(i));

 if ((try data.live_set.fetchPut(gpa, operand, {})) == null) {
-    log.debug("[{}] %{f}: added %{d} to live set (operand dies here)", .{ pass, @intFromEnum(inst), operand });
+    log.debug("[{}] %{d}: added %{d} to live set (operand dies here)", .{ pass, @intFromEnum(inst), operand });
     tomb_bits |= mask;
 }
 }

@@ -2036,7 +2036,8 @@ fn fmtInstSet(set: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void)) FmtIns
 const FmtInstSet = struct {
     set: *const std.AutoHashMapUnmanaged(Air.Inst.Index, void),

-    pub fn format(val: FmtInstSet, comptime _: []const u8, _: std.fmt.FormatOptions, w: anytype) !void {
+    pub fn format(val: FmtInstSet, w: *std.io.Writer, comptime f: []const u8) std.io.Writer.Error!void {
+        comptime assert(f.len == 0);
         if (val.set.count() == 0) {
             try w.writeAll("[no instructions]");
             return;

@@ -2056,7 +2057,8 @@ fn fmtInstList(list: []const Air.Inst.Index) FmtInstList {
 const FmtInstList = struct {
     list: []const Air.Inst.Index,

-    pub fn format(val: FmtInstList, comptime _: []const u8, _: std.fmt.FormatOptions, w: anytype) !void {
+    pub fn format(val: FmtInstList, w: *std.io.Writer, comptime f: []const u8) std.io.Writer.Error!void {
+        comptime assert(f.len == 0);
         if (val.list.len == 0) {
             try w.writeAll("[no instructions]");
             return;
@@ -73,7 +73,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 .trap, .unreach => {
     try self.verifyInstOperands(inst, .{ .none, .none, .none });
     // This instruction terminates the function, so everything should be dead
-    if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
+    if (self.live.count() > 0) return invalid("%{f}: instructions still alive", .{inst});
 },

 // unary

@@ -166,7 +166,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 const un_op = data[@intFromEnum(inst)].un_op;
 try self.verifyInstOperands(inst, .{ un_op, .none, .none });
 // This instruction terminates the function, so everything should be dead
-if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
+if (self.live.count() > 0) return invalid("%{f}: instructions still alive", .{inst});
 },
 .dbg_var_ptr,
 .dbg_var_val,

@@ -450,7 +450,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 .repeat => {
     const repeat = data[@intFromEnum(inst)].repeat;
     const expected_live = self.loops.get(repeat.loop_inst) orelse
-        return invalid("%{}: loop %{} not in scope", .{ @intFromEnum(inst), @intFromEnum(repeat.loop_inst) });
+        return invalid("%{d}: loop %{d} not in scope", .{ @intFromEnum(inst), @intFromEnum(repeat.loop_inst) });

     try self.verifyMatchingLiveness(repeat.loop_inst, expected_live);
 },

@@ -460,7 +460,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 try self.verifyOperand(inst, br.operand, self.liveness.operandDies(inst, 0));

 const expected_live = self.loops.get(br.block_inst) orelse
-    return invalid("%{}: loop %{} not in scope", .{ @intFromEnum(inst), @intFromEnum(br.block_inst) });
+    return invalid("%{d}: loop %{d} not in scope", .{ @intFromEnum(inst), @intFromEnum(br.block_inst) });

 try self.verifyMatchingLiveness(br.block_inst, expected_live);
 },

@@ -511,7 +511,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {

 // The same stuff should be alive after the loop as before it.
 const gop = try self.loops.getOrPut(self.gpa, inst);
-if (gop.found_existing) return invalid("%{}: loop already exists", .{@intFromEnum(inst)});
+if (gop.found_existing) return invalid("%{d}: loop already exists", .{@intFromEnum(inst)});
 defer {
     var live = self.loops.fetchRemove(inst).?;
     live.value.deinit(self.gpa);

@@ -560,7 +560,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 // after the loop as before it.
 {
     const gop = try self.loops.getOrPut(self.gpa, inst);
-    if (gop.found_existing) return invalid("%{}: loop already exists", .{@intFromEnum(inst)});
+    if (gop.found_existing) return invalid("%{d}: loop already exists", .{@intFromEnum(inst)});
     gop.value_ptr.* = self.live.move();
 }
 defer {

@@ -601,9 +601,11 @@ fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies
     return;
 };
 if (dies) {
-    if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
+    if (!self.live.remove(operand)) return invalid("%{f}: dead operand %{f} reused and killed again", .{
+        inst, operand,
+    });
 } else {
-    if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
+    if (!self.live.contains(operand)) return invalid("%{f}: dead operand %{f} reused", .{ inst, operand });
 }
 }

@@ -628,9 +630,9 @@ fn verifyInst(self: *Verify, inst: Air.Inst.Index) Error!void {
 }

 fn verifyMatchingLiveness(self: *Verify, block: Air.Inst.Index, live: LiveMap) Error!void {
-    if (self.live.count() != live.count()) return invalid("%{}: different deaths across branches", .{block});
+    if (self.live.count() != live.count()) return invalid("%{f}: different deaths across branches", .{block});
     var live_it = self.live.keyIterator();
-    while (live_it.next()) |live_inst| if (!live.contains(live_inst.*)) return invalid("%{}: different deaths across branches", .{block});
+    while (live_it.next()) |live_inst| if (!live.contains(live_inst.*)) return invalid("%{f}: different deaths across branches", .{block});
 }

 fn invalid(comptime fmt: []const u8, args: anytype) error{LivenessInvalid} {
@@ -518,7 +518,7 @@ const Writer = struct {
 if (mask_idx > 0) try s.writeAll(", ");
 switch (mask_elem.unwrap()) {
     .elem => |idx| try s.print("elem {d}", .{idx}),
-    .value => |val| try s.print("val {}", .{Value.fromInterned(val).fmtValue(w.pt)}),
+    .value => |val| try s.print("val {f}", .{Value.fromInterned(val).fmtValue(w.pt)}),
 }
 }
 try s.writeByte(']');

@@ -590,7 +590,7 @@ const Writer = struct {
 const ip = &w.pt.zcu.intern_pool;
 const ty_nav = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav;
 try w.writeType(s, .fromInterned(ty_nav.ty));
-try s.print(", '{}'", .{ip.getNav(ty_nav.nav).fqn.fmt(ip)});
+try s.print(", '{f}'", .{ip.getNav(ty_nav.nav).fqn.fmt(ip)});
 }

 fn writeAtomicLoad(w: *Writer, s: *std.io.Writer, inst: Air.Inst.Index) Error!void {
@@ -729,10 +729,10 @@ pub const Directories = struct {
 };

 if (std.mem.eql(u8, zig_lib.path orelse "", global_cache.path orelse "")) {
-    fatal("zig lib directory '{}' cannot be equal to global cache directory '{}'", .{ zig_lib, global_cache });
+    fatal("zig lib directory '{f}' cannot be equal to global cache directory '{f}'", .{ zig_lib, global_cache });
 }
 if (std.mem.eql(u8, zig_lib.path orelse "", local_cache.path orelse "")) {
-    fatal("zig lib directory '{}' cannot be equal to local cache directory '{}'", .{ zig_lib, local_cache });
+    fatal("zig lib directory '{f}' cannot be equal to local cache directory '{f}'", .{ zig_lib, local_cache });
 }

 return .{

@@ -2698,7 +2698,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
 const prefix = man.cache.prefixes()[pp.prefix];
 return comp.setMiscFailure(
     .check_whole_cache,
-    "failed to check cache: '{}{s}' {s} {s}",
+    "failed to check cache: '{f}{s}' {s} {s}",
     .{ prefix, pp.sub_path, @tagName(man.diagnostic), @errorName(op.err) },
 );
 },

@@ -2915,7 +2915,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
 renameTmpIntoCache(comp.dirs.local_cache, tmp_dir_sub_path, o_sub_path) catch |err| {
     return comp.setMiscFailure(
         .rename_results,
-        "failed to rename compilation results ('{}{s}') into local cache ('{}{s}'): {s}",
+        "failed to rename compilation results ('{f}{s}') into local cache ('{f}{s}'): {s}",
         .{
             comp.dirs.local_cache, tmp_dir_sub_path,
             comp.dirs.local_cache, o_sub_path,

@@ -2982,7 +2982,7 @@ pub fn appendFileSystemInput(comp: *Compilation, path: Compilation.Path) Allocat
     break @intCast(i);
 }
 } else std.debug.panic(
-    "missing prefix directory '{s}' ('{}') for '{s}'",
+    "missing prefix directory '{s}' ('{f}') for '{s}'",
     .{ @tagName(path.root), want_prefix_dir, path.sub_path },
 );

@@ -3321,7 +3321,7 @@ fn emitFromCObject(
     emit_path.root_dir.handle,
     emit_path.sub_path,
     .{},
-) catch |err| log.err("unable to copy '{}' to '{}': {s}", .{
+) catch |err| log.err("unable to copy '{f}' to '{f}': {s}", .{
     src_path,
     emit_path,
     @errorName(err),

@@ -3669,7 +3669,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
     .illegal_zig_import => try bundle.addString("this compiler implementation does not allow importing files from this directory"),
 },
 .src_loc = try bundle.addSourceLocation(.{
-    .src_path = try bundle.printString("{}", .{file.path.fmt(comp)}),
+    .src_path = try bundle.printString("{f}", .{file.path.fmt(comp)}),
     .span_start = start,
     .span_main = start,
     .span_end = @intCast(end),

@@ -3716,7 +3716,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
 assert(!is_retryable);
 // AstGen/ZoirGen succeeded with errors. Note that this may include AST errors.
 _ = try file.getTree(zcu); // Tree must be loaded.
-const path = try std.fmt.allocPrint(gpa, "{}", .{file.path.fmt(comp)});
+const path = try std.fmt.allocPrint(gpa, "{f}", .{file.path.fmt(comp)});
 defer gpa.free(path);
 if (file.zir != null) {
     try bundle.addZirErrorMessages(file.zir.?, file.tree.?, file.source.?, path);

@@ -3771,9 +3771,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
     if (!refs.contains(anal_unit)) continue;
 }

-std.log.scoped(.zcu).debug("analysis error '{s}' reported from unit '{}'", .{
-    error_msg.msg,
-    zcu.fmtAnalUnit(anal_unit),
+std.log.scoped(.zcu).debug("analysis error '{s}' reported from unit '{f}'", .{
+    error_msg.msg, zcu.fmtAnalUnit(anal_unit),
 });

 try addModuleErrorMsg(zcu, &bundle, error_msg.*, added_any_analysis_error);

@@ -3933,9 +3932,9 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
 // This is a compiler bug.
 const stderr = std.fs.File.stderr().deprecatedWriter();
 try stderr.writeAll("referenced transitive analysis errors, but none actually emitted\n");
-try stderr.print("{} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)});
+try stderr.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)});
 while (ref) |r| {
-    try stderr.print("referenced by: {}{s}\n", .{
+    try stderr.print("referenced by: {f}{s}\n", .{
         zcu.fmtAnalUnit(r.referencer),
         if (zcu.transitive_failed_analysis.contains(r.referencer)) " [transitive failure]" else "",
     });

@@ -4034,7 +4033,7 @@ pub fn addModuleErrorMsg(
 const err_src_loc = module_err_msg.src_loc.upgrade(zcu);
 const err_source = err_src_loc.file_scope.getSource(zcu) catch |err| {
     try eb.addRootErrorMessage(.{
-        .msg = try eb.printString("unable to load '{}': {s}", .{
+        .msg = try eb.printString("unable to load '{f}': {s}", .{
             err_src_loc.file_scope.path.fmt(zcu.comp), @errorName(err),
         }),
     });

@@ -4097,7 +4096,7 @@ pub fn addModuleErrorMsg(
 }

 const src_loc = try eb.addSourceLocation(.{
-    .src_path = try eb.printString("{}", .{err_src_loc.file_scope.path.fmt(zcu.comp)}),
+    .src_path = try eb.printString("{f}", .{err_src_loc.file_scope.path.fmt(zcu.comp)}),
     .span_start = err_span.start,
     .span_main = err_span.main,
     .span_end = err_span.end,

@@ -4129,7 +4128,7 @@ pub fn addModuleErrorMsg(
 const gop = try notes.getOrPutContext(gpa, .{
     .msg = try eb.addString(module_note.msg),
     .src_loc = try eb.addSourceLocation(.{
-        .src_path = try eb.printString("{}", .{note_src_loc.file_scope.path.fmt(zcu.comp)}),
+        .src_path = try eb.printString("{f}", .{note_src_loc.file_scope.path.fmt(zcu.comp)}),
         .span_start = span.start,
         .span_main = span.main,
         .span_end = span.end,

@@ -4174,7 +4173,7 @@ fn addReferenceTraceFrame(
 try ref_traces.append(gpa, .{
     .decl_name = try eb.printString("{s}{s}", .{ name, if (inlined) " [inlined]" else "" }),
     .src_loc = try eb.addSourceLocation(.{
-        .src_path = try eb.printString("{}", .{src.file_scope.path.fmt(zcu.comp)}),
+        .src_path = try eb.printString("{f}", .{src.file_scope.path.fmt(zcu.comp)}),
         .span_start = span.start,
         .span_main = span.main,
         .span_end = span.end,

@@ -4835,7 +4834,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
 var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| {
     return comp.lockAndSetMiscFailure(
         .docs_copy,
-        "unable to create output directory '{}': {s}",
+        "unable to create output directory '{f}': {s}",
         .{ docs_path, @errorName(err) },
     );
 };

@@ -4855,7 +4854,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
 var tar_file = out_dir.createFile("sources.tar", .{}) catch |err| {
     return comp.lockAndSetMiscFailure(
         .docs_copy,
-        "unable to create '{}/sources.tar': {s}",
+        "unable to create '{f}/sources.tar': {s}",
         .{ docs_path, @errorName(err) },
     );
 };

@@ -4884,7 +4883,7 @@ fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8,
 const root_dir, const sub_path = root.openInfo(comp.dirs);
 break :d root_dir.openDir(sub_path, .{ .iterate = true });
 } catch |err| {
-    return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{}': {s}", .{
+    return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{f}': {s}", .{
         root.fmt(comp), @errorName(err),
     });
 };

@@ -4906,13 +4905,13 @@ fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8,
     else => continue,
 }
 var file = mod_dir.openFile(entry.path, .{}) catch |err| {
-    return comp.lockAndSetMiscFailure(.docs_copy, "unable to open '{}{s}': {s}", .{
+    return comp.lockAndSetMiscFailure(.docs_copy, "unable to open '{f}{s}': {s}", .{
         root.fmt(comp), entry.path, @errorName(err),
     });
 };
 defer file.close();
 archiver.writeFile(entry.path, file) catch |err| {
-    return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive '{}{s}': {s}", .{
+    return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive '{f}{s}': {s}", .{
         root.fmt(comp), entry.path, @errorName(err),
     });
 };

@@ -5042,7 +5041,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
 var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| {
     return comp.lockAndSetMiscFailure(
         .docs_copy,
-        "unable to create output directory '{}': {s}",
+        "unable to create output directory '{f}': {s}",
         .{ docs_path, @errorName(err) },
     );
 };

@@ -5054,10 +5053,8 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
     "main.wasm",
     .{},
 ) catch |err| {
-    return comp.lockAndSetMiscFailure(.docs_copy, "unable to copy '{}' to '{}': {s}", .{
-        crt_file.full_object_path,
-        docs_path,
-        @errorName(err),
+    return comp.lockAndSetMiscFailure(.docs_copy, "unable to copy '{f}' to '{f}': {s}", .{
+        crt_file.full_object_path, docs_path, @errorName(err),
     });
 };
 }

@@ -5130,7 +5127,7 @@ fn workerUpdateBuiltinFile(comp: *Compilation, file: *Zcu.File) void {
 defer comp.mutex.unlock();
 comp.setMiscFailure(
     .write_builtin_zig,
-    "unable to write '{}': {s}",
+    "unable to write '{f}': {s}",
     .{ file.path.fmt(comp), @errorName(err) },
 );
 };

@@ -6033,7 +6030,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
 // 24 is RT_MANIFEST
 const resource_type = 24;

-const input = try std.fmt.allocPrint(arena, "{} {} \"{f}\"", .{
+const input = try std.fmt.allocPrint(arena, "{d} {d} \"{f}\"", .{
     resource_id, resource_type, fmtRcEscape(src_path),
 });
@@ -142,8 +142,8 @@ fn handleCommand(zcu: *Zcu, output: *std.ArrayListUnmanaged(u8), cmd_str: []cons
 const create_gen = zcu.incremental_debug_state.navs.get(nav_index) orelse return w.writeAll("unknown nav index");
 const nav = ip.getNav(nav_index);
 try w.print(
-    \\name: '{}'
-    \\fqn: '{}'
+    \\name: '{f}'
+    \\fqn: '{f}'
     \\status: {s}
     \\created on generation: {d}
     \\

@@ -260,7 +260,7 @@ fn handleCommand(zcu: *Zcu, output: *std.ArrayListUnmanaged(u8), cmd_str: []cons
 const ip_index: InternPool.Index = @enumFromInt(parseIndex(arg_str) orelse return w.writeAll("malformed ip index"));
 const create_gen = zcu.incremental_debug_state.types.get(ip_index) orelse return w.writeAll("unknown type");
 try w.print(
-    \\name: '{}'
+    \\name: '{f}'
     \\created on generation: {d}
     \\
 , .{

@@ -365,7 +365,7 @@ fn printType(ty: Type, zcu: *const Zcu, w: anytype) !void {
 .union_type,
 .enum_type,
 .opaque_type,
-=> try w.print("{}[{d}]", .{ ty.containerTypeName(ip).fmt(ip), @intFromEnum(ty.toIntern()) }),
+=> try w.print("{f}[{d}]", .{ ty.containerTypeName(ip).fmt(ip), @intFromEnum(ty.toIntern()) }),

 else => unreachable,
 }
@@ -11406,7 +11406,7 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
 var it = instances.iterator();
 while (it.next()) |entry| {
     const generic_fn_owner_nav = ip.getNav(ip.funcDeclInfo(entry.key_ptr.*).owner_nav);
-    try stderr_bw.print("{f} ({}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
+    try stderr_bw.print("{f} ({f}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
     for (entry.value_ptr.items) |index| {
         const unwrapped_index = index.unwrap(ip);
         const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip));
@@ -369,7 +369,7 @@ pub fn run(f: *Fetch) RunError!void {
 if (!std.mem.startsWith(u8, pkg_root.sub_path, expected_prefix)) {
     return f.fail(
         f.location_tok,
-        try eb.printString("dependency path outside project: '{}'", .{pkg_root}),
+        try eb.printString("dependency path outside project: '{f}'", .{pkg_root}),
     );
 }
 }

@@ -436,14 +436,14 @@ pub fn run(f: *Fetch) RunError!void {
 }
 if (f.job_queue.read_only) return f.fail(
     f.name_tok,
-    try eb.printString("package not found at '{}{s}'", .{
+    try eb.printString("package not found at '{f}{s}'", .{
         cache_root, pkg_sub_path,
     }),
 );
 },
 else => |e| {
     try eb.addRootErrorMessage(.{
-        .msg = try eb.printString("unable to open global package cache directory '{}{s}': {s}", .{
+        .msg = try eb.printString("unable to open global package cache directory '{f}{s}': {s}", .{
             cache_root, pkg_sub_path, @errorName(e),
         }),
     });

@@ -620,7 +620,7 @@ pub fn computedPackageHash(f: *const Fetch) Package.Hash {
 const saturated_size = std.math.cast(u32, f.computed_hash.total_size) orelse std.math.maxInt(u32);
 if (f.manifest) |man| {
     var version_buffer: [32]u8 = undefined;
-    const version: []const u8 = std.fmt.bufPrint(&version_buffer, "{}", .{man.version}) catch &version_buffer;
+    const version: []const u8 = std.fmt.bufPrint(&version_buffer, "{f}", .{man.version}) catch &version_buffer;
     return .init(f.computed_hash.digest, man.name, version, man.id, saturated_size);
 }
 // In the future build.zig.zon fields will be added to allow overriding these values

@@ -638,7 +638,7 @@ fn checkBuildFileExistence(f: *Fetch) RunError!void {
 error.FileNotFound => {},
 else => |e| {
     try eb.addRootErrorMessage(.{
-        .msg = try eb.printString("unable to access '{}{s}': {s}", .{
+        .msg = try eb.printString("unable to access '{f}{s}': {s}", .{
             f.package_root, Package.build_zig_basename, @errorName(e),
         }),
     });

@@ -663,7 +663,7 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
 else => |e| {
     const file_path = try pkg_root.join(arena, Manifest.basename);
     try eb.addRootErrorMessage(.{
-        .msg = try eb.printString("unable to load package manifest '{}': {s}", .{
+        .msg = try eb.printString("unable to load package manifest '{f}': {s}", .{
             file_path, @errorName(e),
         }),
     });

@@ -675,7 +675,7 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
 ast.* = try std.zig.Ast.parse(arena, manifest_bytes, .zon);

 if (ast.errors.len > 0) {
-    const file_path = try std.fmt.allocPrint(arena, "{}" ++ fs.path.sep_str ++ Manifest.basename, .{pkg_root});
+    const file_path = try std.fmt.allocPrint(arena, "{f}" ++ fs.path.sep_str ++ Manifest.basename, .{pkg_root});
     try std.zig.putAstErrorsIntoBundle(arena, ast.*, file_path, eb);
     return error.FetchFailed;
 }

@@ -688,7 +688,7 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
 const manifest = &f.manifest.?;

 if (manifest.errors.len > 0) {
-    const src_path = try eb.printString("{}" ++ fs.path.sep_str ++ "{s}", .{ pkg_root, Manifest.basename });
+    const src_path = try eb.printString("{f}" ++ fs.path.sep_str ++ "{s}", .{ pkg_root, Manifest.basename });
     try manifest.copyErrorsIntoBundle(ast.*, src_path, eb);
     return error.FetchFailed;
 }

@@ -843,7 +843,7 @@ fn srcLoc(
 const ast = f.parent_manifest_ast orelse return .none;
 const eb = &f.error_bundle;
 const start_loc = ast.tokenLocation(0, tok);
-const src_path = try eb.printString("{}" ++ fs.path.sep_str ++ Manifest.basename, .{f.parent_package_root});
+const src_path = try eb.printString("{f}" ++ fs.path.sep_str ++ Manifest.basename, .{f.parent_package_root});
 const msg_off = 0;
 return eb.addSourceLocation(.{
     .src_path = src_path,

@@ -977,7 +977,7 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
 if (ascii.eqlIgnoreCase(uri.scheme, "file")) {
     const path = try uri.path.toRawMaybeAlloc(arena);
     return .{ .file = f.parent_package_root.openFile(path, .{}) catch |err| {
-        return f.fail(f.location_tok, try eb.printString("unable to open '{}{s}': {s}", .{
+        return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {s}", .{
             f.parent_package_root, path, @errorName(err),
         }));
     } };

@@ -1524,7 +1524,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute

 while (walker.next() catch |err| {
     try eb.addRootErrorMessage(.{ .msg = try eb.printString(
-        "unable to walk temporary directory '{}': {s}",
+        "unable to walk temporary directory '{f}': {s}",
         .{ pkg_path, @errorName(err) },
     ) });
     return error.FetchFailed;
src/Sema.zig (63 changes)

@@ -1144,7 +1144,7 @@ fn analyzeBodyInner(

 // The hashmap lookup in here is a little expensive, and LLVM fails to optimize it away.
 if (build_options.enable_logging) {
-    std.log.scoped(.sema_zir).debug("sema ZIR {} %{d}", .{ path: {
+    std.log.scoped(.sema_zir).debug("sema ZIR {f} %{d}", .{ path: {
         const file_index = block.src_base_inst.resolveFile(&zcu.intern_pool);
         const file = zcu.fileByIndex(file_index);
         break :path file.path.fmt(zcu.comp);

@@ -2763,7 +2763,7 @@ fn zirTupleDecl(
 const coerced_field_init = try sema.coerce(block, field_type, uncoerced_field_init, init_src);
 const field_init_val = try sema.resolveConstDefinedValue(block, init_src, coerced_field_init, .{ .simple = .tuple_field_default_value });
 if (field_init_val.canMutateComptimeVarState(zcu)) {
-    const field_name = try zcu.intern_pool.getOrPutStringFmt(gpa, pt.tid, "{}", .{field_index}, .no_embedded_nulls);
+    const field_name = try zcu.intern_pool.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
     return sema.failWithContainsReferenceToComptimeVar(block, init_src, field_name, "field default value", field_init_val);
 }
 break :init field_init_val.toIntern();

@@ -5574,9 +5574,8 @@ fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp

 if (operand_ty.arrayLen(zcu) != extra.expect_len) {
     return sema.failWithOwnedErrorMsg(block, msg: {
-        const msg = try sema.errMsg(src, "expected {} elements for destructure, found {}", .{
-            extra.expect_len,
-            operand_ty.arrayLen(zcu),
+        const msg = try sema.errMsg(src, "expected {d} elements for destructure, found {d}", .{
+            extra.expect_len, operand_ty.arrayLen(zcu),
         });
         errdefer msg.destroy(sema.gpa);
         try sema.errNote(destructure_src, msg, "result destructured here", .{});

@@ -14078,7 +14077,7 @@ fn zirShl(
     });
 }
 } else if (scalar_rhs_ty.isSignedInt(zcu)) {
-    return sema.fail(block, rhs_src, "shift by signed type '{}'", .{rhs_ty.fmt(pt)});
+    return sema.fail(block, rhs_src, "shift by signed type '{f}'", .{rhs_ty.fmt(pt)});
 }

 const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {

@@ -14383,7 +14382,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 const scalar_tag = scalar_ty.zigTypeTag(zcu);

 if (scalar_tag != .int and scalar_tag != .bool)
-    return sema.fail(block, operand_src, "bitwise not operation on type '{}'", .{operand_ty.fmt(pt)});
+    return sema.fail(block, operand_src, "bitwise not operation on type '{f}'", .{operand_ty.fmt(pt)});

 return analyzeBitNot(sema, block, operand, src);
 }

@@ -16999,7 +16998,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
 const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu).?;
 const tree = file.getTree(zcu) catch |err| {
     // In this case we emit a warning + a less precise source location.
-    log.warn("unable to load {}: {s}", .{
+    log.warn("unable to load {f}: {s}", .{
         file.path.fmt(zcu.comp), @errorName(err),
     });
     break :name null;

@@ -17027,7 +17026,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
 const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu).?;
 const tree = file.getTree(zcu) catch |err| {
     // In this case we emit a warning + a less precise source location.
-    log.warn("unable to load {}: {s}", .{
+    log.warn("unable to load {f}: {s}", .{
         file.path.fmt(zcu.comp), @errorName(err),
     });
     break :name null;

@@ -18268,7 +18267,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 const uncasted_ty = sema.typeOf(uncasted_operand);
 if (uncasted_ty.isVector(zcu)) {
     if (uncasted_ty.scalarType(zcu).zigTypeTag(zcu) != .bool) {
-        return sema.fail(block, operand_src, "boolean not operation on type '{}'", .{
+        return sema.fail(block, operand_src, "boolean not operation on type '{f}'", .{
             uncasted_ty.fmt(pt),
         });
     }

@@ -19299,13 +19298,13 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air

 if (host_size != 0) {
     if (bit_offset >= host_size * 8) {
-        return sema.fail(block, bitoffset_src, "packed type '{f}' at bit offset {} starts {} bits after the end of a {} byte host integer", .{
+        return sema.fail(block, bitoffset_src, "packed type '{f}' at bit offset {d} starts {d} bits after the end of a {d} byte host integer", .{
            elem_ty.fmt(pt), bit_offset, bit_offset - host_size * 8, host_size,
        });
    }
    const elem_bit_size = try elem_ty.bitSizeSema(pt);
    if (elem_bit_size > host_size * 8 - bit_offset) {
-        return sema.fail(block, bitoffset_src, "packed type '{f}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{
+        return sema.fail(block, bitoffset_src, "packed type '{f}' at bit offset {d} ends {d} bits after the end of a {d} byte host integer", .{
            elem_ty.fmt(pt), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size,
        });
    }

@@ -20466,7 +20465,7 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 const is_vector = operand_ty.zigTypeTag(zcu) == .vector;
 const operand_scalar_ty = operand_ty.scalarType(zcu);
 if (operand_scalar_ty.toIntern() != .bool_type) {
-    return sema.fail(block, src, "expected 'bool', found '{}'", .{operand_scalar_ty.zigTypeTag(zcu)});
+    return sema.fail(block, src, "expected 'bool', found '{s}'", .{operand_scalar_ty.zigTypeTag(zcu)});
 }
 const len = if (is_vector) operand_ty.vectorLen(zcu) else undefined;
 const dest_ty: Type = if (is_vector) try pt.vectorType(.{ .child = .u1_type, .len = len }) else .u1;

@@ -20749,7 +20748,7 @@ fn zirReify(
 64 => .f64,
 80 => .f80,
 128 => .f128,
-else => return sema.fail(block, src, "{}-bit float unsupported", .{float.bits}),
+else => return sema.fail(block, src, "{d}-bit float unsupported", .{float.bits}),
 };
 return Air.internedToRef(ty.toIntern());
 },

@@ -21640,7 +21639,7 @@ fn reifyTuple(
 return sema.fail(
     block,
     src,
-    "tuple field name '{}' does not match field index {}",
+    "tuple field name '{d}' does not match field index {d}",
     .{ field_name_index, field_idx },
 );
 }

@@ -22658,7 +22657,7 @@ fn ptrCastFull(

 if (src_info.packed_offset.host_size != dest_info.packed_offset.host_size) {
     return sema.failWithOwnedErrorMsg(block, msg: {
-        const msg = try sema.errMsg(src, "pointer host size '{}' cannot coerce into pointer host size '{}'", .{
+        const msg = try sema.errMsg(src, "pointer host size '{d}' cannot coerce into pointer host size '{d}'", .{
             src_info.packed_offset.host_size,
             dest_info.packed_offset.host_size,
         });

@@ -22670,7 +22669,7 @@ fn ptrCastFull(

 if (src_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset) {
     return sema.failWithOwnedErrorMsg(block, msg: {
-        const msg = try sema.errMsg(src, "pointer bit offset '{}' cannot coerce into pointer bit offset '{}'", .{
+        const msg = try sema.errMsg(src, "pointer bit offset '{d}' cannot coerce into pointer bit offset '{d}'", .{
             src_info.packed_offset.bit_offset,
             dest_info.packed_offset.bit_offset,
         });

@@ -23240,7 +23239,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 return sema.fail(
     block,
     operand_src,
-    "@byteSwap requires the number of bits to be evenly divisible by 8, but {f} has {} bits",
+    "@byteSwap requires the number of bits to be evenly divisible by 8, but {f} has {d} bits",
     .{ scalar_ty.fmt(pt), bits },
 );
 }

@@ -23577,7 +23576,7 @@ fn checkNumericType(
 .comptime_float, .float, .comptime_int, .int => {},
 .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
     .comptime_float, .float, .comptime_int, .int => {},
-    else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
+    else => |t| return sema.fail(block, ty_src, "expected number, found '{s}'", .{t}),
 },
 else => return sema.fail(block, ty_src, "expected number, found '{f}'", .{ty.fmt(pt)}),
 }

@@ -24254,7 +24253,7 @@ fn analyzeShuffle(
 if (idx >= b_len) return sema.failWithOwnedErrorMsg(block, msg: {
     const msg = try sema.errMsg(mask_src, "mask element at index '{d}' selects out-of-bounds index", .{mask_idx});
     errdefer msg.destroy(sema.gpa);
-    try sema.errNote(b_src, msg, "index '{d}' exceeds bounds of '{}' given here", .{ idx, b_ty.fmt(pt) });
+    try sema.errNote(b_src, msg, "index '{d}' exceeds bounds of '{f}' given here", .{ idx, b_ty.fmt(pt) });
     break :msg msg;
 });
 }

@@ -25039,7 +25038,7 @@ fn analyzeMinMax(
 try sema.checkNumericType(block, operand_src, operand_ty);
 if (operand_ty.zigTypeTag(zcu) != .vector) {
     return sema.failWithOwnedErrorMsg(block, msg: {
-        const msg = try sema.errMsg(operand_src, "expected vector, found '{}'", .{operand_ty.fmt(pt)});
+        const msg = try sema.errMsg(operand_src, "expected vector, found '{f}'", .{operand_ty.fmt(pt)});
         errdefer msg.destroy(zcu.gpa);
         try sema.errNote(operand_srcs[0], msg, "vector operand here", .{});
         break :msg msg;

@@ -25047,7 +25046,7 @@ fn analyzeMinMax(
 }
 if (operand_ty.vectorLen(zcu) != vec_len) {
     return sema.failWithOwnedErrorMsg(block, msg: {
-        const msg = try sema.errMsg(operand_src, "expected vector of length '{d}', found '{}'", .{ vec_len, operand_ty.fmt(pt) });
+        const msg = try sema.errMsg(operand_src, "expected vector of length '{d}', found '{f}'", .{ vec_len, operand_ty.fmt(pt) });
         errdefer msg.destroy(zcu.gpa);
         try sema.errNote(operand_srcs[0], msg, "vector of length '{d}' here", .{vec_len});
         break :msg msg;

@@ -25060,7 +25059,7 @@ fn analyzeMinMax(
 const operand_ty = sema.typeOf(operand);
 if (operand_ty.zigTypeTag(zcu) == .vector) {
     return sema.failWithOwnedErrorMsg(block, msg: {
-        const msg = try sema.errMsg(operand_srcs[0], "expected vector, found '{}'", .{first_operand_ty.fmt(pt)});
+        const msg = try sema.errMsg(operand_srcs[0], "expected vector, found '{f}'", .{first_operand_ty.fmt(pt)});
         errdefer msg.destroy(zcu.gpa);
         try sema.errNote(operand_src, msg, "vector operand here", .{});
         break :msg msg;

@@ -29163,7 +29162,7 @@ fn coerceExtra(
 // return sema.fail(
 //     block,
 //     inst_src,
-//     "type '{f}' cannot represent integer value '{}'",
+//     "type '{f}' cannot represent integer value '{f}'",
 //     .{ dest_ty.fmt(pt), val },
 // );
 //}

@@ -29370,7 +29369,7 @@ fn coerceExtra(
 try sema.errNote(param_src, msg, "parameter type declared here", .{});
 }

-// TODO maybe add "cannot store an error in type '{}'" note
+// TODO maybe add "cannot store an error in type '{f}'" note

 break :msg msg;
 };

@@ -29718,12 +29717,12 @@ const InMemoryCoercionResult = union(enum) {
 },
 .ptr_bit_range => |bit_range| {
     if (bit_range.actual_host != bit_range.wanted_host) {
-        try sema.errNote(src, msg, "pointer host size '{}' cannot cast into pointer host size '{}'", .{
+        try sema.errNote(src, msg, "pointer host size '{d}' cannot cast into pointer host size '{d}'", .{
             bit_range.actual_host, bit_range.wanted_host,
         });
     }
     if (bit_range.actual_offset != bit_range.wanted_offset) {
-        try sema.errNote(src, msg, "pointer bit offset '{}' cannot cast into pointer bit offset '{}'", .{
+        try sema.errNote(src, msg, "pointer bit offset '{d}' cannot cast into pointer bit offset '{d}'", .{
             bit_range.actual_offset, bit_range.wanted_offset,
         });
     }

@@ -34840,7 +34839,7 @@ fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_
 return sema.fail(
     block,
     src,
-    "backing integer type '{f}' has bit size {} but the struct fields have a total bit size of {}",
+    "backing integer type '{f}' has bit size {d} but the struct fields have a total bit size of {d}",
     .{ backing_int_ty.fmt(pt), backing_int_ty.bitSize(zcu), fields_bit_sum },
 );
 }

@@ -35183,11 +35182,7 @@ pub fn resolveUnionFieldTypes(sema: *Sema, ty: Type, union_type: InternPool.Load
 switch (union_type.flagsUnordered(ip).status) {
 .none => {},
 .field_types_wip => {
-    const msg = try sema.errMsg(
-        ty.srcLoc(zcu),
-        "union '{f}' depends on itself",
-        .{ty.fmt(pt)},
-    );
+    const msg = try sema.errMsg(ty.srcLoc(zcu), "union '{f}' depends on itself", .{ty.fmt(pt)});
     return sema.failWithOwnedErrorMsg(null, msg);
 },
 .have_field_types,

@@ -37194,7 +37189,7 @@ fn notePathToComptimeAllocPtr(sema: *Sema, msg: *Zcu.ErrorMsg, src: LazySrcLoc,
 if (intermediate_value_count == 0) {
     try first_path.print(arena, "{fi}", .{start_value_name.fmt(ip)});
 } else {
-    try first_path.print(arena, "v{}", .{intermediate_value_count - 1});
+    try first_path.print(arena, "v{d}", .{intermediate_value_count - 1});
 }

 const comptime_ptr = try sema.notePathToComptimeAllocPtrInner(val, &first_path);
@@ -513,7 +513,7 @@ fn lowerInt(
 switch (big_int.setFloat(val, .trunc)) {
     .inexact => return self.fail(
         node,
-        "fractional component prevents float value '{}' from coercion to type '{f}'",
+        "fractional component prevents float value '{d}' from coercion to type '{f}'",
         .{ val, res_ty.fmt(self.sema.pt) },
     ),
     .exact => {},

@@ -524,7 +524,7 @@ fn lowerInt(
 if (!big_int.toConst().fitsInTwosComp(int_info.signedness, int_info.bits)) {
     return self.fail(
         node,
-        "type '{f}' cannot represent integer value '{}'",
+        "type '{f}' cannot represent integer value '{d}'",
         .{ res_ty.fmt(self.sema.pt), val },
     );
 }
src/Type.zig (20 changes)

@@ -175,8 +175,8 @@ pub fn print(ty: Type, writer: *std.io.Writer, pt: Zcu.PerThread) std.io.Writer.

 if (info.sentinel != .none) switch (info.flags.size) {
     .one, .c => unreachable,
-    .many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
-    .slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
+    .many => try writer.print("[*:{f}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
+    .slice => try writer.print("[:{f}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
 } else switch (info.flags.size) {
     .one => try writer.writeAll("*"),
     .many => try writer.writeAll("[*]"),

@@ -220,7 +220,7 @@ pub fn print(ty: Type, writer: *std.io.Writer, pt: Zcu.PerThread) std.io.Writer.
 try writer.print("[{d}]", .{array_type.len});
 try print(Type.fromInterned(array_type.child), writer, pt);
 } else {
-    try writer.print("[{d}:{}]", .{
+    try writer.print("[{d}:{f}]", .{
         array_type.len,
         Value.fromInterned(array_type.sentinel).fmtValue(pt),
     });

@@ -250,7 +250,7 @@ pub fn print(ty: Type, writer: *std.io.Writer, pt: Zcu.PerThread) std.io.Writer.
 },
 .inferred_error_set_type => |func_index| {
     const func_nav = ip.getNav(zcu.funcInfo(func_index).owner_nav);
-    try writer.print("@typeInfo(@typeInfo(@TypeOf({})).@\"fn\".return_type.?).error_union.error_set", .{
+    try writer.print("@typeInfo(@typeInfo(@TypeOf({f})).@\"fn\".return_type.?).error_union.error_set", .{
         func_nav.fqn.fmt(ip),
     });
 },

@@ -259,7 +259,7 @@ pub fn print(ty: Type, writer: *std.io.Writer, pt: Zcu.PerThread) std.io.Writer.
 try writer.writeAll("error{");
 for (names.get(ip), 0..) |name, i| {
     if (i != 0) try writer.writeByte(',');
-    try writer.print("{}", .{name.fmt(ip)});
+    try writer.print("{f}", .{name.fmt(ip)});
 }
 try writer.writeAll("}");
 },

@@ -302,7 +302,7 @@ pub fn print(ty: Type, writer: *std.io.Writer, pt: Zcu.PerThread) std.io.Writer.
 },
 .struct_type => {
     const name = ip.loadStructType(ty.toIntern()).name;
-    try writer.print("{}", .{name.fmt(ip)});
+    try writer.print("{f}", .{name.fmt(ip)});
 },
 .tuple_type => |tuple| {
     if (tuple.types.len == 0) {

@@ -313,22 +313,22 @@ pub fn print(ty: Type, writer: *std.io.Writer, pt: Zcu.PerThread) std.io.Writer.
 try writer.writeAll(if (i == 0) " " else ", ");
 if (val != .none) try writer.writeAll("comptime ");
 try print(Type.fromInterned(field_ty), writer, pt);
-if (val != .none) try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt)});
+if (val != .none) try writer.print(" = {f}", .{Value.fromInterned(val).fmtValue(pt)});
 }
 try writer.writeAll(" }");
 },

 .union_type => {
     const name = ip.loadUnionType(ty.toIntern()).name;
-    try writer.print("{}", .{name.fmt(ip)});
+    try writer.print("{f}", .{name.fmt(ip)});
 },
 .opaque_type => {
     const name = ip.loadOpaqueType(ty.toIntern()).name;
-    try writer.print("{}", .{name.fmt(ip)});
+    try writer.print("{f}", .{name.fmt(ip)});
 },
 .enum_type => {
     const name = ip.loadEnumType(ty.toIntern()).name;
-    try writer.print("{}", .{name.fmt(ip)});
+    try writer.print("{f}", .{name.fmt(ip)});
 },
 .func_type => |fn_info| {
     if (fn_info.is_noinline) {
34
src/Zcu.zig
34
src/Zcu.zig
|
|
@ -1112,7 +1112,7 @@ pub const File = struct {
eb: *std.zig.ErrorBundle.Wip,
) !std.zig.ErrorBundle.SourceLocationIndex {
return eb.addSourceLocation(.{
.src_path = try eb.printString("{}", .{file.path.fmt(zcu.comp)}),
.src_path = try eb.printString("{f}", .{file.path.fmt(zcu.comp)}),
.span_start = 0,
.span_main = 0,
.span_end = 0,

@ -1133,7 +1133,7 @@ pub const File = struct {
const end = start + tree.tokenSlice(tok).len;
const loc = std.zig.findLineColumn(source.bytes, start);
return eb.addSourceLocation(.{
.src_path = try eb.printString("{}", .{file.path.fmt(zcu.comp)}),
.src_path = try eb.printString("{f}", .{file.path.fmt(zcu.comp)}),
.span_start = start,
.span_main = start,
.span_end = @intCast(end),

@ -4238,17 +4238,17 @@ fn formatAnalUnit(data: FormatAnalUnit, writer: *std.io.Writer) std.io.Writer.Er
const cu = ip.getComptimeUnit(cu_id);
if (cu.zir_index.resolveFull(ip)) |resolved| {
const file_path = zcu.fileByIndex(resolved.file).path;
return writer.print("comptime(inst=('{}', %{}) [{}])", .{ file_path.fmt(zcu.comp), @intFromEnum(resolved.inst), @intFromEnum(cu_id) });
return writer.print("comptime(inst=('{f}', %{}) [{}])", .{ file_path.fmt(zcu.comp), @intFromEnum(resolved.inst), @intFromEnum(cu_id) });
} else {
return writer.print("comptime(inst=<lost> [{}])", .{@intFromEnum(cu_id)});
}
},
.nav_val => |nav| return writer.print("nav_val('{}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.nav_ty => |nav| return writer.print("nav_ty('{}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.type => |ty| return writer.print("ty('{}' [{}])", .{ Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
.nav_val => |nav| return writer.print("nav_val('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.nav_ty => |nav| return writer.print("nav_ty('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.type => |ty| return writer.print("ty('{f}' [{}])", .{ Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
.func => |func| {
const nav = zcu.funcInfo(func).owner_nav;
return writer.print("func('{}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) });
return writer.print("func('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) });
},
.memoized_state => return writer.writeAll("memoized_state"),
}

@ -4265,42 +4265,42 @@ fn formatDependee(data: FormatDependee, writer: *std.io.Writer) std.io.Writer.Er
return writer.writeAll("inst(<lost>)");
};
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("inst('{}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
return writer.print("inst('{f}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
},
.nav_val => |nav| {
const fqn = ip.getNav(nav).fqn;
return writer.print("nav_val('{}')", .{fqn.fmt(ip)});
return writer.print("nav_val('{f}')", .{fqn.fmt(ip)});
},
.nav_ty => |nav| {
const fqn = ip.getNav(nav).fqn;
return writer.print("nav_ty('{}')", .{fqn.fmt(ip)});
return writer.print("nav_ty('{f}')", .{fqn.fmt(ip)});
},
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.struct_type, .union_type, .enum_type => return writer.print("type('{}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
.func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
.struct_type, .union_type, .enum_type => return writer.print("type('{f}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
.func => |f| return writer.print("ies('{f}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
else => unreachable,
},
.zon_file => |file| {
const file_path = zcu.fileByIndex(file).path;
return writer.print("zon_file('{}')", .{file_path.fmt(zcu.comp)});
return writer.print("zon_file('{f}')", .{file_path.fmt(zcu.comp)});
},
.embed_file => |ef_idx| {
const ef = ef_idx.get(zcu);
return writer.print("embed_file('{}')", .{ef.path.fmt(zcu.comp)});
return writer.print("embed_file('{f}')", .{ef.path.fmt(zcu.comp)});
},
.namespace => |ti| {
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("namespace(<lost>)");
};
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("namespace('{}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
return writer.print("namespace('{f}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
},
.namespace_name => |k| {
const info = k.namespace.resolveFull(ip) orelse {
return writer.print("namespace(<lost>, '{}')", .{k.name.fmt(ip)});
return writer.print("namespace(<lost>, '{f}')", .{k.name.fmt(ip)});
};
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("namespace('{}', %{d}, '{}')", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst), k.name.fmt(ip) });
return writer.print("namespace('{f}', %{d}, '{f}')", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst), k.name.fmt(ip) });
},
.memoized_state => return writer.writeAll("memoized_state"),
}
@ -53,7 +53,7 @@ fn deinitFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const file = zcu.fileByIndex(file_index);
log.debug("deinit File {}", .{file.path.fmt(zcu.comp)});
log.debug("deinit File {f}", .{file.path.fmt(zcu.comp)});
file.path.deinit(gpa);
file.unload(gpa);
if (file.prev_zir) |prev_zir| {

@ -117,7 +117,7 @@ pub fn updateFile(
var lock: std.fs.File.Lock = switch (file.status) {
.never_loaded, .retryable_failure => lock: {
// First, load the cached ZIR code, if any.
log.debug("AstGen checking cache: {} (local={}, digest={s})", .{
log.debug("AstGen checking cache: {f} (local={}, digest={s})", .{
file.path.fmt(comp), want_local_cache, &hex_digest,
});

@ -130,11 +130,11 @@ pub fn updateFile(
stat.inode == file.stat.inode;

if (unchanged_metadata) {
log.debug("unmodified metadata of file: {}", .{file.path.fmt(comp)});
log.debug("unmodified metadata of file: {f}", .{file.path.fmt(comp)});
return;
}

log.debug("metadata changed: {}", .{file.path.fmt(comp)});
log.debug("metadata changed: {f}", .{file.path.fmt(comp)});

break :lock .exclusive;
},

@ -221,12 +221,12 @@ pub fn updateFile(
};
switch (result) {
.success => {
log.debug("AstGen cached success: {}", .{file.path.fmt(comp)});
log.debug("AstGen cached success: {f}", .{file.path.fmt(comp)});
break false;
},
.invalid => {},
.truncated => log.warn("unexpected EOF reading cached ZIR for {}", .{file.path.fmt(comp)}),
.stale => log.debug("AstGen cache stale: {}", .{file.path.fmt(comp)}),
.truncated => log.warn("unexpected EOF reading cached ZIR for {f}", .{file.path.fmt(comp)}),
.stale => log.debug("AstGen cache stale: {f}", .{file.path.fmt(comp)}),
}

// If we already have the exclusive lock then it is our job to update.

@ -283,7 +283,7 @@ pub fn updateFile(
},
}

log.debug("AstGen fresh success: {}", .{file.path.fmt(comp)});
log.debug("AstGen fresh success: {f}", .{file.path.fmt(comp)});
}

file.stat = .{

@ -2303,7 +2303,7 @@ pub fn updateBuiltinModule(pt: Zcu.PerThread, opts: Builtin) Allocator.Error!voi

Builtin.updateFileOnDisk(file, comp) catch |err| comp.setMiscFailure(
.write_builtin_zig,
"unable to write '{}': {s}",
"unable to write '{f}': {s}",
.{ file.path.fmt(comp), @errorName(err) },
);
}
@ -566,13 +566,9 @@ const InstTracking = struct {
}
}

pub fn format(
inst_tracking: InstTracking,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
if (!std.meta.eql(inst_tracking.long, inst_tracking.short)) try writer.print("|{f}| ", .{inst_tracking.long});
pub fn format(inst_tracking: InstTracking, writer: *std.io.Writer, comptime f: []const u8) std.io.Writer.Error!void {
comptime assert(f.len == 0);
if (!std.meta.eql(inst_tracking.long, inst_tracking.short)) try writer.print("|{}| ", .{inst_tracking.long});
try writer.print("{}", .{inst_tracking.short});
}
};

@ -973,7 +969,7 @@ fn formatWipMir(data: FormatWipMirData, writer: *std.io.Writer) std.io.Writer.Er
else => |e| return e,
}).insts) |lowered_inst| {
if (!first) try writer.writeAll("\ndebug(wip_mir): ");
try writer.print(" | {f}", .{lowered_inst});
try writer.print(" | {}", .{lowered_inst});
first = false;
}
}

@ -1156,7 +1152,7 @@ fn gen(func: *Func) !void {
func.ret_mcv.long.address().offset(-func.ret_mcv.short.indirect.off),
);
func.ret_mcv.long = .{ .load_frame = .{ .index = frame_index } };
tracking_log.debug("spill {f} to {f}", .{ func.ret_mcv.long, frame_index });
tracking_log.debug("spill {} to {f}", .{ func.ret_mcv.long, frame_index });
},
else => unreachable,
}

@ -1656,7 +1652,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {

if (std.debug.runtime_safety) {
if (func.air_bookkeeping < old_air_bookkeeping + 1) {
std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{f}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[@intFromEnum(inst)] });
std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[@intFromEnum(inst)] });
}

{ // check consistency of tracked registers

@ -1668,7 +1664,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
for (tracking.getRegs()) |reg| {
if (RegisterManager.indexOfRegIntoTracked(reg).? == index) break;
} else return std.debug.panic(
\\%{} takes up these regs: {any}, however this regs {any}, don't use it
\\%{f} takes up these regs: {any}, however this regs {any}, don't use it
, .{ tracked_inst, tracking.getRegs(), RegisterManager.regAtTrackedIndex(@intCast(index)) });
}
}

@ -1726,7 +1722,7 @@ fn finishAirResult(func: *Func, inst: Air.Inst.Index, result: MCValue) void {
else => {},
}

tracking_log.debug("%{d} => {f} (birth)", .{ inst, result });
tracking_log.debug("%{d} => {} (birth)", .{ inst, result });
func.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result));
// In some cases, an operand may be reused as the result.
// If that operand died and was a register, it was freed by

@ -1827,7 +1823,7 @@ fn computeFrameLayout(func: *Func) !FrameLayout {
total_alloc_size + 64 + args_frame_size + spill_frame_size + call_frame_size,
@intCast(frame_align[@intFromEnum(FrameIndex.base_ptr)].toByteUnits().?),
);
log.debug("frame size: {f}", .{acc_frame_size});
log.debug("frame size: {d}", .{acc_frame_size});

// store the ra at total_size - 8, so it's the very first thing in the stack
// relative to the fp

@ -1888,7 +1884,7 @@ fn splitType(func: *Func, ty: Type) ![2]Type {
},
else => unreachable,
},
else => return func.fail("TODO: splitType class {f}", .{class}),
else => return func.fail("TODO: splitType class {}", .{class}),
};
} else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts;
return func.fail("TODO implement splitType for {f}", .{ty.fmt(func.pt)});
@ -92,12 +92,7 @@ pub const Inst = struct {
},
};

pub fn format(
inst: Inst,
comptime fmt: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) !void {
pub fn format(inst: Inst, writer: *std.io.Writer, comptime fmt: []const u8) std.io.Writer.Error!void {
assert(fmt.len == 0);
try writer.print("Tag: {s}, Data: {s}", .{ @tagName(inst.tag), @tagName(inst.data) });
}

@ -256,19 +256,14 @@ pub const FrameIndex = enum(u32) {
return @intFromEnum(fi) < named_count;
}

pub fn format(
fi: FrameIndex,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
pub fn format(fi: FrameIndex, writer: *std.io.Writer, comptime fmt: []const u8) std.io.Writer.Error!void {
try writer.writeAll("FrameIndex");
if (fi.isNamed()) {
try writer.writeByte('.');
try writer.writeAll(@tagName(fi));
} else {
try writer.writeByte('(');
try std.fmt.formatType(@intFromEnum(fi), fmt, options, writer, 0);
try writer.printInt(fmt, .{}, @intFromEnum(fi));
try writer.writeByte(')');
}
}
@ -723,7 +723,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
|||
|
||||
if (std.debug.runtime_safety) {
|
||||
if (self.air_bookkeeping < old_air_bookkeeping + 1) {
|
||||
std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[@intFromEnum(inst)] });
|
||||
std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{s}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[@intFromEnum(inst)] });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1001,7 +1001,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
|
|||
switch (self.args[arg_index]) {
|
||||
.stack_offset => |off| {
|
||||
const abi_size = math.cast(u32, ty.abiSize(zcu)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
|
||||
return self.fail("type '{f}' too big to fit into stack frame", .{ty.fmt(pt)});
|
||||
};
|
||||
const offset = off + abi_size;
|
||||
break :blk .{ .stack_offset = offset };
|
||||
|
|
@ -2748,7 +2748,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
|
|||
}
|
||||
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
return self.fail("type '{f}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
};
|
||||
// TODO swap this for inst.ty.ptrAlign
|
||||
const abi_align = elem_ty.abiAlignment(zcu);
|
||||
|
|
@ -2760,7 +2760,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
|
|||
const zcu = pt.zcu;
|
||||
const elem_ty = self.typeOfIndex(inst);
|
||||
const abi_size = math.cast(u32, elem_ty.abiSize(zcu)) orelse {
|
||||
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
return self.fail("type '{f}' too big to fit into stack frame", .{elem_ty.fmt(pt)});
|
||||
};
|
||||
const abi_align = elem_ty.abiAlignment(zcu);
|
||||
self.stack_align = self.stack_align.max(abi_align);
|
||||
|
|
@ -4111,7 +4111,7 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
|
|||
while (true) {
|
||||
i -= 1;
|
||||
if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| {
|
||||
log.debug("getResolvedInstValue %{} => {}", .{ inst, mcv });
|
||||
log.debug("getResolvedInstValue %{f} => {}", .{ inst, mcv });
|
||||
assert(mcv != .dead);
|
||||
return mcv;
|
||||
}
|
||||
|
|
@ -4382,7 +4382,7 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
|
|||
const prev_value = self.getResolvedInstValue(inst);
|
||||
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
|
||||
branch.inst_table.putAssumeCapacity(inst, .dead);
|
||||
log.debug("%{} death: {} -> .dead", .{ inst, prev_value });
|
||||
log.debug("%{f} death: {} -> .dead", .{ inst, prev_value });
|
||||
switch (prev_value) {
|
||||
.register => |reg| {
|
||||
self.register_manager.freeReg(reg);
|
||||
|
|
|
|||
|
|
@ -1463,7 +1463,7 @@ fn allocStack(cg: *CodeGen, ty: Type) !WValue {
|
|||
}
|
||||
|
||||
const abi_size = std.math.cast(u32, ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
|
||||
return cg.fail("Type {f} with ABI size of {d} exceeds stack frame size", .{
|
||||
ty.fmt(pt), ty.abiSize(zcu),
|
||||
});
|
||||
};
|
||||
|
|
@ -1497,7 +1497,7 @@ fn allocStackPtr(cg: *CodeGen, inst: Air.Inst.Index) !WValue {
|
|||
|
||||
const abi_alignment = ptr_ty.ptrAlignment(zcu);
|
||||
const abi_size = std.math.cast(u32, pointee_ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
|
||||
return cg.fail("Type {f} with ABI size of {d} exceeds stack frame size", .{
|
||||
pointee_ty.fmt(pt), pointee_ty.abiSize(zcu),
|
||||
});
|
||||
};
|
||||
|
|
@ -2046,7 +2046,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
|
|||
try cg.genInst(inst);
|
||||
|
||||
if (std.debug.runtime_safety and cg.air_bookkeeping < old_bookkeeping_value + 1) {
|
||||
std.debug.panic("Missing call to `finishAir` in AIR instruction %{d} ('{}')", .{
|
||||
std.debug.panic("Missing call to `finishAir` in AIR instruction %{d} ('{s}')", .{
|
||||
inst,
|
||||
cg.air.instructions.items(.tag)[@intFromEnum(inst)],
|
||||
});
|
||||
|
|
@ -2404,10 +2404,7 @@ fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErr
|
|||
try cg.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
|
||||
},
|
||||
else => if (abi_size > 8) {
|
||||
return cg.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
|
||||
ty.fmt(pt),
|
||||
abi_size,
|
||||
});
|
||||
return cg.fail("TODO: `store` for type `{f}` with abisize `{d}`", .{ ty.fmt(pt), abi_size });
|
||||
},
|
||||
}
|
||||
try cg.emitWValue(lhs);
|
||||
|
|
@ -2596,10 +2593,7 @@ fn binOp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WV
|
|||
if (ty.zigTypeTag(zcu) == .int) {
|
||||
return cg.binOpBigInt(lhs, rhs, ty, op);
|
||||
} else {
|
||||
return cg.fail(
|
||||
"TODO: Implement binary operation for type: {}",
|
||||
.{ty.fmt(pt)},
|
||||
);
|
||||
return cg.fail("TODO: Implement binary operation for type: {f}", .{ty.fmt(pt)});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2817,7 +2811,7 @@ fn airAbs(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
|
||||
switch (scalar_ty.zigTypeTag(zcu)) {
|
||||
.int => if (ty.zigTypeTag(zcu) == .vector) {
|
||||
return cg.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO implement airAbs for {f}", .{ty.fmt(pt)});
|
||||
} else {
|
||||
const int_bits = ty.intInfo(zcu).bits;
|
||||
const wasm_bits = toWasmBits(int_bits) orelse {
|
||||
|
|
@ -3244,7 +3238,7 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
|
|||
return .{ .imm32 = @intFromBool(!val.isNull(zcu)) };
|
||||
},
|
||||
.aggregate => switch (ip.indexToKey(ty.ip_index)) {
|
||||
.array_type => return cg.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
|
||||
.array_type => return cg.fail("Wasm TODO: LowerConstant for {f}", .{ty.fmt(pt)}),
|
||||
.vector_type => {
|
||||
assert(determineSimdStoreStrategy(ty, zcu, cg.target) == .direct);
|
||||
var buf: [16]u8 = undefined;
|
||||
|
|
@ -3332,7 +3326,7 @@ fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue {
|
|||
},
|
||||
else => unreachable,
|
||||
},
|
||||
else => return cg.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}),
|
||||
else => return cg.fail("Wasm TODO: emitUndefined for type: {s}\n", .{ty.zigTypeTag(zcu)}),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3608,7 +3602,7 @@ fn airNot(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
} else {
|
||||
const int_info = operand_ty.intInfo(zcu);
|
||||
const wasm_bits = toWasmBits(int_info.bits) orelse {
|
||||
return cg.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
|
||||
return cg.fail("TODO: Implement binary NOT for {f}", .{operand_ty.fmt(pt)});
|
||||
};
|
||||
|
||||
switch (wasm_bits) {
|
||||
|
|
@ -3874,7 +3868,7 @@ fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
},
|
||||
else => result: {
|
||||
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, zcu)) orelse {
|
||||
return cg.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
|
||||
return cg.fail("Field type '{f}' too big to fit into stack frame", .{field_ty.fmt(pt)});
|
||||
};
|
||||
if (isByRef(field_ty, zcu, cg.target)) {
|
||||
switch (operand) {
|
||||
|
|
@ -4360,7 +4354,7 @@ fn isNull(cg: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opc
|
|||
// a pointer to the stack value
|
||||
if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
|
||||
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
|
||||
return cg.fail("Optional type {f} too big to fit into stack frame", .{optional_ty.fmt(pt)});
|
||||
};
|
||||
try cg.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
|
||||
}
|
||||
|
|
@ -4430,7 +4424,7 @@ fn airOptionalPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void
|
|||
}
|
||||
|
||||
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
|
||||
return cg.fail("Optional type {f} too big to fit into stack frame", .{opt_ty.fmt(pt)});
|
||||
};
|
||||
|
||||
try cg.emitWValue(operand);
|
||||
|
|
@ -4462,7 +4456,7 @@ fn airWrapOptional(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
break :result cg.reuseOperand(ty_op.operand, operand);
|
||||
}
|
||||
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
|
||||
return cg.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
|
||||
return cg.fail("Optional type {f} too big to fit into stack frame", .{op_ty.fmt(pt)});
|
||||
};
|
||||
|
||||
// Create optional type, set the non-null bit, and store the operand inside the optional type
|
||||
|
|
@ -6196,7 +6190,7 @@ fn airMulWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
_ = try cg.load(overflow_ret, Type.i32, 0);
|
||||
try cg.addLocal(.local_set, overflow_bit.local.value);
|
||||
break :blk res;
|
||||
} else return cg.fail("TODO: @mulWithOverflow for {}", .{ty.fmt(pt)});
|
||||
} else return cg.fail("TODO: @mulWithOverflow for {f}", .{ty.fmt(pt)});
|
||||
var bin_op_local = try mul.toLocal(cg, ty);
|
||||
defer bin_op_local.free(cg);
|
||||
|
||||
|
|
@ -6749,7 +6743,7 @@ fn airMod(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
const add = try cg.binOp(rem, rhs, ty, .add);
|
||||
break :result try cg.binOp(add, rhs, ty, .rem);
|
||||
}
|
||||
return cg.fail("TODO: @mod for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO: @mod for {f}", .{ty.fmt(pt)});
|
||||
};
|
||||
|
||||
return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
|
||||
|
|
@ -6767,7 +6761,7 @@ fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
const lhs = try cg.resolveInst(bin_op.lhs);
|
||||
const rhs = try cg.resolveInst(bin_op.rhs);
|
||||
const wasm_bits = toWasmBits(int_info.bits) orelse {
|
||||
return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO: mul_sat for {f}", .{ty.fmt(pt)});
|
||||
};
|
||||
|
||||
switch (wasm_bits) {
|
||||
|
|
@ -6804,7 +6798,7 @@ fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
},
|
||||
64 => {
|
||||
if (!(int_info.bits == 64 and int_info.signedness == .signed)) {
|
||||
return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO: mul_sat for {f}", .{ty.fmt(pt)});
|
||||
}
|
||||
const overflow_ret = try cg.allocStack(Type.i32);
|
||||
_ = try cg.callIntrinsic(
|
||||
|
|
@ -6822,7 +6816,7 @@ fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
|||
},
|
||||
128 => {
|
||||
if (!(int_info.bits == 128 and int_info.signedness == .signed)) {
|
||||
return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
|
||||
return cg.fail("TODO: mul_sat for {f}", .{ty.fmt(pt)});
|
||||
}
|
||||
const overflow_ret = try cg.allocStack(Type.i32);
|
||||
const ret = try cg.callIntrinsic(
|
||||
|
|
|
|||
|
|
@ -158,15 +158,8 @@ pub fn modRmExt(encoding: Encoding) u3 {
|
|||
};
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
encoding: Encoding,
|
||||
comptime fmt: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
_ = fmt;
|
||||
|
||||
pub fn format(encoding: Encoding, writer: *std.io.Writer, comptime fmt: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(fmt.len == 0);
|
||||
var opc = encoding.opcode();
|
||||
if (encoding.data.mode.isVex()) {
|
||||
try writer.writeAll("VEX.");
|
||||
|
|
|
|||
|
|
@ -728,19 +728,14 @@ pub const FrameIndex = enum(u32) {
|
|||
return @intFromEnum(fi) < named_count;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
fi: FrameIndex,
|
||||
comptime fmt: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) @TypeOf(writer).Error!void {
|
||||
pub fn format(fi: FrameIndex, writer: *std.io.Writer, comptime fmt: []const u8) std.io.Writer.Error!void {
|
||||
try writer.writeAll("FrameIndex");
|
||||
if (fi.isNamed()) {
|
||||
try writer.writeByte('.');
|
||||
try writer.writeAll(@tagName(fi));
|
||||
} else {
|
||||
try writer.writeByte('(');
|
||||
try std.fmt.formatType(@intFromEnum(fi), fmt, options, writer, 0);
|
||||
try writer.printInt(fmt, .{}, @intFromEnum(fi));
|
||||
try writer.writeByte(')');
|
||||
}
|
||||
}
|
||||
|
|
@ -844,12 +839,8 @@ pub const Memory = struct {
|
|||
};
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
s: Size,
|
||||
comptime _: []const u8,
|
||||
_: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) @TypeOf(writer).Error!void {
|
||||
pub fn format(s: Size, writer: *std.io.Writer, comptime f: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(f.len == 0);
|
||||
if (s == .none) return;
|
||||
try writer.writeAll(@tagName(s));
|
||||
switch (s) {
|
||||
|
|
@ -914,12 +905,8 @@ pub const Immediate = union(enum) {
|
|||
return .{ .signed = x };
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
imm: Immediate,
|
||||
comptime _: []const u8,
|
||||
_: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) @TypeOf(writer).Error!void {
|
||||
pub fn format(imm: Immediate, writer: *std.io.Writer, comptime f: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(f.len == 0);
|
||||
switch (imm) {
|
||||
inline else => |int| try writer.print("{d}", .{int}),
|
||||
.nav => |nav_off| try writer.print("Nav({d}) + {d}", .{ @intFromEnum(nav_off.nav), nav_off.off }),
|
||||
|
|
|
|||
|
|
@ -237,7 +237,7 @@ pub fn generateLazySymbol(
|
|||
const target = &comp.root_mod.resolved_target.result;
|
||||
const endian = target.cpu.arch.endian();
|
||||
|
||||
log.debug("generateLazySymbol: kind = {s}, ty = {}", .{
|
||||
log.debug("generateLazySymbol: kind = {s}, ty = {f}", .{
|
||||
@tagName(lazy_sym.kind),
|
||||
Type.fromInterned(lazy_sym.ty).fmt(pt),
|
||||
});
|
||||
|
|
@ -277,7 +277,7 @@ pub fn generateLazySymbol(
|
|||
code.appendAssumeCapacity(0);
|
||||
}
|
||||
} else {
|
||||
return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {}", .{
|
||||
return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {f}", .{
|
||||
@tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt),
|
||||
});
|
||||
}
|
||||
|
|
@ -310,7 +310,7 @@ pub fn generateSymbol(
|
|||
const target = zcu.getTarget();
|
||||
const endian = target.cpu.arch.endian();
|
||||
|
||||
log.debug("generateSymbol: val = {}", .{val.fmtValue(pt)});
|
||||
log.debug("generateSymbol: val = {f}", .{val.fmtValue(pt)});
|
||||
|
||||
if (val.isUndefDeep(zcu)) {
|
||||
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
|
||||
|
|
@ -767,7 +767,7 @@ fn lowerUavRef(
|
|||
const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
|
||||
const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";
|
||||
|
||||
log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)});
|
||||
log.debug("lowerUavRef: ty = {f}", .{uav_ty.fmt(pt)});
|
||||
try code.ensureUnusedCapacity(gpa, ptr_width_bytes);
|
||||
|
||||
if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
|
||||
|
|
@ -913,7 +913,7 @@ pub fn genNavRef(
|
|||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
log.debug("genNavRef({})", .{nav.fqn.fmt(ip)});
|
||||
log.debug("genNavRef({f})", .{nav.fqn.fmt(ip)});
|
||||
|
||||
const lib_name, const linkage, const is_threadlocal = if (nav.getExtern(ip)) |e|
|
||||
.{ e.lib_name, e.linkage, e.is_threadlocal and zcu.comp.config.any_non_single_threaded }
|
||||
|
|
@ -1065,7 +1065,7 @@ pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allo
|
|||
const ip = &zcu.intern_pool;
|
||||
const ty = val.typeOf(zcu);
|
||||
|
||||
log.debug("lowerValue(@as({}, {}))", .{ ty.fmt(pt), val.fmtValue(pt) });
|
||||
log.debug("lowerValue(@as({f}, {f}))", .{ ty.fmt(pt), val.fmtValue(pt) });
|
||||
|
||||
if (val.isUndef(zcu)) return .undef;
|
||||
|
||||
|
|
|
|||
|
|
@ -388,7 +388,7 @@ fn formatCTypePoolString(data: CTypePoolStringFormatData, w: *std.io.Writer) std
|
|||
if (data.ctype_pool_string.toSlice(data.ctype_pool)) |slice|
|
||||
try formatIdentOptions(slice, w, data.solo)
|
||||
else
|
||||
try w.print("{}", .{data.ctype_pool_string.fmt(data.ctype_pool)});
|
||||
try w.print("{f}", .{data.ctype_pool_string.fmt(data.ctype_pool)});
|
||||
}
|
||||
pub fn fmtCTypePoolString(
|
||||
ctype_pool_string: CType.Pool.String,
|
||||
|
|
@ -2471,15 +2471,7 @@ const RenderCTypeTrailing = enum {
|
|||
no_space,
|
||||
maybe_space,
|
||||
|
||||
pub fn format(
|
||||
self: @This(),
|
||||
comptime fmt: []const u8,
|
||||
_: std.fmt.FormatOptions,
|
||||
w: *Writer,
|
||||
) @TypeOf(w).Error!void {
|
||||
if (fmt.len != 0)
|
||||
@compileError("invalid format string '" ++ fmt ++ "' for type '" ++
|
||||
@typeName(@This()) ++ "'");
|
||||
pub fn format(self: @This(), w: *Writer, comptime fmt: []const u8) Writer.Error!void {
|
||||
comptime assert(fmt.len == 0);
|
||||
switch (self) {
|
||||
.no_space => {},
|
||||
|
|
|
|||
|
|
@ -817,7 +817,7 @@ const NavGen = struct {
|
|||
const result_ty_id = try self.resolveType(ty, repr);
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
log.debug("lowering constant: ty = {}, val = {}, key = {s}", .{ ty.fmt(pt), val.fmtValue(pt), @tagName(ip.indexToKey(val.toIntern())) });
|
||||
log.debug("lowering constant: ty = {f}, val = {f}, key = {s}", .{ ty.fmt(pt), val.fmtValue(pt), @tagName(ip.indexToKey(val.toIntern())) });
|
||||
if (val.isUndefDeep(zcu)) {
|
||||
return self.spv.constUndef(result_ty_id);
|
||||
}
|
||||
|
|
@ -1147,7 +1147,7 @@ const NavGen = struct {
|
|||
return result_ptr_id;
|
||||
}
|
||||
|
||||
return self.fail("cannot perform pointer cast: '{}' to '{}'", .{
|
||||
return self.fail("cannot perform pointer cast: '{f}' to '{f}'", .{
|
||||
parent_ptr_ty.fmt(pt),
|
||||
oac.new_ptr_ty.fmt(pt),
|
||||
});
|
||||
|
|
@ -1464,7 +1464,7 @@ const NavGen = struct {
|
|||
const pt = self.pt;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
log.debug("resolveType: ty = {}", .{ty.fmt(pt)});
|
||||
log.debug("resolveType: ty = {f}", .{ty.fmt(pt)});
|
||||
const target = self.spv.target;
|
||||
|
||||
const section = &self.spv.sections.types_globals_constants;
|
||||
|
|
@ -3070,7 +3070,7 @@ const NavGen = struct {
|
|||
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
|
||||
try self.spv.addFunction(spv_decl_index, self.func);
|
||||
|
||||
try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{nav.fqn.fmt(ip)});
|
||||
try self.spv.debugNameFmt(initializer_id, "initializer of {f}", .{nav.fqn.fmt(ip)});
|
||||
|
||||
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
|
||||
.id_result_type = ptr_ty_id,
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
//! This file is auto-generated by tools/gen_spirv_spec.zig.
|
||||
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
|
||||
pub const Version = packed struct(Word) {
|
||||
padding: u8 = 0,
|
||||
|
|
@ -18,15 +19,11 @@ pub const IdResult = enum(Word) {
|
|||
none,
|
||||
_,
|
||||
|
||||
pub fn format(
|
||||
self: IdResult,
|
||||
comptime _: []const u8,
|
||||
_: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) @TypeOf(writer).Error!void {
|
||||
pub fn format(self: IdResult, writer: *std.io.Writer, comptime f: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(f.len == 0);
|
||||
switch (self) {
|
||||
.none => try writer.writeAll("(none)"),
|
||||
else => try writer.print("%{}", .{@intFromEnum(self)}),
|
||||
else => try writer.print("%{d}", .{@intFromEnum(self)}),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
|
|
|||
32
src/link.zig
@ -323,7 +323,7 @@ pub const Diags = struct {
|
|||
const main_msg = try m;
|
||||
errdefer gpa.free(main_msg);
|
||||
try diags.msgs.ensureUnusedCapacity(gpa, 1);
|
||||
const note = try std.fmt.allocPrint(gpa, "while parsing {}", .{path});
|
||||
const note = try std.fmt.allocPrint(gpa, "while parsing {f}", .{path});
|
||||
errdefer gpa.free(note);
|
||||
const notes = try gpa.create([1]Msg);
|
||||
errdefer gpa.destroy(notes);
|
||||
|
|
@ -1351,7 +1351,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
|
|||
.search_strategy = .paths_first,
|
||||
}) catch |archive_err| switch (archive_err) {
|
||||
error.LinkFailure => return, // error reported via diags
|
||||
else => |e| diags.addParseError(dso_path, "failed to parse archive {}: {s}", .{ archive_path, @errorName(e) }),
|
||||
else => |e| diags.addParseError(dso_path, "failed to parse archive {f}: {s}", .{ archive_path, @errorName(e) }),
|
||||
};
|
||||
},
|
||||
error.LinkFailure => return, // error reported via diags
|
||||
|
|
@ -1874,7 +1874,7 @@ pub fn resolveInputs(
|
|||
)) |lib_result| {
|
||||
switch (lib_result) {
|
||||
.ok => {},
|
||||
.no_match => fatal("{}: file not found", .{pq.path}),
|
||||
.no_match => fatal("{f}: file not found", .{pq.path}),
|
||||
}
|
||||
}
|
||||
continue;
|
||||
|
|
@ -1928,10 +1928,10 @@ fn resolveLibInput(
|
|||
.root_dir = lib_directory,
|
||||
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}),
|
||||
};
|
||||
try checked_paths.writer(gpa).print("\n {}", .{test_path});
|
||||
try checked_paths.writer(gpa).print("\n {f}", .{test_path});
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :tbd,
|
||||
else => |e| fatal("unable to search for tbd library '{}': {s}", .{ test_path, @errorName(e) }),
|
||||
else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }),
|
||||
};
|
||||
errdefer file.close();
|
||||
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
|
||||
|
|
@ -1947,7 +1947,7 @@ fn resolveLibInput(
|
|||
},
|
||||
}),
|
||||
};
|
||||
try checked_paths.writer(gpa).print("\n {}", .{test_path});
|
||||
try checked_paths.writer(gpa).print("\n {f}", .{test_path});
|
||||
switch (try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
|
||||
.path = test_path,
|
||||
.query = name_query.query,
|
||||
|
|
@ -1964,10 +1964,10 @@ fn resolveLibInput(
|
|||
.root_dir = lib_directory,
|
||||
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}),
|
||||
};
|
||||
try checked_paths.writer(gpa).print("\n {}", .{test_path});
|
||||
try checked_paths.writer(gpa).print("\n {f}", .{test_path});
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :so,
|
||||
else => |e| fatal("unable to search for so library '{}': {s}", .{
|
||||
else => |e| fatal("unable to search for so library '{f}': {s}", .{
|
||||
test_path, @errorName(e),
|
||||
}),
|
||||
};
|
||||
|
|
@ -1982,10 +1982,10 @@ fn resolveLibInput(
|
|||
.root_dir = lib_directory,
|
||||
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}),
|
||||
};
|
||||
try checked_paths.writer(gpa).print("\n {}", .{test_path});
|
||||
try checked_paths.writer(gpa).print("\n {f}", .{test_path});
|
||||
var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :mingw,
|
||||
else => |e| fatal("unable to search for static library '{}': {s}", .{ test_path, @errorName(e) }),
|
||||
else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }),
|
||||
};
|
||||
errdefer file.close();
|
||||
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
|
||||
|
|
@ -2037,7 +2037,7 @@ fn resolvePathInput(
|
|||
.shared_library => return try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color),
|
||||
.object => {
|
||||
var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
|
||||
fatal("failed to open object {}: {s}", .{ pq.path, @errorName(err) });
|
||||
fatal("failed to open object {f}: {s}", .{ pq.path, @errorName(err) });
|
||||
errdefer file.close();
|
||||
try resolved_inputs.append(gpa, .{ .object = .{
|
||||
.path = pq.path,
|
||||
|
|
@ -2049,7 +2049,7 @@ fn resolvePathInput(
|
|||
},
|
||||
.res => {
|
||||
var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
|
||||
fatal("failed to open windows resource {}: {s}", .{ pq.path, @errorName(err) });
|
||||
fatal("failed to open windows resource {f}: {s}", .{ pq.path, @errorName(err) });
|
||||
errdefer file.close();
|
||||
try resolved_inputs.append(gpa, .{ .res = .{
|
||||
.path = pq.path,
|
||||
|
|
@ -2057,7 +2057,7 @@ fn resolvePathInput(
|
|||
} });
|
||||
return null;
|
||||
},
|
||||
else => fatal("{}: unrecognized file extension", .{pq.path}),
|
||||
else => fatal("{f}: unrecognized file extension", .{pq.path}),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2192,19 +2192,19 @@ pub fn openDso(path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso
|
|||
|
||||
pub fn openObjectInput(diags: *Diags, path: Path) error{LinkFailure}!Input {
|
||||
return .{ .object = openObject(path, false, false) catch |err| {
|
||||
return diags.failParse(path, "failed to open {}: {s}", .{ path, @errorName(err) });
|
||||
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
|
||||
} };
|
||||
}
|
||||
|
||||
pub fn openArchiveInput(diags: *Diags, path: Path, must_link: bool, hidden: bool) error{LinkFailure}!Input {
|
||||
return .{ .archive = openObject(path, must_link, hidden) catch |err| {
|
||||
return diags.failParse(path, "failed to open {}: {s}", .{ path, @errorName(err) });
|
||||
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
|
||||
} };
|
||||
}
|
||||
|
||||
pub fn openDsoInput(diags: *Diags, path: Path, needed: bool, weak: bool, reexport: bool) error{LinkFailure}!Input {
|
||||
return .{ .dso = openDso(path, needed, weak, reexport) catch |err| {
|
||||
return diags.failParse(path, "failed to open {}: {s}", .{ path, @errorName(err) });
|
||||
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
|
||||
} };
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1213,7 +1213,7 @@ fn updateLazySymbolAtom(
|
|||
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer code_buffer.deinit(gpa);
|
||||
|
||||
const name = try allocPrint(gpa, "__lazy_{s}_{}", .{
|
||||
const name = try allocPrint(gpa, "__lazy_{s}_{f}", .{
|
||||
@tagName(sym.kind),
|
||||
Type.fromInterned(sym.ty).fmt(pt),
|
||||
});
|
||||
|
|
@ -1333,7 +1333,7 @@ fn updateNavCode(
|
|||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
|
||||
log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
|
||||
log.debug("updateNavCode {f} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
|
||||
|
||||
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
|
||||
const required_alignment = switch (pt.navAlignment(nav_index)) {
|
||||
|
|
@ -1361,7 +1361,7 @@ fn updateNavCode(
|
|||
error.OutOfMemory => return error.OutOfMemory,
|
||||
else => |e| return coff.base.cgFail(nav_index, "failed to grow atom: {s}", .{@errorName(e)}),
|
||||
};
|
||||
log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), sym.value, vaddr });
|
||||
log.debug("growing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), sym.value, vaddr });
|
||||
log.debug(" (required alignment 0x{x}", .{required_alignment});
|
||||
|
||||
if (vaddr != sym.value) {
|
||||
|
|
@ -1389,7 +1389,7 @@ fn updateNavCode(
|
|||
else => |e| return coff.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(e)}),
|
||||
};
|
||||
errdefer coff.freeAtom(atom_index);
|
||||
log.debug("allocated atom for {} at 0x{x}", .{ nav.fqn.fmt(ip), vaddr });
|
||||
log.debug("allocated atom for {f} at 0x{x}", .{ nav.fqn.fmt(ip), vaddr });
|
||||
coff.getAtomPtr(atom_index).size = code_len;
|
||||
sym.value = vaddr;
|
||||
|
||||
|
|
@ -1454,7 +1454,7 @@ pub fn updateExports(
|
|||
|
||||
for (export_indices) |export_idx| {
|
||||
const exp = export_idx.ptr(zcu);
|
||||
log.debug("adding new export '{}'", .{exp.opts.name.fmt(&zcu.intern_pool)});
|
||||
log.debug("adding new export '{f}'", .{exp.opts.name.fmt(&zcu.intern_pool)});
|
||||
|
||||
if (exp.opts.section.toSlice(&zcu.intern_pool)) |section_name| {
|
||||
if (!mem.eql(u8, section_name, ".text")) {
|
||||
|
|
@ -1530,7 +1530,7 @@ pub fn deleteExport(
|
|||
const gpa = coff.base.comp.gpa;
|
||||
const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
|
||||
const sym = coff.getSymbolPtr(sym_loc);
|
||||
log.debug("deleting export '{}'", .{name.fmt(&zcu.intern_pool)});
|
||||
log.debug("deleting export '{f}'", .{name.fmt(&zcu.intern_pool)});
|
||||
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
|
||||
sym.* = .{
|
||||
.name = [_]u8{0} ** 8,
|
||||
|
|
@ -1748,7 +1748,7 @@ pub fn getNavVAddr(
|
|||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
log.debug("getNavVAddr {}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
log.debug("getNavVAddr {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
|
||||
const sym_index = if (nav.getExtern(ip)) |e|
|
||||
try coff.getGlobalSymbol(nav.name.toSlice(ip), e.lib_name.toSlice(ip))
|
||||
else
|
||||
|
|
@ -2605,7 +2605,7 @@ fn logSymtab(coff: *Coff) void {
|
|||
}
|
||||
|
||||
log.debug("GOT entries:", .{});
|
||||
log.debug("{}", .{coff.got_table});
|
||||
log.debug("{f}", .{coff.got_table});
|
||||
}
|
||||
|
||||
fn logSections(coff: *Coff) void {
|
||||
|
|
|
|||
|
|
@ -973,7 +973,7 @@ const Entry = struct {
|
|||
else
|
||||
.main;
|
||||
if (sec.getUnit(ty_unit) == unit and unit.getEntry(other_entry) == entry)
|
||||
log.err("missing Type({}({d}))", .{
|
||||
log.err("missing Type({f}({d}))", .{
|
||||
Type.fromInterned(ty).fmt(.{ .tid = .main, .zcu = zcu }),
|
||||
@intFromEnum(ty),
|
||||
});
|
||||
|
|
@ -981,7 +981,7 @@ const Entry = struct {
|
|||
for (dwarf.navs.keys(), dwarf.navs.values()) |nav, other_entry| {
|
||||
const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFile(ip)).mod.?) catch unreachable;
|
||||
if (sec.getUnit(nav_unit) == unit and unit.getEntry(other_entry) == entry)
|
||||
log.err("missing Nav({}({d}))", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) });
|
||||
log.err("missing Nav({f}({d}))", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) });
|
||||
}
|
||||
}
|
||||
@panic("missing dwarf relocation target");
|
||||
|
|
@ -1957,7 +1957,7 @@ pub const WipNav = struct {
|
|||
.{ .debug_output = .{ .dwarf = wip_nav } },
|
||||
);
|
||||
if (old_len + bytes != wip_nav.debug_info.items.len) {
|
||||
std.debug.print("{} [{}]: {} != {}\n", .{ ty.fmt(wip_nav.pt), ty.toIntern(), bytes, wip_nav.debug_info.items.len - old_len });
|
||||
std.debug.print("{f} [{}]: {} != {}\n", .{ ty.fmt(wip_nav.pt), ty.toIntern(), bytes, wip_nav.debug_info.items.len - old_len });
|
||||
unreachable;
|
||||
}
|
||||
}
|
||||
|
|
@ -2427,7 +2427,7 @@ fn initWipNavInner(
|
|||
const inst_info = nav.srcInst(ip).resolveFull(ip).?;
|
||||
const file = zcu.fileByIndex(inst_info.file);
|
||||
const decl = file.zir.?.getDeclaration(inst_info.inst);
|
||||
log.debug("initWipNav({s}:{d}:{d} %{d} = {})", .{
|
||||
log.debug("initWipNav({s}:{d}:{d} %{d} = {f})", .{
|
||||
file.sub_file_path,
|
||||
decl.src_line + 1,
|
||||
decl.src_column + 1,
|
||||
|
|
@ -2632,7 +2632,7 @@ pub fn finishWipNavFunc(
|
|||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
assert(wip_nav.func != .none);
|
||||
log.debug("finishWipNavFunc({})", .{nav.fqn.fmt(ip)});
|
||||
log.debug("finishWipNavFunc({f})", .{nav.fqn.fmt(ip)});
|
||||
|
||||
{
|
||||
const external_relocs = &dwarf.debug_aranges.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs;
|
||||
|
|
@ -2733,7 +2733,7 @@ pub fn finishWipNav(
|
|||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
log.debug("finishWipNav({})", .{nav.fqn.fmt(ip)});
|
||||
log.debug("finishWipNav({f})", .{nav.fqn.fmt(ip)});
|
||||
|
||||
try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items);
|
||||
if (wip_nav.debug_line.items.len > 0) {
|
||||
|
|
@ -2765,7 +2765,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
|
|||
const inst_info = nav.srcInst(ip).resolveFull(ip).?;
|
||||
const file = zcu.fileByIndex(inst_info.file);
|
||||
const decl = file.zir.?.getDeclaration(inst_info.inst);
|
||||
log.debug("updateComptimeNav({s}:{d}:{d} %{d} = {})", .{
|
||||
log.debug("updateComptimeNav({s}:{d}:{d} %{d} = {f})", .{
|
||||
file.sub_file_path,
|
||||
decl.src_line + 1,
|
||||
decl.src_column + 1,
|
||||
|
|
@ -3215,7 +3215,7 @@ fn updateLazyType(
|
|||
const ty: Type = .fromInterned(type_index);
|
||||
switch (type_index) {
|
||||
.generic_poison_type => log.debug("updateLazyType({s})", .{"anytype"}),
|
||||
else => log.debug("updateLazyType({})", .{ty.fmt(pt)}),
|
||||
else => log.debug("updateLazyType({f})", .{ty.fmt(pt)}),
|
||||
}
|
||||
|
||||
var wip_nav: WipNav = .{
|
||||
|
|
@ -3243,7 +3243,7 @@ fn updateLazyType(
|
|||
const diw = wip_nav.debug_info.writer(dwarf.gpa);
|
||||
const name = switch (type_index) {
|
||||
.generic_poison_type => "",
|
||||
else => try std.fmt.allocPrint(dwarf.gpa, "{}", .{ty.fmt(pt)}),
|
||||
else => try std.fmt.allocPrint(dwarf.gpa, "{f}", .{ty.fmt(pt)}),
|
||||
};
|
||||
defer dwarf.gpa.free(name);
|
||||
|
||||
|
|
@ -3718,7 +3718,7 @@ fn updateLazyValue(
|
|||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
assert(ip.typeOf(value_index) != .type_type);
|
||||
log.debug("updateLazyValue(@as({}, {}))", .{
|
||||
log.debug("updateLazyValue(@as({f}, {f}))", .{
|
||||
Value.fromInterned(value_index).typeOf(zcu).fmt(pt),
|
||||
Value.fromInterned(value_index).fmtValue(pt),
|
||||
});
|
||||
|
|
@ -4110,7 +4110,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
|
|||
const ip = &zcu.intern_pool;
|
||||
const ty: Type = .fromInterned(type_index);
|
||||
const ty_src_loc = ty.srcLoc(zcu);
|
||||
log.debug("updateContainerType({})", .{ty.fmt(pt)});
|
||||
log.debug("updateContainerType({f})", .{ty.fmt(pt)});
|
||||
|
||||
const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip).?;
|
||||
const file = zcu.fileByIndex(inst_info.file);
|
||||
|
|
@ -4239,7 +4239,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
|
|||
};
|
||||
defer wip_nav.deinit();
|
||||
const diw = wip_nav.debug_info.writer(dwarf.gpa);
|
||||
const name = try std.fmt.allocPrint(dwarf.gpa, "{}", .{ty.fmt(pt)});
|
||||
const name = try std.fmt.allocPrint(dwarf.gpa, "{f}", .{ty.fmt(pt)});
|
||||
defer dwarf.gpa.free(name);
|
||||
|
||||
switch (ip.indexToKey(type_index)) {
|
||||
|
|
|
|||
|
|
@ -702,7 +702,7 @@ pub fn allocateChunk(self: *Elf, args: struct {
|
|||
shdr.sh_addr + res.value,
|
||||
shdr.sh_offset + res.value,
|
||||
});
|
||||
log.debug(" placement {}, {s}", .{
|
||||
log.debug(" placement {f}, {s}", .{
|
||||
res.placement,
|
||||
if (self.atom(res.placement)) |atom_ptr| atom_ptr.name(self) else "",
|
||||
});
|
||||
|
|
@ -869,7 +869,7 @@ fn flushInner(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id) !void {
|
|||
// Dump the state for easy debugging.
|
||||
// State can be dumped via `--debug-log link_state`.
|
||||
if (build_options.enable_logging) {
|
||||
state_log.debug("{}", .{self.dumpState()});
|
||||
state_log.debug("{f}", .{self.dumpState()});
|
||||
}
|
||||
|
||||
// Beyond this point, everything has been allocated a virtual address and we can resolve
|
||||
|
|
@ -3813,12 +3813,12 @@ fn reportDuplicates(self: *Elf, dupes: anytype) error{ HasDuplicates, OutOfMemor
|
|||
|
||||
var err = try diags.addErrorWithNotes(nnotes + 1);
|
||||
try err.addMsg("duplicate symbol definition: {s}", .{sym.name(self)});
|
||||
err.addNote("defined by {}", .{sym.file(self).?.fmtPath()});
|
||||
err.addNote("defined by {f}", .{sym.file(self).?.fmtPath()});
|
||||
|
||||
var inote: usize = 0;
|
||||
while (inote < @min(notes.items.len, max_notes)) : (inote += 1) {
|
||||
const file_ptr = self.file(notes.items[inote]).?;
|
||||
err.addNote("defined by {}", .{file_ptr.fmtPath()});
|
||||
err.addNote("defined by {f}", .{file_ptr.fmtPath()});
|
||||
}
|
||||
|
||||
if (notes.items.len > max_notes) {
|
||||
|
|
@ -3847,7 +3847,7 @@ pub fn addFileError(
|
|||
const diags = &self.base.comp.link_diags;
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg(format, args);
|
||||
err.addNote("while parsing {}", .{self.file(file_index).?.fmtPath()});
|
||||
err.addNote("while parsing {f}", .{self.file(file_index).?.fmtPath()});
|
||||
}
|
||||
|
||||
pub fn failFile(
|
||||
|
|
@ -3874,7 +3874,7 @@ fn fmtShdr(self: *Elf, shdr: elf.Elf64_Shdr) std.fmt.Formatter(FormatShdr, forma
|
|||
|
||||
fn formatShdr(ctx: FormatShdr, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
const shdr = ctx.shdr;
|
||||
try writer.print("{s} : @{x} ({x}) : align({x}) : size({x}) : entsize({x}) : flags({})", .{
|
||||
try writer.print("{s} : @{x} ({x}) : align({x}) : size({x}) : entsize({x}) : flags({f})", .{
|
||||
ctx.elf_file.getShString(shdr.sh_name), shdr.sh_offset,
|
||||
shdr.sh_addr, shdr.sh_addralign,
|
||||
shdr.sh_size, shdr.sh_entsize,
|
||||
|
|
@ -3979,7 +3979,7 @@ fn fmtDumpState(self: *Elf, writer: *std.io.Writer) std.io.Writer.Error!void {
|
|||
|
||||
if (self.zigObjectPtr()) |zig_object| {
|
||||
try writer.print("zig_object({d}) : {s}\n", .{ zig_object.index, zig_object.basename });
|
||||
try writer.print("{}{}", .{
|
||||
try writer.print("{f}{f}", .{
|
||||
zig_object.fmtAtoms(self),
|
||||
zig_object.fmtSymtab(self),
|
||||
});
|
||||
|
|
@ -3988,10 +3988,10 @@ fn fmtDumpState(self: *Elf, writer: *std.io.Writer) std.io.Writer.Error!void {
|
|||
|
||||
for (self.objects.items) |index| {
|
||||
const object = self.file(index).?.object;
|
||||
try writer.print("object({d}) : {}", .{ index, object.fmtPath() });
|
||||
try writer.print("object({d}) : {f}", .{ index, object.fmtPath() });
|
||||
if (!object.alive) try writer.writeAll(" : [*]");
|
||||
try writer.writeByte('\n');
|
||||
try writer.print("{}{}{}{}{}\n", .{
|
||||
try writer.print("{f}{f}{f}{f}{f}\n", .{
|
||||
object.fmtAtoms(self),
|
||||
object.fmtCies(self),
|
||||
object.fmtFdes(self),
|
||||
|
|
@ -4002,18 +4002,18 @@ fn fmtDumpState(self: *Elf, writer: *std.io.Writer) std.io.Writer.Error!void {
|
|||
|
||||
for (shared_objects) |index| {
|
||||
const shared_object = self.file(index).?.shared_object;
|
||||
try writer.print("shared_object({d}) : {} : needed({})", .{
|
||||
try writer.print("shared_object({d}) : {f} : needed({})", .{
|
||||
index, shared_object.path, shared_object.needed,
|
||||
});
|
||||
if (!shared_object.alive) try writer.writeAll(" : [*]");
|
||||
try writer.writeByte('\n');
|
||||
try writer.print("{}\n", .{shared_object.fmtSymtab(self)});
|
||||
try writer.print("{f}\n", .{shared_object.fmtSymtab(self)});
|
||||
}
|
||||
|
||||
if (self.linker_defined_index) |index| {
|
||||
const linker_defined = self.file(index).?.linker_defined;
|
||||
try writer.print("linker_defined({d}) : (linker defined)\n", .{index});
|
||||
try writer.print("{}\n", .{linker_defined.fmtSymtab(self)});
|
||||
try writer.print("{f}\n", .{linker_defined.fmtSymtab(self)});
|
||||
}
|
||||
|
||||
const slice = self.sections.slice();
|
||||
|
|
@ -4036,7 +4036,7 @@ fn fmtDumpState(self: *Elf, writer: *std.io.Writer) std.io.Writer.Error!void {
|
|||
|
||||
try writer.writeAll("Output groups\n");
|
||||
for (self.group_sections.items) |cg| {
|
||||
try writer.print(" shdr({d}) : GROUP({})\n", .{ cg.shndx, cg.cg_ref });
|
||||
try writer.print(" shdr({d}) : GROUP({f})\n", .{ cg.shndx, cg.cg_ref });
|
||||
}
|
||||
|
||||
try writer.writeAll("\nOutput merge sections\n");
|
||||
|
|
@ -4046,7 +4046,7 @@ fn fmtDumpState(self: *Elf, writer: *std.io.Writer) std.io.Writer.Error!void {
|
|||
|
||||
try writer.writeAll("\nOutput shdrs\n");
|
||||
for (slice.items(.shdr), slice.items(.phndx), 0..) |shdr, phndx, shndx| {
|
||||
try writer.print(" shdr({d}) : phdr({?d}) : {}\n", .{
|
||||
try writer.print(" shdr({d}) : phdr({d}) : {f}\n", .{
|
||||
shndx,
|
||||
phndx,
|
||||
self.fmtShdr(shdr),
|
||||
|
|
@ -4054,7 +4054,7 @@ fn fmtDumpState(self: *Elf, writer: *std.io.Writer) std.io.Writer.Error!void {
|
|||
}
|
||||
try writer.writeAll("\nOutput phdrs\n");
|
||||
for (self.phdrs.items, 0..) |phdr, phndx| {
|
||||
try writer.print(" phdr({d}) : {}\n", .{ phndx, self.fmtPhdr(phdr) });
|
||||
try writer.print(" phdr({d}) : {f}\n", .{ phndx, self.fmtPhdr(phdr) });
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -4192,15 +4192,9 @@ pub const Ref = struct {
|
|||
return ref.index == other.index and ref.file == other.file;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
ref: Ref,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
try writer.print("ref({},{})", .{ ref.index, ref.file });
|
||||
pub fn format(ref: Ref, writer: *std.io.Writer, comptime f: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(f.len == 0);
|
||||
try writer.print("ref({d},{d})", .{ ref.index, ref.file });
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -4395,7 +4389,7 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
|
|||
for (atom_list.atoms.keys()[start..i]) |ref| {
|
||||
const atom_ptr = elf_file.atom(ref).?;
|
||||
const file_ptr = atom_ptr.file(elf_file).?;
|
||||
log.debug("atom({}) {s}", .{ ref, atom_ptr.name(elf_file) });
|
||||
log.debug("atom({f}) {s}", .{ ref, atom_ptr.name(elf_file) });
|
||||
for (atom_ptr.relocs(elf_file)) |rel| {
|
||||
const is_reachable = switch (cpu_arch) {
|
||||
.aarch64 => r: {
|
||||
|
|
|
|||
|
|
@ -83,7 +83,7 @@ pub fn parse(
|
|||
.alive = false,
|
||||
};
|
||||
|
||||
log.debug("extracting object '{}' from archive '{}'", .{
|
||||
log.debug("extracting object '{f}' from archive '{f}'", .{
|
||||
@as(Path, object.path), @as(Path, path),
|
||||
});
|
||||
|
||||
|
|
@ -201,19 +201,6 @@ pub const ArSymtab = struct {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
ar: ArSymtab,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = ar;
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
_ = writer;
|
||||
@compileError("do not format ar symtab directly; use fmt instead");
|
||||
}
|
||||
|
||||
const Format = struct {
|
||||
ar: ArSymtab,
|
||||
elf_file: *Elf,
|
||||
|
|
@ -224,7 +211,7 @@ pub const ArSymtab = struct {
|
|||
for (ar.symtab.items, 0..) |entry, i| {
|
||||
const name = ar.strtab.getAssumeExists(entry.off);
|
||||
const file = elf_file.file(entry.file_index).?;
|
||||
try writer.print(" {d}: {s} in file({d})({})\n", .{ i, name, entry.file_index, file.fmtPath() });
|
||||
try writer.print(" {d}: {s} in file({d})({f})\n", .{ i, name, entry.file_index, file.fmtPath() });
|
||||
}
|
||||
}
|
||||
};
|
||||
|
|
@ -273,14 +260,8 @@ pub const ArStrtab = struct {
|
|||
try writer.writeAll(ar.buffer.items);
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
ar: ArStrtab,
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
pub fn format(ar: ArStrtab, writer: *std.io.Writer, comptime fmt: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(fmt.len == 0);
|
||||
try writer.print("{f}", .{std.ascii.hexEscape(ar.buffer.items, .lower)});
|
||||
}
|
||||
};
|
||||
|
|
|
|||
|
|
@ -142,7 +142,7 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
|
|||
}
|
||||
|
||||
pub fn free(self: *Atom, elf_file: *Elf) void {
|
||||
log.debug("freeAtom atom({}) ({s})", .{ self.ref(), self.name(elf_file) });
|
||||
log.debug("freeAtom atom({f}) ({s})", .{ self.ref(), self.name(elf_file) });
|
||||
|
||||
const comp = elf_file.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
|
|
@ -316,7 +316,7 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
|
|||
};
|
||||
// Violation of One Definition Rule for COMDATs.
|
||||
// TODO convert into an error
|
||||
log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
|
||||
log.debug("{f}: {s}: {s} refers to a discarded COMDAT section", .{
|
||||
file_ptr.fmtPath(),
|
||||
self.name(elf_file),
|
||||
sym_name,
|
||||
|
|
@ -519,11 +519,11 @@ fn dataType(symbol: *const Symbol, elf_file: *Elf) u2 {
|
|||
fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) RelocError!void {
|
||||
const diags = &elf_file.base.comp.link_diags;
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("fatal linker error: unhandled relocation type {} at offset 0x{x}", .{
|
||||
try err.addMsg("fatal linker error: unhandled relocation type {f} at offset 0x{x}", .{
|
||||
relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
|
||||
rel.r_offset,
|
||||
});
|
||||
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
return error.RelocFailure;
|
||||
}
|
||||
|
||||
|
|
@ -539,7 +539,7 @@ fn reportTextRelocError(
|
|||
rel.r_offset,
|
||||
symbol.name(elf_file),
|
||||
});
|
||||
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
return error.RelocFailure;
|
||||
}
|
||||
|
||||
|
|
@ -555,7 +555,7 @@ fn reportPicError(
|
|||
rel.r_offset,
|
||||
symbol.name(elf_file),
|
||||
});
|
||||
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("recompile with -fPIC", .{});
|
||||
return error.RelocFailure;
|
||||
}
|
||||
|
|
@ -572,7 +572,7 @@ fn reportNoPicError(
|
|||
rel.r_offset,
|
||||
symbol.name(elf_file),
|
||||
});
|
||||
err.addNote("in {}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
|
||||
err.addNote("recompile with -fno-PIC", .{});
|
||||
return error.RelocFailure;
|
||||
}
|
||||
|
|
@ -823,7 +823,7 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
|
|||
};
|
||||
// Violation of One Definition Rule for COMDATs.
|
||||
// TODO convert into an error
|
||||
log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
|
||||
log.debug("{f}: {s}: {s} refers to a discarded COMDAT section", .{
|
||||
file_ptr.fmtPath(),
|
||||
self.name(elf_file),
|
||||
sym_name,
|
||||
|
|
@ -855,7 +855,7 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
|
|||
|
||||
const args = ResolveArgs{ P, A, S, GOT, 0, 0, DTP };
|
||||
|
||||
relocs_log.debug(" {}: {x}: [{x} => {x}] ({s})", .{
|
||||
relocs_log.debug(" {f}: {x}: [{x} => {x}] ({s})", .{
|
||||
relocation.fmtRelocType(rel.r_type(), cpu_arch),
|
||||
rel.r_offset,
|
||||
P,
|
||||
|
|
@ -918,7 +918,7 @@ const Format = struct {
|
|||
fn default(f: Format, w: *std.io.Writer) std.io.Writer.Error!void {
|
||||
const atom = f.atom;
|
||||
const elf_file = f.elf_file;
|
||||
try w.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x}) : prev({}) : next({})", .{
|
||||
try w.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x}) : prev({f}) : next({f})", .{
|
||||
atom.atom_index, atom.name(elf_file), atom.address(elf_file),
|
||||
atom.output_section_index, atom.alignment.toByteUnits() orelse 0, atom.size,
|
||||
atom.prev_atom_ref, atom.next_atom_ref,
|
||||
|
|
@ -1169,7 +1169,7 @@ const x86_64 = struct {
|
|||
x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..], t) catch {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("could not relax {s}", .{@tagName(r_type)});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
atom.file(elf_file).?.fmtPath(),
|
||||
atom.name(elf_file),
|
||||
rel.r_offset,
|
||||
|
|
@ -1265,7 +1265,7 @@ const x86_64 = struct {
|
|||
}, t),
|
||||
else => return error.RelaxFailure,
|
||||
};
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
const nop: Instruction = try .new(.none, .nop, &.{}, t);
|
||||
try encode(&.{ nop, inst }, code);
|
||||
}
|
||||
|
|
@ -1276,7 +1276,7 @@ const x86_64 = struct {
|
|||
switch (old_inst.encoding.mnemonic) {
|
||||
.mov => {
|
||||
const inst: Instruction = try .new(old_inst.prefix, .lea, &old_inst.ops, t);
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
try encode(&.{inst}, code);
|
||||
},
|
||||
else => return error.RelaxFailure,
|
||||
|
|
@ -1310,11 +1310,11 @@ const x86_64 = struct {
|
|||
|
||||
else => {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("TODO: rewrite {} when followed by {}", .{
|
||||
try err.addMsg("TODO: rewrite {f} when followed by {f}", .{
|
||||
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
|
||||
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
|
||||
});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
self.file(elf_file).?.fmtPath(),
|
||||
self.name(elf_file),
|
||||
rels[0].r_offset,
|
||||
|
|
@ -1366,11 +1366,11 @@ const x86_64 = struct {
|
|||
|
||||
else => {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("TODO: rewrite {} when followed by {}", .{
|
||||
try err.addMsg("TODO: rewrite {f} when followed by {f}", .{
|
||||
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
|
||||
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
|
||||
});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
self.file(elf_file).?.fmtPath(),
|
||||
self.name(elf_file),
|
||||
rels[0].r_offset,
|
||||
|
|
@ -1408,7 +1408,7 @@ const x86_64 = struct {
|
|||
// TODO: hack to force imm32s in the assembler
|
||||
.{ .imm = .s(-129) },
|
||||
}, t) catch unreachable;
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
encode(&.{inst}, code) catch unreachable;
|
||||
},
|
||||
else => unreachable,
|
||||
|
|
@ -1425,7 +1425,7 @@ const x86_64 = struct {
|
|||
// TODO: hack to force imm32s in the assembler
|
||||
.{ .imm = .s(-129) },
|
||||
}, target);
|
||||
relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
|
||||
relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
|
||||
try encode(&.{inst}, code);
|
||||
},
|
||||
else => return error.RelaxFailure,
|
||||
|
|
@ -1457,7 +1457,7 @@ const x86_64 = struct {
|
|||
std.mem.writeInt(i32, insts[12..][0..4], value, .little);
|
||||
try stream.seekBy(-4);
|
||||
try writer.writeAll(&insts);
|
||||
relocs_log.debug(" relaxing {} and {}", .{
|
||||
relocs_log.debug(" relaxing {f} and {f}", .{
|
||||
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
|
||||
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
|
||||
});
|
||||
|
|
@ -1465,11 +1465,11 @@ const x86_64 = struct {
|
|||
|
||||
else => {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("fatal linker error: rewrite {} when followed by {}", .{
|
||||
try err.addMsg("fatal linker error: rewrite {f} when followed by {f}", .{
|
||||
relocation.fmtRelocType(rels[0].r_type(), .x86_64),
|
||||
relocation.fmtRelocType(rels[1].r_type(), .x86_64),
|
||||
});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
self.file(elf_file).?.fmtPath(),
|
||||
self.name(elf_file),
|
||||
rels[0].r_offset,
|
||||
|
|
@ -1653,7 +1653,7 @@ const aarch64 = struct {
|
|||
// TODO: relax
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("TODO: relax ADR_GOT_PAGE", .{});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
atom.file(elf_file).?.fmtPath(),
|
||||
atom.name(elf_file),
|
||||
r_offset,
|
||||
|
|
@ -1943,7 +1943,7 @@ const riscv = struct {
|
|||
// TODO: implement searching forward
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("TODO: find HI20 paired reloc scanning forward", .{});
|
||||
err.addNote("in {}:{s} at offset 0x{x}", .{
|
||||
err.addNote("in {f}:{s} at offset 0x{x}", .{
|
||||
atom.file(elf_file).?.fmtPath(),
|
||||
atom.name(elf_file),
|
||||
rel.r_offset,
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_fi
|
|||
const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
|
||||
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
|
||||
|
||||
log.debug(" atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
|
||||
log.debug(" atom({f}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
|
||||
|
||||
const object = atom_ptr.file(elf_file).?.object;
|
||||
const code = try object.codeDecompressAlloc(elf_file, ref.index);
|
||||
|
|
@ -144,7 +144,7 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *E
|
|||
const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
|
||||
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
|
||||
|
||||
log.debug(" atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
|
||||
log.debug(" atom({f}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
|
||||
|
||||
const object = atom_ptr.file(elf_file).?.object;
|
||||
const code = try object.codeDecompressAlloc(elf_file, ref.index);
|
||||
|
|
@ -172,22 +172,24 @@ const Format = struct {
|
|||
elf_file: *Elf,
|
||||
|
||||
fn default(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
const list, const elf_file = f;
|
||||
const list = f.atom_list;
|
||||
try writer.print("list : @{x} : shdr({d}) : align({x}) : size({x})", .{
|
||||
list.address(elf_file), list.output_section_index,
|
||||
list.alignment.toByteUnits() orelse 0, list.size,
|
||||
list.address(f.elf_file),
|
||||
list.output_section_index,
|
||||
list.alignment.toByteUnits() orelse 0,
|
||||
list.size,
|
||||
});
|
||||
try writer.writeAll(" : atoms{ ");
|
||||
for (list.atoms.keys(), 0..) |ref, i| {
|
||||
try writer.print("{}", .{ref});
|
||||
try writer.print("{f}", .{ref});
|
||||
if (i < list.atoms.keys().len - 1) try writer.writeAll(", ");
|
||||
}
|
||||
try writer.writeAll(" }");
|
||||
}
|
||||
};
|
||||
|
||||
pub fn fmt(list: AtomList, elf_file: *Elf) std.fmt.Formatter(Format, Format.default) {
|
||||
return .{ .data = .{ list, elf_file } };
|
||||
pub fn fmt(atom_list: AtomList, elf_file: *Elf) std.fmt.Formatter(Format, Format.default) {
|
||||
return .{ .data = .{ .atom_list = atom_list, .elf_file = elf_file } };
|
||||
}
|
||||
|
||||
const assert = std.debug.assert;
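Note: where formatting needs extra context (the value plus an *Elf), these files keep a small Format struct and hand it to std.fmt.Formatter bound to a default function, as in the AtomList hunk above. A hedged, self-contained sketch of that pattern with placeholder types standing in for AtomList and *Elf:

const std = @import("std");

const Thing = struct { id: u32 };
const Context = struct { scale: u32 };

const Format = struct {
    thing: Thing,
    ctx: Context,

    // Same shape as Format.default in the hunk above: (data, *std.io.Writer) -> Writer.Error!void.
    fn default(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
        try writer.print("thing({d}) : scaled({d})", .{ f.thing.id, f.thing.id * f.ctx.scale });
    }
};

// Returning std.fmt.Formatter(Format, Format.default) lets callers print the pair with "{f}".
pub fn fmt(thing: Thing, ctx: Context) std.fmt.Formatter(Format, Format.default) {
    return .{ .data = .{ .thing = thing, .ctx = ctx } };
}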
|
||||
|
|
|
|||
|
|
@ -281,7 +281,7 @@ fn initAtoms(
|
|||
elf.SHT_GROUP => {
|
||||
if (shdr.sh_info >= self.symtab.items.len) {
|
||||
// TODO convert into an error
|
||||
log.debug("{}: invalid symbol index in sh_info", .{self.fmtPath()});
|
||||
log.debug("{f}: invalid symbol index in sh_info", .{self.fmtPath()});
|
||||
continue;
|
||||
}
|
||||
const group_info_sym = self.symtab.items[shdr.sh_info];
|
||||
|
|
@ -793,7 +793,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void {
|
|||
if (!isNull(data[end .. end + sh_entsize])) {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("string not null terminated", .{});
|
||||
err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
return error.LinkFailure;
|
||||
}
|
||||
end += sh_entsize;
|
||||
|
|
@ -808,7 +808,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void {
|
|||
if (shdr.sh_size % sh_entsize != 0) {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("size not a multiple of sh_entsize", .{});
|
||||
err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
return error.LinkFailure;
|
||||
}
|
||||
|
||||
|
|
@ -886,7 +886,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{
|
|||
var err = try diags.addErrorWithNotes(2);
|
||||
try err.addMsg("invalid symbol value: {x}", .{esym.st_value});
|
||||
err.addNote("for symbol {s}", .{sym.name(elf_file)});
|
||||
err.addNote("in {}", .{self.fmtPath()});
|
||||
err.addNote("in {f}", .{self.fmtPath()});
|
||||
return error.LinkFailure;
|
||||
};
|
||||
|
||||
|
|
@ -911,7 +911,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{
|
|||
const res = imsec.findSubsection(@intCast(@as(i64, @intCast(esym.st_value)) + rel.r_addend)) orelse {
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("invalid relocation at offset 0x{x}", .{rel.r_offset});
|
||||
err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
err.addNote("in {f}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
|
||||
return error.LinkFailure;
|
||||
};
|
||||
|
||||
|
|
@ -1536,9 +1536,9 @@ pub fn fmtPath(self: Object) std.fmt.Formatter(Object, formatPath) {
|
|||
|
||||
fn formatPath(object: Object, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
if (object.archive) |ar| {
|
||||
try writer.print("{}({})", .{ ar.path, object.path });
|
||||
try writer.print("{f}({f})", .{ ar.path, object.path });
|
||||
} else {
|
||||
try writer.print("{}", .{object.path});
|
||||
try writer.print("{f}", .{object.path});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -519,21 +519,21 @@ pub fn fmtSymtab(self: SharedObject, elf_file: *Elf) std.fmt.Formatter(Format, F
|
|||
const Format = struct {
|
||||
shared: SharedObject,
|
||||
elf_file: *Elf,
|
||||
};
|
||||
|
||||
fn formatSymtab(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
const shared = f.shared;
|
||||
const elf_file = f.elf_file;
|
||||
try writer.writeAll(" globals\n");
|
||||
for (shared.symbols.items, 0..) |sym, i| {
|
||||
const ref = shared.resolveSymbol(@intCast(i), elf_file);
|
||||
if (elf_file.symbol(ref)) |ref_sym| {
|
||||
try writer.print(" {}\n", .{ref_sym.fmt(elf_file)});
|
||||
} else {
|
||||
try writer.print(" {s} : unclaimed\n", .{sym.name(elf_file)});
|
||||
fn symtab(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
const shared = f.shared;
|
||||
const elf_file = f.elf_file;
|
||||
try writer.writeAll(" globals\n");
|
||||
for (shared.symbols.items, 0..) |sym, i| {
|
||||
const ref = shared.resolveSymbol(@intCast(i), elf_file);
|
||||
if (elf_file.symbol(ref)) |ref_sym| {
|
||||
try writer.print(" {f}\n", .{ref_sym.fmt(elf_file)});
|
||||
} else {
|
||||
try writer.print(" {s} : unclaimed\n", .{sym.name(elf_file)});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const SharedObject = @This();
|
||||
|
||||
|
|
|
|||
|
|
@ -338,7 +338,7 @@ const Format = struct {
|
|||
fn default(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
const symbol = f.symbol;
|
||||
const elf_file = f.elf_file;
|
||||
try writer.print("%{d} : {s} : @{x}", .{
|
||||
try writer.print("%{d} : {f} : @{x}", .{
|
||||
symbol.esym_index,
|
||||
symbol.fmtName(elf_file),
|
||||
symbol.address(.{ .plt = false, .trampoline = false }, elf_file),
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ const Format = struct {
|
|||
try writer.print("@{x} : size({x})\n", .{ thunk.value, thunk.size(elf_file) });
|
||||
for (thunk.symbols.keys()) |ref| {
|
||||
const sym = elf_file.symbol(ref).?;
|
||||
try writer.print(" {} : {s} : @{x}\n", .{ ref, sym.name(elf_file), sym.value });
|
||||
try writer.print(" {f} : {s} : @{x}\n", .{ ref, sym.name(elf_file), sym.value });
|
||||
}
|
||||
}
|
||||
};
|
||||
|
|
|
|||
|
|
@ -2199,7 +2199,7 @@ const Format = struct {
|
|||
self: *ZigObject,
|
||||
elf_file: *Elf,
|
||||
|
||||
fn symtab(f: Format, writer: *std.io.Writer.Error) std.io.Writer.Error!void {
|
||||
fn symtab(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
const self = f.self;
|
||||
const elf_file = f.elf_file;
|
||||
try writer.writeAll(" locals\n");
|
||||
|
|
@ -2214,7 +2214,7 @@ const Format = struct {
|
|||
}
|
||||
}
|
||||
|
||||
fn atoms(f: Format, writer: *std.io.Writer.Error) std.io.Writer.Error!void {
|
||||
fn atoms(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
try writer.writeAll(" atoms\n");
|
||||
for (f.self.atoms_indexes.items) |atom_index| {
|
||||
const atom_ptr = f.self.atom(atom_index) orelse continue;
|
||||
|
|
|
|||
|
|
@ -141,7 +141,7 @@ pub const Cie = struct {
|
|||
cie: Cie,
|
||||
elf_file: *Elf,
|
||||
|
||||
fn format2(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
fn default(f: Format, writer: *std.io.Writer) std.io.Writer.Error!void {
|
||||
const cie = f.cie;
|
||||
const elf_file = f.elf_file;
|
||||
const base_addr = cie.address(elf_file);
|
||||
|
|
@ -567,11 +567,11 @@ const riscv = struct {
|
|||
fn reportInvalidReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela) !void {
|
||||
const diags = &elf_file.base.comp.link_diags;
|
||||
var err = try diags.addErrorWithNotes(1);
|
||||
try err.addMsg("invalid relocation type {} at offset 0x{x}", .{
|
||||
try err.addMsg("invalid relocation type {f} at offset 0x{x}", .{
|
||||
relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
|
||||
rel.r_offset,
|
||||
});
|
||||
err.addNote("in {}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()});
|
||||
err.addNote("in {f}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()});
|
||||
return error.RelocFailure;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {
|
|||
const target_sym = elf_file.symbol(ref) orelse continue;
|
||||
const target_atom = target_sym.atom(elf_file) orelse continue;
|
||||
target_atom.alive = true;
|
||||
gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
|
||||
gc_track_live_log.debug("{f}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
|
||||
if (markAtom(target_atom)) markLive(target_atom, elf_file);
|
||||
}
|
||||
}
|
||||
|
|
@ -128,7 +128,7 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {
|
|||
}
|
||||
const target_atom = target_sym.atom(elf_file) orelse continue;
|
||||
target_atom.alive = true;
|
||||
gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
|
||||
gc_track_live_log.debug("{f}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
|
||||
if (markAtom(target_atom)) markLive(target_atom, elf_file);
|
||||
}
|
||||
}
|
||||
|
|
@ -170,7 +170,7 @@ pub fn dumpPrunedAtoms(elf_file: *Elf) !void {
|
|||
const atom = file.atom(atom_index) orelse continue;
|
||||
if (!atom.alive)
|
||||
// TODO should we simply print to stderr?
|
||||
try stderr.print("link: removing unused section '{s}' in file '{}'\n", .{
|
||||
try stderr.print("link: removing unused section '{s}' in file '{f}'\n", .{
|
||||
atom.name(elf_file),
|
||||
atom.file(elf_file).?.fmtPath(),
|
||||
});
|
||||
|
|
@ -185,15 +185,9 @@ const Level = struct {
|
|||
self.value += 1;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
self: *const @This(),
|
||||
comptime unused_fmt_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = unused_fmt_string;
|
||||
_ = options;
|
||||
try writer.writeByteNTimes(' ', self.value);
|
||||
pub fn format(self: *const @This(), w: *std.io.Writer, comptime fmt: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(fmt.len == 0);
|
||||
try w.splatByteAll(' ', self.value);
|
||||
}
|
||||
};
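Note: the Level hunk above also picks up a Writer API rename: repeated single-byte output moves from writeByteNTimes on the old generic writer to splatByteAll on *std.io.Writer (the same call appears later for indentation in the ZIR writer). A minimal sketch of an indentation helper in the new style, with a hypothetical name:

const std = @import("std");

fn writeIndent(w: *std.io.Writer, level: usize) std.io.Writer.Error!void {
    // splatByteAll repeats one byte `level` times, like the old writeByteNTimes.
    try w.splatByteAll(' ', level);
}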
|
||||
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
|
|||
try elf_file.allocateNonAllocSections();
|
||||
|
||||
if (build_options.enable_logging) {
|
||||
state_log.debug("{}", .{elf_file.dumpState()});
|
||||
state_log.debug("{f}", .{elf_file.dumpState()});
|
||||
}
|
||||
|
||||
try elf_file.writeMergeSections();
|
||||
|
|
@ -96,8 +96,8 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
|
|||
};
|
||||
|
||||
if (build_options.enable_logging) {
|
||||
state_log.debug("ar_symtab\n{}\n", .{ar_symtab.fmt(elf_file)});
|
||||
state_log.debug("ar_strtab\n{}\n", .{ar_strtab});
|
||||
state_log.debug("ar_symtab\n{f}\n", .{ar_symtab.fmt(elf_file)});
|
||||
state_log.debug("ar_strtab\n{f}\n", .{ar_strtab});
|
||||
}
|
||||
|
||||
var buffer = std.ArrayList(u8).init(gpa);
|
||||
|
|
@ -170,7 +170,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation) !void {
|
|||
try elf_file.allocateNonAllocSections();
|
||||
|
||||
if (build_options.enable_logging) {
|
||||
state_log.debug("{}", .{elf_file.dumpState()});
|
||||
state_log.debug("{f}", .{elf_file.dumpState()});
|
||||
}
|
||||
|
||||
try writeAtoms(elf_file);
|
||||
|
|
|
|||
|
|
@ -616,7 +616,7 @@ pub const GotSection = struct {
|
|||
try writer.writeAll("GOT\n");
|
||||
for (got.entries.items) |entry| {
|
||||
const symbol = elf_file.symbol(entry.ref).?;
|
||||
try writer.print(" {d}@0x{x} => {}@0x{x} ({s})\n", .{
|
||||
try writer.print(" {d}@0x{x} => {f}@0x{x} ({s})\n", .{
|
||||
entry.cell_index,
|
||||
entry.address(elf_file),
|
||||
entry.ref,
|
||||
|
|
@ -752,7 +752,7 @@ pub const PltSection = struct {
|
|||
try writer.writeAll("PLT\n");
|
||||
for (plt.symbols.items, 0..) |ref, i| {
|
||||
const symbol = elf_file.symbol(ref).?;
|
||||
try writer.print(" {d}@0x{x} => {}@0x{x} ({s})\n", .{
|
||||
try writer.print(" {d}@0x{x} => {f}@0x{x} ({s})\n", .{
|
||||
i,
|
||||
symbol.pltAddress(elf_file),
|
||||
ref,
|
||||
|
|
|
|||
|
|
@ -437,7 +437,7 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
|
|||
try argv.append(try allocPrint(arena, "-PDBALTPATH:{s}", .{out_pdb_basename}));
|
||||
}
|
||||
if (comp.version) |version| {
|
||||
try argv.append(try allocPrint(arena, "-VERSION:{}.{}", .{ version.major, version.minor }));
|
||||
try argv.append(try allocPrint(arena, "-VERSION:{f}.{f}", .{ version.major, version.minor }));
|
||||
}
|
||||
|
||||
if (target_util.llvmMachineAbi(target)) |mabi| {
|
||||
|
|
@ -507,7 +507,7 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
|
|||
|
||||
if (comp.emit_implib) |raw_emit_path| {
|
||||
const path = try comp.resolveEmitPathFlush(arena, .temp, raw_emit_path);
|
||||
try argv.append(try allocPrint(arena, "-IMPLIB:{}", .{path}));
|
||||
try argv.append(try allocPrint(arena, "-IMPLIB:{f}", .{path}));
|
||||
}
|
||||
|
||||
if (comp.config.link_libc) {
|
||||
|
|
@ -533,7 +533,7 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
|
|||
},
|
||||
.object, .archive => |obj| {
|
||||
if (obj.must_link) {
|
||||
argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{}", .{@as(Cache.Path, obj.path)}));
|
||||
argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{f}", .{@as(Cache.Path, obj.path)}));
|
||||
} else {
|
||||
argv.appendAssumeCapacity(try obj.path.toString(arena));
|
||||
}
|
||||
|
|
@ -1216,7 +1216,7 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
|
|||
if (target.os.versionRange().gnuLibCVersion().?.order(rem_in) != .lt) continue;
|
||||
}
|
||||
|
||||
const lib_path = try std.fmt.allocPrint(arena, "{}{c}lib{s}.so.{d}", .{
|
||||
const lib_path = try std.fmt.allocPrint(arena, "{f}{c}lib{s}.so.{d}", .{
|
||||
comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
|
||||
});
|
||||
try argv.append(lib_path);
|
||||
|
|
@ -1229,14 +1229,14 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
|
|||
}));
|
||||
} else if (target.isFreeBSDLibC()) {
|
||||
for (freebsd.libs) |lib| {
|
||||
const lib_path = try std.fmt.allocPrint(arena, "{}{c}lib{s}.so.{d}", .{
|
||||
const lib_path = try std.fmt.allocPrint(arena, "{f}{c}lib{s}.so.{d}", .{
|
||||
comp.freebsd_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
|
||||
});
|
||||
try argv.append(lib_path);
|
||||
}
|
||||
} else if (target.isNetBSDLibC()) {
|
||||
for (netbsd.libs) |lib| {
|
||||
const lib_path = try std.fmt.allocPrint(arena, "{}{c}lib{s}.so.{d}", .{
|
||||
const lib_path = try std.fmt.allocPrint(arena, "{f}{c}lib{s}.so.{d}", .{
|
||||
comp.netbsd_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
|
||||
});
|
||||
try argv.append(lib_path);
|
||||
|
|
|
|||
|
|
@ -4271,7 +4271,7 @@ pub const Platform = struct {
|
|||
pub fn allocPrintTarget(plat: Platform, gpa: Allocator, cpu_arch: std.Target.Cpu.Arch) error{OutOfMemory}![]u8 {
|
||||
var buffer = std.ArrayList(u8).init(gpa);
|
||||
defer buffer.deinit();
|
||||
try buffer.writer().print("{}", .{plat.fmtTarget(cpu_arch)});
|
||||
try buffer.writer().print("{f}", .{plat.fmtTarget(cpu_arch)});
|
||||
return buffer.toOwnedSlice();
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -71,7 +71,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
|
|||
.mtime = hdr.date() catch 0,
|
||||
};
|
||||
|
||||
log.debug("extracting object '{}' from archive '{}'", .{ object.path, path });
|
||||
log.debug("extracting object '{f}' from archive '{f}'", .{ object.path, path });
|
||||
|
||||
try self.objects.append(gpa, object);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -602,7 +602,7 @@ pub fn resolveRelocs(self: Atom, macho_file: *MachO, buffer: []u8) !void {
|
|||
};
|
||||
try macho_file.reportParseError2(
|
||||
file.getIndex(),
|
||||
"{s}: 0x{x}: 0x{x}: failed to relax relocation: type {}, target {s}",
|
||||
"{s}: 0x{x}: 0x{x}: failed to relax relocation: type {f}, target {s}",
|
||||
.{
|
||||
name,
|
||||
self.getAddress(macho_file),
|
||||
|
|
|
|||
|
|
@ -445,7 +445,7 @@ pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde
|
|||
.func => return,
|
||||
.variable => |variable| Value.fromInterned(variable.init),
|
||||
.@"extern" => {
|
||||
log.debug("found extern decl: {}", .{nav.name.fmt(ip)});
|
||||
log.debug("found extern decl: {f}", .{nav.name.fmt(ip)});
|
||||
return;
|
||||
},
|
||||
else => nav_val,
|
||||
|
|
@ -675,7 +675,7 @@ pub fn flush(
|
|||
const off = self.getAddr(text_i, .t);
|
||||
text_i += out.code.len;
|
||||
atom.offset = off;
|
||||
log.debug("write text nav 0x{x} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ nav_index, nav.name.fmt(&pt.zcu.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off });
|
||||
log.debug("write text nav 0x{x} ({f}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ nav_index, nav.name.fmt(&pt.zcu.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off });
|
||||
if (!self.sixtyfour_bit) {
|
||||
mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(off), target.cpu.arch.endian());
|
||||
} else {
|
||||
|
|
@ -974,11 +974,11 @@ pub fn seeNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
|
|||
self.etext_edata_end_atom_indices[2] = atom_idx;
|
||||
}
|
||||
try self.updateFinish(pt, nav_index);
|
||||
log.debug("seeNav(extern) for {} (got_addr=0x{x})", .{
|
||||
log.debug("seeNav(extern) for {f} (got_addr=0x{x})", .{
|
||||
nav.name.fmt(ip),
|
||||
self.getAtom(atom_idx).getOffsetTableAddress(self),
|
||||
});
|
||||
} else log.debug("seeNav for {}", .{nav.name.fmt(ip)});
|
||||
} else log.debug("seeNav for {f}", .{nav.name.fmt(ip)});
|
||||
return atom_idx;
|
||||
}
|
||||
|
||||
|
|
@ -1043,7 +1043,7 @@ fn updateLazySymbolAtom(
|
|||
defer code_buffer.deinit(gpa);
|
||||
|
||||
// create the symbol for the name
|
||||
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
|
||||
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{f}", .{
|
||||
@tagName(sym.kind),
|
||||
Type.fromInterned(sym.ty).fmt(pt),
|
||||
});
|
||||
|
|
@ -1314,7 +1314,7 @@ pub fn getNavVAddr(
|
|||
) !u64 {
|
||||
const ip = &pt.zcu.intern_pool;
|
||||
const nav = ip.getNav(nav_index);
|
||||
log.debug("getDeclVAddr for {}", .{nav.name.fmt(ip)});
|
||||
log.debug("getDeclVAddr for {f}", .{nav.name.fmt(ip)});
|
||||
if (nav.getExtern(ip) != null) {
|
||||
if (nav.name.eqlSlice("etext", ip)) {
|
||||
try self.addReloc(reloc_info.parent.atom_index, .{
|
||||
|
|
|
|||
|
|
@ -547,7 +547,7 @@ pub const SourceLocation = enum(u32) {
|
|||
switch (sl.unpack(wasm)) {
|
||||
.none => unreachable,
|
||||
.zig_object_nofile => diags.addError("zig compilation unit: " ++ f, args),
|
||||
.object_index => |i| diags.addError("{}: " ++ f, .{i.ptr(wasm).path} ++ args),
|
||||
.object_index => |i| diags.addError("{f}: " ++ f, .{i.ptr(wasm).path} ++ args),
|
||||
.source_location_index => @panic("TODO"),
|
||||
}
|
||||
}
|
||||
|
|
@ -579,9 +579,9 @@ pub const SourceLocation = enum(u32) {
|
|||
.object_index => |i| {
|
||||
const obj = i.ptr(wasm);
|
||||
return if (obj.archive_member_name.slice(wasm)) |obj_name|
|
||||
try bundle.printString("{} ({s}): {s}", .{ obj.path, std.fs.path.basename(obj_name), msg })
|
||||
try bundle.printString("{f} ({s}): {s}", .{ obj.path, std.fs.path.basename(obj_name), msg })
|
||||
else
|
||||
try bundle.printString("{}: {s}", .{ obj.path, msg });
|
||||
try bundle.printString("{f}: {s}", .{ obj.path, msg });
|
||||
},
|
||||
.source_location_index => @panic("TODO"),
|
||||
};
|
||||
|
|
@ -2126,14 +2126,8 @@ pub const FunctionType = extern struct {
|
|||
wasm: *const Wasm,
|
||||
ft: FunctionType,
|
||||
|
||||
pub fn format(
|
||||
self: Formatter,
|
||||
comptime format_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
if (format_string.len != 0) std.fmt.invalidFmtError(format_string, self);
|
||||
_ = options;
|
||||
pub fn format(self: Formatter, writer: *std.io.Writer, comptime f: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(f.len == 0);
|
||||
const params = self.ft.params.slice(self.wasm);
|
||||
const returns = self.ft.returns.slice(self.wasm);
|
||||
|
||||
|
|
@ -2912,9 +2906,8 @@ pub const Feature = packed struct(u8) {
|
|||
@"=",
|
||||
};
|
||||
|
||||
pub fn format(feature: Feature, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void {
|
||||
_ = opt;
|
||||
_ = fmt;
|
||||
pub fn format(feature: Feature, writer: *std.io.Writer, comptime fmt: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(fmt.len == 0);
|
||||
try writer.print("{s} {s}", .{ @tagName(feature.prefix), @tagName(feature.tag) });
|
||||
}
|
||||
|
||||
|
|
@ -3036,7 +3029,7 @@ fn openParseObjectReportingFailure(wasm: *Wasm, path: Path) void {
|
|||
}
|
||||
|
||||
fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
|
||||
log.debug("parseObject {}", .{obj.path});
|
||||
log.debug("parseObject {f}", .{obj.path});
|
||||
const gpa = wasm.base.comp.gpa;
|
||||
const gc_sections = wasm.base.gc_sections;
|
||||
|
||||
|
|
@ -3060,7 +3053,7 @@ fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
|
|||
}
|
||||
|
||||
fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void {
|
||||
log.debug("parseArchive {}", .{obj.path});
|
||||
log.debug("parseArchive {f}", .{obj.path});
|
||||
const gpa = wasm.base.comp.gpa;
|
||||
const gc_sections = wasm.base.gc_sections;
|
||||
|
||||
|
|
@ -3196,7 +3189,7 @@ pub fn updateFunc(
|
|||
const is_obj = zcu.comp.config.output_mode == .Obj;
|
||||
const target = &zcu.comp.root_mod.resolved_target.result;
|
||||
const owner_nav = zcu.funcInfo(func_index).owner_nav;
|
||||
log.debug("updateFunc {}", .{ip.getNav(owner_nav).fqn.fmt(ip)});
|
||||
log.debug("updateFunc {f}", .{ip.getNav(owner_nav).fqn.fmt(ip)});
|
||||
|
||||
// For Wasm, we do not lower the MIR to code just yet. That lowering happens during `flush`,
|
||||
// after garbage collection, which can affect function and global indexes, which affects the
|
||||
|
|
@ -3307,7 +3300,7 @@ pub fn updateNav(wasm: *Wasm, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index
|
|||
.variable => |variable| .{ variable.init, variable.owner_nav },
|
||||
else => .{ nav.status.fully_resolved.val, nav_index },
|
||||
};
|
||||
//log.debug("updateNav {} {d}", .{ nav.fqn.fmt(ip), chased_nav_index });
|
||||
//log.debug("updateNav {f} {d}", .{ nav.fqn.fmt(ip), chased_nav_index });
|
||||
assert(!wasm.imports.contains(chased_nav_index));
|
||||
|
||||
if (nav_init != .none and !Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) {
|
||||
|
|
@ -4347,7 +4340,7 @@ fn resolveFunctionSynthetic(
|
|||
});
|
||||
if (import.type != correct_func_type) {
|
||||
const diags = &wasm.base.comp.link_diags;
|
||||
return import.source_location.fail(diags, "synthetic function {s} {} imported with incorrect signature {}", .{
|
||||
return import.source_location.fail(diags, "synthetic function {s} {f} imported with incorrect signature {f}", .{
|
||||
@tagName(res), correct_func_type.fmt(wasm), import.type.fmt(wasm),
|
||||
});
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,14 +39,8 @@ pub fn TableSection(comptime Entry: type) type {
|
|||
return self.entries.items.len;
|
||||
}
|
||||
|
||||
pub fn format(
|
||||
self: Self,
|
||||
comptime unused_format_string: []const u8,
|
||||
options: std.fmt.FormatOptions,
|
||||
writer: anytype,
|
||||
) !void {
|
||||
_ = options;
|
||||
comptime assert(unused_format_string.len == 0);
|
||||
pub fn format(self: Self, writer: *std.io.Writer, comptime f: []const u8) std.io.Writer.Error!void {
|
||||
comptime assert(f.len == 0);
|
||||
try writer.writeAll("TableSection:\n");
|
||||
for (self.entries.items, 0..) |entry, i| {
|
||||
try writer.print(" {d} => {}\n", .{ i, entry });
|
||||
|
|
|
|||
|
|
@ -5296,7 +5296,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
|
|||
const s = fs.path.sep_str;
|
||||
const tmp_sub_path = "tmp" ++ s ++ results_tmp_file_nonce;
|
||||
const stdout = dirs.local_cache.handle.readFileAlloc(arena, tmp_sub_path, 50 * 1024 * 1024) catch |err| {
|
||||
fatal("unable to read results of configure phase from '{}{s}': {s}", .{
|
||||
fatal("unable to read results of configure phase from '{f}{s}': {s}", .{
|
||||
dirs.local_cache, tmp_sub_path, @errorName(err),
|
||||
});
|
||||
};
|
||||
|
|
|
|||
|
|
@ -64,7 +64,7 @@ pub fn cmdTargets(
|
|||
{
|
||||
var glibc_obj = try root_obj.beginTupleField("glibc", .{});
|
||||
for (glibc_abi.all_versions) |ver| {
|
||||
const tmp = try std.fmt.allocPrint(allocator, "{}", .{ver});
|
||||
const tmp = try std.fmt.allocPrint(allocator, "{f}", .{ver});
|
||||
defer allocator.free(tmp);
|
||||
try glibc_obj.field(tmp, .{});
|
||||
}
|
||||
|
|
|
|||
|
|
@ -76,14 +76,15 @@ pub fn print(
|
|||
.@"extern" => |e| try writer.print("(extern '{f}')", .{e.name.fmt(ip)}),
|
||||
.func => |func| try writer.print("(function '{f}')", .{ip.getNav(func.owner_nav).name.fmt(ip)}),
|
||||
.int => |int| switch (int.storage) {
|
||||
inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
|
||||
inline .u64, .i64 => |x| try writer.print("{d}", .{x}),
|
||||
.big_int => |x| try writer.print("{fd}", .{x}),
|
||||
.lazy_align => |ty| if (opt_sema != null) {
|
||||
const a = try Type.fromInterned(ty).abiAlignmentSema(pt);
|
||||
try writer.print("{}", .{a.toByteUnits() orelse 0});
|
||||
try writer.print("{d}", .{a.toByteUnits() orelse 0});
|
||||
} else try writer.print("@alignOf({f})", .{Type.fromInterned(ty).fmt(pt)}),
|
||||
.lazy_size => |ty| if (opt_sema != null) {
|
||||
const s = try Type.fromInterned(ty).abiSizeSema(pt);
|
||||
try writer.print("{}", .{s});
|
||||
try writer.print("{d}", .{s});
|
||||
} else try writer.print("@sizeOf({f})", .{Type.fromInterned(ty).fmt(pt)}),
|
||||
},
|
||||
.err => |err| try writer.print("error.{f}", .{
|
||||
|
|
|
|||
|
|
@ -1212,8 +1212,8 @@ const Writer = struct {
|
|||
|
||||
const name = self.code.nullTerminatedString(output.data.name);
|
||||
const constraint = self.code.nullTerminatedString(output.data.constraint);
|
||||
try stream.print("output({fp}, \"{f}\", ", .{
|
||||
std.zig.fmtId(name), std.zig.fmtString(constraint),
|
||||
try stream.print("output({f}, \"{f}\", ", .{
|
||||
std.zig.fmtIdP(name), std.zig.fmtString(constraint),
|
||||
});
|
||||
try self.writeFlag(stream, "->", is_type);
|
||||
try self.writeInstRef(stream, output.data.operand);
|
||||
|
|
@ -1231,8 +1231,8 @@ const Writer = struct {
|
|||
|
||||
const name = self.code.nullTerminatedString(input.data.name);
|
||||
const constraint = self.code.nullTerminatedString(input.data.constraint);
|
||||
try stream.print("input({fp}, \"{f}\", ", .{
|
||||
std.zig.fmtId(name), std.zig.fmtString(constraint),
|
||||
try stream.print("input({f}, \"{f}\", ", .{
|
||||
std.zig.fmtIdP(name), std.zig.fmtString(constraint),
|
||||
});
|
||||
try self.writeInstRef(stream, input.data.operand);
|
||||
try stream.writeAll(")");
|
||||
|
|
@ -1247,7 +1247,7 @@ const Writer = struct {
|
|||
const str_index = self.code.extra[extra_i];
|
||||
extra_i += 1;
|
||||
const clobber = self.code.nullTerminatedString(@enumFromInt(str_index));
|
||||
try stream.print("{fp}", .{std.zig.fmtId(clobber)});
|
||||
try stream.print("{f}", .{std.zig.fmtIdP(clobber)});
|
||||
if (i + 1 < clobbers_len) {
|
||||
try stream.writeAll(", ");
|
||||
}
|
||||
|
|
@ -1511,7 +1511,7 @@ const Writer = struct {
|
|||
try self.writeFlag(stream, "comptime ", field.is_comptime);
|
||||
if (field.name != .empty) {
|
||||
const field_name = self.code.nullTerminatedString(field.name);
|
||||
try stream.print("{fp}: ", .{std.zig.fmtId(field_name)});
|
||||
try stream.print("{f}: ", .{std.zig.fmtIdP(field_name)});
|
||||
} else {
|
||||
try stream.print("@\"{d}\": ", .{i});
|
||||
}
|
||||
|
|
@ -1674,7 +1674,7 @@ const Writer = struct {
|
|||
extra_index += 1;
|
||||
|
||||
try stream.splatByteAll(' ', self.indent);
|
||||
try stream.print("{fp}", .{std.zig.fmtId(field_name)});
|
||||
try stream.print("{f}", .{std.zig.fmtIdP(field_name)});
|
||||
|
||||
if (has_type) {
|
||||
const field_type = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
|
||||
|
|
@ -1808,7 +1808,7 @@ const Writer = struct {
|
|||
extra_index += 1;
|
||||
|
||||
try stream.splatByteAll(' ', self.indent);
|
||||
try stream.print("{fp}", .{std.zig.fmtId(field_name)});
|
||||
try stream.print("{f}", .{std.zig.fmtIdP(field_name)});
|
||||
|
||||
if (has_tag_value) {
|
||||
const tag_value_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
|
||||
|
|
@ -1913,7 +1913,7 @@ const Writer = struct {
|
|||
const name_index: Zir.NullTerminatedString = @enumFromInt(self.code.extra[extra_index]);
|
||||
const name = self.code.nullTerminatedString(name_index);
|
||||
try stream.splatByteAll(' ', self.indent);
|
||||
try stream.print("{fp},\n", .{std.zig.fmtId(name)});
|
||||
try stream.print("{f},\n", .{std.zig.fmtIdP(name)});
|
||||
}
|
||||
|
||||
self.indent -= 2;
|
||||
|
|
|
|||
|
|
@ -149,7 +149,7 @@ pub fn RegisterManager(
|
|||
/// Only the owner of the `RegisterLock` can unlock the
|
||||
/// register later.
|
||||
pub fn lockRegIndex(self: *Self, tracked_index: TrackedIndex) ?RegisterLock {
|
||||
log.debug("locking {f}", .{regAtTrackedIndex(tracked_index)});
|
||||
log.debug("locking {}", .{regAtTrackedIndex(tracked_index)});
|
||||
if (self.isRegIndexLocked(tracked_index)) {
|
||||
log.debug(" register already locked", .{});
|
||||
return null;
|
||||
|
|
@ -164,7 +164,7 @@ pub fn RegisterManager(
|
|||
/// Like `lockReg` but asserts the register was unused always
|
||||
/// returning a valid lock.
|
||||
pub fn lockRegIndexAssumeUnused(self: *Self, tracked_index: TrackedIndex) RegisterLock {
|
||||
log.debug("locking asserting free {f}", .{regAtTrackedIndex(tracked_index)});
|
||||
log.debug("locking asserting free {}", .{regAtTrackedIndex(tracked_index)});
|
||||
assert(!self.isRegIndexLocked(tracked_index));
|
||||
self.locked_registers.set(tracked_index);
|
||||
return RegisterLock{ .tracked_index = tracked_index };
|
||||
|
|
@ -202,7 +202,7 @@ pub fn RegisterManager(
|
|||
/// Requires `RegisterLock` to unlock a register.
|
||||
/// Call `lockReg` to obtain the lock first.
|
||||
pub fn unlockReg(self: *Self, lock: RegisterLock) void {
|
||||
log.debug("unlocking {f}", .{regAtTrackedIndex(lock.tracked_index)});
|
||||
log.debug("unlocking {}", .{regAtTrackedIndex(lock.tracked_index)});
|
||||
self.locked_registers.unset(lock.tracked_index);
|
||||
}
|
||||
|
||||
|
|
@ -238,7 +238,7 @@ pub fn RegisterManager(
|
|||
if (i < count) return null;
|
||||
|
||||
for (regs, insts) |reg, inst| {
|
||||
log.debug("tryAllocReg {f} for inst {f}", .{ reg, inst });
|
||||
log.debug("tryAllocReg {} for inst {f}", .{ reg, inst });
|
||||
self.markRegAllocated(reg);
|
||||
|
||||
if (inst) |tracked_inst| {
|
||||
|
|
@ -317,7 +317,7 @@ pub fn RegisterManager(
|
|||
tracked_index: TrackedIndex,
|
||||
inst: ?Air.Inst.Index,
|
||||
) AllocationError!void {
|
||||
log.debug("getReg {f} for inst {f}", .{ regAtTrackedIndex(tracked_index), inst });
|
||||
log.debug("getReg {} for inst {f}", .{ regAtTrackedIndex(tracked_index), inst });
|
||||
if (!self.isRegIndexFree(tracked_index)) {
|
||||
// Move the instruction that was previously there to a
|
||||
// stack allocation.
|
||||
|
|
@ -330,7 +330,7 @@ pub fn RegisterManager(
|
|||
self.getRegIndexAssumeFree(tracked_index, inst);
|
||||
}
|
||||
pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocationError!void {
|
||||
log.debug("getting reg: {f}", .{reg});
|
||||
log.debug("getting reg: {}", .{reg});
|
||||
return self.getRegIndex(indexOfRegIntoTracked(reg) orelse return, inst);
|
||||
}
|
||||
pub fn getKnownReg(
|
||||
|
|
@ -349,7 +349,7 @@ pub fn RegisterManager(
|
|||
tracked_index: TrackedIndex,
|
||||
inst: ?Air.Inst.Index,
|
||||
) void {
|
||||
log.debug("getRegAssumeFree {f} for inst {f}", .{ regAtTrackedIndex(tracked_index), inst });
|
||||
log.debug("getRegAssumeFree {} for inst {f}", .{ regAtTrackedIndex(tracked_index), inst });
|
||||
self.markRegIndexAllocated(tracked_index);
|
||||
|
||||
assert(self.isRegIndexFree(tracked_index));
|
||||
|
|
@ -364,7 +364,7 @@ pub fn RegisterManager(
|
|||
|
||||
/// Marks the specified register as free
|
||||
pub fn freeRegIndex(self: *Self, tracked_index: TrackedIndex) void {
|
||||
log.debug("freeing register {f}", .{regAtTrackedIndex(tracked_index)});
|
||||
log.debug("freeing register {}", .{regAtTrackedIndex(tracked_index)});
|
||||
self.registers[tracked_index] = undefined;
|
||||
self.markRegIndexFree(tracked_index);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3327,7 +3327,7 @@ fn transConstantExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used:
|
|||
return maybeSuppressResult(c, used, as_node);
|
||||
},
|
||||
else => |kind| {
|
||||
return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "unsupported constant expression kind '{}'", .{kind});
|
||||
return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "unsupported constant expression kind '{f}'", .{kind});
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -5832,7 +5832,7 @@ fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
|
|||
num += c - 'A' + 10;
|
||||
},
|
||||
else => {
|
||||
i += std.fmt.printInt(bytes[i..], num, 16, .lower, std.fmt.FormatOptions{ .fill = '0', .width = 2 });
|
||||
i += std.fmt.printInt(bytes[i..], num, 16, .lower, .{ .fill = '0', .width = 2 });
|
||||
num = 0;
|
||||
if (c == '\\')
|
||||
state = .escape
|
||||
|
|
@ -5858,7 +5858,7 @@ fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
|
|||
};
|
||||
num += c - '0';
|
||||
} else {
|
||||
i += std.fmt.printInt(bytes[i..], num, 16, .lower, std.fmt.FormatOptions{ .fill = '0', .width = 2 });
|
||||
i += std.fmt.printInt(bytes[i..], num, 16, .lower, .{ .fill = '0', .width = 2 });
|
||||
num = 0;
|
||||
count = 0;
|
||||
if (c == '\\')
|
||||
|
|
@ -5872,7 +5872,7 @@ fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
|
|||
}
|
||||
}
|
||||
if (state == .hex or state == .octal)
|
||||
i += std.fmt.printInt(bytes[i..], num, 16, .lower, std.fmt.FormatOptions{ .fill = '0', .width = 2 });
|
||||
i += std.fmt.printInt(bytes[i..], num, 16, .lower, .{ .fill = '0', .width = 2 });
|
||||
return bytes[0..i];
|
||||
}
|
||||
|
||||
|
|
|
|||