Dwarf: rework self-hosted debug info from scratch

This is in preparation for incremental compilation and for actually being
able to debug executables built by the x86_64 backend.
This commit is contained in:
Jacob Young 2024-08-06 11:22:37 -04:00
parent 90989be0e3
commit ef11bc9899
50 changed files with 5215 additions and 3590 deletions

View file

@ -549,6 +549,15 @@ pub fn build(b: *std.Build) !void {
test_step.dependOn(tests.addStackTraceTests(b, test_filters, optimization_modes));
test_step.dependOn(tests.addCliTests(b));
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filters, optimization_modes));
if (tests.addDebuggerTests(b, .{
.test_filters = test_filters,
.gdb = b.option([]const u8, "gdb", "path to gdb binary"),
.lldb = b.option([]const u8, "lldb", "path to lldb binary"),
.optimize_modes = optimization_modes,
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.skip_libc = skip_libc,
})) |test_debugger_step| test_step.dependOn(test_debugger_step);
try addWasiUpdateStep(b, version);

View file

@ -64,6 +64,7 @@ stage3-debug/bin/zig build \
stage3-debug/bin/zig build test docs \
--maxrss 21000000000 \
-Dlldb=$HOME/deps/lldb-zig/Debug/bin/lldb \
-fqemu \
-fwasmtime \
-Dstatic-llvm \

View file

@ -64,6 +64,7 @@ stage3-release/bin/zig build \
stage3-release/bin/zig build test docs \
--maxrss 21000000000 \
-Dlldb=$HOME/deps/lldb-zig/Release/bin/lldb \
-fqemu \
-fwasmtime \
-Dstatic-llvm \

View file

@ -359,6 +359,24 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return m.len;
}
pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
/// Returns a `std.io.Writer` that appends to the list without ever
/// growing it: writes beyond the current `capacity` fail with
/// `error.OutOfMemory` instead of reallocating.
pub fn fixedWriter(self: *Self) FixedWriter {
    const writer: FixedWriter = .{ .context = self };
    return writer;
}
/// Exists solely to satisfy the `std.io.Writer` function signature.
/// Appends `m` only if it fits in the spare capacity; never allocates.
fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
    const remaining = self.capacity - self.items.len;
    if (remaining < m.len) return error.OutOfMemory;
    self.appendSliceAssumeCapacity(m);
    return m.len;
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.

View file

@ -95,6 +95,9 @@ pub const LNE = struct {
pub const set_discriminator = 0x04;
pub const lo_user = 0x80;
pub const hi_user = 0xff;
// Zig extensions
pub const ZIG_set_decl = 0xec;
};
pub const UT = struct {
@ -118,6 +121,8 @@ pub const LNCT = struct {
pub const lo_user = 0x2000;
pub const hi_user = 0x3fff;
pub const LLVM_source = 0x2001;
};
pub const RLE = struct {
@ -142,6 +147,37 @@ pub const CC = enum(u8) {
GNU_renesas_sh = 0x40,
GNU_borland_fastcall_i386 = 0x41,
BORLAND_safecall = 0xb0,
BORLAND_stdcall = 0xb1,
BORLAND_pascal = 0xb2,
BORLAND_msfastcall = 0xb3,
BORLAND_msreturn = 0xb4,
BORLAND_thiscall = 0xb5,
BORLAND_fastcall = 0xb6,
LLVM_vectorcall = 0xc0,
LLVM_Win64 = 0xc1,
LLVM_X86_64SysV = 0xc2,
LLVM_AAPCS = 0xc3,
LLVM_AAPCS_VFP = 0xc4,
LLVM_IntelOclBicc = 0xc5,
LLVM_SpirFunction = 0xc6,
LLVM_OpenCLKernel = 0xc7,
LLVM_Swift = 0xc8,
LLVM_PreserveMost = 0xc9,
LLVM_PreserveAll = 0xca,
LLVM_X86RegCall = 0xcb,
LLVM_M68kRTD = 0xcc,
LLVM_PreserveNone = 0xcd,
LLVM_RISCVVectorCall = 0xce,
LLVM_SwiftTail = 0xcf,
pub const lo_user = 0x40;
pub const hi_user = 0xff;
};
pub const ACCESS = struct {
pub const public = 0x01;
pub const protected = 0x02;
pub const private = 0x03;
};

View file

@ -218,6 +218,15 @@ pub const VMS_rtnbeg_pd_address = 0x2201;
// See http://gcc.gnu.org/wiki/DW_AT_GNAT_descriptive_type .
pub const use_GNAT_descriptive_type = 0x2301;
pub const GNAT_descriptive_type = 0x2302;
// Zig extensions.
pub const ZIG_parent = 0x2ccd;
pub const ZIG_padding = 0x2cce;
pub const ZIG_relative_decl = 0x2cd0;
pub const ZIG_decl_line_relative = 0x2cd1;
pub const ZIG_is_allowzero = 0x2ce1;
pub const ZIG_sentinel = 0x2ce2;
// UPC extension.
pub const upc_threads_scaled = 0x3210;
// PGI (STMicroelectronics) extensions.

View file

@ -35,6 +35,30 @@ pub const Fortran03 = 0x0022;
pub const Fortran08 = 0x0023;
pub const RenderScript = 0x0024;
pub const BLISS = 0x0025;
pub const Kotlin = 0x0026;
pub const Zig = 0x0027;
pub const Crystal = 0x0028;
pub const C_plus_plus_17 = 0x002a;
pub const C_plus_plus_20 = 0x002b;
pub const C17 = 0x002c;
pub const Fortran18 = 0x002d;
pub const Ada2005 = 0x002e;
pub const Ada2012 = 0x002f;
pub const HIP = 0x0030;
pub const Assembly = 0x0031;
pub const C_sharp = 0x0032;
pub const Mojo = 0x0033;
pub const GLSL = 0x0034;
pub const GLSL_ES = 0x0035;
pub const HLSL = 0x0036;
pub const OpenCL_CPP = 0x0037;
pub const CPP_for_OpenCL = 0x0038;
pub const SYCL = 0x0039;
pub const C_plus_plus_23 = 0x003a;
pub const Odin = 0x003b;
pub const Ruby = 0x0040;
pub const Move = 0x0041;
pub const Hylo = 0x0042;
pub const lo_user = 0x8000;
pub const hi_user = 0xffff;

View file

@ -419,7 +419,7 @@ pub const tty = @import("io/tty.zig");
/// A Writer that doesn't write to anything.
pub const null_writer: NullWriter = .{ .context = {} };
const NullWriter = Writer(void, error{}, dummyWrite);
pub const NullWriter = Writer(void, error{}, dummyWrite);
fn dummyWrite(context: void, data: []const u8) error{}!usize {
_ = context;
return data.len;

View file

@ -36,10 +36,14 @@ pub fn readUleb128(comptime T: type, reader: anytype) !T {
pub const readULEB128 = readUleb128;
/// Write a single unsigned integer as unsigned LEB128 to the given writer.
pub fn writeUleb128(writer: anytype, uint_value: anytype) !void {
const T = @TypeOf(uint_value);
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value: U = @intCast(uint_value);
pub fn writeUleb128(writer: anytype, arg: anytype) !void {
const Arg = @TypeOf(arg);
const Int = switch (Arg) {
comptime_int => std.math.IntFittingRange(arg, arg),
else => Arg,
};
const Value = if (@typeInfo(Int).Int.bits < 8) u8 else Int;
var value: Value = arg;
while (true) {
const byte: u8 = @truncate(value & 0x7f);
@ -118,16 +122,19 @@ pub fn readIleb128(comptime T: type, reader: anytype) !T {
pub const readILEB128 = readIleb128;
/// Write a single signed integer as signed LEB128 to the given writer.
pub fn writeIleb128(writer: anytype, int_value: anytype) !void {
const T = @TypeOf(int_value);
const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
const U = std.meta.Int(.unsigned, @typeInfo(S).Int.bits);
var value: S = @intCast(int_value);
pub fn writeIleb128(writer: anytype, arg: anytype) !void {
const Arg = @TypeOf(arg);
const Int = switch (Arg) {
comptime_int => std.math.IntFittingRange(-arg - 1, arg),
else => Arg,
};
const Signed = if (@typeInfo(Int).Int.bits < 8) i8 else Int;
const Unsigned = std.meta.Int(.unsigned, @typeInfo(Signed).Int.bits);
var value: Signed = arg;
while (true) {
const uvalue: U = @bitCast(value);
const byte: u8 = @truncate(uvalue);
const unsigned: Unsigned = @bitCast(value);
const byte: u8 = @truncate(unsigned);
value >>= 6;
if (value == -1 or value == 0) {
try writer.writeByte(byte & 0x7F);
@ -147,17 +154,25 @@ pub fn writeIleb128(writer: anytype, int_value: anytype) !void {
/// "relocatable", meaning that it becomes possible to later go back and patch the number to be a
/// different value without shifting all the following code.
pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(.unsigned, l * 7)) void {
const T = @TypeOf(int);
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value: U = @intCast(int);
writeUnsignedExtended(ptr, int);
}
comptime var i = 0;
inline while (i < (l - 1)) : (i += 1) {
const byte = @as(u8, @truncate(value)) | 0b1000_0000;
/// Same as `writeUnsignedFixed` but with a runtime-known length.
/// Asserts `slice.len > 0`.
pub fn writeUnsignedExtended(slice: []u8, arg: anytype) void {
const Arg = @TypeOf(arg);
const Int = switch (Arg) {
comptime_int => std.math.IntFittingRange(arg, arg),
else => Arg,
};
const Value = if (@typeInfo(Int).Int.bits < 8) u8 else Int;
var value: Value = arg;
for (slice[0 .. slice.len - 1]) |*byte| {
byte.* = @truncate(0x80 | value);
value >>= 7;
ptr[i] = byte;
}
ptr[i] = @truncate(value);
slice[slice.len - 1] = @as(u7, @intCast(value));
}
/// Deprecated: use `writeIleb128`

View file

@ -2092,6 +2092,12 @@ pub const Const = struct {
return bits;
}
/// Returns the number of bits required to represent the integer in
/// twos-complement form with the given signedness. A positive value
/// needs one extra bit when it must be representable as signed.
pub fn bitCountTwosCompForSignedness(self: Const, signedness: std.builtin.Signedness) usize {
    const needs_sign_bit = self.positive and signedness == .signed;
    return self.bitCountTwosComp() + @intFromBool(needs_sign_bit);
}
/// @popCount with two's complement semantics.
///
/// This returns the number of 1 bits set when the value would be represented in
@ -2147,9 +2153,7 @@ pub const Const = struct {
if (signedness == .unsigned and !self.positive) {
return false;
}
const req_bits = self.bitCountTwosComp() + @intFromBool(self.positive and signedness == .signed);
return bit_count >= req_bits;
return bit_count >= self.bitCountTwosCompForSignedness(signedness);
}
/// Returns whether self can fit into an integer of the requested type.

View file

@ -128,7 +128,7 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
assert(full_len >= alloc_len);
if (len_align == 0)
return alloc_len;
const adjusted = alignBackwardAnyAlign(full_len, len_align);
const adjusted = alignBackwardAnyAlign(usize, full_len, len_align);
assert(adjusted >= alloc_len);
return adjusted;
}
@ -4312,6 +4312,15 @@ test "sliceAsBytes preserves pointer attributes" {
try testing.expectEqual(in.alignment, out.alignment);
}
/// Round an address up to the next (or current) aligned address.
/// Unlike `alignForward`, `alignment` can be any positive number, not just a power of 2.
pub fn alignForwardAnyAlign(comptime T: type, addr: T, alignment: T) T {
// Fast path: power-of-2 alignment uses the bit-masking implementation.
if (isValidAlignGeneric(T, alignment))
return alignForward(T, addr, alignment);
assert(alignment != 0);
// Adding (alignment - 1) before rounding down is equivalent to rounding up.
return alignBackwardAnyAlign(T, addr + (alignment - 1), alignment);
}
/// Round an address up to the next (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0.
/// Asserts that rounding up the address does not cause integer overflow.
@ -4433,11 +4442,11 @@ test alignForward {
/// Round an address down to the previous (or current) aligned address.
/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2.
pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
if (isValidAlign(alignment))
return alignBackward(usize, i, alignment);
pub fn alignBackwardAnyAlign(comptime T: type, addr: T, alignment: T) T {
if (isValidAlignGeneric(T, alignment))
return alignBackward(T, addr, alignment);
assert(alignment != 0);
return i - @mod(i, alignment);
return addr - @mod(addr, alignment);
}
/// Round an address down to the previous (or current) aligned address.

View file

@ -4405,7 +4405,6 @@ fn globalVarDecl(
.decl_line = astgen.source_line,
.astgen = astgen,
.is_comptime = true,
.anon_name_strategy = .parent,
.instructions = gz.instructions,
.instructions_top = gz.instructions.items.len,
};
@ -4463,6 +4462,8 @@ fn globalVarDecl(
else
.none;
block_scope.anon_name_strategy = .parent;
const init_inst = try expr(
&block_scope,
&block_scope.base,
@ -4490,6 +4491,8 @@ fn globalVarDecl(
// Extern variable which has an explicit type.
const type_inst = try typeExpr(&block_scope, &block_scope.base, var_decl.ast.type_node);
block_scope.anon_name_strategy = .parent;
const var_inst = try block_scope.addVar(.{
.var_type = type_inst,
.lib_name = lib_name,

View file

@ -363,6 +363,7 @@ const Job = union(enum) {
/// It must be deinited when the job is processed.
air: Air,
},
codegen_type: InternPool.Index,
/// The `Cau` must be semantically analyzed (and possibly export itself).
/// This may be its first time being analyzed, or it may be outdated.
analyze_cau: InternPool.Cau.Index,
@ -423,6 +424,7 @@ const CodegenJob = union(enum) {
/// It must be deinited when the job is processed.
air: Air,
},
type: InternPool.Index,
};
pub const CObject = struct {
@ -3712,6 +3714,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
.air = func.air,
} });
},
.codegen_type => |ty| try comp.queueCodegenJob(tid, .{ .type = ty }),
.analyze_func => |func| {
const named_frame = tracy.namedFrame("analyze_func");
defer named_frame.end();
@ -4001,6 +4004,13 @@ fn processOneCodegenJob(tid: usize, comp: *Compilation, codegen_job: CodegenJob)
// This call takes ownership of `func.air`.
try pt.linkerUpdateFunc(func.func, func.air);
},
.type => |ty| {
const named_frame = tracy.namedFrame("codegen_type");
defer named_frame.end();
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
try pt.linkerUpdateContainerType(ty);
},
}
}

View file

@ -4003,7 +4003,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
}
}
const LoadedEnumType = struct {
pub const LoadedEnumType = struct {
// TODO: the non-fqn will be needed by the new dwarf structure
/// The name of this enum type.
name: NullTerminatedString,

View file

@ -2845,6 +2845,11 @@ fn zirStructDecl(
try pt.scanNamespace(new_namespace_index, decls);
try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
try sema.declareDependency(.{ .interned = wip_ty.index });
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
@ -3213,6 +3218,11 @@ fn zirEnumDecl(
}
}
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
}
@ -3323,6 +3333,11 @@ fn zirUnionDecl(
try pt.scanNamespace(new_namespace_index, decls);
try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
try sema.declareDependency(.{ .interned = wip_ty.index });
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
@ -3396,6 +3411,11 @@ fn zirOpaqueDecl(
const decls = sema.code.bodySlice(extra_index, decls_len);
try pt.scanNamespace(new_namespace_index, decls);
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index));
}
@ -22071,6 +22091,11 @@ fn reifyEnum(
return sema.fail(block, src, "non-exhaustive enum specified every value", .{});
}
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
}
@ -22318,6 +22343,11 @@ fn reifyUnion(
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}
@ -22591,6 +22621,11 @@ fn reifyStruct(
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}

View file

@ -2208,7 +2208,7 @@ pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool {
const field_name_interned = ip.getString(name).unwrap() orelse return false;
return error_set_type.nameIndex(ip, field_name_interned) != null;
},
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
.anyerror_type => true,
.none => false,
else => |t| {

View file

@ -2737,7 +2737,7 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit
pub fn errorSetBits(mod: *Zcu) u16 {
if (mod.error_limit == 0) return 0;
return std.math.log2_int_ceil(ErrorInt, mod.error_limit + 1); // +1 for no error
return @as(u16, std.math.log2_int(ErrorInt, mod.error_limit)) + 1;
}
pub fn errNote(
@ -3005,6 +3005,14 @@ pub const UnionLayout = struct {
tag_align: Alignment,
tag_size: u64,
padding: u32,
/// Byte offset of the tag within the union's memory layout: the
/// more-aligned of tag/payload is placed first.
pub fn tagOffset(layout: UnionLayout) u64 {
    if (layout.tag_align.compare(.lt, layout.payload_align)) {
        // Payload comes first; the tag follows it.
        return layout.payload_size;
    }
    return 0;
}
/// Byte offset of the payload within the union's memory layout; the
/// mirror of `tagOffset`.
pub fn payloadOffset(layout: UnionLayout) u64 {
    if (layout.tag_align.compare(.lt, layout.payload_align)) {
        return 0;
    }
    // Tag comes first; the payload follows it.
    return layout.tag_size;
}
};
/// Returns the index of the active field, given the current tag value

View file

@ -911,6 +911,11 @@ fn createFileRootStruct(
try pt.scanNamespace(namespace_index, decls);
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (file.mod.strip) break :codegen_type;
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
zcu.setFileRootType(file_index, wip_ty.index);
return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index);
}
@ -1332,7 +1337,10 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
// to the `codegen_nav` job.
try decl_ty.resolveFully(pt);
if (!decl_ty.isFnOrHasRuntimeBits(pt)) break :queue_codegen;
if (!decl_ty.isFnOrHasRuntimeBits(pt)) {
if (zcu.comp.config.use_llvm) break :queue_codegen;
if (file.mod.strip) break :queue_codegen;
}
try zcu.comp.queueJob(.{ .codegen_nav = nav_index });
}
@ -2588,6 +2596,22 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void
}
}
/// Forwards a container type (identified by its `InternPool` index) to the
/// linker backend via `updateContainerType`. `error.OutOfMemory` is
/// propagated; any other linker error is logged and swallowed so one failed
/// type does not abort the rest of compilation.
pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) !void {
const zcu = pt.zcu;
const comp = zcu.comp;
const ip = &zcu.intern_pool;
// Progress node is named after the type being emitted.
const codegen_prog_node = zcu.codegen_prog_node.start(Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), 0);
defer codegen_prog_node.end();
// Nothing to do when no binary output file is being produced.
if (comp.bin_file) |lf| {
lf.updateContainerType(pt, ty) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| log.err("codegen type failed: {s}", .{@errorName(e)}),
};
}
}
pub fn reportRetryableAstGenError(
pt: Zcu.PerThread,
src: Zcu.AstGenSrc,

View file

@ -18,7 +18,6 @@ const ErrorMsg = Zcu.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("../../tracy.zig").trace;
const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
@ -181,11 +180,11 @@ const DbgInfoReloc = struct {
}
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void {
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.stack_offset,
.stack_argument_offset,
=> |offset| blk: {
@ -194,15 +193,15 @@ const DbgInfoReloc = struct {
.stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{ .stack = .{
.fp_register = Register.x29.dwarfLocOpDeref(),
.offset = adjusted_offset,
break :blk .{ .plus = .{
&.{ .breg = Register.x29.dwarfNum() },
&.{ .consts = adjusted_offset },
} };
},
else => unreachable, // not a possible argument
};
try dw.genArgDbgInfo(reloc.name, reloc.ty, function.owner_nav, loc);
try dw.genVarDebugInfo(.local_arg, reloc.name, reloc.ty, loc);
},
.plan9 => {},
.none => {},
@ -210,16 +209,10 @@ const DbgInfoReloc = struct {
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
const is_ptr = switch (reloc.tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
else => unreachable,
};
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
.dwarf => |dwarf| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.ptr_stack_offset,
.stack_offset,
.stack_argument_offset,
@ -231,24 +224,20 @@ const DbgInfoReloc = struct {
.stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{
.stack = .{
.fp_register = Register.x29.dwarfLocOpDeref(),
.offset = adjusted_offset,
},
};
break :blk .{ .plus = .{
&.{ .reg = Register.x29.dwarfNum() },
&.{ .consts = adjusted_offset },
} };
},
.memory => |address| .{ .memory = address },
.linker_load => |linker_load| .{ .linker_load = linker_load },
.immediate => |x| .{ .immediate = x },
.undef => .undef,
.none => .none,
.memory => |address| .{ .constu = address },
.immediate => |x| .{ .constu = x },
.none => .empty,
else => blk: {
log.debug("TODO generate debug info for {}", .{reloc.mcv});
break :blk .nop;
break :blk .empty;
},
};
try dw.genVarDbgInfo(reloc.name, reloc.ty, function.owner_nav, is_ptr, loc);
try dwarf.genVarDebugInfo(.local_var, reloc.name, reloc.ty, loc);
},
.plan9 => {},
.none => {},
@ -6207,7 +6196,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.memory => |addr| .{ .memory = addr },
.load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } },
.load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } },
.load_symbol, .load_tlv, .lea_symbol => unreachable, // TODO
.load_symbol, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
},
.fail => |msg| {
self.err_msg = msg;

View file

@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
@ -295,15 +294,8 @@ pub const Register = enum(u8) {
};
}
pub fn dwarfLocOp(self: Register) u8 {
return @as(u8, self.enc()) + DW.OP.reg0;
}
/// DWARF encodings that push a value onto the DWARF stack that is either
/// the contents of a register or the result of adding the contents a given
/// register to a given signed offset.
pub fn dwarfLocOpDeref(self: Register) u8 {
return @as(u8, self.enc()) + DW.OP.breg0;
/// DWARF register number for this register; here it is simply the
/// hardware encoding returned by `enc()`.
pub fn dwarfNum(self: Register) u5 {
return self.enc();
}
};

View file

@ -18,7 +18,6 @@ const ErrorMsg = Zcu.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("../../tracy.zig").trace;
const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
@ -259,11 +258,11 @@ const DbgInfoReloc = struct {
}
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void {
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.stack_offset,
.stack_argument_offset,
=> blk: {
@ -272,15 +271,15 @@ const DbgInfoReloc = struct {
.stack_argument_offset => |offset| @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{ .stack = .{
.fp_register = DW.OP.breg11,
.offset = adjusted_stack_offset,
break :blk .{ .plus = .{
&.{ .reg = 11 },
&.{ .consts = adjusted_stack_offset },
} };
},
else => unreachable, // not a possible argument
};
try dw.genArgDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcInfo(function.func_index).owner_nav, loc);
try dw.genVarDebugInfo(.local_arg, reloc.name, reloc.ty, loc);
},
.plan9 => {},
.none => {},
@ -288,16 +287,10 @@ const DbgInfoReloc = struct {
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
const is_ptr = switch (reloc.tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
else => unreachable,
};
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.ptr_stack_offset,
.stack_offset,
.stack_argument_offset,
@ -309,21 +302,20 @@ const DbgInfoReloc = struct {
.stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{ .stack = .{
.fp_register = DW.OP.breg11,
.offset = adjusted_offset,
break :blk .{ .plus = .{
&.{ .reg = 11 },
&.{ .consts = adjusted_offset },
} };
},
.memory => |address| .{ .memory = address },
.immediate => |x| .{ .immediate = x },
.undef => .undef,
.none => .none,
.memory => |address| .{ .constu = address },
.immediate => |x| .{ .constu = x },
.none => .empty,
else => blk: {
log.debug("TODO generate debug info for {}", .{reloc.mcv});
break :blk .nop;
break :blk .empty;
},
};
try dw.genVarDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcInfo(function.func_index).owner_nav, is_ptr, loc);
try dw.genVarDebugInfo(.local_var, reloc.name, reloc.ty, loc);
},
.plan9 => {},
.none => {},
@ -6170,7 +6162,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol => unreachable, // TODO
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
.immediate => |imm| .{ .immediate = @truncate(imm) },
.memory => |addr| .{ .memory = addr },
},

View file

@ -1,5 +1,4 @@
const std = @import("std");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
@ -158,12 +157,12 @@ pub const Register = enum(u5) {
/// Returns the unique 4-bit ID of this register which is used in
/// the machine code
pub fn id(self: Register) u4 {
return @as(u4, @truncate(@intFromEnum(self)));
pub fn id(reg: Register) u4 {
return @truncate(@intFromEnum(reg));
}
pub fn dwarfLocOp(self: Register) u8 {
return @as(u8, self.id()) + DW.OP.reg0;
/// DWARF register number; equal to the register's 4-bit id.
pub fn dwarfNum(reg: Register) u4 {
return reg.id();
}
};

View file

@ -4677,9 +4677,7 @@ fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (func.debug_output) {
.dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genArgDbgInfo(name, ty, func.owner.nav_index, .{
.register = reg.dwarfLocOp(),
}),
.register => |reg| try dw.genVarDebugInfo(.local_arg, name, ty, .{ .reg = reg.dwarfNum() }),
.load_frame => {},
else => {},
},
@ -5184,43 +5182,30 @@ fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void {
const name = func.air.nullTerminatedString(pl_op.payload);
const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)];
try func.genVarDbgInfo(tag, ty, mcv, name);
try func.genVarDbgInfo(ty, mcv, name);
return func.finishAir(inst, .unreach, .{ operand, .none, .none });
}
fn genVarDbgInfo(
func: Func,
tag: Air.Inst.Tag,
ty: Type,
mcv: MCValue,
name: [:0]const u8,
name: []const u8,
) !void {
const is_ptr = switch (tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
else => unreachable,
};
switch (func.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
.memory => |address| .{ .memory = address },
.load_symbol => |sym_off| loc: {
assert(sym_off.off == 0);
break :loc .{ .linker_load = .{ .type = .direct, .sym_index = sym_off.sym } };
},
.immediate => |x| .{ .immediate = x },
.undef => .undef,
.none => .none,
.dwarf => |dwarf| {
const loc: link.File.Dwarf.Loc = switch (mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.memory => |address| .{ .constu = address },
.immediate => |x| .{ .constu = x },
.none => .empty,
else => blk: {
// log.warn("TODO generate debug info for {}", .{mcv});
break :blk .nop;
break :blk .empty;
},
};
try dw.genVarDbgInfo(name, ty, func.owner.nav_index, is_ptr, loc);
try dwarf.genVarDebugInfo(.local_var, name, ty, loc);
},
.plan9 => {},
.none => {},
@ -8031,7 +8016,7 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
.load_got, .load_direct => {
.load_got, .load_direct, .lea_direct => {
return func.fail("TODO: genTypedValue {s}", .{@tagName(mcv)});
},
},

View file

@ -1,5 +1,4 @@
const std = @import("std");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
const Target = std.Target;
@ -207,8 +206,8 @@ pub const Register = enum(u8) {
return @truncate(@intFromEnum(reg));
}
pub fn dwarfLocOp(reg: Register) u8 {
return @as(u8, reg.id());
/// DWARF register number; equal to the register's id, widened to u8.
pub fn dwarfNum(reg: Register) u8 {
return reg.id();
}
pub fn bitSize(reg: Register, zcu: *const Zcu) u32 {

View file

@ -3579,18 +3579,15 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
}
fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
const pt = self.pt;
const mod = pt.zcu;
const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = arg.ty.toType();
const owner_nav = mod.funcInfo(self.func_index).owner_nav;
if (arg.name == .none) return;
const name = self.air.nullTerminatedString(@intFromEnum(arg.name));
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genArgDbgInfo(name, ty, owner_nav, .{
.register = reg.dwarfLocOp(),
.register => |reg| try dw.genVarDebugInfo(.local_arg, name, ty, .{
.reg = reg.dwarfNum(),
}),
else => {},
},
@ -4127,7 +4124,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol => unreachable, // TODO
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
},

View file

@ -1,5 +1,4 @@
const std = @import("std");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
@ -15,17 +14,17 @@ pub const Register = enum(u6) {
fp = 62, // frame pointer (i6)
// zig fmt: on
pub fn id(self: Register) u5 {
return @as(u5, @truncate(@intFromEnum(self)));
pub fn id(reg: Register) u5 {
return @truncate(@intFromEnum(reg));
}
pub fn enc(self: Register) u5 {
pub fn enc(reg: Register) u5 {
// For integer registers, enc() == id().
return self.id();
return reg.id();
}
pub fn dwarfLocOp(reg: Register) u8 {
return @as(u8, reg.id()) + DW.OP.reg0;
/// DWARF register number; equal to the register's 5-bit id.
pub fn dwarfNum(reg: Register) u5 {
return reg.id();
}
};

View file

@ -742,7 +742,7 @@ const InnerError = error{
CodegenFail,
/// Compiler implementation could not handle a large integer.
Overflow,
};
} || link.File.UpdateDebugInfoError;
pub fn deinit(func: *CodeGen) void {
// in case of an error and we still have branches
@ -2588,8 +2588,8 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const name_nts = func.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name_nts != .none) {
const name = func.air.nullTerminatedString(@intFromEnum(name_nts));
try dwarf.genArgDbgInfo(name, arg_ty, func.owner_nav, .{
.wasm_local = arg.local.value,
try dwarf.genVarDebugInfo(.local_arg, name, arg_ty, .{
.wasm_ext = .{ .local = arg.local.value },
});
}
},
@ -6455,6 +6455,7 @@ fn airDbgInlineBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void {
_ = is_ptr;
if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@ -6466,14 +6467,14 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void
const name = func.air.nullTerminatedString(pl_op.payload);
log.debug(" var name = ({s})", .{name});
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (operand) {
.local => |local| .{ .wasm_local = local.value },
const loc: link.File.Dwarf.Loc = switch (operand) {
.local => |local| .{ .wasm_ext = .{ .local = local.value } },
else => blk: {
log.debug("TODO generate debug info for {}", .{operand});
break :blk .nop;
break :blk .empty;
},
};
try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.owner_nav, is_ptr, loc);
try func.debug_output.dwarf.genVarDebugInfo(.local_var, name, ty, loc);
return func.finishAir(inst, .none, &.{});
}

View file

@ -1,5 +1,4 @@
const std = @import("std");
const DW = std.dwarf;
// zig fmt: off
pub const Register = enum(u8) {
@ -44,18 +43,8 @@ pub const Register = enum(u8) {
return @enumFromInt(@as(u8, self.id()) + 16);
}
pub fn dwarfLocOp(reg: Register) u8 {
return switch (reg.to32()) {
.eax => DW.OP.reg0,
.ecx => DW.OP.reg1,
.edx => DW.OP.reg2,
.ebx => DW.OP.reg3,
.esp => DW.OP.reg4,
.ebp => DW.OP.reg5,
.esi => DW.OP.reg6,
.edi => DW.OP.reg7,
else => unreachable,
};
pub fn dwarfNum(reg: Register) u8 {
return @intFromEnum(reg.to32());
}
};
@ -64,7 +53,7 @@ pub const Register = enum(u8) {
/// TODO this set is actually a set of caller-saved registers.
pub const callee_preserved_regs = [_]Register{ .eax, .ecx, .edx, .esi, .edi };
// TODO add these to Register enum and corresponding dwarfLocOp
// TODO add these to Register enum and corresponding dwarfNum
// // Return Address register. This is stored in `0(%esp, "")` and is not a physical register.
// RA = (8, "RA"),
//

View file

@ -18,7 +18,6 @@ const Allocator = mem.Allocator;
const CodeGenError = codegen.CodeGenError;
const Compilation = @import("../../Compilation.zig");
const DebugInfoOutput = codegen.DebugInfoOutput;
const DW = std.dwarf;
const ErrorMsg = Zcu.ErrorMsg;
const Result = codegen.Result;
const Emit = @import("Emit.zig");
@ -82,6 +81,9 @@ mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// MIR extra data
mir_extra: std.ArrayListUnmanaged(u32) = .{},
stack_args: std.ArrayListUnmanaged(StackVar) = .{},
stack_vars: std.ArrayListUnmanaged(StackVar) = .{},
/// Byte offset within the source file of the ending curly.
end_di_line: u32,
end_di_column: u32,
@ -726,6 +728,12 @@ const InstTracking = struct {
}
};
const StackVar = struct {
name: []const u8,
type: Type,
frame_addr: FrameAddr,
};
const FrameAlloc = struct {
abi_size: u31,
spill_pad: u3,
@ -831,6 +839,8 @@ pub fn generate(
function.exitlude_jump_relocs.deinit(gpa);
function.mir_instructions.deinit(gpa);
function.mir_extra.deinit(gpa);
function.stack_args.deinit(gpa);
function.stack_vars.deinit(gpa);
}
wip_mir_log.debug("{}:", .{fmtNav(func.owner_nav, ip)});
@ -903,14 +913,17 @@ pub fn generate(
else => |e| return e,
};
var mir = Mir{
try function.genStackVarDebugInfo(.local_arg, function.stack_args.items);
try function.genStackVarDebugInfo(.local_var, function.stack_vars.items);
var mir: Mir = .{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = try function.mir_extra.toOwnedSlice(gpa),
.frame_locs = function.frame_locs.toOwnedSlice(),
};
defer mir.deinit(gpa);
var emit = Emit{
var emit: Emit = .{
.lower = .{
.bin_file = bin_file,
.allocator = gpa,
@ -2425,7 +2438,7 @@ fn computeFrameLayout(self: *Self, cc: std.builtin.CallingConvention) !FrameLayo
const callee_preserved_regs =
abi.getCalleePreservedRegs(abi.resolveCallingConvention(cc, self.target.*));
for (callee_preserved_regs) |reg| {
if (self.register_manager.isRegAllocated(reg) or true) {
if (self.register_manager.isRegAllocated(reg)) {
save_reg_list.push(callee_preserved_regs, reg);
}
}
@ -5985,10 +5998,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
switch (operand) {
.load_frame => |frame_addr| {
if (tag_abi_size <= 8) {
const off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size)
else
0;
const off: i32 = @intCast(layout.tagOffset());
break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{
.load_frame = .{ .index = frame_addr.index, .off = frame_addr.off + off },
});
@ -6000,10 +6010,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
);
},
.register => {
const shift: u6 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size * 8)
else
0;
const shift: u6 = @intCast(layout.tagOffset() * 8);
const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand);
try self.genShiftBinOpMir(
.{ ._r, .sh },
@ -11819,7 +11826,16 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
while (self.args[arg_index] == .none) arg_index += 1;
self.arg_index = arg_index + 1;
const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
const result: MCValue = if (self.debug_output == .none and self.liveness.isUnused(inst)) .unreach else result: {
const name = switch (self.debug_output) {
.none => "",
else => name: {
const name_nts = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
break :name self.air.nullTerminatedString(@intFromEnum(name_nts));
},
};
if (name.len == 0 and self.liveness.isUnused(inst)) break :result .unreach;
const arg_ty = self.typeOfIndex(inst);
const src_mcv = self.args[arg_index];
const dst_mcv = switch (src_mcv) {
@ -11922,90 +11938,86 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
else => return self.fail("TODO implement arg for {}", .{src_mcv}),
};
const name_nts = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
switch (name_nts) {
.none => {},
_ => try self.genArgDbgInfo(arg_ty, self.air.nullTerminatedString(@intFromEnum(name_nts)), src_mcv),
}
if (name.len > 0) try self.genVarDebugInfo(.local_arg, .dbg_var_val, name, arg_ty, dst_mcv);
if (self.liveness.isUnused(inst)) {
assert(self.debug_output != .none and name.len > 0);
try self.freeValue(dst_mcv);
break :result .none;
}
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
fn genVarDebugInfo(
self: *Self,
var_tag: link.File.Dwarf.WipNav.VarTag,
tag: Air.Inst.Tag,
name: []const u8,
ty: Type,
mcv: MCValue,
) !void {
const stack_vars = switch (var_tag) {
.local_arg => &self.stack_args,
.local_var => &self.stack_vars,
};
switch (self.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (mcv) {
.register => |reg| .{ .register = reg.dwarfNum() },
.register_pair => |regs| .{ .register_pair = .{
regs[0].dwarfNum(), regs[1].dwarfNum(),
} },
// TODO use a frame index
.load_frame, .elementwise_regs_then_frame => return,
//.stack_offset => |off| .{
// .stack = .{
// // TODO handle -fomit-frame-pointer
// .fp_register = Register.rbp.dwarfNum(),
// .offset = -off,
// },
//},
else => unreachable, // not a valid function parameter
};
// TODO: this might need adjusting like the linkers do.
// Instead of flattening the owner and passing Decl.Index here we may
// want to special case LazySymbol in DWARF linker too.
try dw.genArgDbgInfo(name, ty, self.owner.nav_index, loc);
.dwarf => |dwarf| switch (tag) {
else => unreachable,
.dbg_var_ptr => {
const var_ty = ty.childType(self.pt.zcu);
switch (mcv) {
else => {
log.info("dbg_var_ptr({s}({}))", .{ @tagName(mcv), mcv });
unreachable;
},
.unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable,
.lea_frame => |frame_addr| try stack_vars.append(self.gpa, .{
.name = name,
.type = var_ty,
.frame_addr = frame_addr,
}),
.lea_symbol => |sym_off| try dwarf.genVarDebugInfo(var_tag, name, var_ty, .{ .plus = .{
&.{ .addr = .{ .sym = sym_off.sym } },
&.{ .consts = sym_off.off },
} }),
}
},
.dbg_var_val => switch (mcv) {
.none => try dwarf.genVarDebugInfo(var_tag, name, ty, .empty),
.unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable,
.immediate => |immediate| try dwarf.genVarDebugInfo(var_tag, name, ty, .{ .stack_value = &.{
.constu = immediate,
} }),
else => {
const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, self.pt));
try self.genSetMem(.{ .frame = frame_index }, 0, ty, mcv, .{});
try stack_vars.append(self.gpa, .{
.name = name,
.type = ty,
.frame_addr = .{ .index = frame_index },
});
},
},
},
.plan9 => {},
.none => {},
}
}
fn genVarDbgInfo(
fn genStackVarDebugInfo(
self: Self,
tag: Air.Inst.Tag,
ty: Type,
mcv: MCValue,
name: [:0]const u8,
var_tag: link.File.Dwarf.WipNav.VarTag,
stack_vars: []const StackVar,
) !void {
const is_ptr = switch (tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
else => unreachable,
};
switch (self.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (mcv) {
.register => |reg| .{ .register = reg.dwarfNum() },
// TODO use a frame index
.load_frame, .lea_frame => return,
//=> |off| .{ .stack = .{
// .fp_register = Register.rbp.dwarfNum(),
// .offset = -off,
//} },
.memory => |address| .{ .memory = address },
.load_symbol => |sym_off| loc: {
assert(sym_off.off == 0);
break :loc .{ .linker_load = .{ .type = .direct, .sym_index = sym_off.sym } };
}, // TODO
.load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } },
.load_direct => |sym_index| .{
.linker_load = .{ .type = .direct, .sym_index = sym_index },
},
.immediate => |x| .{ .immediate = x },
.undef => .undef,
.none => .none,
else => blk: {
log.debug("TODO generate debug info for {}", .{mcv});
break :blk .nop;
},
};
// TODO: this might need adjusting like the linkers do.
// Instead of flattening the owner and passing Decl.Index here we may
// want to special case LazySymbol in DWARF linker too.
try dw.genVarDbgInfo(name, ty, self.owner.nav_index, is_ptr, loc);
.dwarf => |dwarf| for (stack_vars) |stack_var| {
const frame_loc = self.frame_locs.get(@intFromEnum(stack_var.frame_addr.index));
try dwarf.genVarDebugInfo(var_tag, stack_var.name, stack_var.type, .{ .plus = .{
&.{ .breg = frame_loc.base.dwarfNum() },
&.{ .consts = @as(i33, frame_loc.disp) + stack_var.frame_addr.off },
} });
},
.plan9 => {},
.none => {},
@ -13045,7 +13057,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
const name = self.air.nullTerminatedString(pl_op.payload);
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
try self.genVarDbgInfo(tag, ty, mcv, name);
try self.genVarDebugInfo(.local_var, tag, name, ty, mcv);
return self.finishAir(inst, .unreach, .{ operand, .none, .none });
}
@ -13154,13 +13166,17 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.lea_symbol,
.elementwise_regs_then_frame,
.reserved_frame,
.air_ref,
=> unreachable,
.lea_frame => {
self.eflags_inst = null;
return .{ .immediate = @intFromBool(false) };
},
.register => |opt_reg| {
if (some_info.off == 0) {
const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
@ -13402,7 +13418,8 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const ty = self.typeOf(un_op);
const result = switch (try self.isNull(inst, ty, operand)) {
const result: MCValue = switch (try self.isNull(inst, ty, operand)) {
.immediate => |imm| .{ .immediate = @intFromBool(imm == 0) },
.eflags => |cc| .{ .eflags = cc.negate() },
else => unreachable,
};
@ -15156,7 +15173,7 @@ fn genSetMem(
})).write(
self,
.{ .base = base, .mod = .{ .rm = .{
.size = self.memSize(ty),
.size = Memory.Size.fromBitSize(@min(self.memSize(ty).bitSize(), src_alias.bitSize())),
.disp = disp,
} } },
src_alias,
@ -15202,7 +15219,33 @@ fn genSetMem(
@tagName(src_mcv), ty.fmt(pt),
}),
},
.register_offset,
.register_offset => |reg_off| {
const src_reg = self.copyToTmpRegister(ty, src_mcv) catch |err| switch (err) {
error.OutOfRegisters => {
const src_reg = registerAlias(reg_off.reg, abi_size);
try self.asmRegisterMemory(.{ ._, .lea }, src_reg, .{
.base = .{ .reg = src_reg },
.mod = .{ .rm = .{
.size = .qword,
.disp = reg_off.off,
} },
});
try self.genSetMem(base, disp, ty, .{ .register = reg_off.reg }, opts);
return self.asmRegisterMemory(.{ ._, .lea }, src_reg, .{
.base = .{ .reg = src_reg },
.mod = .{ .rm = .{
.size = .qword,
.disp = -reg_off.off,
} },
});
},
else => |e| return e,
};
const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
defer self.register_manager.unlockReg(src_lock);
try self.genSetMem(base, disp, ty, .{ .register = src_reg }, opts);
},
.memory,
.indirect,
.load_direct,
@ -15422,9 +15465,14 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const src_ty = self.typeOf(ty_op.operand);
const result = result: {
const src_mcv = try self.resolveInst(ty_op.operand);
if (dst_ty.isPtrAtRuntime(mod) and src_ty.isPtrAtRuntime(mod)) switch (src_mcv) {
.lea_frame => break :result src_mcv,
else => if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv,
};
const dst_rc = self.regClassForType(dst_ty);
const src_rc = self.regClassForType(src_ty);
const src_mcv = try self.resolveInst(ty_op.operand);
const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
@ -18236,10 +18284,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
const tag_int = tag_int_val.toUnsignedInt(pt);
const tag_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size)
else
0;
const tag_off: i32 = @intCast(layout.tagOffset());
try self.genCopy(
tag_ty,
dst_mcv.address().offset(tag_off).deref(),
@ -18247,10 +18292,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
.{},
);
const pl_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
0
else
@intCast(layout.tag_size);
const pl_off: i32 = @intCast(layout.payloadOffset());
try self.genCopy(src_ty, dst_mcv.address().offset(pl_off).deref(), src_mcv, .{});
break :result dst_mcv;
@ -18790,6 +18832,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.load_symbol => |sym_index| .{ .load_symbol = .{ .sym = sym_index } },
.lea_symbol => |sym_index| .{ .lea_symbol = .{ .sym = sym_index } },
.load_direct => |sym_index| .{ .load_direct = sym_index },
.lea_direct => |sym_index| .{ .lea_direct = sym_index },
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
},

View file

@ -14,7 +14,7 @@ relocs: std.ArrayListUnmanaged(Reloc) = .{},
pub const Error = Lower.Error || error{
EmitFail,
};
} || link.File.UpdateDebugInfoError;
pub fn emitMir(emit: *Emit) Error!void {
for (0..emit.lower.mir.instructions.len) |mir_i| {

View file

@ -1204,7 +1204,7 @@ pub const FrameLoc = struct {
pub fn resolveFrameLoc(mir: Mir, mem: Memory) Memory {
return switch (mem.info.base) {
.none, .reg, .reloc => mem,
.frame => if (mir.frame_locs.len > 0) Memory{
.frame => if (mir.frame_locs.len > 0) .{
.info = .{
.base = .reg,
.mod = mem.info.mod,

View file

@ -4,7 +4,6 @@ const expect = std.testing.expect;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const DW = std.dwarf;
/// EFLAGS condition codes
pub const Condition = enum(u5) {

View file

@ -36,10 +36,10 @@ pub const CodeGenError = error{
OutOfMemory,
Overflow,
CodegenFail,
};
} || link.File.UpdateDebugInfoError;
pub const DebugInfoOutput = union(enum) {
dwarf: *link.File.Dwarf.NavState,
dwarf: *link.File.Dwarf.WipNav,
plan9: *link.File.Plan9.DebugInfoOutput,
none,
};
@ -819,6 +819,9 @@ pub const GenResult = union(enum) {
/// Decl with address deferred until the linker allocates everything in virtual memory.
/// Payload is a symbol index.
load_direct: u32,
/// Decl with address deferred until the linker allocates everything in virtual memory.
/// Payload is a symbol index.
lea_direct: u32,
/// Decl referenced via GOT with address deferred until the linker allocates
/// everything in virtual memory.
/// Payload is a symbol index.
@ -833,10 +836,6 @@ pub const GenResult = union(enum) {
lea_symbol: u32,
};
fn mcv(val: MCValue) GenResult {
return .{ .mcv = val };
}
fn fail(
gpa: Allocator,
src_loc: Zcu.LazySrcLoc,
@ -869,7 +868,7 @@ fn genNavRef(
8 => 0xaaaaaaaaaaaaaaaa,
else => unreachable,
};
return GenResult.mcv(.{ .immediate = imm });
return .{ .mcv = .{ .immediate = imm } };
}
const comp = lf.comp;
@ -878,12 +877,12 @@ fn genNavRef(
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (ty.castPtrToFn(zcu)) |fn_ty| {
if (zcu.typeToFunc(fn_ty).?.is_generic) {
return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(pt).toByteUnits().? });
return .{ .mcv = .{ .immediate = fn_ty.abiAlignment(pt).toByteUnits().? } };
}
} else if (ty.zigTypeTag(zcu) == .Pointer) {
const elem_ty = ty.elemType2(zcu);
if (!elem_ty.hasRuntimeBits(pt)) {
return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(pt).toByteUnits().? });
return .{ .mcv = .{ .immediate = elem_ty.abiAlignment(pt).toByteUnits().? } };
}
}
@ -900,40 +899,40 @@ fn genNavRef(
if (is_extern) {
const sym_index = try elf_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip));
zo.symbol(sym_index).flags.is_extern_ptr = true;
return GenResult.mcv(.{ .lea_symbol = sym_index });
return .{ .mcv = .{ .lea_symbol = sym_index } };
}
const sym_index = try zo.getOrCreateMetadataForNav(elf_file, nav_index);
if (!single_threaded and is_threadlocal) {
return GenResult.mcv(.{ .load_tlv = sym_index });
return .{ .mcv = .{ .load_tlv = sym_index } };
}
return GenResult.mcv(.{ .lea_symbol = sym_index });
return .{ .mcv = .{ .lea_symbol = sym_index } };
} else if (lf.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
if (is_extern) {
const sym_index = try macho_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip));
zo.symbols.items[sym_index].setSectionFlags(.{ .needs_got = true });
return GenResult.mcv(.{ .load_symbol = sym_index });
return .{ .mcv = .{ .load_symbol = sym_index } };
}
const sym_index = try zo.getOrCreateMetadataForNav(macho_file, nav_index);
const sym = zo.symbols.items[sym_index];
if (!single_threaded and is_threadlocal) {
return GenResult.mcv(.{ .load_tlv = sym.nlist_idx });
return .{ .mcv = .{ .load_tlv = sym.nlist_idx } };
}
return GenResult.mcv(.{ .load_symbol = sym.nlist_idx });
return .{ .mcv = .{ .load_symbol = sym.nlist_idx } };
} else if (lf.cast(.coff)) |coff_file| {
if (is_extern) {
// TODO audit this
const global_index = try coff_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip));
try coff_file.need_got_table.put(gpa, global_index, {}); // needs GOT
return GenResult.mcv(.{ .load_got = link.File.Coff.global_symbol_bit | global_index });
return .{ .mcv = .{ .load_got = link.File.Coff.global_symbol_bit | global_index } };
}
const atom_index = try coff_file.getOrCreateAtomForNav(nav_index);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return GenResult.mcv(.{ .load_got = sym_index });
return .{ .mcv = .{ .load_got = sym_index } };
} else if (lf.cast(.plan9)) |p9| {
const atom_index = try p9.seeNav(pt, nav_index);
const atom = p9.getAtom(atom_index);
return GenResult.mcv(.{ .memory = atom.getOffsetTableAddress(p9) });
return .{ .mcv = .{ .memory = atom.getOffsetTableAddress(p9) } };
} else {
return GenResult.fail(gpa, src_loc, "TODO genNavRef for target {}", .{target});
}
@ -952,30 +951,40 @@ pub fn genTypedValue(
log.debug("genTypedValue: val = {}", .{val.fmtValue(pt)});
if (val.isUndef(zcu)) {
return GenResult.mcv(.undef);
}
if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.nav => |nav| return genNavRef(lf, pt, src_loc, val, nav, target),
else => {},
},
else => {},
};
if (val.isUndef(zcu)) return .{ .mcv = .undef };
switch (ty.zigTypeTag(zcu)) {
.Void => return GenResult.mcv(.none),
.Void => return .{ .mcv = .none },
.Pointer => switch (ty.ptrSize(zcu)) {
.Slice => {},
else => switch (val.toIntern()) {
.null_value => {
return GenResult.mcv(.{ .immediate = 0 });
return .{ .mcv = .{ .immediate = 0 } };
},
.none => {},
else => switch (ip.indexToKey(val.toIntern())) {
.int => {
return GenResult.mcv(.{ .immediate = val.toUnsignedInt(pt) });
return .{ .mcv = .{ .immediate = val.toUnsignedInt(pt) } };
},
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.nav => |nav| return genNavRef(lf, pt, src_loc, val, nav, target),
.uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).hasRuntimeBits(pt))
return switch (try lf.lowerUav(
pt,
uav.val,
Type.fromInterned(uav.orig_ty).ptrAlignment(pt),
src_loc,
)) {
.mcv => |mcv| return .{ .mcv = switch (mcv) {
.load_direct => |sym_index| .{ .lea_direct = sym_index },
.load_symbol => |sym_index| .{ .lea_symbol = sym_index },
else => unreachable,
} },
.fail => |em| return .{ .fail = em },
}
else
return .{ .mcv = .{ .immediate = Type.fromInterned(uav.orig_ty).ptrAlignment(pt)
.forward(@intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() | 1)) / 3)) } },
else => {},
},
else => {},
},
@ -988,11 +997,11 @@ pub fn genTypedValue(
.signed => @bitCast(val.toSignedInt(pt)),
.unsigned => val.toUnsignedInt(pt),
};
return GenResult.mcv(.{ .immediate = unsigned });
return .{ .mcv = .{ .immediate = unsigned } };
}
},
.Bool => {
return GenResult.mcv(.{ .immediate = @intFromBool(val.toBool()) });
return .{ .mcv = .{ .immediate = @intFromBool(val.toBool()) } };
},
.Optional => {
if (ty.isPtrLikeOptional(zcu)) {
@ -1000,11 +1009,11 @@ pub fn genTypedValue(
lf,
pt,
src_loc,
val.optionalValue(zcu) orelse return GenResult.mcv(.{ .immediate = 0 }),
val.optionalValue(zcu) orelse return .{ .mcv = .{ .immediate = 0 } },
target,
);
} else if (ty.abiSize(pt) == 1) {
return GenResult.mcv(.{ .immediate = @intFromBool(!val.isNull(zcu)) });
return .{ .mcv = .{ .immediate = @intFromBool(!val.isNull(zcu)) } };
}
},
.Enum => {
@ -1020,7 +1029,7 @@ pub fn genTypedValue(
.ErrorSet => {
const err_name = ip.indexToKey(val.toIntern()).err.name;
const error_index = try pt.getErrorValue(err_name);
return GenResult.mcv(.{ .immediate = error_index });
return .{ .mcv = .{ .immediate = error_index } };
},
.ErrorUnion => {
const err_type = ty.errorUnionSet(zcu);

View file

@ -329,6 +329,9 @@ pub const File = struct {
}
}
pub const UpdateDebugInfoError = Dwarf.UpdateError;
pub const FlushDebugInfoError = Dwarf.FlushError;
pub const UpdateNavError = error{
OutOfMemory,
Overflow,
@ -365,7 +368,7 @@ pub const File = struct {
DeviceBusy,
InvalidArgument,
HotSwapUnavailableOnHostOperatingSystem,
};
} || UpdateDebugInfoError;
/// Called from within CodeGen to retrieve the symbol index of a global symbol.
/// If no symbol exists yet with this name, a new undefined global symbol will
@ -398,6 +401,16 @@ pub const File = struct {
}
}
pub fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateNavError!void {
switch (base.tag) {
else => {},
inline .elf => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).updateContainerType(pt, ty);
},
}
}
/// May be called before or after updateExports for any given Decl.
pub fn updateFunc(
base: *File,
@ -570,6 +583,7 @@ pub const File = struct {
Unseekable,
UnsupportedCpuArchitecture,
UnsupportedVersion,
UnexpectedEndOfFile,
} ||
fs.File.WriteFileError ||
fs.File.OpenError ||

View file

@ -1205,10 +1205,11 @@ pub fn updateNav(
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
const init_val = switch (ip.indexToKey(nav.status.resolved.val)) {
.variable => |variable| variable.init,
const nav_val = zcu.navValue(nav_index);
const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
.variable => |variable| Value.fromInterned(variable.init),
.@"extern" => |@"extern"| {
if (ip.isFunctionType(nav.typeOf(ip))) return;
if (ip.isFunctionType(@"extern".ty)) return;
// TODO make this part of getGlobalSymbol
const name = nav.name.toSlice(ip);
const lib_name = @"extern".lib_name.toSlice(ip);
@ -1216,34 +1217,36 @@ pub fn updateNav(
try self.need_got_table.put(gpa, global_index, {});
return;
},
else => nav.status.resolved.val,
else => nav_val,
};
const atom_index = try self.getOrCreateAtomForNav(nav_index);
Atom.freeRelocations(self, atom_index);
const atom = self.getAtom(atom_index);
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const atom_index = try self.getOrCreateAtomForNav(nav_index);
Atom.freeRelocations(self, atom_index);
const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
const res = try codegen.generateSymbol(
&self.base,
pt,
zcu.navSrcLoc(nav_index),
Value.fromInterned(init_val),
&code_buffer,
.none,
.{ .parent_atom_index = atom.getSymbolIndex().? },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, nav_index, em);
return;
},
};
const res = try codegen.generateSymbol(
&self.base,
pt,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
.none,
.{ .parent_atom_index = atom.getSymbolIndex().? },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, nav_index, em);
return;
},
};
try self.updateNavCode(pt, nav_index, code, .NULL);
try self.updateNavCode(pt, nav_index, code, .NULL);
}
// Exports will be updated by `Zcu.processExports` after the update.
}
@ -1290,10 +1293,10 @@ fn updateLazySymbolAtom(
},
};
const code_len = @as(u32, @intCast(code.len));
const code_len: u32 = @intCast(code.len);
const symbol = atom.getSymbolPtr(self);
try self.setSymbolName(symbol, name);
symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
symbol.section_number = @enumFromInt(section_index + 1);
symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));

File diff suppressed because it is too large Load diff

View file

@ -143,6 +143,9 @@ debug_abbrev_section_index: ?u32 = null,
debug_str_section_index: ?u32 = null,
debug_aranges_section_index: ?u32 = null,
debug_line_section_index: ?u32 = null,
debug_line_str_section_index: ?u32 = null,
debug_loclists_section_index: ?u32 = null,
debug_rnglists_section_index: ?u32 = null,
copy_rel_section_index: ?u32 = null,
dynamic_section_index: ?u32 = null,
@ -492,12 +495,13 @@ pub fn getUavVAddr(self: *Elf, uav: InternPool.Index, reloc_info: link.File.Relo
}
/// Returns end pos of collision, if any.
fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
const small_ptr = self.ptr_width == .p32;
const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr);
if (start < ehdr_size)
return ehdr_size;
var at_end = true;
const end = start + padToIdeal(size);
if (self.shdr_table_offset) |off| {
@ -505,8 +509,9 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
const tight_size = self.shdrs.items.len * shdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off +| increased_size;
if (end > off and start < test_end) {
return test_end;
if (start < test_end) {
if (end > off) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
@ -514,8 +519,9 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const increased_size = padToIdeal(shdr.sh_size);
const test_end = shdr.sh_offset +| increased_size;
if (end > shdr.sh_offset and start < test_end) {
return test_end;
if (start < test_end) {
if (end > shdr.sh_offset) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
@ -523,11 +529,13 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (phdr.p_type != elf.PT_LOAD) continue;
const increased_size = padToIdeal(phdr.p_filesz);
const test_end = phdr.p_offset +| increased_size;
if (end > phdr.p_offset and start < test_end) {
return test_end;
if (start < test_end) {
if (end > phdr.p_offset) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
if (at_end) try self.base.file.?.setEndPos(end);
return null;
}
@ -558,9 +566,9 @@ fn allocatedVirtualSize(self: *Elf, start: u64) u64 {
return min_pos - start;
}
pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) u64 {
pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 {
var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
while (try self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForward(u64, item_end, min_alignment);
}
return start;
@ -580,9 +588,9 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
const zig_object = self.zigObjectPtr().?;
const fillSection = struct {
fn fillSection(elf_file: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) void {
fn fillSection(elf_file: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) !void {
if (elf_file.base.isRelocatable()) {
const off = elf_file.findFreeSpace(size, shdr.sh_addralign);
const off = try elf_file.findFreeSpace(size, shdr.sh_addralign);
shdr.sh_offset = off;
shdr.sh_size = size;
} else {
@ -599,7 +607,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
if (!self.base.isRelocatable()) {
if (self.phdr_zig_load_re_index == null) {
const filesz = options.program_code_size_hint;
const off = self.findFreeSpace(filesz, self.page_size);
const off = try self.findFreeSpace(filesz, self.page_size);
self.phdr_zig_load_re_index = try self.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
@ -614,7 +622,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
if (self.phdr_zig_load_ro_index == null) {
const alignment = self.page_size;
const filesz: u64 = 1024;
const off = self.findFreeSpace(filesz, alignment);
const off = try self.findFreeSpace(filesz, alignment);
self.phdr_zig_load_ro_index = try self.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
@ -629,7 +637,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
if (self.phdr_zig_load_rw_index == null) {
const alignment = self.page_size;
const filesz: u64 = 1024;
const off = self.findFreeSpace(filesz, alignment);
const off = try self.findFreeSpace(filesz, alignment);
self.phdr_zig_load_rw_index = try self.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
@ -662,7 +670,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.zig_text_section_index.?];
fillSection(self, shdr, options.program_code_size_hint, self.phdr_zig_load_re_index);
try fillSection(self, shdr, options.program_code_size_hint, self.phdr_zig_load_re_index);
if (self.base.isRelocatable()) {
const rela_shndx = try self.addRelaShdr(try self.insertShString(".rela.text.zig"), self.zig_text_section_index.?);
try self.output_rela_sections.putNoClobber(gpa, self.zig_text_section_index.?, .{
@ -688,7 +696,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.zig_data_rel_ro_section_index.?];
fillSection(self, shdr, 1024, self.phdr_zig_load_ro_index);
try fillSection(self, shdr, 1024, self.phdr_zig_load_ro_index);
if (self.base.isRelocatable()) {
const rela_shndx = try self.addRelaShdr(
try self.insertShString(".rela.data.rel.ro.zig"),
@ -717,7 +725,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.zig_data_section_index.?];
fillSection(self, shdr, 1024, self.phdr_zig_load_rw_index);
try fillSection(self, shdr, 1024, self.phdr_zig_load_rw_index);
if (self.base.isRelocatable()) {
const rela_shndx = try self.addRelaShdr(
try self.insertShString(".rela.data.zig"),
@ -758,24 +766,16 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_bss_section_index.?, .{});
}
if (zig_object.dwarf) |*dw| {
if (zig_object.dwarf) |*dwarf| {
if (self.debug_str_section_index == null) {
assert(dw.strtab.buffer.items.len == 0);
try dw.strtab.buffer.append(gpa, 0);
self.debug_str_section_index = try self.addSection(.{
.name = try self.insertShString(".debug_str"),
.flags = elf.SHF_MERGE | elf.SHF_STRINGS,
.entsize = 1,
.type = elf.SHT_PROGBITS,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_str_section_index.?];
const size = @as(u64, @intCast(dw.strtab.buffer.items.len));
const off = self.findFreeSpace(size, 1);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_strtab_dirty = true;
zig_object.debug_str_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_str_section_index.?, .{});
}
@ -784,14 +784,8 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.name = try self.insertShString(".debug_info"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_info_section_index.?];
const size: u64 = 200;
const off = self.findFreeSpace(size, 1);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_info_header_dirty = true;
zig_object.debug_info_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_info_section_index.?, .{});
}
@ -800,13 +794,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.name = try self.insertShString(".debug_abbrev"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_abbrev_section_index.?];
const size: u64 = 128;
const off = self.findFreeSpace(size, 1);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_abbrev_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_abbrev_section_index.?, .{});
}
@ -816,13 +804,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.name = try self.insertShString(".debug_aranges"),
.type = elf.SHT_PROGBITS,
.addralign = 16,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_aranges_section_index.?];
const size: u64 = 160;
const off = self.findFreeSpace(size, 16);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_aranges_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_aranges_section_index.?, .{});
}
@ -832,62 +814,83 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.name = try self.insertShString(".debug_line"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_line_section_index.?];
const size: u64 = 250;
const off = self.findFreeSpace(size, 1);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_line_header_dirty = true;
zig_object.debug_line_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_line_section_index.?, .{});
}
}
// We need to find current max assumed file offset, and actually write to file to make it a reality.
var end_pos: u64 = 0;
for (self.shdrs.items) |shdr| {
if (shdr.sh_offset == std.math.maxInt(u64)) continue;
end_pos = @max(end_pos, shdr.sh_offset + shdr.sh_size);
if (self.debug_line_str_section_index == null) {
self.debug_line_str_section_index = try self.addSection(.{
.name = try self.insertShString(".debug_line_str"),
.flags = elf.SHF_MERGE | elf.SHF_STRINGS,
.entsize = 1,
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
zig_object.debug_line_str_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_line_str_section_index.?, .{});
}
if (self.debug_loclists_section_index == null) {
self.debug_loclists_section_index = try self.addSection(.{
.name = try self.insertShString(".debug_loclists"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
zig_object.debug_loclists_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_loclists_section_index.?, .{});
}
if (self.debug_rnglists_section_index == null) {
self.debug_rnglists_section_index = try self.addSection(.{
.name = try self.insertShString(".debug_rnglists"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
zig_object.debug_rnglists_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_rnglists_section_index.?, .{});
}
try dwarf.initMetadata();
}
try self.base.file.?.pwriteAll(&[1]u8{0}, end_pos);
}
pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
const shdr = &self.shdrs.items[shdr_index];
const maybe_phdr = if (self.phdr_to_shdr_table.get(shdr_index)) |phndx| &self.phdrs.items[phndx] else null;
const is_zerofill = shdr.sh_type == elf.SHT_NOBITS;
log.debug("allocated size {x} of {s}, needed size {x}", .{
self.allocatedSize(shdr.sh_offset),
self.getShString(shdr.sh_name),
needed_size,
});
if (needed_size > self.allocatedSize(shdr.sh_offset) and !is_zerofill) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Must move the entire section.
const alignment = if (maybe_phdr) |phdr| phdr.p_align else shdr.sh_addralign;
const new_offset = self.findFreeSpace(needed_size, alignment);
if (shdr.sh_type != elf.SHT_NOBITS) {
const allocated_size = self.allocatedSize(shdr.sh_offset);
if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Must move the entire section.
const alignment = if (maybe_phdr) |phdr| phdr.p_align else shdr.sh_addralign;
const new_offset = try self.findFreeSpace(needed_size, alignment);
log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
new_offset,
new_offset + existing_size,
});
log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
new_offset,
new_offset + existing_size,
});
const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, existing_size);
// TODO figure out what to about this error condition - how to communicate it up.
if (amt != existing_size) return error.InputOutput;
const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, existing_size);
// TODO figure out what to about this error condition - how to communicate it up.
if (amt != existing_size) return error.InputOutput;
shdr.sh_offset = new_offset;
if (maybe_phdr) |phdr| phdr.p_offset = new_offset;
}
shdr.sh_size = needed_size;
if (!is_zerofill) {
shdr.sh_offset = new_offset;
if (maybe_phdr) |phdr| phdr.p_offset = new_offset;
}
if (maybe_phdr) |phdr| phdr.p_filesz = needed_size;
}
shdr.sh_size = needed_size;
if (maybe_phdr) |phdr| {
const mem_capacity = self.allocatedVirtualSize(phdr.p_vaddr);
@ -915,11 +918,14 @@ pub fn growNonAllocSection(
) !void {
const shdr = &self.shdrs.items[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
const allocated_size = self.allocatedSize(shdr.sh_offset);
if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, min_alignment);
const new_offset = try self.findFreeSpace(needed_size, min_alignment);
log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
@ -939,7 +945,6 @@ pub fn growNonAllocSection(
shdr.sh_offset = new_offset;
}
shdr.sh_size = needed_size;
self.markDirty(shdr_index);
@ -949,15 +954,21 @@ pub fn markDirty(self: *Elf, shdr_index: u32) void {
const zig_object = self.zigObjectPtr().?;
if (zig_object.dwarf) |_| {
if (self.debug_info_section_index.? == shdr_index) {
zig_object.debug_info_header_dirty = true;
} else if (self.debug_line_section_index.? == shdr_index) {
zig_object.debug_line_header_dirty = true;
zig_object.debug_info_section_dirty = true;
} else if (self.debug_abbrev_section_index.? == shdr_index) {
zig_object.debug_abbrev_section_dirty = true;
} else if (self.debug_str_section_index.? == shdr_index) {
zig_object.debug_strtab_dirty = true;
zig_object.debug_str_section_dirty = true;
} else if (self.debug_aranges_section_index.? == shdr_index) {
zig_object.debug_aranges_section_dirty = true;
} else if (self.debug_line_section_index.? == shdr_index) {
zig_object.debug_line_section_dirty = true;
} else if (self.debug_line_str_section_index.? == shdr_index) {
zig_object.debug_line_str_section_dirty = true;
} else if (self.debug_loclists_section_index.? == shdr_index) {
zig_object.debug_loclists_section_dirty = true;
} else if (self.debug_rnglists_section_index.? == shdr_index) {
zig_object.debug_rnglists_section_dirty = true;
}
}
}
@ -1306,6 +1317,8 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
try self.base.file.?.pwriteAll(code, file_offset);
}
if (zo.dwarf) |*dwarf| try dwarf.resolveRelocs();
if (has_reloc_errors) return error.FlushFailure;
}
@ -2667,7 +2680,7 @@ pub fn writeShdrTable(self: *Elf) !void {
if (needed_size > self.allocatedSize(shoff)) {
self.shdr_table_offset = null;
self.shdr_table_offset = self.findFreeSpace(needed_size, shalign);
self.shdr_table_offset = try self.findFreeSpace(needed_size, shalign);
}
log.debug("writing section headers from 0x{x} to 0x{x}", .{
@ -2900,6 +2913,18 @@ pub fn updateNav(
return self.zigObjectPtr().?.updateNav(self, pt, nav);
}
pub fn updateContainerType(
self: *Elf,
pt: Zcu.PerThread,
ty: InternPool.Index,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |_| return;
return self.zigObjectPtr().?.updateContainerType(pt, ty);
}
pub fn updateExports(
self: *Elf,
pt: Zcu.PerThread,
@ -3658,11 +3683,14 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
&self.zig_data_rel_ro_section_index,
&self.zig_data_section_index,
&self.zig_bss_section_index,
&self.debug_str_section_index,
&self.debug_info_section_index,
&self.debug_abbrev_section_index,
&self.debug_str_section_index,
&self.debug_aranges_section_index,
&self.debug_line_section_index,
&self.debug_line_str_section_index,
&self.debug_loclists_section_index,
&self.debug_rnglists_section_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
@ -3787,6 +3815,7 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
const atom_ptr = zo.atom(atom_index) orelse continue;
atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index];
}
if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata();
}
for (self.output_rela_sections.keys(), self.output_rela_sections.values()) |shndx, sec| {
@ -3992,7 +4021,7 @@ fn allocatePhdrTable(self: *Elf) error{OutOfMemory}!void {
/// Allocates alloc sections and creates load segments for sections
/// extracted from input object files.
pub fn allocateAllocSections(self: *Elf) error{OutOfMemory}!void {
pub fn allocateAllocSections(self: *Elf) !void {
// We use this struct to track maximum alignment of all TLS sections.
// According to https://github.com/rui314/mold/commit/bd46edf3f0fe9e1a787ea453c4657d535622e61f in mold,
// in-file offsets have to be aligned against the start of TLS program header.
@ -4112,7 +4141,7 @@ pub fn allocateAllocSections(self: *Elf) error{OutOfMemory}!void {
}
const first = self.shdrs.items[cover.items[0]];
var off = self.findFreeSpace(filesz, @"align");
var off = try self.findFreeSpace(filesz, @"align");
const phndx = try self.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
@ -4147,7 +4176,7 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
const needed_size = shdr.sh_size;
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
shdr.sh_size = 0;
const new_offset = self.findFreeSpace(needed_size, shdr.sh_addralign);
const new_offset = try self.findFreeSpace(needed_size, shdr.sh_addralign);
if (self.isDebugSection(@intCast(shndx))) {
log.debug("moving {s} from 0x{x} to 0x{x}", .{
@ -4167,6 +4196,12 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
break :blk zig_object.debug_aranges_section_zig_size;
if (shndx == self.debug_line_section_index.?)
break :blk zig_object.debug_line_section_zig_size;
if (shndx == self.debug_line_str_section_index.?)
break :blk zig_object.debug_line_str_section_zig_size;
if (shndx == self.debug_loclists_section_index.?)
break :blk zig_object.debug_loclists_section_zig_size;
if (shndx == self.debug_rnglists_section_index.?)
break :blk zig_object.debug_rnglists_section_zig_size;
unreachable;
};
const amt = try self.base.file.?.copyRangeAll(
@ -4275,6 +4310,12 @@ fn writeAtoms(self: *Elf) !void {
break :blk zig_object.debug_aranges_section_zig_size;
if (shndx == self.debug_line_section_index.?)
break :blk zig_object.debug_line_section_zig_size;
if (shndx == self.debug_line_str_section_index.?)
break :blk zig_object.debug_line_str_section_zig_size;
if (shndx == self.debug_loclists_section_index.?)
break :blk zig_object.debug_loclists_section_zig_size;
if (shndx == self.debug_rnglists_section_index.?)
break :blk zig_object.debug_rnglists_section_zig_size;
unreachable;
} else 0;
const sh_offset = shdr.sh_offset + base_offset;
@ -5044,6 +5085,9 @@ pub fn isDebugSection(self: Elf, shndx: u32) bool {
self.debug_str_section_index,
self.debug_aranges_section_index,
self.debug_line_section_index,
self.debug_line_str_section_index,
self.debug_loclists_section_index,
self.debug_rnglists_section_index,
}) |maybe_index| {
if (maybe_index) |index| {
if (index == shndx) return true;
@ -5109,7 +5153,7 @@ pub const AddSectionOpts = struct {
pub fn addSection(self: *Elf, opts: AddSectionOpts) !u32 {
const gpa = self.base.comp.gpa;
const index = @as(u32, @intCast(self.shdrs.items.len));
const index: u32 = @intCast(self.shdrs.items.len);
const shdr = try self.shdrs.addOne(gpa);
shdr.* = .{
.sh_name = opts.name,

View file

@ -201,11 +201,12 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
// range of the compilation unit. When we expand the text section, this range changes,
// so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
zig_object.debug_info_header_dirty = true;
zig_object.debug_info_section_dirty = true;
// This becomes dirty for the same reason. We could potentially make this more
// fine-grained with the addition of support for more compilation units. It is planned to
// model each package as a different compilation unit.
zig_object.debug_aranges_section_dirty = true;
zig_object.debug_rnglists_section_dirty = true;
}
}
shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);

View file

@ -41,11 +41,14 @@ tls_variables: TlsTable = .{},
/// Table of tracked `Uav`s.
uavs: UavTable = .{},
debug_strtab_dirty: bool = false,
debug_info_section_dirty: bool = false,
debug_abbrev_section_dirty: bool = false,
debug_aranges_section_dirty: bool = false,
debug_info_header_dirty: bool = false,
debug_line_header_dirty: bool = false,
debug_str_section_dirty: bool = false,
debug_line_section_dirty: bool = false,
debug_line_str_section_dirty: bool = false,
debug_loclists_section_dirty: bool = false,
debug_rnglists_section_dirty: bool = false,
/// Size contribution of Zig's metadata to each debug section.
/// Used to track start of metadata from input object files.
@ -54,6 +57,9 @@ debug_abbrev_section_zig_size: u64 = 0,
debug_str_section_zig_size: u64 = 0,
debug_aranges_section_zig_size: u64 = 0,
debug_line_section_zig_size: u64 = 0,
debug_line_str_section_zig_size: u64 = 0,
debug_loclists_section_zig_size: u64 = 0,
debug_rnglists_section_zig_size: u64 = 0,
pub const global_symbol_bit: u32 = 0x80000000;
pub const symbol_mask: u32 = 0x7fffffff;
@ -76,10 +82,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf) !void {
switch (comp.config.debug_format) {
.strip => {},
.dwarf => |v| {
assert(v == .@"32");
self.dwarf = Dwarf.init(&elf_file.base, .dwarf32);
},
.dwarf => |v| self.dwarf = Dwarf.init(&elf_file.base, v),
.code_view => unreachable,
}
}
@ -119,8 +122,8 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
}
self.tls_variables.deinit(allocator);
if (self.dwarf) |*dw| {
dw.deinit();
if (self.dwarf) |*dwarf| {
dwarf.deinit();
}
}
@ -165,44 +168,14 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
}
}
if (self.dwarf) |*dw| {
if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };
try dw.flushModule(pt);
try dwarf.flushModule(pt);
// TODO I need to re-think how to handle ZigObject's debug sections AND debug sections
// extracted from input object files correctly.
if (self.debug_abbrev_section_dirty) {
try dw.writeDbgAbbrev();
self.debug_abbrev_section_dirty = false;
}
if (self.debug_info_header_dirty) {
const text_shdr = elf_file.shdrs.items[elf_file.zig_text_section_index.?];
const low_pc = text_shdr.sh_addr;
const high_pc = text_shdr.sh_addr + text_shdr.sh_size;
try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
if (self.debug_aranges_section_dirty) {
const text_shdr = elf_file.shdrs.items[elf_file.zig_text_section_index.?];
try dw.writeDbgAranges(text_shdr.sh_addr, text_shdr.sh_size);
self.debug_aranges_section_dirty = false;
}
if (self.debug_line_header_dirty) {
try dw.writeDbgLineHeader();
self.debug_line_header_dirty = false;
}
if (elf_file.debug_str_section_index) |shndx| {
if (self.debug_strtab_dirty or dw.strtab.buffer.items.len != elf_file.shdrs.items[shndx].sh_size) {
try elf_file.growNonAllocSection(shndx, dw.strtab.buffer.items.len, 1, false);
const shdr = elf_file.shdrs.items[shndx];
try elf_file.base.file.?.pwriteAll(dw.strtab.buffer.items, shdr.sh_offset);
self.debug_strtab_dirty = false;
}
}
self.debug_abbrev_section_dirty = false;
self.debug_aranges_section_dirty = false;
self.debug_rnglists_section_dirty = false;
self.debug_str_section_dirty = false;
self.saveDebugSectionsSizes(elf_file);
}
@ -213,7 +186,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
// such as debug_line_header_dirty and debug_info_header_dirty.
assert(!self.debug_abbrev_section_dirty);
assert(!self.debug_aranges_section_dirty);
assert(!self.debug_strtab_dirty);
assert(!self.debug_rnglists_section_dirty);
assert(!self.debug_str_section_dirty);
}
fn saveDebugSectionsSizes(self: *ZigObject, elf_file: *Elf) void {
@ -232,6 +206,15 @@ fn saveDebugSectionsSizes(self: *ZigObject, elf_file: *Elf) void {
if (elf_file.debug_line_section_index) |shndx| {
self.debug_line_section_zig_size = elf_file.shdrs.items[shndx].sh_size;
}
if (elf_file.debug_line_str_section_index) |shndx| {
self.debug_line_str_section_zig_size = elf_file.shdrs.items[shndx].sh_size;
}
if (elf_file.debug_loclists_section_index) |shndx| {
self.debug_loclists_section_zig_size = elf_file.shdrs.items[shndx].sh_size;
}
if (elf_file.debug_rnglists_section_index) |shndx| {
self.debug_rnglists_section_zig_size = elf_file.shdrs.items[shndx].sh_size;
}
}
fn newSymbol(self: *ZigObject, allocator: Allocator, name_off: u32, st_bind: u4) !Symbol.Index {
@ -783,8 +766,8 @@ pub fn freeNav(self: *ZigObject, elf_file: *Elf, nav_index: InternPool.Nav.Index
kv.value.exports.deinit(gpa);
}
if (self.dwarf) |*dw| {
dw.freeNav(nav_index);
if (self.dwarf) |*dwarf| {
dwarf.freeNav(nav_index);
}
}
@ -1034,8 +1017,8 @@ pub fn updateFunc(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var dwarf_state = if (self.dwarf) |*dw| try dw.initNavState(pt, func.owner_nav) else null;
defer if (dwarf_state) |*ds| ds.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateFunction(
&elf_file.base,
@ -1045,7 +1028,7 @@ pub fn updateFunc(
air,
liveness,
&code_buffer,
if (dwarf_state) |*ds| .{ .dwarf = ds } else .none,
if (debug_wip_nav) |*dn| .{ .dwarf = dn } else .none,
);
const code = switch (res) {
@ -1072,14 +1055,17 @@ pub fn updateFunc(
break :blk .{ atom_ptr.value, atom_ptr.alignment };
};
if (dwarf_state) |*ds| {
if (debug_wip_nav) |*wip_nav| {
const sym = self.symbol(sym_index);
try self.dwarf.?.commitNavState(
try self.dwarf.?.finishWipNav(
pt,
func.owner_nav,
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
ds,
.{
.index = sym_index,
.addr = @intCast(sym.address(.{}, elf_file)),
.size = sym.atom(elf_file).?.size,
},
wip_nav,
);
}
@ -1152,59 +1138,75 @@ pub fn updateNav(
else => nav_val,
};
const sym_index = try self.getOrCreateMetadataForNav(elf_file, nav_index);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(elf_file);
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const sym_index = try self.getOrCreateMetadataForNav(elf_file, nav_index);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(elf_file);
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var nav_state: ?Dwarf.NavState = if (self.dwarf) |*dw| try dw.initNavState(pt, nav_index) else null;
defer if (nav_state) |*ns| ns.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
// TODO implement .debug_info for global variables
const res = try codegen.generateSymbol(
&elf_file.base,
pt,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
if (nav_state) |*ns| .{ .dwarf = ns } else .none,
.{ .parent_atom_index = sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
log.debug("setting shdr({x},{s}) for {}", .{
shndx,
elf_file.getShString(elf_file.shdrs.items[shndx].sh_name),
nav.fqn.fmt(ip),
});
if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0)
try self.updateTlv(elf_file, pt, nav_index, sym_index, shndx, code)
else
try self.updateNavCode(elf_file, pt, nav_index, sym_index, shndx, code, elf.STT_OBJECT);
if (nav_state) |*ns| {
const sym = self.symbol(sym_index);
try self.dwarf.?.commitNavState(
// TODO implement .debug_info for global variables
const res = try codegen.generateSymbol(
&elf_file.base,
pt,
nav_index,
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
ns,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
.{ .parent_atom_index = sym_index },
);
}
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
log.debug("setting shdr({x},{s}) for {}", .{
shndx,
elf_file.getShString(elf_file.shdrs.items[shndx].sh_name),
nav.fqn.fmt(ip),
});
if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0)
try self.updateTlv(elf_file, pt, nav_index, sym_index, shndx, code)
else
try self.updateNavCode(elf_file, pt, nav_index, sym_index, shndx, code, elf.STT_OBJECT);
if (debug_wip_nav) |*wip_nav| {
const sym = self.symbol(sym_index);
try self.dwarf.?.finishWipNav(
pt,
nav_index,
.{
.index = sym_index,
.addr = @intCast(sym.address(.{}, elf_file)),
.size = sym.atom(elf_file).?.size,
},
wip_nav,
);
}
} else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index);
// Exports will be updated by `Zcu.processExports` after the update.
}
pub fn updateContainerType(
self: *ZigObject,
pt: Zcu.PerThread,
ty: InternPool.Index,
) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
if (self.dwarf) |*dwarf| try dwarf.updateContainerType(pt, ty);
}
fn updateLazySymbol(
self: *ZigObject,
elf_file: *Elf,
@ -1441,8 +1443,8 @@ pub fn updateNavLineNumber(
log.debug("updateNavLineNumber {}({d})", .{ nav.fqn.fmt(ip), nav_index });
if (self.dwarf) |*dw| {
try dw.updateNavLineNumber(pt.zcu, nav_index);
if (self.dwarf) |*dwarf| {
try dwarf.updateNavLineNumber(pt.zcu, nav_index);
}
}

View file

@ -401,7 +401,7 @@ fn allocateAllocSections(elf_file: *Elf) !void {
const needed_size = shdr.sh_size;
if (needed_size > elf_file.allocatedSize(shdr.sh_offset)) {
shdr.sh_size = 0;
const new_offset = elf_file.findFreeSpace(needed_size, shdr.sh_addralign);
const new_offset = try elf_file.findFreeSpace(needed_size, shdr.sh_addralign);
shdr.sh_offset = new_offset;
shdr.sh_size = needed_size;
}
@ -434,6 +434,12 @@ fn writeAtoms(elf_file: *Elf) !void {
break :blk zig_object.debug_aranges_section_zig_size;
if (shndx == elf_file.debug_line_section_index.?)
break :blk zig_object.debug_line_section_zig_size;
if (shndx == elf_file.debug_line_str_section_index.?)
break :blk zig_object.debug_line_str_section_zig_size;
if (shndx == elf_file.debug_loclists_section_index.?)
break :blk zig_object.debug_loclists_section_zig_size;
if (shndx == elf_file.debug_rnglists_section_index.?)
break :blk zig_object.debug_rnglists_section_zig_size;
unreachable;
} else 0;
const sh_offset = shdr.sh_offset + base_offset;

View file

@ -94,6 +94,9 @@ debug_abbrev_sect_index: ?u8 = null,
debug_str_sect_index: ?u8 = null,
debug_aranges_sect_index: ?u8 = null,
debug_line_sect_index: ?u8 = null,
debug_line_str_sect_index: ?u8 = null,
debug_loclists_sect_index: ?u8 = null,
debug_rnglists_sect_index: ?u8 = null,
has_tlv: AtomicBool = AtomicBool.init(false),
binds_to_weak: AtomicBool = AtomicBool.init(false),
@ -1789,12 +1792,42 @@ pub fn sortSections(self: *MachO) !void {
self.sections.appendAssumeCapacity(slice.get(sorted.index));
}
for (&[_]*?u8{
&self.data_sect_index,
&self.got_sect_index,
&self.zig_text_sect_index,
&self.zig_got_sect_index,
&self.zig_const_sect_index,
&self.zig_data_sect_index,
&self.zig_bss_sect_index,
&self.stubs_sect_index,
&self.stubs_helper_sect_index,
&self.la_symbol_ptr_sect_index,
&self.tlv_ptr_sect_index,
&self.eh_frame_sect_index,
&self.unwind_info_sect_index,
&self.objc_stubs_sect_index,
&self.debug_str_sect_index,
&self.debug_info_sect_index,
&self.debug_abbrev_sect_index,
&self.debug_aranges_sect_index,
&self.debug_line_sect_index,
&self.debug_line_str_sect_index,
&self.debug_loclists_sect_index,
&self.debug_rnglists_sect_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
}
}
if (self.getZigObject()) |zo| {
for (zo.getAtoms()) |atom_index| {
const atom = zo.getAtom(atom_index) orelse continue;
if (!atom.isAlive()) continue;
atom.out_n_sect = backlinks[atom.out_n_sect];
}
if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata();
}
for (self.objects.items) |index| {
@ -1813,32 +1846,6 @@ pub fn sortSections(self: *MachO) !void {
atom.out_n_sect = backlinks[atom.out_n_sect];
}
}
for (&[_]*?u8{
&self.data_sect_index,
&self.got_sect_index,
&self.zig_text_sect_index,
&self.zig_got_sect_index,
&self.zig_const_sect_index,
&self.zig_data_sect_index,
&self.zig_bss_sect_index,
&self.stubs_sect_index,
&self.stubs_helper_sect_index,
&self.la_symbol_ptr_sect_index,
&self.tlv_ptr_sect_index,
&self.eh_frame_sect_index,
&self.unwind_info_sect_index,
&self.objc_stubs_sect_index,
&self.debug_info_sect_index,
&self.debug_str_sect_index,
&self.debug_line_sect_index,
&self.debug_abbrev_sect_index,
&self.debug_info_sect_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
}
}
}
pub fn addAtomsToSections(self: *MachO) !void {
@ -2189,7 +2196,7 @@ fn allocateSections(self: *MachO) !void {
header.size = 0;
// Must move the entire section.
const new_offset = self.findFreeSpace(existing_size, page_size);
const new_offset = try self.findFreeSpace(existing_size, page_size);
log.debug("moving '{s},{s}' from 0x{x} to 0x{x}", .{
header.segName(),
@ -3066,32 +3073,36 @@ pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}
fn detectAllocCollision(self: *MachO, start: u64, size: u64) ?u64 {
fn detectAllocCollision(self: *MachO, start: u64, size: u64) !?u64 {
// Conservatively commit one page size as reserved space for the headers as we
// expect it to grow and everything else be moved in flush anyhow.
const header_size = self.getPageSize();
if (start < header_size)
return header_size;
var at_end = true;
const end = start + padToIdeal(size);
for (self.sections.items(.header)) |header| {
if (header.isZerofill()) continue;
const increased_size = padToIdeal(header.size);
const test_end = header.offset +| increased_size;
if (end > header.offset and start < test_end) {
return test_end;
if (start < test_end) {
if (end > header.offset) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
for (self.segments.items) |seg| {
const increased_size = padToIdeal(seg.filesize);
const test_end = seg.fileoff +| increased_size;
if (end > seg.fileoff and start < test_end) {
return test_end;
if (start < test_end) {
if (end > seg.fileoff) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
if (at_end) try self.base.file.?.setEndPos(end);
return null;
}
@ -3159,9 +3170,9 @@ pub fn allocatedSizeVirtual(self: *MachO, start: u64) u64 {
return min_pos - start;
}
pub fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 {
pub fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) !u64 {
var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
while (try self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForward(u64, item_end, min_alignment);
}
return start;
@ -3210,7 +3221,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
{
const filesize = options.program_code_size_hint;
const off = self.findFreeSpace(filesize, self.getPageSize());
const off = try self.findFreeSpace(filesize, self.getPageSize());
self.zig_text_seg_index = try self.addSegment("__TEXT_ZIG", .{
.fileoff = off,
.filesize = filesize,
@ -3222,7 +3233,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
{
const filesize = options.symbol_count_hint * @sizeOf(u64);
const off = self.findFreeSpace(filesize, self.getPageSize());
const off = try self.findFreeSpace(filesize, self.getPageSize());
self.zig_got_seg_index = try self.addSegment("__GOT_ZIG", .{
.fileoff = off,
.filesize = filesize,
@ -3234,7 +3245,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
{
const filesize: u64 = 1024;
const off = self.findFreeSpace(filesize, self.getPageSize());
const off = try self.findFreeSpace(filesize, self.getPageSize());
self.zig_const_seg_index = try self.addSegment("__CONST_ZIG", .{
.fileoff = off,
.filesize = filesize,
@ -3246,7 +3257,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
{
const filesize: u64 = 1024;
const off = self.findFreeSpace(filesize, self.getPageSize());
const off = try self.findFreeSpace(filesize, self.getPageSize());
self.zig_data_seg_index = try self.addSegment("__DATA_ZIG", .{
.fileoff = off,
.filesize = filesize,
@ -3265,7 +3276,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
});
}
if (options.zo.dwarf) |_| {
if (options.zo.dwarf) |*dwarf| {
// Create dSYM bundle.
log.debug("creating {s}.dSYM bundle", .{options.emit.sub_path});
@ -3288,6 +3299,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
self.d_sym = .{ .allocator = gpa, .file = d_sym_file };
try self.d_sym.?.initMetadata(self);
try dwarf.initMetadata();
}
}
@ -3307,7 +3319,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
const sect = &macho_file.sections.items(.header)[sect_id];
const alignment = try math.powi(u32, 2, sect.@"align");
if (!sect.isZerofill()) {
sect.offset = math.cast(u32, macho_file.findFreeSpace(size, alignment)) orelse
sect.offset = math.cast(u32, try macho_file.findFreeSpace(size, alignment)) orelse
return error.Overflow;
}
sect.addr = macho_file.findFreeSpaceVirtual(size, alignment);
@ -3367,43 +3379,34 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
}
}
if (self.base.isRelocatable() and options.zo.dwarf != null) {
{
self.debug_str_sect_index = try self.addSection("__DWARF", "__debug_str", .{
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_str_sect_index.?, 200);
}
{
self.debug_info_sect_index = try self.addSection("__DWARF", "__debug_info", .{
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_info_sect_index.?, 200);
}
{
self.debug_abbrev_sect_index = try self.addSection("__DWARF", "__debug_abbrev", .{
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_abbrev_sect_index.?, 128);
}
{
self.debug_aranges_sect_index = try self.addSection("__DWARF", "__debug_aranges", .{
.alignment = 4,
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_aranges_sect_index.?, 160);
}
{
self.debug_line_sect_index = try self.addSection("__DWARF", "__debug_line", .{
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_line_sect_index.?, 250);
}
}
if (self.base.isRelocatable()) if (options.zo.dwarf) |*dwarf| {
self.debug_str_sect_index = try self.addSection("__DWARF", "__debug_str", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_info_sect_index = try self.addSection("__DWARF", "__debug_info", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_abbrev_sect_index = try self.addSection("__DWARF", "__debug_abbrev", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_aranges_sect_index = try self.addSection("__DWARF", "__debug_aranges", .{
.alignment = 4,
.flags = macho.S_ATTR_DEBUG,
});
self.debug_line_sect_index = try self.addSection("__DWARF", "__debug_line", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_line_str_sect_index = try self.addSection("__DWARF", "__debug_line_str", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_loclists_sect_index = try self.addSection("__DWARF", "__debug_loclists", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_rnglists_sect_index = try self.addSection("__DWARF", "__debug_rnglists", .{
.flags = macho.S_ATTR_DEBUG,
});
try dwarf.initMetadata();
};
}
pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
@ -3417,35 +3420,36 @@ pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
const sect = &self.sections.items(.header)[sect_index];
if (needed_size > self.allocatedSize(sect.offset) and !sect.isZerofill()) {
const existing_size = sect.size;
sect.size = 0;
// Must move the entire section.
const alignment = self.getPageSize();
const new_offset = self.findFreeSpace(needed_size, alignment);
log.debug("moving '{s},{s}' from 0x{x} to 0x{x}", .{
sect.segName(),
sect.sectName(),
sect.offset,
new_offset,
});
try self.copyRangeAllZeroOut(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
}
sect.size = needed_size;
const seg_id = self.sections.items(.segment_id)[sect_index];
const seg = &self.segments.items[seg_id];
seg.fileoff = sect.offset;
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(sect.offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0;
// Must move the entire section.
const alignment = self.getPageSize();
const new_offset = try self.findFreeSpace(needed_size, alignment);
log.debug("moving '{s},{s}' from 0x{x} to 0x{x}", .{
sect.segName(),
sect.sectName(),
sect.offset,
new_offset,
});
try self.copyRangeAllZeroOut(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
}
seg.filesize = needed_size;
}
sect.size = needed_size;
seg.fileoff = sect.offset;
const mem_capacity = self.allocatedSizeVirtual(seg.vmaddr);
if (needed_size > mem_capacity) {
@ -3464,30 +3468,34 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
const sect = &self.sections.items(.header)[sect_index];
if (needed_size > self.allocatedSize(sect.offset) and !sect.isZerofill()) {
const existing_size = sect.size;
sect.size = 0;
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(sect.offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0;
// Must move the entire section.
const alignment = try math.powi(u32, 2, sect.@"align");
const new_offset = self.findFreeSpace(needed_size, alignment);
const new_addr = self.findFreeSpaceVirtual(needed_size, alignment);
// Must move the entire section.
const alignment = try math.powi(u32, 2, sect.@"align");
const new_offset = try self.findFreeSpace(needed_size, alignment);
const new_addr = self.findFreeSpaceVirtual(needed_size, alignment);
log.debug("new '{s},{s}' file offset 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
sect.segName(),
sect.sectName(),
new_offset,
new_offset + existing_size,
new_addr,
new_addr + existing_size,
});
log.debug("new '{s},{s}' file offset 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
sect.segName(),
sect.sectName(),
new_offset,
new_offset + existing_size,
new_addr,
new_addr + existing_size,
});
try self.copyRangeAll(sect.offset, new_offset, existing_size);
try self.copyRangeAll(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
}
}
sect.size = needed_size;
}
@ -4591,7 +4599,6 @@ const std = @import("std");
const build_options = @import("build_options");
const builtin = @import("builtin");
const assert = std.debug.assert;
const dwarf = std.dwarf;
const fs = std.fs;
const log = std.log.scoped(.link);
const state_log = std.log.scoped(.link_state);

View file

@ -15,6 +15,9 @@ debug_abbrev_section_index: ?u8 = null,
debug_str_section_index: ?u8 = null,
debug_aranges_section_index: ?u8 = null,
debug_line_section_index: ?u8 = null,
debug_line_str_section_index: ?u8 = null,
debug_loclists_section_index: ?u8 = null,
debug_rnglists_section_index: ?u8 = null,
relocs: std.ArrayListUnmanaged(Reloc) = .{},
@ -56,13 +59,16 @@ pub fn initMetadata(self: *DebugSymbols, macho_file: *MachO) !void {
});
}
self.debug_str_section_index = try self.allocateSection("__debug_str", 200, 0);
self.debug_info_section_index = try self.allocateSection("__debug_info", 200, 0);
self.debug_abbrev_section_index = try self.allocateSection("__debug_abbrev", 128, 0);
self.debug_aranges_section_index = try self.allocateSection("__debug_aranges", 160, 4);
self.debug_line_section_index = try self.allocateSection("__debug_line", 250, 0);
self.debug_str_section_index = try self.createSection("__debug_str", 0);
self.debug_info_section_index = try self.createSection("__debug_info", 0);
self.debug_abbrev_section_index = try self.createSection("__debug_abbrev", 0);
self.debug_aranges_section_index = try self.createSection("__debug_aranges", 4);
self.debug_line_section_index = try self.createSection("__debug_line", 0);
self.debug_line_str_section_index = try self.createSection("__debug_line_str", 0);
self.debug_loclists_section_index = try self.createSection("__debug_loclists", 0);
self.debug_rnglists_section_index = try self.createSection("__debug_rnglists", 0);
self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
self.linkedit_segment_cmd_index = @intCast(self.segments.items.len);
try self.segments.append(self.allocator, .{
.segname = makeStaticString("__LINKEDIT"),
.maxprot = macho.PROT.READ,
@ -71,27 +77,17 @@ pub fn initMetadata(self: *DebugSymbols, macho_file: *MachO) !void {
});
}
fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignment: u16) !u8 {
fn createSection(self: *DebugSymbols, sectname: []const u8, alignment: u16) !u8 {
const segment = self.getDwarfSegmentPtr();
var sect = macho.section_64{
.sectname = makeStaticString(sectname),
.segname = segment.segname,
.size = @as(u32, @intCast(size)),
.@"align" = alignment,
};
const alignment_pow_2 = try math.powi(u32, 2, alignment);
const off = self.findFreeSpace(size, alignment_pow_2);
log.debug("found {s},{s} section free space 0x{x} to 0x{x}", .{
sect.segName(),
sect.sectName(),
off,
off + size,
});
log.debug("create {s},{s} section", .{ sect.segName(), sect.sectName() });
sect.offset = @as(u32, @intCast(off));
const index = @as(u8, @intCast(self.sections.items.len));
const index: u8 = @intCast(self.sections.items.len);
try self.sections.append(self.allocator, sect);
segment.cmdsize += @sizeOf(macho.section_64);
segment.nsects += 1;
@ -102,16 +98,19 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
pub fn growSection(
self: *DebugSymbols,
sect_index: u8,
needed_size: u32,
needed_size: u64,
requires_file_copy: bool,
macho_file: *MachO,
) !void {
const sect = self.getSectionPtr(sect_index);
if (needed_size > self.allocatedSize(sect.offset)) {
const allocated_size = self.allocatedSize(sect.offset);
if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.file.setEndPos(sect.offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0; // free the space
const new_offset = self.findFreeSpace(needed_size, 1);
const new_offset = try self.findFreeSpace(needed_size, 1);
log.debug("moving {s} section: {} bytes from 0x{x} to 0x{x}", .{
sect.sectName(),
@ -130,7 +129,7 @@ pub fn growSection(
if (amt != existing_size) return error.InputOutput;
}
sect.offset = @as(u32, @intCast(new_offset));
sect.offset = @intCast(new_offset);
}
sect.size = needed_size;
@ -153,22 +152,27 @@ pub fn markDirty(self: *DebugSymbols, sect_index: u8, macho_file: *MachO) void {
}
}
fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) ?u64 {
fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) !?u64 {
var at_end = true;
const end = start + padToIdeal(size);
for (self.sections.items) |section| {
const increased_size = padToIdeal(section.size);
const test_end = section.offset + increased_size;
if (end > section.offset and start < test_end) {
return test_end;
if (start < test_end) {
if (end > section.offset) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
if (at_end) try self.file.setEndPos(end);
return null;
}
fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 {
fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) !u64 {
const segment = self.getDwarfSegmentPtr();
var offset: u64 = segment.fileoff;
while (self.detectAllocCollision(offset, object_size)) |item_end| {
while (try self.detectAllocCollision(offset, object_size)) |item_end| {
offset = mem.alignForward(u64, item_end, min_alignment);
}
return offset;
@ -346,6 +350,7 @@ fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds
}
fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
if (start == 0) return 0;
const seg = self.getDwarfSegmentPtr();
assert(start >= seg.fileoff);
var min_pos: u64 = std.math.maxInt(u64);
@ -413,9 +418,9 @@ pub fn writeStrtab(self: *DebugSymbols, off: u32) !u32 {
pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @as(u8, @intCast(seg.nsects));
start += @as(u8, @intCast(seg.nsects));
const nsects: u8 = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(seg.nsects);
start += @intCast(seg.nsects);
} else 0;
return .{ .start = start, .end = start + nsects };
}

View file

@ -55,8 +55,7 @@ pub fn init(self: *ZigObject, macho_file: *MachO) !void {
switch (comp.config.debug_format) {
.strip => {},
.dwarf => |v| {
assert(v == .@"32");
self.dwarf = Dwarf.init(&macho_file.base, .dwarf32);
self.dwarf = Dwarf.init(&macho_file.base, v);
self.debug_strtab_dirty = true;
self.debug_abbrev_dirty = true;
self.debug_aranges_dirty = true;
@ -101,8 +100,8 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
}
self.tlv_initializers.deinit(allocator);
if (self.dwarf) |*dw| {
dw.deinit();
if (self.dwarf) |*dwarf| {
dwarf.deinit();
}
}
@ -595,56 +594,13 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
if (metadata.const_state != .unused) metadata.const_state = .flushed;
}
if (self.dwarf) |*dw| {
if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid };
try dw.flushModule(pt);
try dwarf.flushModule(pt);
if (self.debug_abbrev_dirty) {
try dw.writeDbgAbbrev();
self.debug_abbrev_dirty = false;
}
if (self.debug_info_header_dirty) {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_section = macho_file.sections.items(.header)[macho_file.zig_text_sect_index.?];
const low_pc = text_section.addr;
const high_pc = text_section.addr + text_section.size;
try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
if (self.debug_aranges_dirty) {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_section = macho_file.sections.items(.header)[macho_file.zig_text_sect_index.?];
try dw.writeDbgAranges(text_section.addr, text_section.size);
self.debug_aranges_dirty = false;
}
if (self.debug_line_header_dirty) {
try dw.writeDbgLineHeader();
self.debug_line_header_dirty = false;
}
if (!macho_file.base.isRelocatable()) {
const d_sym = macho_file.getDebugSymbols().?;
const sect_index = d_sym.debug_str_section_index.?;
if (self.debug_strtab_dirty or dw.strtab.buffer.items.len != d_sym.getSection(sect_index).size) {
const needed_size = @as(u32, @intCast(dw.strtab.buffer.items.len));
try d_sym.growSection(sect_index, needed_size, false, macho_file);
try d_sym.file.pwriteAll(dw.strtab.buffer.items, d_sym.getSection(sect_index).offset);
self.debug_strtab_dirty = false;
}
} else {
const sect_index = macho_file.debug_str_sect_index.?;
if (self.debug_strtab_dirty or dw.strtab.buffer.items.len != macho_file.sections.items(.header)[sect_index].size) {
const needed_size = @as(u32, @intCast(dw.strtab.buffer.items.len));
try macho_file.growSection(sect_index, needed_size);
try macho_file.base.file.?.pwriteAll(dw.strtab.buffer.items, macho_file.sections.items(.header)[sect_index].offset);
self.debug_strtab_dirty = false;
}
}
self.debug_abbrev_dirty = false;
self.debug_aranges_dirty = false;
self.debug_strtab_dirty = false;
}
// The point of flushModule() is to commit changes, so in theory, nothing should
@ -816,8 +772,8 @@ pub fn updateFunc(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var dwarf_state = if (self.dwarf) |*dw| try dw.initNavState(pt, func.owner_nav) else null;
defer if (dwarf_state) |*ds| ds.deinit();
var dwarf_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (dwarf_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateFunction(
&macho_file.base,
@ -827,7 +783,7 @@ pub fn updateFunc(
air,
liveness,
&code_buffer,
if (dwarf_state) |*ds| .{ .dwarf = ds } else .none,
if (dwarf_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
);
const code = switch (res) {
@ -841,14 +797,17 @@ pub fn updateFunc(
const sect_index = try self.getNavOutputSection(macho_file, zcu, func.owner_nav, code);
try self.updateNavCode(macho_file, pt, func.owner_nav, sym_index, sect_index, code);
if (dwarf_state) |*ds| {
if (dwarf_wip_nav) |*wip_nav| {
const sym = self.symbols.items[sym_index];
try self.dwarf.?.commitNavState(
try self.dwarf.?.finishWipNav(
pt,
func.owner_nav,
sym.getAddress(.{}, macho_file),
sym.getAtom(macho_file).?.size,
ds,
.{
.index = sym_index,
.addr = sym.getAddress(.{}, macho_file),
.size = sym.getAtom(macho_file).?.size,
},
wip_nav,
);
}
@ -866,6 +825,7 @@ pub fn updateNav(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav_val = zcu.navValue(nav_index);
const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
.variable => |variable| Value.fromInterned(variable.init),
@ -882,48 +842,53 @@ pub fn updateNav(
else => nav_val,
};
const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index);
self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index);
self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var nav_state: ?Dwarf.NavState = if (self.dwarf) |*dw| try dw.initNavState(pt, nav_index) else null;
defer if (nav_state) |*ns| ns.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateSymbol(
&macho_file.base,
pt,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
if (nav_state) |*ns| .{ .dwarf = ns } else .none,
.{ .parent_atom_index = sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code);
if (isThreadlocal(macho_file, nav_index))
try self.updateTlv(macho_file, pt, nav_index, sym_index, sect_index, code)
else
try self.updateNavCode(macho_file, pt, nav_index, sym_index, sect_index, code);
if (nav_state) |*ns| {
const sym = self.symbols.items[sym_index];
try self.dwarf.?.commitNavState(
const res = try codegen.generateSymbol(
&macho_file.base,
pt,
nav_index,
sym.getAddress(.{}, macho_file),
sym.getAtom(macho_file).?.size,
ns,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
.{ .parent_atom_index = sym_index },
);
}
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code);
if (isThreadlocal(macho_file, nav_index))
try self.updateTlv(macho_file, pt, nav_index, sym_index, sect_index, code)
else
try self.updateNavCode(macho_file, pt, nav_index, sym_index, sect_index, code);
if (debug_wip_nav) |*wip_nav| {
const sym = self.symbols.items[sym_index];
try self.dwarf.?.finishWipNav(
pt,
nav_index,
.{
.index = sym_index,
.addr = sym.getAddress(.{}, macho_file),
.size = sym.getAtom(macho_file).?.size,
},
wip_nav,
);
}
} else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index);
// Exports will be updated by `Zcu.processExports` after the update.
}
@ -1435,8 +1400,8 @@ pub fn updateNavLineNumber(
pt: Zcu.PerThread,
nav_index: InternPool.Nav.Index,
) !void {
if (self.dwarf) |*dw| {
try dw.updateNavLineNumber(pt.zcu, nav_index);
if (self.dwarf) |*dwarf| {
try dwarf.updateNavLineNumber(pt.zcu, nav_index);
}
}

View file

@ -465,7 +465,7 @@ fn allocateSections(macho_file: *MachO) !void {
const alignment = try math.powi(u32, 2, header.@"align");
if (!header.isZerofill()) {
if (needed_size > macho_file.allocatedSize(header.offset)) {
header.offset = math.cast(u32, macho_file.findFreeSpace(needed_size, alignment)) orelse
header.offset = math.cast(u32, try macho_file.findFreeSpace(needed_size, alignment)) orelse
return error.Overflow;
}
}

View file

@ -454,28 +454,31 @@ pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde
},
else => nav_val,
};
const atom_idx = try self.seeNav(pt, nav_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
// TODO we need the symbol index for symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(&self.base, pt, zcu.navSrcLoc(nav_index), nav_init, &code_buffer, .none, .{
.parent_atom_index = @intCast(atom_idx),
});
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, nav_index, em);
return;
},
};
try self.data_nav_table.ensureUnusedCapacity(gpa, 1);
const duped_code = try gpa.dupe(u8, code);
self.getAtomPtr(self.navs.get(nav_index).?.index).code = .{ .code_ptr = null, .other = .{ .nav_index = nav_index } };
if (self.data_nav_table.fetchPutAssumeCapacity(nav_index, duped_code)) |old_entry| {
gpa.free(old_entry.value);
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const atom_idx = try self.seeNav(pt, nav_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
// TODO we need the symbol index for symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(&self.base, pt, zcu.navSrcLoc(nav_index), nav_init, &code_buffer, .none, .{
.parent_atom_index = @intCast(atom_idx),
});
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, nav_index, em);
return;
},
};
try self.data_nav_table.ensureUnusedCapacity(gpa, 1);
const duped_code = try gpa.dupe(u8, code);
self.getAtomPtr(self.navs.get(nav_index).?.index).code = .{ .code_ptr = null, .other = .{ .nav_index = nav_index } };
if (self.data_nav_table.fetchPutAssumeCapacity(nav_index, duped_code)) |old_entry| {
gpa.free(old_entry.value);
}
try self.updateFinish(pt, nav_index);
}
return self.updateFinish(pt, nav_index);
}
/// called at the end of update{Decl,Func}

View file

@ -248,46 +248,49 @@ pub fn updateNav(
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
const is_extern, const lib_name, const nav_init = switch (ip.indexToKey(nav.status.resolved.val)) {
.variable => |variable| .{ false, variable.lib_name, variable.init },
const nav_val = zcu.navValue(nav_index);
const is_extern, const lib_name, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
.variable => |variable| .{ false, variable.lib_name, Value.fromInterned(variable.init) },
.func => return,
.@"extern" => |@"extern"| if (ip.isFunctionType(nav.typeOf(ip)))
return
else
.{ true, @"extern".lib_name, nav.status.resolved.val },
else => .{ false, .none, nav.status.resolved.val },
.{ true, @"extern".lib_name, nav_val },
else => .{ false, .none, nav_val },
};
const gpa = wasm_file.base.comp.gpa;
const atom_index = try zig_object.getOrCreateAtomForNav(wasm_file, pt, nav_index);
const atom = wasm_file.getAtomPtr(atom_index);
atom.clear();
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const gpa = wasm_file.base.comp.gpa;
const atom_index = try zig_object.getOrCreateAtomForNav(wasm_file, pt, nav_index);
const atom = wasm_file.getAtomPtr(atom_index);
atom.clear();
if (is_extern)
return zig_object.addOrUpdateImport(wasm_file, nav.name.toSlice(ip), atom.sym_index, lib_name.toSlice(ip), null);
if (is_extern)
return zig_object.addOrUpdateImport(wasm_file, nav.name.toSlice(ip), atom.sym_index, lib_name.toSlice(ip), null);
var code_writer = std.ArrayList(u8).init(gpa);
defer code_writer.deinit();
var code_writer = std.ArrayList(u8).init(gpa);
defer code_writer.deinit();
const res = try codegen.generateSymbol(
&wasm_file.base,
pt,
zcu.navSrcLoc(nav_index),
Value.fromInterned(nav_init),
&code_writer,
.none,
.{ .parent_atom_index = @intFromEnum(atom.sym_index) },
);
const res = try codegen.generateSymbol(
&wasm_file.base,
pt,
zcu.navSrcLoc(nav_index),
nav_init,
&code_writer,
.none,
.{ .parent_atom_index = @intFromEnum(atom.sym_index) },
);
const code = switch (res) {
.ok => code_writer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const code = switch (res) {
.ok => code_writer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
return zig_object.finishUpdateNav(wasm_file, pt, nav_index, code);
try zig_object.finishUpdateNav(wasm_file, pt, nav_index, code);
}
}
pub fn updateFunc(

View file

@ -746,7 +746,7 @@ const Writer = struct {
fn writeIntBig(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].str;
const byte_count = inst_data.len * @sizeOf(std.math.big.Limb);
const limb_bytes = self.code.nullTerminatedString(inst_data.start)[0..byte_count];
const limb_bytes = self.code.string_bytes[@intFromEnum(inst_data.start)..][0..byte_count];
// limb_bytes is not aligned properly; we must allocate and copy the bytes
// in order to accomplish this.
const limbs = try self.gpa.alloc(std.math.big.Limb, inst_data.len);

View file

@ -10,6 +10,7 @@ const Zcu = @import("Zcu.zig");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;
const link = @import("link.zig");
const log = std.log.scoped(.register_manager);
@ -25,7 +26,7 @@ pub const AllocateRegistersError = error{
/// Can happen when spilling an instruction triggers a codegen
/// error, so we propagate that error
CodegenFail,
};
} || link.File.UpdateDebugInfoError;
pub fn RegisterManager(
comptime Function: type,

438
test/src/Debugger.zig Normal file
View file

@ -0,0 +1,438 @@
b: *std.Build,
options: Options,
root_step: *std.Build.Step,
pub const Options = struct {
test_filters: []const []const u8,
gdb: ?[]const u8,
lldb: ?[]const u8,
optimize_modes: []const std.builtin.OptimizeMode,
skip_single_threaded: bool,
skip_non_native: bool,
skip_libc: bool,
};
pub const Target = struct {
resolved: std.Build.ResolvedTarget,
optimize_mode: std.builtin.OptimizeMode = .Debug,
link_libc: ?bool = null,
single_threaded: ?bool = null,
pic: ?bool = null,
test_name_suffix: []const u8,
};
pub fn addTestsForTarget(db: *Debugger, target: Target) void {
db.addLldbTest(
"basic",
target,
&.{
.{
.path = "basic.zig",
.source =
\\const Basic = struct {
\\ void: void = {},
\\ bool_false: bool = false,
\\ bool_true: bool = true,
\\ u0_0: u0 = 0,
\\ u1_0: u1 = 0,
\\ u1_1: u1 = 1,
\\ u2_0: u2 = 0,
\\ u2_3: u2 = 3,
\\ u3_0: u3 = 0,
\\ u3_7: u3 = 7,
\\ u4_0: u4 = 0,
\\ u4_15: u4 = 15,
\\ u5_0: u5 = 0,
\\ u5_31: u5 = 31,
\\ u6_0: u6 = 0,
\\ u6_63: u6 = 63,
\\ u7_0: u7 = 0,
\\ u7_127: u7 = 127,
\\ u8_0: u8 = 0,
\\ u8_255: u8 = 255,
\\ u16_0: u16 = 0,
\\ u16_65535: u16 = 65535,
\\ u24_0: u24 = 0,
\\ u24_16777215: u24 = 16777215,
\\ u32_0: u32 = 0,
\\ u32_4294967295: u32 = 4294967295,
\\ i0_0: i0 = 0,
\\ @"i1_-1": i1 = -1,
\\ i1_0: i1 = 0,
\\ @"i2_-2": i2 = -2,
\\ i2_0: i2 = 0,
\\ i2_1: i2 = 1,
\\ @"i3_-4": i3 = -4,
\\ i3_0: i3 = 0,
\\ i3_3: i3 = 3,
\\ @"i4_-8": i4 = -8,
\\ i4_0: i4 = 0,
\\ i4_7: i4 = 7,
\\ @"i5_-16": i5 = -16,
\\ i5_0: i5 = 0,
\\ i5_15: i5 = 15,
\\ @"i6_-32": i6 = -32,
\\ i6_0: i6 = 0,
\\ i6_31: i6 = 31,
\\ @"i7_-64": i7 = -64,
\\ i7_0: i7 = 0,
\\ i7_63: i7 = 63,
\\ @"i8_-128": i8 = -128,
\\ i8_0: i8 = 0,
\\ i8_127: i8 = 127,
\\ @"i16_-32768": i16 = -32768,
\\ i16_0: i16 = 0,
\\ i16_32767: i16 = 32767,
\\ @"i24_-8388608": i24 = -8388608,
\\ i24_0: i24 = 0,
\\ i24_8388607: i24 = 8388607,
\\ @"i32_-2147483648": i32 = -2147483648,
\\ i32_0: i32 = 0,
\\ i32_2147483647: i32 = 2147483647,
\\ @"f16_42.625": f16 = 42.625,
\\ @"f32_-2730.65625": f32 = -2730.65625,
\\ @"f64_357913941.33203125": f64 = 357913941.33203125,
\\ @"f80_-91625968981.3330078125": f80 = -91625968981.3330078125,
\\ @"f128_384307168202282325.333332061767578125": f128 = 384307168202282325.333332061767578125,
\\};
\\fn testBasic(basic: Basic) void {
\\ _ = basic;
\\}
\\pub fn main() void {
\\ testBasic(.{});
\\}
\\
,
},
},
\\breakpoint set --file basic.zig --source-pattern-regexp '_ = basic;'
\\process launch
\\frame variable --show-types basic
\\breakpoint delete --force
,
&.{
\\(lldb) frame variable --show-types basic
\\(root.basic.Basic) basic = {
\\ (void) void = {}
\\ (bool) bool_false = false
\\ (bool) bool_true = true
\\ (u0) u0_0 = 0
\\ (u1) u1_0 = 0
\\ (u1) u1_1 = 1
\\ (u2) u2_0 = 0
\\ (u2) u2_3 = 3
\\ (u3) u3_0 = 0
\\ (u3) u3_7 = 7
\\ (u4) u4_0 = 0
\\ (u4) u4_15 = 15
\\ (u5) u5_0 = 0
\\ (u5) u5_31 = 31
\\ (u6) u6_0 = 0
\\ (u6) u6_63 = 63
\\ (u7) u7_0 = 0
\\ (u7) u7_127 = 127
\\ (u8) u8_0 = 0
\\ (u8) u8_255 = 255
\\ (u16) u16_0 = 0
\\ (u16) u16_65535 = 65535
\\ (u24) u24_0 = 0
\\ (u24) u24_16777215 = 16777215
\\ (u32) u32_0 = 0
\\ (u32) u32_4294967295 = 4294967295
\\ (i0) i0_0 = 0
\\ (i1) i1_-1 = -1
\\ (i1) i1_0 = 0
\\ (i2) i2_-2 = -2
\\ (i2) i2_0 = 0
\\ (i2) i2_1 = 1
\\ (i3) i3_-4 = -4
\\ (i3) i3_0 = 0
\\ (i3) i3_3 = 3
\\ (i4) i4_-8 = -8
\\ (i4) i4_0 = 0
\\ (i4) i4_7 = 7
\\ (i5) i5_-16 = -16
\\ (i5) i5_0 = 0
\\ (i5) i5_15 = 15
\\ (i6) i6_-32 = -32
\\ (i6) i6_0 = 0
\\ (i6) i6_31 = 31
\\ (i7) i7_-64 = -64
\\ (i7) i7_0 = 0
\\ (i7) i7_63 = 63
\\ (i8) i8_-128 = -128
\\ (i8) i8_0 = 0
\\ (i8) i8_127 = 127
\\ (i16) i16_-32768 = -32768
\\ (i16) i16_0 = 0
\\ (i16) i16_32767 = 32767
\\ (i24) i24_-8388608 = -8388608
\\ (i24) i24_0 = 0
\\ (i24) i24_8388607 = 8388607
\\ (i32) i32_-2147483648 = -2147483648
\\ (i32) i32_0 = 0
\\ (i32) i32_2147483647 = 2147483647
\\ (f16) f16_42.625 = 42.625
\\ (f32) f32_-2730.65625 = -2730.65625
\\ (f64) f64_357913941.33203125 = 357913941.33203125
\\ (f80) f80_-91625968981.3330078125 = -91625968981.3330078125
\\ (f128) f128_384307168202282325.333332061767578125 = 384307168202282325.333332061767578125
\\}
},
);
db.addLldbTest(
"storage",
target,
&.{
.{
.path = "storage.zig",
.source =
\\const global_const: u64 = 0x19e50dc8d6002077;
\\var global_var: u64 = 0xcc423cec08622e32;
\\threadlocal var global_threadlocal1: u64 = 0xb4d643528c042121;
\\threadlocal var global_threadlocal2: u64 = 0x43faea1cf5ad7a22;
\\fn testStorage(
\\ param1: u64,
\\ param2: u64,
\\ param3: u64,
\\ param4: u64,
\\ param5: u64,
\\ param6: u64,
\\ param7: u64,
\\ param8: u64,
\\) callconv(.C) void {
\\ const local_comptime_val: u64 = global_const *% global_const;
\\ const local_comptime_ptr: struct { u64 } = .{ local_comptime_val *% local_comptime_val };
\\ const local_const: u64 = global_var ^ global_threadlocal1 ^ global_threadlocal2 ^
\\ param1 ^ param2 ^ param3 ^ param4 ^ param5 ^ param6 ^ param7 ^ param8;
\\ var local_var: u64 = local_comptime_ptr[0] ^ local_const;
\\ local_var = local_var;
\\}
\\pub fn main() void {
\\ testStorage(
\\ 0x6a607e08125c7e00,
\\ 0x98944cb2a45a8b51,
\\ 0xa320cf10601ee6fb,
\\ 0x691ed3535bad3274,
\\ 0x63690e6867a5799f,
\\ 0x8e163f0ec76067f2,
\\ 0xf9a252c455fb4c06,
\\ 0xc88533722601e481,
\\ );
\\}
\\
,
},
},
\\breakpoint set --file storage.zig --source-pattern-regexp 'local_var = local_var;'
\\process launch
\\target variable --show-types --format hex global_const global_var global_threadlocal1 global_threadlocal2
\\frame variable --show-types --format hex param1 param2 param3 param4 param5 param6 param7 param8 local_comptime_val local_comptime_ptr.0 local_const local_var
\\breakpoint delete --force
,
&.{
\\(lldb) target variable --show-types --format hex global_const global_var global_threadlocal1 global_threadlocal2
\\(u64) global_const = 0x19e50dc8d6002077
\\(u64) global_var = 0xcc423cec08622e32
\\(u64) global_threadlocal1 = 0xb4d643528c042121
\\(u64) global_threadlocal2 = 0x43faea1cf5ad7a22
\\(lldb) frame variable --show-types --format hex param1 param2 param3 param4 param5 param6 param7 param8 local_comptime_val local_comptime_ptr.0 local_const local_var
\\(u64) param1 = 0x6a607e08125c7e00
\\(u64) param2 = 0x98944cb2a45a8b51
\\(u64) param3 = 0xa320cf10601ee6fb
\\(u64) param4 = 0x691ed3535bad3274
\\(u64) param5 = 0x63690e6867a5799f
\\(u64) param6 = 0x8e163f0ec76067f2
\\(u64) param7 = 0xf9a252c455fb4c06
\\(u64) param8 = 0xc88533722601e481
\\(u64) local_comptime_val = 0x69490636f81df751
\\(u64) local_comptime_ptr.0 = 0x82e834dae74767a1
\\(u64) local_const = 0xdffceb8b2f41e205
\\(u64) local_var = 0x5d14df51c80685a4
},
);
db.addLldbTest(
"slices",
target,
&.{
.{
.path = "slices.zig",
.source =
\\pub fn main() void {
\\ {
\\ var array: [4]u32 = .{ 1, 2, 4, 8 };
\\ const slice: []u32 = &array;
\\ _ = slice;
\\ }
\\}
\\
,
},
},
\\breakpoint set --file slices.zig --source-pattern-regexp '_ = slice;'
\\process launch
\\frame variable --show-types array slice
\\breakpoint delete --force
,
&.{
\\(lldb) frame variable --show-types array slice
\\([4]u32) array = {
\\ (u32) [0] = 1
\\ (u32) [1] = 2
\\ (u32) [2] = 4
\\ (u32) [3] = 8
\\}
\\([]u32) slice = {
\\ (u32) [0] = 1
\\ (u32) [1] = 2
\\ (u32) [2] = 4
\\ (u32) [3] = 8
\\}
},
);
db.addLldbTest(
"optionals",
target,
&.{
.{
.path = "optionals.zig",
.source =
\\pub fn main() void {
\\ {
\\ var null_u32: ?u32 = null;
\\ var maybe_u32: ?u32 = null;
\\ var nonnull_u32: ?u32 = 456;
\\ maybe_u32 = 123;
\\ _ = .{ &null_u32, &nonnull_u32 };
\\ }
\\}
\\
,
},
},
\\breakpoint set --file optionals.zig --source-pattern-regexp 'maybe_u32 = 123;'
\\process launch
\\frame variable null_u32 maybe_u32 nonnull_u32
\\breakpoint delete --force
\\
\\breakpoint set --file optionals.zig --source-pattern-regexp '_ = .{ &null_u32, &nonnull_u32 };'
\\process continue
\\frame variable --show-types null_u32 maybe_u32 nonnull_u32
\\breakpoint delete --force
,
&.{
\\(lldb) frame variable null_u32 maybe_u32 nonnull_u32
\\(?u32) null_u32 = null
\\(?u32) maybe_u32 = null
\\(?u32) nonnull_u32 = (nonnull_u32.? = 456)
,
\\(lldb) frame variable --show-types null_u32 maybe_u32 nonnull_u32
\\(?u32) null_u32 = null
\\(?u32) maybe_u32 = {
\\ (u32) maybe_u32.? = 123
\\}
\\(?u32) nonnull_u32 = {
\\ (u32) nonnull_u32.? = 456
\\}
},
);
}
/// One source file of a test program: the relative `path` it is written to
/// and its full `source` text. The first `File` passed to `addTest` becomes
/// the root source file of the test executable.
const File = struct { path: []const u8, source: []const u8 };
/// Registers a debugger test driven by gdb. Does nothing when no gdb binary
/// was supplied via `-Dgdb`.
fn addGdbTest(
    db: *Debugger,
    name: []const u8,
    target: Target,
    files: []const File,
    commands: []const u8,
    expected_output: []const []const u8,
) void {
    // Without a gdb path there is nothing to run; skip registration entirely.
    const gdb_path = db.options.gdb orelse return;
    db.addTest(
        name,
        target,
        files,
        // gdb reads the generated command file via `--command` and exits
        // when the script finishes because of `--batch`.
        &.{ gdb_path, "--batch", "--command" },
        commands,
        // Everything after `--args` is the inferior's argv.
        &.{"--args"},
        expected_output,
    );
}
/// Registers a debugger test driven by lldb. Does nothing when no lldb binary
/// was supplied via `-Dlldb`.
fn addLldbTest(
    db: *Debugger,
    name: []const u8,
    target: Target,
    files: []const File,
    commands: []const u8,
    expected_output: []const []const u8,
) void {
    // Without an lldb path there is nothing to run; skip registration entirely.
    const lldb_path = db.options.lldb orelse return;
    db.addTest(
        name,
        target,
        files,
        // lldb reads the generated command file via `--source` and exits
        // when the script finishes because of `--batch`.
        &.{ lldb_path, "--batch", "--source" },
        commands,
        // Everything after `--` is the debuggee's argv.
        &.{"--"},
        expected_output,
    );
}
/// After a failure while running a script, the debugger starts accepting commands from stdin, and
/// because it is empty, the debugger exits normally with status 0. Choose a non-zero status to
/// return from the debugger script instead to detect it running to completion and indicate success.
/// `addTest` appends `quit {success}` to every command file and asserts the
/// debugger process exits with exactly this code.
const success = 99;
/// Registers one debugger test: writes `files` out, compiles the first one as
/// the root of a self-hosted-backend executable, then runs the debugger as
/// `db_argv1... <command-file> db_argv2... <exe>` and checks that its stdout
/// contains every entry of `expected_output` and that it exits with `success`.
fn addTest(
    db: *Debugger,
    name: []const u8,
    target: Target,
    files: []const File,
    db_argv1: []const []const u8,
    commands: []const u8,
    db_argv2: []const []const u8,
    expected_output: []const []const u8,
) void {
    // Honor `-Dtest-filter`: when any filters are present, run only the tests
    // whose name contains at least one of them. (Previously the condition was
    // inverted: a matching name caused the test to be *skipped*.)
    for (db.options.test_filters) |test_filter| {
        if (std.mem.indexOf(u8, name, test_filter)) |_| break;
    } else if (db.options.test_filters.len > 0) return;
    const files_wf = db.b.addWriteFiles();
    const exe = db.b.addExecutable(.{
        .name = name,
        .target = target.resolved,
        // The first file is the root module; the rest are written next to it.
        .root_source_file = files_wf.add(files[0].path, files[0].source),
        .optimize = target.optimize_mode,
        .link_libc = target.link_libc,
        .single_threaded = target.single_threaded,
        .pic = target.pic,
        // Keep debug info, and use the self-hosted backend and linker — the
        // debug info they emit is exactly what these tests exercise.
        .strip = false,
        .use_llvm = false,
        .use_lld = false,
    });
    for (files[1..]) |file| _ = files_wf.add(file.path, file.source);
    const commands_wf = db.b.addWriteFiles();
    const run = std.Build.Step.Run.create(db.b, db.b.fmt("run {s} {s}", .{ name, target.test_name_suffix }));
    run.addArgs(db_argv1);
    // Append `quit {success}` so the debugger only exits with `success` after
    // every scripted command has run (see the doc comment on `success`).
    run.addFileArg(commands_wf.add(db.b.fmt("{s}.cmd", .{name}), db.b.fmt("{s}\n\nquit {d}\n", .{ commands, success })));
    run.addArgs(db_argv2);
    run.addArtifactArg(exe);
    for (expected_output) |expected| run.addCheck(.{ .expect_stdout_match = db.b.fmt("{s}\n", .{expected}) });
    run.addCheck(.{ .expect_term = .{ .Exited = success } });
    // Empty stdin makes a failed script fall through to EOF instead of
    // hanging in the debugger's interactive prompt.
    run.setStdIn(.{ .bytes = "" });
    db.root_step.dependOn(&run.step);
}
// This file itself is a struct type; `@This()` gives it the name `Debugger`
// used by the methods above.
const Debugger = @This();
const std = @import("std");

View file

@ -17,6 +17,7 @@ pub const TranslateCContext = @import("src/TranslateC.zig");
pub const RunTranslatedCContext = @import("src/RunTranslatedC.zig");
pub const CompareOutputContext = @import("src/CompareOutput.zig");
pub const StackTracesContext = @import("src/StackTrace.zig");
pub const DebuggerContext = @import("src/Debugger.zig");
const TestTarget = struct {
target: std.Target.Query = .{},
@ -1283,3 +1284,36 @@ pub fn addCases(
test_filters,
);
}
/// Creates the `test-debugger` step and populates it with debugger tests for
/// each supported target. Returns null (after attaching a failure to the
/// step) when neither `-Dgdb` nor `-Dlldb` was provided.
pub fn addDebuggerTests(b: *std.Build, options: DebuggerContext.Options) ?*Step {
    const step = b.step("test-debugger", "Run the debugger tests");
    // At least one debugger binary is required; otherwise invoking the step
    // directly reports an error instead of silently passing.
    if (options.gdb == null and options.lldb == null) {
        step.dependOn(&b.addFail("test-debugger requires -Dgdb and/or -Dlldb").step);
        return null;
    }
    var context: DebuggerContext = .{
        .b = b,
        .options = options,
        .root_step = step,
    };
    // Same resolved target for both variants; only position independence differs.
    const x86_64_linux = b.resolveTargetQuery(.{
        .cpu_arch = .x86_64,
        .os_tag = .linux,
        .abi = .none,
    });
    context.addTestsForTarget(.{
        .resolved = x86_64_linux,
        .pic = false,
        .test_name_suffix = "x86_64-linux",
    });
    context.addTestsForTarget(.{
        .resolved = x86_64_linux,
        .pic = true,
        .test_name_suffix = "x86_64-linux-pic",
    });
    return step;
}