test-standalone: update for std.debug changes

mlugg 2025-09-13 10:29:36 +01:00
parent 51d08f4b9b
commit 4cb84f8e48
4 changed files with 96 additions and 84 deletions


@@ -1,9 +1,10 @@
 const std = @import("std");
 const builtin = @import("builtin");

 /// This tests the path where DWARF information is embedded in a COFF binary
 pub fn build(b: *std.Build) void {
-    switch (builtin.cpu.arch) {
+    const host = b.graph.host;
+    switch (host.result.cpu.arch) {
         .aarch64,
         .x86,
         .x86_64,
@@ -15,10 +16,10 @@ pub fn build(b: *std.Build) void {
     b.default_step = test_step;

     const optimize: std.builtin.OptimizeMode = .Debug;
-    const target = if (builtin.os.tag == .windows)
-        b.standardTargetOptions(.{})
-    else
-        b.resolveTargetQuery(.{ .os_tag = .windows });
+    const target = switch (host.result.os.tag) {
+        .windows => host,
+        else => b.resolveTargetQuery(.{ .os_tag = .windows }),
+    };

     const exe = b.addExecutable(.{
         .name = "main",
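
For reference, the host-based target selection above in isolation: a minimal build.zig sketch mirroring the switch in this hunk. The trailing discard is only so the sketch stands alone; the actual build script feeds `target` into the `b.addExecutable` call shown above.

const std = @import("std");

pub fn build(b: *std.Build) void {
    // The build runner has already resolved the host target and exposes it on the graph.
    const host = b.graph.host;
    // Reuse the host when it is already Windows; otherwise cross-compile to Windows.
    const target = switch (host.result.os.tag) {
        .windows => host,
        else => b.resolveTargetQuery(.{ .os_tag = .windows }),
    };
    _ = target; // the real build script wires this into the executable
}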


@@ -1,27 +1,34 @@
 const std = @import("std");
-const assert = std.debug.assert;
-const testing = std.testing;
+const fatal = std.process.fatal;

 extern fn add(a: u32, b: u32, addr: *usize) u32;

-pub fn main() !void {
-    var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
-    defer assert(gpa.deinit() == .ok);
-    const allocator = gpa.allocator();
+pub fn main() void {
+    var debug_alloc_inst: std.heap.DebugAllocator(.{}) = .init;
+    defer std.debug.assert(debug_alloc_inst.deinit() == .ok);
+    const gpa = debug_alloc_inst.allocator();

-    var debug_info = try std.debug.SelfInfo.open(allocator);
-    defer debug_info.deinit();
+    var di: std.debug.SelfInfo = .init;
+    defer di.deinit(gpa);

     var add_addr: usize = undefined;
     _ = add(1, 2, &add_addr);

-    const module = try debug_info.getModuleForAddress(add_addr);
-    const symbol = try module.getSymbolAtAddress(allocator, add_addr);
-    defer if (symbol.source_location) |sl| allocator.free(sl.file_name);
+    const symbol = di.getSymbolAtAddress(gpa, add_addr) catch |err| fatal("failed to get symbol: {t}", .{err});
+    defer if (symbol.source_location) |sl| gpa.free(sl.file_name);

-    try testing.expectEqualStrings("add", symbol.name);
-    try testing.expect(symbol.source_location != null);
-    try testing.expectEqualStrings("shared_lib.c", std.fs.path.basename(symbol.source_location.?.file_name));
-    try testing.expectEqual(@as(u64, 3), symbol.source_location.?.line);
-    try testing.expectEqual(@as(u64, 0), symbol.source_location.?.column);
+    if (symbol.name == null) fatal("failed to resolve symbol name", .{});
+    if (symbol.compile_unit_name == null) fatal("failed to resolve compile unit", .{});
+    if (symbol.source_location == null) fatal("failed to resolve source location", .{});
+
+    if (!std.mem.eql(u8, symbol.name.?, "add")) {
+        fatal("incorrect symbol name '{s}'", .{symbol.name.?});
+    }
+    const sl = &symbol.source_location.?;
+    if (!std.mem.eql(u8, std.fs.path.basename(sl.file_name), "shared_lib.c")) {
+        fatal("incorrect file name '{s}'", .{sl.file_name});
+    }
+    if (sl.line != 3 or sl.column != 0) {
+        fatal("incorrect line/column :{d}:{d}", .{ sl.line, sl.column });
+    }
 }
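
For reference, a minimal sketch of the new lookup flow used above: `std.heap.DebugAllocator` plus `std.debug.SelfInfo.init`, `deinit(gpa)`, and `getSymbolAtAddress(gpa, addr)`. The names `debug_alloc` and `addr` are illustrative, and the address of `main` is just a stand-in for any code address.

const std = @import("std");

pub fn main() void {
    var debug_alloc: std.heap.DebugAllocator(.{}) = .init;
    defer std.debug.assert(debug_alloc.deinit() == .ok);
    const gpa = debug_alloc.allocator();

    // SelfInfo now starts from a plain `.init` value and is torn down with the allocator.
    var di: std.debug.SelfInfo = .init;
    defer di.deinit(gpa);

    // Any code address works; here we resolve the address of `main` itself.
    const addr = @intFromPtr(&main);
    const symbol = di.getSymbolAtAddress(gpa, addr) catch |err| {
        std.debug.print("symbol lookup failed: {t}\n", .{err});
        return;
    };
    // The source file name is allocated and owned by the caller.
    defer if (symbol.source_location) |sl| gpa.free(sl.file_name);

    if (symbol.name) |name| {
        std.debug.print("resolved symbol: {s}\n", .{name});
    }
}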


@@ -1,27 +1,19 @@
 const std = @import("std");
 const builtin = @import("builtin");
-const debug = std.debug;
-const testing = std.testing;
+const fatal = std.process.fatal;

-noinline fn frame3(expected: *[4]usize, unwound: *[4]usize) void {
+noinline fn frame3(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
     expected[0] = @returnAddress();
-
-    var context: debug.ThreadContext = undefined;
-    testing.expect(debug.getContext(&context)) catch @panic("failed to getContext");
-
-    const debug_info = debug.getSelfDebugInfo() catch @panic("failed to openSelfDebugInfo");
-    var it = debug.StackIterator.initWithContext(expected[0], debug_info, &context, @frameAddress()) catch @panic("failed to initWithContext");
-    defer it.deinit();
-
-    for (unwound) |*addr| {
-        if (it.next()) |return_address| addr.* = return_address;
-    }
+    return std.debug.captureCurrentStackTrace(.{
+        .first_address = @returnAddress(),
+        .allow_unsafe_unwind = true,
+    }, addr_buf);
 }

-noinline fn frame2(expected: *[4]usize, unwound: *[4]usize) void {
+noinline fn frame2(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
     // Exercise different __unwind_info / DWARF CFI encodings by forcing some registers to be restored
     if (builtin.target.ofmt != .c) {
-        switch (builtin.cpu.arch) {
+        switch (builtin.target.cpu.arch) {
             .x86 => {
                 if (builtin.omit_frame_pointer) {
                     asm volatile (
@@ -67,10 +59,10 @@ noinline fn frame2(expected: *[4]usize, unwound: *[4]usize) void {
     }

     expected[1] = @returnAddress();
-    frame3(expected, unwound);
+    return frame3(expected, addr_buf);
 }

-noinline fn frame1(expected: *[4]usize, unwound: *[4]usize) void {
+noinline fn frame1(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
     expected[2] = @returnAddress();

     // Use a stack frame that is too big to encode in __unwind_info's stack-immediate encoding
@@ -78,22 +70,32 @@ noinline fn frame1(expected: *[4]usize, unwound: *[4]usize) void {
     var pad: [std.math.maxInt(u8) * @sizeOf(usize) + 1]u8 = undefined;
     _ = std.mem.doNotOptimizeAway(&pad);
-    frame2(expected, unwound);
+    return frame2(expected, addr_buf);
 }

-noinline fn frame0(expected: *[4]usize, unwound: *[4]usize) void {
+noinline fn frame0(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
     expected[3] = @returnAddress();
-    frame1(expected, unwound);
+    return frame1(expected, addr_buf);
 }

-pub fn main() !void {
-    // Disabled until the DWARF unwinder bugs on .aarch64 are solved
-    if (builtin.omit_frame_pointer and comptime builtin.target.os.tag.isDarwin() and builtin.cpu.arch == .aarch64) return;
-    if (!std.debug.have_ucontext or !std.debug.have_getcontext) return;
+pub fn main() void {
+    if (std.posix.ucontext_t == void and builtin.omit_frame_pointer) {
+        // Stack unwinding is impossible.
+        return;
+    }

     var expected: [4]usize = undefined;
-    var unwound: [4]usize = undefined;
-    frame0(&expected, &unwound);
-    try testing.expectEqual(expected, unwound);
+    var addr_buf: [4]usize = undefined;
+    const trace = frame0(&expected, &addr_buf);
+    // There may be *more* than 4 frames (due to the caller of `main`); that's okay.
+    if (trace.index < 4) {
+        fatal("expected at least 4 frames, got '{d}':\n{f}", .{ trace.index, &trace });
+    }
+    if (!std.mem.eql(usize, trace.instruction_addresses, &expected)) {
+        const expected_trace: std.builtin.StackTrace = .{
+            .index = 4,
+            .instruction_addresses = &expected,
+        };
+        fatal("expected trace:\n{f}\nactual trace:\n{f}", .{ &expected_trace, &trace });
+    }
 }
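
For reference, the new one-shot capture API in isolation: a minimal hosted sketch using only the options that appear in this diff, `first_address` and `allow_unsafe_unwind`, plus the `{f}` formatting of `std.builtin.StackTrace` used above. The helper name `capture` is illustrative.

const std = @import("std");

noinline fn capture(addr_buf: *[4]usize) std.builtin.StackTrace {
    // The first captured address will be this function's return address,
    // mirroring how the test frames above seed `expected`.
    return std.debug.captureCurrentStackTrace(.{
        .first_address = @returnAddress(),
        .allow_unsafe_unwind = true,
    }, addr_buf);
}

pub fn main() void {
    var addr_buf: [4]usize = undefined;
    const trace = capture(&addr_buf);
    // `index` counts the frames seen; it may exceed the buffer length when the
    // call stack is deeper, which is why the test above checks `< 4` rather than `!= 4`.
    std.debug.print("captured {d} frame(s):\n{f}\n", .{ trace.index, &trace });
}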


@@ -1,26 +1,21 @@
-/// Test StackIterator on 'freestanding' target. Based on unwind.zig.
+//! Test StackIterator on 'freestanding' target. Based on unwind.zig.
 const std = @import("std");
 const builtin = @import("builtin");
-const debug = std.debug;

-noinline fn frame3(expected: *[4]usize, unwound: *[4]usize) void {
+noinline fn frame3(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
     expected[0] = @returnAddress();
-
-    var it = debug.StackIterator.init(@returnAddress(), @frameAddress());
-    defer it.deinit();
-
-    // Save StackIterator's frame addresses into `unwound`:
-    for (unwound) |*addr| {
-        if (it.next()) |return_address| addr.* = return_address;
-    }
+    return std.debug.captureCurrentStackTrace(.{
+        .first_address = @returnAddress(),
+        .allow_unsafe_unwind = true,
+    }, addr_buf);
 }

-noinline fn frame2(expected: *[4]usize, unwound: *[4]usize) void {
+noinline fn frame2(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
     expected[1] = @returnAddress();
-    frame3(expected, unwound);
+    return frame3(expected, addr_buf);
 }

-noinline fn frame1(expected: *[4]usize, unwound: *[4]usize) void {
+noinline fn frame1(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
     expected[2] = @returnAddress();

     // Use a stack frame that is too big to encode in __unwind_info's stack-immediate encoding
@@ -28,37 +23,44 @@ noinline fn frame1(expected: *[4]usize, unwound: *[4]usize) void {
     var pad: [std.math.maxInt(u8) * @sizeOf(usize) + 1]u8 = undefined;
     _ = std.mem.doNotOptimizeAway(&pad);
-    frame2(expected, unwound);
+    return frame2(expected, addr_buf);
 }

-noinline fn frame0(expected: *[4]usize, unwound: *[4]usize) void {
+noinline fn frame0(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
     expected[3] = @returnAddress();
-    frame1(expected, unwound);
+    return frame1(expected, addr_buf);
 }

-// No-OS entrypoint
+/// No-OS entrypoint
 export fn _start() callconv(.withStackAlign(.c, 1)) noreturn {
     var expected: [4]usize = undefined;
-    var unwound: [4]usize = undefined;
-    frame0(&expected, &unwound);
+    var addr_buf: [4]usize = undefined;
+    const trace = frame0(&expected, &addr_buf);

-    // Verify result (no std.testing in freestanding)
-    var missed: c_int = 0;
-    for (expected, unwound) |expectFA, actualFA| {
-        if (expectFA != actualFA) {
-            missed += 1;
-        }
-    }
+    // Since we can't print from this freestanding test, we'll just use the exit
+    // code to communicate error conditions.
+    // Unlike `unwind.zig`, we can expect *exactly* 4 frames, as we are the
+    // actual entry point and the frame pointer will be 0 on entry.
+    if (trace.index != 4) exit(1);
+    if (trace.instruction_addresses[0] != expected[0]) exit(2);
+    if (trace.instruction_addresses[1] != expected[1]) exit(3);
+    if (trace.instruction_addresses[2] != expected[2]) exit(4);
+    if (trace.instruction_addresses[3] != expected[3]) exit(5);
+    exit(0);
+}

-    // Need to compile with the target OS as "freestanding" or "other" to
-    // exercise the StackIterator code, but when run as a regression test
-    // need to actually exit. So assume we're running on x86_64-linux ...
+fn exit(code: u8) noreturn {
+    // We are intentionally compiling with the target OS being "freestanding" to
+    // exercise std.debug, but we still need to exit the process somehow; so do
+    // the appropriate x86_64-linux syscall.
     asm volatile (
         \\movl $60, %%eax
         \\syscall
         :
-        : [missed] "{edi}" (missed),
+        : [code] "{edi}" (code),
         : .{ .edi = true, .eax = true });
-    while (true) {} // unreached
+    unreachable;
 }