replace usages of old std.debug APIs

src/crash_handler.zig is still TODO though, i am planning bigger changes there
This commit is contained in:
mlugg 2025-09-05 20:40:11 +01:00
parent 5709369d05
commit c2ada49354
No known key found for this signature in database
GPG key ID: 3F5B7DCCBF4AF02E
8 changed files with 29 additions and 77 deletions

View file

@@ -2195,7 +2195,7 @@ fn dependencyInner(
sub_builder.runBuild(bz) catch @panic("unhandled error");
if (sub_builder.validateUserInputDidItFail()) {
std.debug.dumpCurrentStackTrace(@returnAddress());
std.debug.dumpCurrentStackTrace(.{ .first_address = @returnAddress() });
}
}

View file

@@ -60,7 +60,7 @@ test_results: TestResults,
/// The return address associated with creation of this step that can be useful
/// to print along with debugging messages.
debug_stack_trace: []usize,
debug_stack_trace: std.builtin.StackTrace,
pub const TestResults = struct {
fail_count: u32 = 0,
@@ -220,16 +220,9 @@ pub fn init(options: StepOptions) Step {
.state = .precheck_unstarted,
.max_rss = options.max_rss,
.debug_stack_trace = blk: {
if (!std.debug.sys_can_stack_trace) break :blk &.{};
const addresses = arena.alloc(usize, options.owner.debug_stack_frames_count) catch @panic("OOM");
@memset(addresses, 0);
const addr_buf = arena.alloc(usize, options.owner.debug_stack_frames_count) catch @panic("OOM");
const first_ret_addr = options.first_ret_addr orelse @returnAddress();
var stack_trace = std.builtin.StackTrace{
.instruction_addresses = addresses,
.index = 0,
};
std.debug.captureStackTrace(first_ret_addr, &stack_trace);
break :blk addresses;
break :blk std.debug.captureCurrentStackTrace(.{ .first_address = first_ret_addr }, addr_buf);
},
.result_error_msgs = .{},
.result_error_bundle = std.zig.ErrorBundle.empty,
@@ -315,18 +308,9 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
/// For debugging purposes, prints identifying information about this Step.
pub fn dump(step: *Step, w: *std.Io.Writer, tty_config: std.Io.tty.Config) void {
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
@errorName(err),
}) catch {};
return;
};
if (step.getStackTrace()) |stack_trace| {
w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
std.debug.writeStackTrace(stack_trace, w, debug_info, tty_config) catch |err| {
w.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch {};
return;
};
std.debug.writeStackTrace(stack_trace, w, tty_config) catch {};
} else {
const field = "debug_stack_frames_count";
comptime assert(@hasField(Build, field));

View file

@@ -38,20 +38,17 @@ pub const StackTrace = struct {
index: usize,
instruction_addresses: []usize,
pub fn format(self: StackTrace, writer: *std.Io.Writer) std.Io.Writer.Error!void {
pub fn format(st: *const StackTrace, writer: *std.Io.Writer) std.Io.Writer.Error!void {
// TODO: re-evaluate whether to use format() methods at all.
// Until then, avoid an error when using GeneralPurposeAllocator with WebAssembly
// where it tries to call detectTTYConfig here.
if (builtin.os.tag == .freestanding) return;
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
return writer.print("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{@errorName(err)});
};
const tty_config = std.Io.tty.detectConfig(std.fs.File.stderr());
// TODO: why on earth are we using stderr's ttyconfig?
// If we want colored output, we should just make a formatter out of `writeStackTrace`.
const tty_config = std.Io.tty.detectConfig(.stderr());
try writer.writeAll("\n");
std.debug.writeStackTrace(self, writer, debug_info, tty_config) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
};
try std.debug.writeStackTrace(st, writer, tty_config);
}
};

View file

@@ -505,23 +505,14 @@ pub fn DebugAllocator(comptime config: Config) type {
return if (leaks) .leak else .ok;
}
fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
if (stack_n == 0) return;
@memset(addresses, 0);
var stack_trace: StackTrace = .{
.instruction_addresses = addresses,
.index = 0,
};
std.debug.captureStackTrace(first_trace_addr, &stack_trace);
fn collectStackTrace(first_trace_addr: usize, addr_buf: *[stack_n]usize) void {
const st = std.debug.captureCurrentStackTrace(.{ .first_address = first_trace_addr }, addr_buf);
@memset(addr_buf[@min(st.index, addr_buf.len)..], 0);
}
fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
var addresses: [stack_n]usize = @splat(0);
var second_free_stack_trace: StackTrace = .{
.instruction_addresses = &addresses,
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &second_free_stack_trace);
var addr_buf: [stack_n]usize = undefined;
const second_free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
log.err("Double free detected. Allocation: {f} First free: {f} Second free: {f}", .{
alloc_stack_trace, free_stack_trace, second_free_stack_trace,
});
@@ -562,12 +553,8 @@ pub fn DebugAllocator(comptime config: Config) type {
}
if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
var free_stack_trace: StackTrace = .{
.instruction_addresses = &addresses,
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &free_stack_trace);
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
entry.value_ptr.bytes.len,
old_mem.len,
@@ -672,12 +659,8 @@ pub fn DebugAllocator(comptime config: Config) type {
}
if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
var free_stack_trace = StackTrace{
.instruction_addresses = &addresses,
.index = 0,
};
std.debug.captureStackTrace(ret_addr, &free_stack_trace);
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
entry.value_ptr.bytes.len,
old_mem.len,
@@ -900,12 +883,8 @@ pub fn DebugAllocator(comptime config: Config) type {
if (requested_size == 0) @panic("Invalid free");
const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
if (old_memory.len != requested_size or alignment != slot_alignment) {
var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
var free_stack_trace: StackTrace = .{
.instruction_addresses = &addresses,
.index = 0,
};
std.debug.captureStackTrace(return_address, &free_stack_trace);
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
if (old_memory.len != requested_size) {
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
requested_size,
@@ -999,12 +978,8 @@ pub fn DebugAllocator(comptime config: Config) type {
if (requested_size == 0) @panic("Invalid free");
const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
if (memory.len != requested_size or alignment != slot_alignment) {
var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
var free_stack_trace: StackTrace = .{
.instruction_addresses = &addresses,
.index = 0,
};
std.debug.captureStackTrace(return_address, &free_stack_trace);
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
if (memory.len != requested_size) {
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
requested_size,

View file

@@ -2849,7 +2849,7 @@ pub fn unexpectedError(err: Win32Error) UnexpectedError {
std.debug.print("error.Unexpected: GetLastError({d}): {f}\n", .{
err, std.unicode.fmtUtf16Le(buf_wstr[0..len]),
});
std.debug.dumpCurrentStackTrace(@returnAddress());
std.debug.dumpCurrentStackTrace(.{ .first_address = @returnAddress() });
}
return error.Unexpected;
}
@@ -2863,7 +2863,7 @@ pub fn unexpectedWSAError(err: ws2_32.WinsockError) UnexpectedError {
pub fn unexpectedStatus(status: NTSTATUS) UnexpectedError {
if (std.posix.unexpected_error_tracing) {
std.debug.print("error.Unexpected NTSTATUS=0x{x}\n", .{@intFromEnum(status)});
std.debug.dumpCurrentStackTrace(@returnAddress());
std.debug.dumpCurrentStackTrace(.{ .first_address = @returnAddress() });
}
return error.Unexpected;
}

View file

@@ -7591,7 +7591,7 @@ pub const UnexpectedError = error{
pub fn unexpectedErrno(err: E) UnexpectedError {
if (unexpected_error_tracing) {
std.debug.print("unexpected errno: {d}\n", .{@intFromEnum(err)});
std.debug.dumpCurrentStackTrace(null);
std.debug.dumpCurrentStackTrace(.{});
}
return error.Unexpected;
}

View file

@@ -64,12 +64,8 @@ fn alloc(
const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (self.alloc_index == self.fail_index) {
if (!self.has_induced_failure) {
@memset(&self.stack_addresses, 0);
var stack_trace = std.builtin.StackTrace{
.instruction_addresses = &self.stack_addresses,
.index = 0,
};
std.debug.captureStackTrace(return_address, &stack_trace);
const st = std.debug.captureCurrentStackTrace(return_address, &self.stack_addresses);
@memset(self.stack_addresses[@min(st.index, self.stack_addresses.len)..], 0);
self.has_induced_failure = true;
}
return null;

View file

@@ -5070,7 +5070,7 @@ pub fn getKernError(err: std.c.kern_return_t) KernE {
pub fn unexpectedKernError(err: KernE) std.posix.UnexpectedError {
if (std.posix.unexpected_error_tracing) {
std.debug.print("unexpected error: {d}\n", .{@intFromEnum(err)});
std.debug.dumpCurrentStackTrace(null);
std.debug.dumpCurrentStackTrace(.{});
}
return error.Unexpected;
}