diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig
index 0d6f451947..fbf37f7ec9 100644
--- a/lib/compiler/test_runner.zig
+++ b/lib/compiler/test_runner.zig
@@ -148,7 +148,7 @@ fn mainServer() !void {
                         error.SkipZigTest => .skip,
                         else => s: {
                             if (@errorReturnTrace()) |trace| {
-                                std.debug.dumpStackTrace(trace);
+                                std.debug.dumpStackTrace(trace.*);
                             }
                             break :s .fail;
                         },
@@ -269,7 +269,7 @@ fn mainTerminal() void {
                         std.debug.print("FAIL ({t})\n", .{err});
                     }
                     if (@errorReturnTrace()) |trace| {
-                        std.debug.dumpStackTrace(trace);
+                        std.debug.dumpStackTrace(trace.*);
                     }
                     test_node.end();
                 },
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index aa922ff37b..d1985739bd 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -332,7 +332,7 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
 pub fn dump(step: *Step, w: *Io.Writer, tty_config: Io.tty.Config) void {
     if (step.debug_stack_trace.instruction_addresses.len > 0) {
         w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
-        std.debug.writeStackTrace(&step.debug_stack_trace, w, tty_config) catch {};
+        std.debug.writeStackTrace(step.debug_stack_trace, w, tty_config) catch {};
     } else {
         const field = "debug_stack_frames_count";
         comptime assert(@hasField(Build, field));
diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index 330cc6725d..18e6e72563 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -31,7 +31,7 @@ const max_iovecs_len = 8;
 const splat_buffer_size = 64;

 comptime {
-    assert(max_iovecs_len <= posix.IOV_MAX);
+    if (@TypeOf(posix.IOV_MAX) != void) assert(max_iovecs_len <= posix.IOV_MAX);
 }

 const Closure = struct {
@@ -91,9 +91,7 @@ pub fn init(

 /// Statically initialize such that any call to the following functions will
 /// fail with `error.OutOfMemory`:
-/// * `Io.VTable.async`
 /// * `Io.VTable.concurrent`
-/// * `Io.VTable.groupAsync`
 /// When initialized this way, `deinit` is safe, but unnecessary to call.
 pub const init_single_threaded: Threaded = .{
     .allocator = .failing,
diff --git a/lib/std/Io/net/HostName.zig b/lib/std/Io/net/HostName.zig
index 376f798e3e..ea1ffc4834 100644
--- a/lib/std/Io/net/HostName.zig
+++ b/lib/std/Io/net/HostName.zig
@@ -221,7 +221,7 @@ pub fn connect(
     defer {
         connect_many.cancel(io);
         if (!saw_end) while (true) switch (connect_many_queue.getOneUncancelable(io)) {
-            .connection => |loser| if (loser) |s| s.closeConst(io) else |_| continue,
+            .connection => |loser| if (loser) |s| s.close(io) else |_| continue,
             .end => break,
         };
     }
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index b5f950a08a..891c4f220e 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -577,7 +577,7 @@ fn callFn(comptime f: anytype, args: anytype) switch (Impl) {
             @call(.auto, f, args) catch |err| {
                 std.debug.print("error: {s}\n", .{@errorName(err)});
                 if (@errorReturnTrace()) |trace| {
-                    std.debug.dumpStackTrace(trace);
+                    std.debug.dumpStackTrace(trace.*);
                 }
             };

diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index c0be44b939..8f974161f8 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -37,19 +37,6 @@ pub const subsystem: ?std.Target.SubSystem = blk: {
 pub const StackTrace = struct {
     index: usize,
     instruction_addresses: []usize,
-
-    pub fn format(st: *const StackTrace, writer: *std.Io.Writer) std.Io.Writer.Error!void {
-        // TODO: re-evaluate whether to use format() methods at all.
-        // Until then, avoid an error when using GeneralPurposeAllocator with WebAssembly
-        // where it tries to call detectTTYConfig here.
-        if (builtin.os.tag == .freestanding) return;
-
-        // TODO: why on earth are we using stderr's ttyconfig?
-        // If we want colored output, we should just make a formatter out of `writeStackTrace`.
-        const tty_config = std.Io.tty.detectConfig(.stderr());
-        try writer.writeAll("\n");
-        try std.debug.writeStackTrace(st, writer, tty_config);
-    }
 };

 /// This data structure is used by the Zig language code generation and
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 7e09bfec8a..3d88123c64 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1,4 +1,7 @@
 const std = @import("std.zig");
+const Io = std.Io;
+const Writer = std.Io.Writer;
+const tty = std.Io.tty;
 const math = std.math;
 const mem = std.mem;
 const posix = std.posix;
@@ -7,12 +10,11 @@
 const testing = std.testing;
 const Allocator = mem.Allocator;
 const File = std.fs.File;
 const windows = std.os.windows;
-const Writer = std.Io.Writer;
-const tty = std.Io.tty;
 const builtin = @import("builtin");
 const native_arch = builtin.cpu.arch;
 const native_os = builtin.os.tag;
+const StackTrace = std.builtin.StackTrace;

 const root = @import("root");
@@ -545,13 +547,13 @@ pub fn defaultPanic(
         stderr.print("panic: ", .{}) catch break :trace;
     } else {
         const current_thread_id = std.Thread.getCurrentId();
-        stderr.print("thread {} panic: ", .{current_thread_id}) catch break :trace;
+        stderr.print("thread {d} panic: ", .{current_thread_id}) catch break :trace;
     }
     stderr.print("{s}\n", .{msg}) catch break :trace;
     if (@errorReturnTrace()) |t| if (t.index > 0) {
         stderr.writeAll("error return context:\n") catch break :trace;
-        writeStackTrace(t, stderr, tty_config) catch break :trace;
+        writeStackTrace(t.*, stderr, tty_config) catch break :trace;
         stderr.writeAll("\nstack trace:\n") catch break :trace;
     };
     writeCurrentStackTrace(.{
@@ -607,8 +609,8 @@ pub const StackUnwindOptions = struct {
 /// the given buffer, so `addr_buf` must have a lifetime at least equal to the `StackTrace`.
 ///
 /// See `writeCurrentStackTrace` to immediately print the trace instead of capturing it.
-pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize) std.builtin.StackTrace {
-    const empty_trace: std.builtin.StackTrace = .{ .index = 0, .instruction_addresses = &.{} };
+pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize) StackTrace {
+    const empty_trace: StackTrace = .{ .index = 0, .instruction_addresses = &.{} };
     if (!std.options.allow_stack_tracing) return empty_trace;
     var it = StackIterator.init(options.context) catch return empty_trace;
     defer it.deinit();
@@ -646,6 +648,9 @@ pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf:
 ///
 /// See `captureCurrentStackTrace` to capture the trace addresses into a buffer instead of printing.
 pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
+    var threaded: Io.Threaded = .init_single_threaded;
+    const io = threaded.io();
+
     if (!std.options.allow_stack_tracing) {
         tty_config.setColor(writer, .dim) catch {};
         try writer.print("Cannot print stack trace: stack tracing is disabled\n", .{});
@@ -730,7 +735,7 @@
             }
             // `ret_addr` is the return address, which is *after* the function call.
             // Subtract 1 to get an address *in* the function call for a better source location.
-            try printSourceAtAddress(di_gpa, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
+            try printSourceAtAddress(di_gpa, io, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
             printed_any_frame = true;
         },
     };
@@ -754,14 +759,29 @@ pub fn dumpCurrentStackTrace(options: StackUnwindOptions) void {
     };
 }

+pub const FormatStackTrace = struct {
+    stack_trace: StackTrace,
+    tty_config: tty.Config,
+
+    pub fn format(context: @This(), writer: *Io.Writer) Io.Writer.Error!void {
+        try writer.writeAll("\n");
+        try writeStackTrace(context.stack_trace, writer, context.tty_config);
+    }
+};
+
 /// Write a previously captured stack trace to `writer`, annotated with source locations.
-pub fn writeStackTrace(st: *const std.builtin.StackTrace, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
+pub fn writeStackTrace(st: StackTrace, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
     if (!std.options.allow_stack_tracing) {
         tty_config.setColor(writer, .dim) catch {};
         try writer.print("Cannot print stack trace: stack tracing is disabled\n", .{});
         tty_config.setColor(writer, .reset) catch {};
         return;
     }
+    // We use an independent Io implementation here in case there was a problem
+    // with the application's Io implementation itself.
+    var threaded: Io.Threaded = .init_single_threaded;
+    const io = threaded.io();
+
     // Fetch `st.index` straight away. Aside from avoiding redundant loads, this prevents issues if
     // `st` is `@errorReturnTrace()` and errors are encountered while writing the stack trace.
     const n_frames = st.index;
@@ -779,7 +799,7 @@
     for (st.instruction_addresses[0..captured_frames]) |ret_addr| {
         // `ret_addr` is the return address, which is *after* the function call.
         // Subtract 1 to get an address *in* the function call for a better source location.
-        try printSourceAtAddress(di_gpa, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
+        try printSourceAtAddress(di_gpa, io, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
     }
     if (n_frames > captured_frames) {
         tty_config.setColor(writer, .bold) catch {};
@@ -788,7 +808,7 @@
     }
 }
 /// A thin wrapper around `writeStackTrace` which writes to stderr and ignores write errors.
-pub fn dumpStackTrace(st: *const std.builtin.StackTrace) void {
+pub fn dumpStackTrace(st: StackTrace) void {
     const tty_config = tty.detectConfig(.stderr());
     const stderr = lockStderrWriter(&.{});
     defer unlockStderrWriter();
@@ -1075,8 +1095,8 @@ pub inline fn stripInstructionPtrAuthCode(ptr: usize) usize {
     return ptr;
 }

-fn printSourceAtAddress(gpa: Allocator, debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: tty.Config) Writer.Error!void {
-    const symbol: Symbol = debug_info.getSymbol(gpa, address) catch |err| switch (err) {
+fn printSourceAtAddress(gpa: Allocator, io: Io, debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: tty.Config) Writer.Error!void {
+    const symbol: Symbol = debug_info.getSymbol(gpa, io, address) catch |err| switch (err) {
         error.MissingDebugInfo,
         error.UnsupportedDebugInfo,
         error.InvalidDebugInfo,
@@ -1581,11 +1601,14 @@ test "manage resources correctly" {
         }
     };
     const gpa = std.testing.allocator;
-    var discarding: std.Io.Writer.Discarding = .init(&.{});
+    var threaded: Io.Threaded = .init_single_threaded;
+    const io = threaded.io();
+    var discarding: Io.Writer.Discarding = .init(&.{});
     var di: SelfInfo = .init;
     defer di.deinit(gpa);
     try printSourceAtAddress(
         gpa,
+        io,
         &di,
         &discarding.writer,
         S.showMyTrace(),
@@ -1659,11 +1682,11 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
             stderr.print("{s}:\n", .{t.notes[i]}) catch return;
             var frames_array_mutable = frames_array;
             const frames = mem.sliceTo(frames_array_mutable[0..], 0);
-            const stack_trace: std.builtin.StackTrace = .{
+            const stack_trace: StackTrace = .{
                 .index = frames.len,
                 .instruction_addresses = frames,
             };
-            writeStackTrace(&stack_trace, stderr, tty_config) catch return;
+            writeStackTrace(stack_trace, stderr, tty_config) catch return;
         }
         if (t.index > end) {
             stderr.print("{d} more traces not shown; consider increasing trace size\n", .{
diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig
index 8a0d9f0e1d..f7eb4465c5 100644
--- a/lib/std/debug/SelfInfo/MachO.zig
+++ b/lib/std/debug/SelfInfo/MachO.zig
@@ -30,7 +30,8 @@ pub fn deinit(si: *SelfInfo, gpa: Allocator) void {
     si.ofiles.deinit(gpa);
 }

-pub fn getSymbol(si: *SelfInfo, gpa: Allocator, address: usize) Error!std.debug.Symbol {
+pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol {
+    _ = io;
     const module = try si.findModule(gpa, address);
     defer si.mutex.unlock();

@@ -970,6 +971,7 @@ fn loadOFile(gpa: Allocator, o_file_path: []const u8) !OFile {
 }

 const std = @import("std");
+const Io = std.Io;
 const Allocator = std.mem.Allocator;
 const Dwarf = std.debug.Dwarf;
 const Error = std.debug.SelfInfoError;
diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig
index 324b597d97..a0b26f8ec5 100644
--- a/lib/std/debug/SelfInfo/Windows.zig
+++ b/lib/std/debug/SelfInfo/Windows.zig
@@ -474,7 +474,7 @@ const Module = struct {
             break :pdb pdb;
         };
         errdefer if (opt_pdb) |*pdb| {
-            pdb.file_reader.file.close();
+            pdb.file_reader.file.close(io);
             pdb.deinit();
         };

@@ -484,6 +484,7 @@

         return .{
             .arena = arena_instance.state,
+            .io = io,
             .coff_image_base = coff_image_base,
             .mapped_file = mapped_file,
             .dwarf = opt_dwarf,
diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig
index c4cbaf7196..aa1180a90d 100644
--- a/lib/std/fs/Dir.zig
+++ b/lib/std/fs/Dir.zig
@@ -1062,7 +1062,7 @@ pub fn makeOpenPath(self: Dir, sub_path: []const u8, open_dir_options: OpenOptio
                 w.SYNCHRONIZE | w.FILE_TRAVERSE |
                 (if (open_dir_options.iterate) w.FILE_LIST_DIRECTORY else @as(u32, 0));

-            return self.makeOpenPathAccessMaskW(sub_path, base_flags, open_dir_options.no_follow);
+            return self.makeOpenPathAccessMaskW(sub_path, base_flags, !open_dir_options.follow_symlinks);
         },
         else => {
             return self.openDir(sub_path, open_dir_options) catch |err| switch (err) {
@@ -1575,8 +1575,7 @@ pub fn symLink(
     // when converting to an NT namespaced path. CreateSymbolicLink in
     // symLinkW will handle the necessary conversion.
     var target_path_w: windows.PathSpace = undefined;
-    try windows.checkWtf8ToWtf16LeOverflow(target_path, &target_path_w.data);
-    target_path_w.len = try std.unicode.wtf8ToWtf16Le(&target_path_w.data, target_path);
+    target_path_w.len = try windows.wtf8ToWtf16Le(&target_path_w.data, target_path);
     target_path_w.data[target_path_w.len] = 0;
     // However, we need to canonicalize any path separators to `\`, since if
     // the target path is relative, then it must use `\` as the path separator.
diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig
index ca3fb47a5a..ba0f29fd87 100644
--- a/lib/std/fs/File.zig
+++ b/lib/std/fs/File.zig
@@ -564,8 +564,8 @@ pub fn updateTimes(
     mtime: Io.Timestamp,
 ) UpdateTimesError!void {
     if (builtin.os.tag == .windows) {
-        const atime_ft = windows.nanoSecondsToFileTime(atime.nanoseconds);
-        const mtime_ft = windows.nanoSecondsToFileTime(mtime.nanoseconds);
+        const atime_ft = windows.nanoSecondsToFileTime(atime);
+        const mtime_ft = windows.nanoSecondsToFileTime(mtime);
         return windows.SetFileTime(self.handle, null, &atime_ft, &mtime_ft);
     }
     const times = [2]posix.timespec{
diff --git a/lib/std/heap/debug_allocator.zig b/lib/std/heap/debug_allocator.zig
index 8e66f722c3..4480009781 100644
--- a/lib/std/heap/debug_allocator.zig
+++ b/lib/std/heap/debug_allocator.zig
@@ -80,15 +80,15 @@
 //!
 //! Resizing and remapping are forwarded directly to the backing allocator,
 //! except where such operations would change the category from large to small.
+const builtin = @import("builtin");
+const StackTrace = std.builtin.StackTrace;
 const std = @import("std");
-const builtin = @import("builtin");

 const log = std.log.scoped(.gpa);
 const math = std.math;
 const assert = std.debug.assert;
 const mem = std.mem;
 const Allocator = std.mem.Allocator;
-const StackTrace = std.builtin.StackTrace;

 const default_page_size: usize = switch (builtin.os.tag) {
     // Makes `std.heap.PageAllocator` take the happy path.
@@ -421,7 +421,12 @@ pub fn DebugAllocator(comptime config: Config) type {
             return usedBitsCount(slot_count) * @sizeOf(usize);
         }

-        fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) usize {
+        fn detectLeaksInBucket(
+            bucket: *BucketHeader,
+            size_class_index: usize,
+            used_bits_count: usize,
+            tty_config: std.Io.tty.Config,
+        ) usize {
             const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
             const slot_count = slot_counts[size_class_index];
             var leaks: usize = 0;
@@ -436,7 +441,13 @@
                     const stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc);
                     const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
                     const addr = page_addr + slot_index * size_class;
-                    log.err("memory address 0x{x} leaked: {f}", .{ addr, stack_trace });
+                    log.err("memory address 0x{x} leaked: {f}", .{
+                        addr,
+                        std.debug.FormatStackTrace{
+                            .stack_trace = stack_trace,
+                            .tty_config = tty_config,
+                        },
+                    });
                     leaks += 1;
                 }
             }
@@ -449,12 +460,14 @@

         pub fn detectLeaks(self: *Self) usize {
             var leaks: usize = 0;
+            const tty_config = std.Io.tty.detectConfig(.stderr());
+
             for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
                 var optional_bucket = init_optional_bucket;
                 const slot_count = slot_counts[size_class_index];
                 const used_bits_count = usedBitsCount(slot_count);
                 while (optional_bucket) |bucket| {
-                    leaks += detectLeaksInBucket(bucket, size_class_index, used_bits_count);
+                    leaks += detectLeaksInBucket(bucket, size_class_index, used_bits_count, tty_config);
                     optional_bucket = bucket.prev;
                 }
             }
@@ -464,7 +477,11 @@
                 if (config.retain_metadata and large_alloc.freed) continue;
                 const stack_trace = large_alloc.getStackTrace(.alloc);
                 log.err("memory address 0x{x} leaked: {f}", .{
-                    @intFromPtr(large_alloc.bytes.ptr), stack_trace,
+                    @intFromPtr(large_alloc.bytes.ptr),
+                    std.debug.FormatStackTrace{
+                        .stack_trace = stack_trace,
+                        .tty_config = tty_config,
+                    },
                 });
                 leaks += 1;
             }
@@ -519,8 +536,20 @@
         fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
             var addr_buf: [stack_n]usize = undefined;
             const second_free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
+            const tty_config = std.Io.tty.detectConfig(.stderr());
             log.err("Double free detected. Allocation: {f} First free: {f} Second free: {f}", .{
-                alloc_stack_trace, free_stack_trace, second_free_stack_trace,
+                std.debug.FormatStackTrace{
+                    .stack_trace = alloc_stack_trace,
+                    .tty_config = tty_config,
+                },
+                std.debug.FormatStackTrace{
+                    .stack_trace = free_stack_trace,
+                    .tty_config = tty_config,
+                },
+                std.debug.FormatStackTrace{
+                    .stack_trace = second_free_stack_trace,
+                    .tty_config = tty_config,
+                },
             });
         }

@@ -561,11 +590,18 @@
             if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                 var addr_buf: [stack_n]usize = undefined;
                 const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
+                const tty_config = std.Io.tty.detectConfig(.stderr());
                 log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                     entry.value_ptr.bytes.len,
                     old_mem.len,
-                    entry.value_ptr.getStackTrace(.alloc),
-                    free_stack_trace,
+                    std.debug.FormatStackTrace{
+                        .stack_trace = entry.value_ptr.getStackTrace(.alloc),
+                        .tty_config = tty_config,
+                    },
+                    std.debug.FormatStackTrace{
+                        .stack_trace = free_stack_trace,
+                        .tty_config = tty_config,
+                    },
                 });
             }

@@ -667,11 +703,18 @@
             if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                 var addr_buf: [stack_n]usize = undefined;
                 const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
+                const tty_config = std.Io.tty.detectConfig(.stderr());
                 log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                     entry.value_ptr.bytes.len,
                     old_mem.len,
-                    entry.value_ptr.getStackTrace(.alloc),
-                    free_stack_trace,
+                    std.debug.FormatStackTrace{
+                        .stack_trace = entry.value_ptr.getStackTrace(.alloc),
+                        .tty_config = tty_config,
+                    },
+                    std.debug.FormatStackTrace{
+                        .stack_trace = free_stack_trace,
+                        .tty_config = tty_config,
+                    },
                 });
             }

@@ -892,19 +935,33 @@
             var addr_buf: [stack_n]usize = undefined;
             const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
             if (old_memory.len != requested_size) {
+                const tty_config = std.Io.tty.detectConfig(.stderr());
                 log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                     requested_size,
                     old_memory.len,
-                    bucketStackTrace(bucket, slot_count, slot_index, .alloc),
-                    free_stack_trace,
+                    std.debug.FormatStackTrace{
+                        .stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+                        .tty_config = tty_config,
+                    },
+                    std.debug.FormatStackTrace{
+                        .stack_trace = free_stack_trace,
+                        .tty_config = tty_config,
+                    },
                 });
             }
             if (alignment != slot_alignment) {
+                const tty_config = std.Io.tty.detectConfig(.stderr());
                 log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
                     slot_alignment.toByteUnits(),
                     alignment.toByteUnits(),
-                    bucketStackTrace(bucket, slot_count, slot_index, .alloc),
-                    free_stack_trace,
+                    std.debug.FormatStackTrace{
+                        .stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+                        .tty_config = tty_config,
+                    },
+                    std.debug.FormatStackTrace{
+                        .stack_trace = free_stack_trace,
+                        .tty_config = tty_config,
+                    },
                 });
             }
         }
@@ -987,19 +1044,33 @@
             var addr_buf: [stack_n]usize = undefined;
             const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
             if (memory.len != requested_size) {
+                const tty_config = std.Io.tty.detectConfig(.stderr());
                 log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                     requested_size,
                     memory.len,
-                    bucketStackTrace(bucket, slot_count, slot_index, .alloc),
-                    free_stack_trace,
+                    std.debug.FormatStackTrace{
+                        .stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+                        .tty_config = tty_config,
+                    },
+                    std.debug.FormatStackTrace{
+                        .stack_trace = free_stack_trace,
+                        .tty_config = tty_config,
+                    },
                 });
             }
             if (alignment != slot_alignment) {
+                const tty_config = std.Io.tty.detectConfig(.stderr());
                 log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
                     slot_alignment.toByteUnits(),
                     alignment.toByteUnits(),
-                    bucketStackTrace(bucket, slot_count, slot_index, .alloc),
-                    free_stack_trace,
+                    std.debug.FormatStackTrace{
+                        .stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
+                        .tty_config = tty_config,
+                    },
+                    std.debug.FormatStackTrace{
+                        .stack_trace = free_stack_trace,
+                        .tty_config = tty_config,
+                    },
                 });
             }
         }
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 1d49a890e4..fd6e2926c9 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -5,12 +5,14 @@
 //! slices as well as APIs which accept null-terminated WTF16LE byte buffers.

 const builtin = @import("builtin");
+const native_arch = builtin.cpu.arch;
+
 const std = @import("../std.zig");
+const Io = std.Io;
 const mem = std.mem;
 const assert = std.debug.assert;
 const math = std.math;
 const maxInt = std.math.maxInt;
-const native_arch = builtin.cpu.arch;
 const UnexpectedError = std.posix.UnexpectedError;

 test {
@@ -2219,25 +2221,25 @@ pub fn peb() *PEB {
 /// Universal Time (UTC).
 /// This function returns the number of nanoseconds since the canonical epoch,
 /// which is the POSIX one (Jan 01, 1970 AD).
-pub fn fromSysTime(hns: i64) i128 {
+pub fn fromSysTime(hns: i64) Io.Timestamp {
     const adjusted_epoch: i128 = hns + std.time.epoch.windows * (std.time.ns_per_s / 100);
-    return adjusted_epoch * 100;
+    return .fromNanoseconds(@intCast(adjusted_epoch * 100));
 }

-pub fn toSysTime(ns: i128) i64 {
-    const hns = @divFloor(ns, 100);
+pub fn toSysTime(ns: Io.Timestamp) i64 {
+    const hns = @divFloor(ns.nanoseconds, 100);
     return @as(i64, @intCast(hns)) - std.time.epoch.windows * (std.time.ns_per_s / 100);
 }

-pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 {
+pub fn fileTimeToNanoSeconds(ft: FILETIME) Io.Timestamp {
     const hns = (@as(i64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
     return fromSysTime(hns);
 }

 /// Converts a number of nanoseconds since the POSIX epoch to a Windows FILETIME.
-pub fn nanoSecondsToFileTime(ns: i128) FILETIME {
+pub fn nanoSecondsToFileTime(ns: Io.Timestamp) FILETIME {
     const adjusted: u64 = @bitCast(toSysTime(ns));
-    return FILETIME{
+    return .{
         .dwHighDateTime = @as(u32, @truncate(adjusted >> 32)),
         .dwLowDateTime = @as(u32, @truncate(adjusted)),
     };
@@ -5740,11 +5742,15 @@ pub fn ProcessBaseAddress(handle: HANDLE) ProcessBaseAddressError!HMODULE {
     return ppeb.ImageBaseAddress;
 }

-pub fn checkWtf8ToWtf16LeOverflow(wtf8: []const u8, wtf16le: []const u16) error{ BadPathName, NameTooLong }!void {
+pub fn wtf8ToWtf16Le(wtf16le: []u16, wtf8: []const u8) error{ BadPathName, NameTooLong }!usize {
     // Each u8 in UTF-8/WTF-8 correlates to at most one u16 in UTF-16LE/WTF-16LE.
-    if (wtf16le.len >= wtf8.len) return;
-    const utf16_len = std.unicode.calcUtf16LeLenImpl(wtf8, .can_encode_surrogate_half) catch
-        return error.BadPathName;
-    if (utf16_len > wtf16le.len)
-        return error.NameTooLong;
+    if (wtf16le.len < wtf8.len) {
+        const utf16_len = std.unicode.calcUtf16LeLenImpl(wtf8, .can_encode_surrogate_half) catch
+            return error.BadPathName;
+        if (utf16_len > wtf16le.len)
+            return error.NameTooLong;
+    }
+    return std.unicode.wtf8ToWtf16Le(wtf16le, wtf8) catch |err| switch (err) {
+        error.InvalidWtf8 => return error.BadPathName,
+    };
 }
diff --git a/lib/std/posix.zig b/lib/std/posix.zig
index a58204e1dc..02dc6c6087 100644
--- a/lib/std/posix.zig
+++ b/lib/std/posix.zig
@@ -821,6 +821,9 @@ pub const ReadError = std.Io.File.ReadStreamingError;
 /// The corresponding POSIX limit is `maxInt(isize)`.
 pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
     if (buf.len == 0) return 0;
+    if (native_os == .windows) {
+        return windows.ReadFile(fd, buf, null);
+    }
     if (native_os == .wasi and !builtin.link_libc) {
         const iovs = [1]iovec{iovec{
             .base = buf.ptr,
@@ -2918,8 +2921,7 @@ pub fn chdir(dir_path: []const u8) ChangeCurDirError!void {
         @compileError("WASI does not support os.chdir");
     } else if (native_os == .windows) {
         var wtf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined;
-        try windows.checkWtf8ToWtf16LeOverflow(dir_path, &wtf16_dir_path);
-        const len = try std.unicode.wtf8ToWtf16Le(&wtf16_dir_path, dir_path);
+        const len = try windows.wtf8ToWtf16Le(&wtf16_dir_path, dir_path);
         return chdirW(wtf16_dir_path[0..len]);
     } else {
         const dir_path_c = try toPosixPath(dir_path);
@@ -2935,8 +2937,7 @@ pub fn chdirZ(dir_path: [*:0]const u8) ChangeCurDirError!void {
     if (native_os == .windows) {
         const dir_path_span = mem.span(dir_path);
         var wtf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined;
-        try windows.checkWtf8ToWtf16LeOverflow(dir_path_span, &wtf16_dir_path);
-        const len = try std.unicode.wtf8ToWtf16Le(&wtf16_dir_path, dir_path_span);
+        const len = try windows.wtf8ToWtf16Le(&wtf16_dir_path, dir_path_span);
         return chdirW(wtf16_dir_path[0..len]);
     } else if (native_os == .wasi and !builtin.link_libc) {
         return chdir(mem.span(dir_path));
diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig
index e85f1d7471..3f4b11c1af 100644
--- a/lib/std/posix/test.zig
+++ b/lib/std/posix/test.zig
@@ -862,20 +862,6 @@ test "isatty" {
     try expectEqual(posix.isatty(file.handle), false);
 }

-test "read with empty buffer" {
-    var tmp = tmpDir(.{});
-    defer tmp.cleanup();
-
-    var file = try tmp.dir.createFile("read_empty", .{ .read = true });
-    defer file.close();
-
-    const bytes = try a.alloc(u8, 0);
-    defer a.free(bytes);
-
-    const rc = try posix.read(file.handle, bytes);
-    try expectEqual(rc, 0);
-}
-
 test "pread with empty buffer" {
     var tmp = tmpDir(.{});
     defer tmp.cleanup();
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 02ad99932e..7cef6f9c58 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -1148,6 +1148,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
         } else |err| switch (err) {
             error.OutOfMemory => {
                 if (failing_allocator_inst.allocated_bytes != failing_allocator_inst.freed_bytes) {
+                    const tty_config = std.Io.tty.detectConfig(.stderr());
                     print(
                         "\nfail_index: {d}/{d}\nallocated bytes: {d}\nfreed bytes: {d}\nallocations: {d}\ndeallocations: {d}\nallocation that was made to fail: {f}",
                         .{
@@ -1157,7 +1158,10 @@
                             failing_allocator_inst.freed_bytes,
                             failing_allocator_inst.allocations,
                             failing_allocator_inst.deallocations,
-                            failing_allocator_inst.getStackTrace(),
+                            std.debug.FormatStackTrace{
+                                .stack_trace = failing_allocator_inst.getStackTrace(),
+                                .tty_config = tty_config,
+                            },
                         },
                     );
                     return error.MemoryLeakDetected;
diff --git a/tools/incr-check.zig b/tools/incr-check.zig
index 183d59bf88..4c5a2d9978 100644
--- a/tools/incr-check.zig
+++ b/tools/incr-check.zig
@@ -1,4 +1,5 @@
 const std = @import("std");
+const Io = std.Io;
 const Allocator = std.mem.Allocator;
 const Cache = std.Build.Cache;

@@ -11,6 +12,12 @@ pub fn main() !void {
     defer arena_instance.deinit();
     const arena = arena_instance.allocator();

+    const gpa = arena;
+
+    var threaded: Io.Threaded = .init(gpa);
+    defer threaded.deinit();
+    const io = threaded.io();
+
     var opt_zig_exe: ?[]const u8 = null;
     var opt_input_file_name: ?[]const u8 = null;
     var opt_lib_dir: ?[]const u8 = null;
@@ -53,7 +60,7 @@
     const input_file_name = opt_input_file_name orelse fatal("missing input file\n{s}", .{usage});
     const input_file_bytes = try std.fs.cwd().readFileAlloc(input_file_name, arena, .limited(std.math.maxInt(u32)));

-    const case = try Case.parse(arena, input_file_bytes);
+    const case = try Case.parse(arena, io, input_file_bytes);

     // Check now: if there are any targets using the `cbe` backend, we need the lib dir.
     if (opt_lib_dir == null) {
@@ -86,7 +93,7 @@
         else
             null;

-    const host = try std.zig.system.resolveTargetQuery(.{});
+    const host = try std.zig.system.resolveTargetQuery(io, .{});

     const debug_log_verbose = debug_zcu or debug_dwarf or debug_link;
@@ -186,7 +193,7 @@
     try child.spawn();

-    var poller = std.Io.poll(arena, Eval.StreamEnum, .{
+    var poller = Io.poll(arena, Eval.StreamEnum, .{
         .stdout = child.stdout.?,
         .stderr = child.stderr.?,
     });
@@ -226,7 +233,7 @@ const Eval = struct {
     cc_child_args: *std.ArrayListUnmanaged([]const u8),

     const StreamEnum = enum { stdout, stderr };
-    const Poller = std.Io.Poller(StreamEnum);
+    const Poller = Io.Poller(StreamEnum);

     /// Currently this function assumes the previous updates have already been written.
     fn write(eval: *Eval, update: Case.Update) void {
@@ -647,7 +654,7 @@
         msg: []const u8,
     };

-    fn parse(arena: Allocator, bytes: []const u8) !Case {
+    fn parse(arena: Allocator, io: Io, bytes: []const u8) !Case {
         const fatal = std.process.fatal;

         var targets: std.ArrayListUnmanaged(Target) = .empty;
@@ -683,7 +690,7 @@
                 },
             }) catch fatal("line {d}: invalid target query '{s}'", .{ line_n, query });

-            const resolved = try std.zig.system.resolveTargetQuery(parsed_query);
+            const resolved = try std.zig.system.resolveTargetQuery(io, parsed_query);

             try targets.append(arena, .{
                 .query = query,