Mirror of https://codeberg.org/ziglang/zig.git
update all rename() to rename(io)
commit 01674bcf27 (parent eef8347f66)
23 changed files with 88 additions and 84 deletions

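Most hunks in this commit follow one pattern: `Io.Dir.rename` is now called as a method on the source directory with an explicit destination directory and the `Io` instance as the last argument, dispatching through `io.vtable.dirRename`. A minimal sketch of the new call shape, assuming `Io` here is `std.Io` as used throughout the compiler; `moveArtifact` and the path literals are placeholder names, not code from this commit:

    const std = @import("std");

    // Sketch only: rename "old_name" to "new_name" within the same directory,
    // passing the destination directory and the Io instance explicitly.
    fn moveArtifact(io: std.Io, dir: std.Io.Dir) !void {
        try dir.rename("old_name", dir, "new_name", io);
    }

The same shape covers cross-directory moves: pass a different `Dir` as the second argument, as the `Dir.rename(tmp_dir1.dir, ..., tmp_dir2.dir, ..., io)` test further down does.
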
@@ -478,14 +478,14 @@ pub fn main() !void {
     validateSystemLibraryOptions(builder);

     if (help_menu) {
-        var w = initStdoutWriter();
+        var w = initStdoutWriter(io);
         printUsage(builder, w) catch return stdout_writer_allocation.err.?;
         w.flush() catch return stdout_writer_allocation.err.?;
         return;
     }

     if (steps_menu) {
-        var w = initStdoutWriter();
+        var w = initStdoutWriter(io);
         printSteps(builder, w) catch return stdout_writer_allocation.err.?;
         w.flush() catch return stdout_writer_allocation.err.?;
         return;

@@ -1846,7 +1846,7 @@ fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void {
 var stdio_buffer_allocation: [256]u8 = undefined;
 var stdout_writer_allocation: Io.File.Writer = undefined;

-fn initStdoutWriter() *Writer {
-    stdout_writer_allocation = Io.File.stdout().writerStreaming(&stdio_buffer_allocation);
+fn initStdoutWriter(io: Io) *Writer {
+    stdout_writer_allocation = Io.File.stdout().writerStreaming(io, &stdio_buffer_allocation);
     return &stdout_writer_allocation.interface;
 }

@@ -183,7 +183,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
     var output_file = try Io.Dir.cwd().createFile(io, output, .{ .mode = mode });
     defer output_file.close(io);

-    var out = output_file.writer(&output_buffer);
+    var out = output_file.writer(io, &output_buffer);

     switch (out_fmt) {
         .hex, .raw => {

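The other recurring change is that buffered file writers now take the `Io` instance ahead of the buffer: `file.writer(&buf)` becomes `file.writer(io, &buf)` in every hunk below that constructs an `Io.File.Writer`. A hedged sketch of the new shape, assuming `createFile`, `close`, and `writer` keep the signatures shown in these hunks; the file name and buffer size are made up:

    const std = @import("std");

    fn writeGreeting(io: std.Io, dir: std.Io.Dir) !void {
        var file = try dir.createFile(io, "greeting.txt", .{});
        defer file.close(io);

        var buffer: [256]u8 = undefined;
        var fw = file.writer(io, &buffer); // io now precedes the buffer
        try fw.interface.writeAll("hello\n");
        try fw.interface.flush(); // flush buffered bytes before the file is closed
    }
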
@@ -1134,13 +1134,13 @@ pub const Manifest = struct {
     /// lock from exclusive to shared.
     pub fn writeManifest(self: *Manifest) !void {
         assert(self.have_exclusive_lock);
-
+        const io = self.cache.io;
         const manifest_file = self.manifest_file.?;
         if (self.manifest_dirty) {
             self.manifest_dirty = false;

             var buffer: [4000]u8 = undefined;
-            var fw = manifest_file.writer(&buffer);
+            var fw = manifest_file.writer(io, &buffer);
             writeDirtyManifestToStream(self, &fw) catch |err| switch (err) {
                 error.WriteFailed => return fw.err.?,
                 else => |e| return e,

@@ -389,6 +389,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
     const target = run_step.producer.?.rootModuleTarget();
     var debug_info = std.debug.Info.load(
         fuzz.gpa,
+        io,
         rebuilt_exe_path,
         &gop.value_ptr.coverage,
         target.ofmt,

@@ -1709,7 +1709,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
         defer b.cache_root.handle.deleteFile(io, tmp_path) catch {
             // It's fine if the temporary file can't be cleaned up.
         };
-        b.cache_root.handle.rename(io, tmp_path, args_file) catch |rename_err| switch (rename_err) {
+        b.cache_root.handle.rename(tmp_path, b.cache_root.handle, args_file, io) catch |rename_err| switch (rename_err) {
            error.PathAlreadyExists => {
                // The args file was created by another concurrent build process.
            },

@@ -1827,14 +1827,14 @@ pub fn doAtomicSymLinks(
     // sym link for libfoo.so.1 to libfoo.so.1.2.3
     const major_only_path = b.pathJoin(&.{ out_dir, filename_major_only });
     const cwd: Io.Dir = .cwd();
-    cwd.atomicSymLink(io, out_basename, major_only_path, .{}) catch |err| {
+    cwd.symLinkAtomic(io, out_basename, major_only_path, .{}) catch |err| {
         return step.fail("unable to symlink {s} -> {s}: {s}", .{
             major_only_path, out_basename, @errorName(err),
         });
     };
     // sym link for libfoo.so to libfoo.so.1
     const name_only_path = b.pathJoin(&.{ out_dir, filename_name_only });
-    cwd.atomicSymLink(io, filename_major_only, name_only_path, .{}) catch |err| {
+    cwd.symLinkAtomic(io, filename_major_only, name_only_path, .{}) catch |err| {
         return step.fail("Unable to symlink {s} -> {s}: {s}", .{
             name_only_path, filename_major_only, @errorName(err),
         });

@@ -498,7 +498,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
        });
    };

-   b.cache_root.handle.rename(io, tmp_sub_path, sub_path) catch |err| switch (err) {
+   b.cache_root.handle.rename(tmp_sub_path, b.cache_root.handle, sub_path, io) catch |err| switch (err) {
        error.PathAlreadyExists => {
            // Other process beat us to it. Clean up the temp file.
            b.cache_root.handle.deleteFile(io, tmp_sub_path) catch |e| {

@@ -1042,27 +1042,21 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
    if (any_output) {
        const o_sub_path = "o" ++ fs.path.sep_str ++ &digest;

-       b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |err| {
+       b.cache_root.handle.rename(tmp_dir_path, b.cache_root.handle, o_sub_path, io) catch |err| {
            if (err == error.PathAlreadyExists) {
                b.cache_root.handle.deleteTree(o_sub_path) catch |del_err| {
-                   return step.fail("unable to remove dir '{f}'{s}: {s}", .{
-                       b.cache_root,
-                       tmp_dir_path,
-                       @errorName(del_err),
+                   return step.fail("unable to remove dir '{f}'{s}: {t}", .{
+                       b.cache_root, tmp_dir_path, del_err,
                    });
                };
-               b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |retry_err| {
-                   return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{
-                       b.cache_root, tmp_dir_path,
-                       b.cache_root, o_sub_path,
-                       @errorName(retry_err),
+               b.cache_root.handle.rename(tmp_dir_path, b.cache_root.handle, o_sub_path, io) catch |retry_err| {
+                   return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {t}", .{
+                       b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, retry_err,
                    });
                };
            } else {
-               return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{
-                   b.cache_root, tmp_dir_path,
-                   b.cache_root, o_sub_path,
-                   @errorName(err),
+               return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {t}", .{
+                   b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, err,
                });
            }
        };

@@ -523,7 +523,7 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons
            if (cached_cwd_path == null) cached_cwd_path = try std.process.getCwdAlloc(gpa);
            break :cwd cached_cwd_path.?;
        };
-       try archiver.writeFile(io, path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds()));
+       try archiver.writeFile(path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds()));
    }

    // intentionally not calling `archiver.finishPedantically`

@@ -587,7 +587,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
    });
    defer poller.deinit();

-   try child.stdin.?.writeAll(@ptrCast(@as([]const std.zig.Client.Message.Header, &.{
+   try child.stdin.?.writeStreamingAll(io, @ptrCast(@as([]const std.zig.Client.Message.Header, &.{
        .{ .tag = .update, .bytes_len = 0 },
        .{ .tag = .exit, .bytes_len = 0 },
    })));

@@ -638,7 +638,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
    child.stdin.?.close(io);
    child.stdin = null;

-   switch (try child.wait()) {
+   switch (try child.wait(io)) {
        .Exited => |code| {
            if (code != 0) {
                log.err(

@@ -677,7 +677,7 @@ pub const VTable = struct {
    dirDeleteFile: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteFileError!void,
    dirDeleteDir: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteDirError!void,
    dirRename: *const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) Dir.RenameError!void,
-   dirSymLink: *const fn (?*anyopaque, Dir, target_path: []const u8, sym_link_path: []const u8, Dir.SymLinkFlags) Dir.RenameError!void,
+   dirSymLink: *const fn (?*anyopaque, Dir, target_path: []const u8, sym_link_path: []const u8, Dir.SymLinkFlags) Dir.SymLinkError!void,
    dirReadLink: *const fn (?*anyopaque, Dir, sub_path: []const u8, buffer: []u8) Dir.ReadLinkError!usize,
    dirSetOwner: *const fn (?*anyopaque, Dir, ?File.Uid, ?File.Gid) Dir.SetOwnerError!void,
    dirSetPermissions: *const fn (?*anyopaque, Dir, Dir.Permissions) Dir.SetPermissionsError!void,

@@ -501,7 +501,7 @@ pub const WriteFileError = File.Writer.Error || File.OpenError;
 pub fn writeFile(dir: Dir, io: Io, options: WriteFileOptions) WriteFileError!void {
     var file = try dir.createFile(io, options.sub_path, options.flags);
     defer file.close(io);
-    try file.writeAll(io, options.data);
+    try file.writeStreamingAll(io, options.data);
 }

 pub const PrevStatus = enum {

@@ -955,6 +955,13 @@ pub fn rename(
     return io.vtable.dirRename(io.userdata, old_dir, old_sub_path, new_dir, new_sub_path);
 }

+pub fn renameAbsolute(io: Io, old_path: []const u8, new_path: []const u8) RenameError!void {
+    assert(path.isAbsolute(old_path));
+    assert(path.isAbsolute(new_path));
+    const my_cwd = cwd();
+    return io.vtable.dirRename(io.userdata, my_cwd, old_path, my_cwd, new_path);
+}
+
 /// Use with `Dir.symLink`, `Dir.symLinkAtomic`, and `symLinkAbsolute` to
 /// specify whether the symlink will point to a file or a directory. This value
 /// is ignored on all hosts except Windows where creating symlinks to different

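The newly added `renameAbsolute` is a thin wrapper over the same `dirRename` vtable entry, using `cwd()` for both directory handles and asserting that both paths are absolute. A hedged usage sketch based on the signature declared in this hunk (some call sites later in the diff pass the arguments slightly differently, so treat this as an illustration only); the paths are placeholders:

    const std = @import("std");

    fn demoRenameAbsolute(io: std.Io) !void {
        // Both paths must be absolute, per the asserts in renameAbsolute.
        try std.Io.Dir.renameAbsolute(io, "/tmp/zig-demo-old", "/tmp/zig-demo-new");
    }
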
@@ -1053,7 +1060,7 @@ pub fn symLinkAtomic(
        temp_path[dirname.len + 1 ..][0..rand_len].* = std.fmt.hex(random_integer);

        if (dir.symLink(io, target_path, temp_path, flags)) {
-           return dir.rename(temp_path, dir, io, sym_link_path);
+           return dir.rename(temp_path, dir, sym_link_path, io);
        } else |err| switch (err) {
            error.PathAlreadyExists => continue,
            else => |e| return e,

@@ -928,14 +928,14 @@ test "Dir.rename files" {
            const missing_file_path = try ctx.transformPath("missing_file_name");
            const something_else_path = try ctx.transformPath("something_else");

-           try testing.expectError(error.FileNotFound, ctx.dir.rename(missing_file_path, something_else_path));
+           try testing.expectError(error.FileNotFound, ctx.dir.rename(missing_file_path, ctx.dir, something_else_path, io));

            // Renaming files
            const test_file_name = try ctx.transformPath("test_file");
            const renamed_test_file_name = try ctx.transformPath("test_file_renamed");
            var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true });
            file.close(io);
-           try ctx.dir.rename(test_file_name, renamed_test_file_name);
+           try ctx.dir.rename(test_file_name, ctx.dir, renamed_test_file_name, io);

            // Ensure the file was renamed
            try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, test_file_name, .{}));

@@ -943,13 +943,13 @@ test "Dir.rename files" {
            file.close(io);

            // Rename to self succeeds
-           try ctx.dir.rename(renamed_test_file_name, renamed_test_file_name);
+           try ctx.dir.rename(renamed_test_file_name, ctx.dir, renamed_test_file_name, io);

            // Rename to existing file succeeds
            const existing_file_path = try ctx.transformPath("existing_file");
            var existing_file = try ctx.dir.createFile(io, existing_file_path, .{ .read = true });
            existing_file.close(io);
-           try ctx.dir.rename(renamed_test_file_name, existing_file_path);
+           try ctx.dir.rename(renamed_test_file_name, ctx.dir, existing_file_path, io);

            try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, renamed_test_file_name, .{}));
            file = try ctx.dir.openFile(io, existing_file_path, .{});

@@ -974,7 +974,7 @@ test "Dir.rename directories" {

            // Renaming directories
            try ctx.dir.makeDir(test_dir_path);
-           try ctx.dir.rename(test_dir_path, test_dir_renamed_path);
+           try ctx.dir.rename(test_dir_path, ctx.dir, test_dir_renamed_path, io);

            // Ensure the directory was renamed
            try testing.expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_path, .{}));

@@ -986,7 +986,7 @@ test "Dir.rename directories" {
            dir.close(io);

            const test_dir_renamed_again_path = try ctx.transformPath("test_dir_renamed_again");
-           try ctx.dir.rename(test_dir_renamed_path, test_dir_renamed_again_path);
+           try ctx.dir.rename(test_dir_renamed_path, ctx.dir, test_dir_renamed_again_path, io);

            // Ensure the directory was renamed and the file still exists in it
            try testing.expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_renamed_path, .{}));

@@ -1011,7 +1011,7 @@ test "Dir.rename directory onto empty dir" {

            try ctx.dir.makeDir(test_dir_path);
            try ctx.dir.makeDir(target_dir_path);
-           try ctx.dir.rename(test_dir_path, target_dir_path);
+           try ctx.dir.rename(test_dir_path, ctx.dir, target_dir_path, io);

            // Ensure the directory was renamed
            try testing.expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_path, .{}));

@@ -1039,7 +1039,7 @@ test "Dir.rename directory onto non-empty dir" {
            target_dir.close(io);

            // Rename should fail with PathAlreadyExists if target_dir is non-empty
-           try testing.expectError(error.PathAlreadyExists, ctx.dir.rename(test_dir_path, target_dir_path));
+           try testing.expectError(error.PathAlreadyExists, ctx.dir.rename(test_dir_path, ctx.dir, target_dir_path, io));

            // Ensure the directory was not renamed
            var dir = try ctx.dir.openDir(io, test_dir_path, .{});

@@ -1061,8 +1061,8 @@ test "Dir.rename file <-> dir" {
            var file = try ctx.dir.createFile(io, test_file_path, .{ .read = true });
            file.close(io);
            try ctx.dir.makeDir(test_dir_path);
-           try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, test_dir_path));
-           try testing.expectError(error.NotDir, ctx.dir.rename(test_dir_path, test_file_path));
+           try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, ctx.dir, test_dir_path, io));
+           try testing.expectError(error.NotDir, ctx.dir.rename(test_dir_path, ctx.dir, test_file_path, io));
        }
    }.impl);
 }

@@ -1081,7 +1081,7 @@ test "rename" {
    const renamed_test_file_name = "test_file_renamed";
    var file = try tmp_dir1.dir.createFile(io, test_file_name, .{ .read = true });
    file.close(io);
-   try fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name);
+   try Dir.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name, io);

    // ensure the file was renamed
    try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(io, test_file_name, .{}));

@@ -1458,7 +1458,7 @@ test "pwritev, preadv" {
    var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true });
    defer src_file.close(io);

-   var writer = src_file.writer(&.{});
+   var writer = src_file.writer(io, &.{});

    try writer.seekTo(16);
    try writer.interface.writeVecAll(&lines);

@@ -1559,7 +1559,7 @@ test "sendfile" {
    var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true });
    defer src_file.close(io);
    {
-       var fw = src_file.writer(&.{});
+       var fw = src_file.writer(io, &.{});
        try fw.interface.writeVecAll(&vecs);
    }

@@ -1576,7 +1576,7 @@ test "sendfile" {
    var written_buf: [100]u8 = undefined;
    var file_reader = src_file.reader(io, &.{});
    var fallback_buffer: [50]u8 = undefined;
-   var file_writer = dest_file.writer(&fallback_buffer);
+   var file_writer = dest_file.writer(io, &fallback_buffer);
    try file_writer.interface.writeVecAll(&headers);
    try file_reader.seekTo(1);
    try testing.expectEqual(10, try file_writer.interface.sendFileAll(&file_reader, .limited(10)));

@@ -1614,7 +1614,7 @@ test "sendfile with buffered data" {
    try file_reader.interface.fill(8);

    var fallback_buffer: [32]u8 = undefined;
-   var file_writer = dest_file.writer(&fallback_buffer);
+   var file_writer = dest_file.writer(io, &fallback_buffer);

    try std.testing.expectEqual(4, try file_writer.interface.sendFileAll(&file_reader, .limited(4)));

@@ -2017,7 +2017,7 @@ test "'.' and '..' in Io.Dir functions" {
            try ctx.dir.access(io, file_path, .{});

            try ctx.dir.copyFile(file_path, ctx.dir, copy_path, .{});
-           try ctx.dir.rename(copy_path, rename_path);
+           try ctx.dir.rename(copy_path, ctx.dir, rename_path, io);
            const renamed_file = try ctx.dir.openFile(io, rename_path, .{});
            renamed_file.close(io);
            try ctx.dir.deleteFile(rename_path);

@@ -2140,7 +2140,7 @@ test "invalid UTF-8/WTF-8 paths" {

            try testing.expectError(expected_err, ctx.dir.deleteDir(invalid_path));

-           try testing.expectError(expected_err, ctx.dir.rename(invalid_path, invalid_path));
+           try testing.expectError(expected_err, ctx.dir.rename(invalid_path, ctx.dir, invalid_path, io));

            try testing.expectError(expected_err, ctx.dir.symLink(invalid_path, invalid_path, .{}));
            if (native_os == .wasi) {

@@ -2173,7 +2173,7 @@ test "invalid UTF-8/WTF-8 paths" {
                try testing.expectError(expected_err, ctx.dir.realpathAlloc(testing.allocator, invalid_path));
            }

-           try testing.expectError(expected_err, fs.rename(ctx.dir, invalid_path, ctx.dir, invalid_path));
+           try testing.expectError(expected_err, Dir.rename(ctx.dir, invalid_path, ctx.dir, invalid_path, io));

            if (native_os != .wasi and ctx.path_type != .relative) {
                try testing.expectError(expected_err, Dir.copyFileAbsolute(invalid_path, invalid_path, .{}));

@@ -2299,7 +2299,7 @@ test "seekTo flushes buffered data" {
    defer file.close(io);
    {
        var buf: [16]u8 = undefined;
-       var file_writer = File.writer(file, &buf);
+       var file_writer = file.writer(io, file, &buf);

        try file_writer.interface.writeAll(contents);
        try file_writer.seekTo(8);

@@ -641,7 +641,7 @@ test "rename smoke test" {
        // Rename the file
        const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_file" });
        defer a.free(new_file_path);
-       try posix.rename(file_path, new_file_path);
+       try Io.Dir.renameAbsolute(file_path, new_file_path);
    }

    {

@@ -668,7 +668,7 @@ test "rename smoke test" {
        // Rename the directory
        const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_dir" });
        defer a.free(new_file_path);
-       try posix.rename(file_path, new_file_path);
+       try Io.Dir.renameAbsolute(file_path, new_file_path);
    }

    {

@@ -612,7 +612,7 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp
        .file => {
            if (createDirAndFile(io, dir, file_name, fileMode(file.mode, options))) |fs_file| {
                defer fs_file.close(io);
-               var file_writer = fs_file.writer(&file_contents_buffer);
+               var file_writer = fs_file.writer(io, &file_contents_buffer);
                try it.streamRemaining(file, &file_writer.interface);
                try file_writer.interface.flush();
            } else |err| {

@@ -570,7 +570,7 @@ pub const Iterator = struct {
        };
        defer out_file.close(io);
        var out_file_buffer: [1024]u8 = undefined;
-       var file_writer = out_file.writer(&out_file_buffer);
+       var file_writer = out_file.writer(io, &out_file_buffer);
        const local_data_file_offset: u64 =
            @as(u64, self.file_offset) +
            @as(u64, @sizeOf(LocalFileHeader)) +

@@ -5320,7 +5320,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
    defer tar_file.close(io);

    var buffer: [1024]u8 = undefined;
-   var tar_file_writer = tar_file.writer(&buffer);
+   var tar_file_writer = tar_file.writer(io, &buffer);

    var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .empty;
    defer seen_table.deinit(comp.gpa);

@@ -6490,7 +6490,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
        var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
        defer o_dir.close(io);
        const tmp_basename = fs.path.basename(out_obj_path);
-       try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename);
+       try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename, io);
        break :blk digest;
    };

@@ -6738,7 +6738,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
        var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
        defer o_dir.close(io);
        const tmp_basename = fs.path.basename(out_res_path);
-       try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename);
+       try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename, io);
        break :blk digest;
    };

@@ -567,7 +567,7 @@ fn runResource(
        .root_dir = cache_root,
        .sub_path = try std.fmt.allocPrint(arena, "p" ++ s ++ "{s}", .{computed_package_hash.toSlice()}),
    };
-   renameTmpIntoCache(cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| {
+   renameTmpIntoCache(io, cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| {
        const src = try cache_root.join(arena, &.{tmp_dir_sub_path});
        const dest = try cache_root.join(arena, &.{f.package_root.sub_path});
        try eb.addRootErrorMessage(.{ .msg = try eb.printString(

@@ -1319,7 +1319,7 @@ fn unzip(
    defer zip_file.close(io);
    var zip_file_buffer: [4096]u8 = undefined;
    var zip_file_reader = b: {
-       var zip_file_writer = zip_file.writer(&zip_file_buffer);
+       var zip_file_writer = zip_file.writer(io, &zip_file_buffer);

        _ = reader.streamRemaining(&zip_file_writer.interface) catch |err| switch (err) {
            error.ReadFailed => return error.ReadFailed,

@@ -1370,7 +1370,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U
    defer pack_file.close(io);
    var pack_file_buffer: [4096]u8 = undefined;
    var pack_file_reader = b: {
-       var pack_file_writer = pack_file.writer(&pack_file_buffer);
+       var pack_file_writer = pack_file.writer(io, &pack_file_buffer);
        const fetch_reader = &resource.fetch_stream.reader;
        _ = try fetch_reader.streamRemaining(&pack_file_writer.interface);
        try pack_file_writer.interface.flush();

@@ -1380,7 +1380,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U
    var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true });
    defer index_file.close(io);
    var index_file_buffer: [2000]u8 = undefined;
-   var index_file_writer = index_file.writer(&index_file_buffer);
+   var index_file_writer = index_file.writer(io, &index_file_buffer);
    {
        const index_prog_node = f.prog_node.start("Index pack", 0);
        defer index_prog_node.end();

@@ -1454,11 +1454,11 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void
    }
 }

-pub fn renameTmpIntoCache(cache_dir: Io.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void {
+pub fn renameTmpIntoCache(io: Io, cache_dir: Io.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void {
    assert(dest_dir_sub_path[1] == fs.path.sep);
    var handled_missing_dir = false;
    while (true) {
-       cache_dir.rename(tmp_dir_sub_path, dest_dir_sub_path) catch |err| switch (err) {
+       cache_dir.rename(tmp_dir_sub_path, cache_dir, dest_dir_sub_path, io) catch |err| switch (err) {
            error.FileNotFound => {
                if (handled_missing_dir) return err;
                cache_dir.makeDir(dest_dir_sub_path[0..1]) catch |mkd_err| switch (mkd_err) {

@@ -1594,7 +1594,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
    var index_file = try git_dir.dir.createFile(io, "testrepo.idx", .{ .read = true });
    defer index_file.close(io);
    var index_file_buffer: [2000]u8 = undefined;
-   var index_file_writer = index_file.writer(&index_file_buffer);
+   var index_file_writer = index_file.writer(io, &index_file_buffer);
    try indexPack(testing.allocator, format, &pack_file_reader, &index_file_writer);

    // Arbitrary size limit on files read while checking the repository contents

@@ -1730,7 +1730,7 @@ pub fn main() !void {
    var index_file = try git_dir.createFile(io, "idx", .{ .read = true });
    defer index_file.close(io);
    var index_file_buffer: [4096]u8 = undefined;
-   var index_file_writer = index_file.writer(&index_file_buffer);
+   var index_file_writer = index_file.writer(io, &index_file_buffer);
    try indexPack(allocator, format, &pack_file_reader, &index_file_writer);

    std.debug.print("Starting checkout...\n", .{});

src/Zcu.zig

@@ -2981,7 +2981,13 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader
    return zir;
 }

-pub fn saveZirCache(gpa: Allocator, cache_file: Io.File, stat: Io.File.Stat, zir: Zir) (Io.File.WriteError || Allocator.Error)!void {
+pub fn saveZirCache(
+    gpa: Allocator,
+    io: Io,
+    cache_file: Io.File,
+    stat: Io.File.Stat,
+    zir: Zir,
+) (Io.File.WriteError || Allocator.Error)!void {
    const safety_buffer = if (data_has_safety_tag)
        try gpa.alloc([8]u8, zir.instructions.len)
    else

@@ -3015,13 +3021,13 @@ pub fn saveZirCache(gpa: Allocator, cache_file: Io.File, stat: Io.File.Stat, zir
        zir.string_bytes,
        @ptrCast(zir.extra),
    };
-   var cache_fw = cache_file.writer(&.{});
+   var cache_fw = cache_file.writer(io, &.{});
    cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
        error.WriteFailed => return cache_fw.err.?,
    };
 }

-pub fn saveZoirCache(cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.File.WriteError!void {
+pub fn saveZoirCache(io: Io, cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.File.WriteError!void {
    const header: Zoir.Header = .{
        .nodes_len = @intCast(zoir.nodes.len),
        .extra_len = @intCast(zoir.extra.len),

@@ -3045,7 +3051,7 @@ pub fn saveZoirCache(cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.Fil
        @ptrCast(zoir.compile_errors),
        @ptrCast(zoir.error_notes),
    };
-   var cache_fw = cache_file.writer(&.{});
+   var cache_fw = cache_file.writer(io, &.{});
    cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
        error.WriteFailed => return cache_fw.err.?,
    };

@@ -278,18 +278,18 @@ pub fn updateFile(
    switch (file.getMode()) {
        .zig => {
            file.zir = try AstGen.generate(gpa, file.tree.?);
-           Zcu.saveZirCache(gpa, cache_file, stat, file.zir.?) catch |err| switch (err) {
+           Zcu.saveZirCache(gpa, io, cache_file, stat, file.zir.?) catch |err| switch (err) {
                error.OutOfMemory => |e| return e,
-               else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {s}", .{
-                   file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
+               else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {t}", .{
+                   file.path.fmt(comp), cache_directory, &hex_digest, err,
                }),
            };
        },
        .zon => {
            file.zoir = try ZonGen.generate(gpa, file.tree.?, .{});
-           Zcu.saveZoirCache(cache_file, stat, file.zoir.?) catch |err| {
-               log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {s}", .{
-                   file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
+           Zcu.saveZoirCache(io, cache_file, stat, file.zoir.?) catch |err| {
+               log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {t}", .{
+                   file.path.fmt(comp), cache_directory, &hex_digest, err,
                });
            };
        },

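This hunk also swaps the `{s}` specifier plus `@errorName(err)` for the `{t}` specifier with the error value passed directly, just as the earlier `step.fail` hunk does. A hedged sketch of the two equivalent forms, assuming `{t}` formats an error's tag name the way these hunks use it; the message text is made up:

    const std = @import("std");

    fn report(err: anyerror) void {
        // old style: stringify the error by hand
        std.log.warn("operation failed: {s}", .{@errorName(err)});
        // new style: let the {t} specifier print the error name
        std.log.warn("operation failed: {t}", .{err});
    }
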
@@ -380,7 +380,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
    const lib_final_file = try o_dir.createFile(io, final_lib_basename, .{ .truncate = true });
    defer lib_final_file.close(io);
    var buffer: [1024]u8 = undefined;
-   var file_writer = lib_final_file.writer(&buffer);
+   var file_writer = lib_final_file.writer(io, &buffer);
    try implib.writeCoffArchive(gpa, &file_writer.interface, members);
    try file_writer.interface.flush();
 }

@@ -612,7 +612,7 @@ pub const File = struct {
        });
        defer gpa.free(tmp_sub_path);
        try emit.root_dir.handle.copyFile(emit.sub_path, emit.root_dir.handle, tmp_sub_path, .{});
-       try emit.root_dir.handle.rename(tmp_sub_path, emit.sub_path);
+       try emit.root_dir.handle.rename(tmp_sub_path, emit.root_dir.handle, emit.sub_path, io);
        switch (builtin.os.tag) {
            .linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| {
                log.warn("ptrace failure: {s}", .{@errorName(err)});

@@ -1631,7 +1631,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
    {
        defer rsp_file.close(io);
        var rsp_file_buffer: [1024]u8 = undefined;
-       var rsp_file_writer = rsp_file.writer(&rsp_file_buffer);
+       var rsp_file_writer = rsp_file.writer(io, &rsp_file_buffer);
        const rsp_writer = &rsp_file_writer.interface;
        for (argv[2..]) |arg| {
            try rsp_writer.writeByte('"');

@@ -3412,7 +3412,7 @@ fn buildOutputType(
        const sub_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-stdin{s}", .{
            &bin_digest, ext.canonicalName(target),
        });
-       try dirs.local_cache.handle.rename(dump_path, sub_path);
+       try dirs.local_cache.handle.rename(dump_path, dirs.local_cache.handle, sub_path, io);

        // Convert `sub_path` to be relative to current working directory.
        src.src_path = try dirs.local_cache.join(arena, &.{sub_path});

@@ -7225,11 +7225,7 @@ fn createDependenciesModule(
    const hex_digest = hh.final();

    const o_dir_sub_path = try arena.dupe(u8, "o" ++ fs.path.sep_str ++ hex_digest);
-   try Package.Fetch.renameTmpIntoCache(
-       dirs.local_cache.handle,
-       tmp_dir_sub_path,
-       o_dir_sub_path,
-   );
+   try Package.Fetch.renameTmpIntoCache(io, dirs.local_cache.handle, tmp_dir_sub_path, o_dir_sub_path);

    const deps_mod = try Package.Module.create(arena, .{
        .paths = .{