diff --git a/src/Compilation.zig b/src/Compilation.zig index 1a056c1c04..019aa498e1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -154,10 +154,6 @@ win32_resource_work_queue: if (dev.env.supports(.win32_resource)) std.fifo.Linea /// since the last compilation, as well as scan for `@import` and queue up /// additional jobs corresponding to those new files. astgen_work_queue: std.fifo.LinearFifo(Zcu.File.Index, .Dynamic), -/// These jobs are to inspect the file system stat() and if the embedded file has changed -/// on disk, mark the corresponding Decl outdated and queue up an `analyze_decl` -/// task for it. -embed_file_work_queue: std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic), /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator. /// This data is accessed by multiple threads and is protected by `mutex`. @@ -1465,7 +1461,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), .win32_resource_work_queue = if (dev.env.supports(.win32_resource)) std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa) else .{}, .astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa), - .embed_file_work_queue = std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic).init(gpa), .c_source_files = options.c_source_files, .rc_source_files = options.rc_source_files, .cache_parent = cache, @@ -1932,7 +1927,6 @@ pub fn destroy(comp: *Compilation) void { comp.c_object_work_queue.deinit(); comp.win32_resource_work_queue.deinit(); comp.astgen_work_queue.deinit(); - comp.embed_file_work_queue.deinit(); comp.windows_libs.deinit(gpa); @@ -2247,11 +2241,6 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } } - // Put a work item in for checking if any files used with `@embedFile` changed. - try comp.embed_file_work_queue.ensureUnusedCapacity(zcu.embed_table.count()); - for (zcu.embed_table.values()) |embed_file| { - comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file); - } if (comp.file_system_inputs) |fsi| { const ip = &zcu.intern_pool; for (zcu.embed_table.values()) |embed_file| { @@ -3235,9 +3224,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { try addZirErrorMessages(&bundle, file); } } - for (zcu.failed_embed_files.values()) |error_msg| { - try addModuleErrorMsg(zcu, &bundle, error_msg.*); - } var sorted_failed_analysis: std.AutoArrayHashMapUnmanaged(InternPool.AnalUnit, *Zcu.ErrorMsg).DataList.Slice = s: { const SortOrder = struct { zcu: *Zcu, @@ -3812,9 +3798,10 @@ fn performAllTheWorkInner( } } - while (comp.embed_file_work_queue.readItem()) |embed_file| { - comp.thread_pool.spawnWg(&astgen_wait_group, workerCheckEmbedFile, .{ - comp, embed_file, + for (0.., zcu.embed_table.values()) |ef_index_usize, ef| { + const ef_index: Zcu.EmbedFile.Index = @enumFromInt(ef_index_usize); + comp.thread_pool.spawnWgId(&astgen_wait_group, workerCheckEmbedFile, .{ + comp, ef_index, ef, }); } } @@ -4377,33 +4364,33 @@ fn workerUpdateBuiltinZigFile( }; } -fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Zcu.EmbedFile) void { - comp.detectEmbedFileUpdate(embed_file) catch |err| { - comp.reportRetryableEmbedFileError(embed_file, err) catch |oom| switch (oom) { - // Swallowing this error is OK because it's implied to be OOM when - // there is a missing `failed_embed_files` error message. 
- error.OutOfMemory => {}, - }; - return; +fn workerCheckEmbedFile(tid: usize, comp: *Compilation, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) void { + comp.detectEmbedFileUpdate(@enumFromInt(tid), ef_index, ef) catch |err| switch (err) { + error.OutOfMemory => { + comp.mutex.lock(); + defer comp.mutex.unlock(); + comp.setAllocFailure(); + }, }; } -fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Zcu.EmbedFile) !void { +fn detectEmbedFileUpdate(comp: *Compilation, tid: Zcu.PerThread.Id, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) !void { const zcu = comp.zcu.?; - const ip = &zcu.intern_pool; - var file = try embed_file.owner.root.openFile(embed_file.sub_file_path.toSlice(ip), .{}); - defer file.close(); + const pt: Zcu.PerThread = .activate(zcu, tid); + defer pt.deactivate(); - const stat = try file.stat(); + const old_val = ef.val; + const old_err = ef.err; - const unchanged_metadata = - stat.size == embed_file.stat.size and - stat.mtime == embed_file.stat.mtime and - stat.inode == embed_file.stat.inode; + try pt.updateEmbedFile(ef, null); - if (unchanged_metadata) return; + if (ef.val != .none and ef.val == old_val) return; // success, value unchanged + if (ef.val == .none and old_val == .none and ef.err == old_err) return; // failure, error unchanged - @panic("TODO: handle embed file incremental update"); + comp.mutex.lock(); + defer comp.mutex.unlock(); + + try zcu.markDependeeOutdated(.not_marked_po, .{ .embed_file = ef_index }); } pub fn obtainCObjectCacheManifest( @@ -4802,30 +4789,6 @@ fn reportRetryableWin32ResourceError( } } -fn reportRetryableEmbedFileError( - comp: *Compilation, - embed_file: *Zcu.EmbedFile, - err: anyerror, -) error{OutOfMemory}!void { - const zcu = comp.zcu.?; - const gpa = zcu.gpa; - const src_loc = embed_file.src_loc; - const ip = &zcu.intern_pool; - const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{ - embed_file.owner.root, - embed_file.sub_file_path.toSlice(ip), - @errorName(err), - }); - - errdefer err_msg.destroy(gpa); - - { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_embed_files.putNoClobber(gpa, embed_file, err_msg); - } -} - fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Progress.Node) !void { if (comp.config.c_frontend == .aro) { return comp.failCObj(c_object, "aro does not support compiling C objects yet", .{}); diff --git a/src/InternPool.zig b/src/InternPool.zig index a92e93705c..514fb1b63f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -42,6 +42,10 @@ nav_ty_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index), /// * a container type requiring resolution (invalidated when the type must be recreated at a new index) /// Value is index into `dep_entries` of the first dependency on this interned value. interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index), +/// Dependencies on an embedded file. +/// Introduced by `@embedFile`; invalidated when the file changes. +/// Value is index into `dep_entries` of the first dependency on this `Zcu.EmbedFile`. +embed_file_deps: std.AutoArrayHashMapUnmanaged(Zcu.EmbedFile.Index, DepEntry.Index), /// Dependencies on the full set of names in a ZIR namespace. /// Key refers to a `struct_decl`, `union_decl`, etc. /// Value is index into `dep_entries` of the first dependency on this namespace. 
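A note on the `detectEmbedFileUpdate` rewrite above: dependents are invalidated only when the observable result of the embed actually changed. The sketch below restates that comparison on its own, with a plain optional integer standing in for `InternPool.Index` and `anyerror` standing in for the file error set; it is an illustration, not the compiler's code.

```zig
const std = @import("std");

// Stand-in for an EmbedFile's observable result: either an interned value
// (val != null) or the error recorded by the last failed load attempt.
const Result = struct {
    val: ?u32,
    err: ?anyerror,
};

/// Mirrors the two early returns in detectEmbedFileUpdate: dependents are only
/// marked outdated when neither "same value" nor "same error" holds.
fn resultChanged(old: Result, new: Result) bool {
    if (new.val != null and old.val != null and new.val.? == old.val.?) return false; // success, value unchanged
    if (new.val == null and old.val == null and sameError(old.err, new.err)) return false; // failure, error unchanged
    return true;
}

fn sameError(a: ?anyerror, b: ?anyerror) bool {
    if (a == null and b == null) return true;
    if (a == null or b == null) return false;
    return a.? == b.?;
}

test resultChanged {
    try std.testing.expect(!resultChanged(.{ .val = 1, .err = null }, .{ .val = 1, .err = null }));
    try std.testing.expect(resultChanged(.{ .val = 1, .err = null }, .{ .val = 2, .err = null }));
    try std.testing.expect(!resultChanged(
        .{ .val = null, .err = error.FileNotFound },
        .{ .val = null, .err = error.FileNotFound },
    ));
    try std.testing.expect(resultChanged(
        .{ .val = 1, .err = null },
        .{ .val = null, .err = error.FileNotFound },
    ));
}
```

Since identical bytes intern to the same value, rewriting a file with unchanged contents yields the same `val`, so nothing downstream is re-analyzed.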
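`embed_file_deps`, added in the hunk above, follows the same shape as the other dependency maps: a map from dependee key to the head of its dependency list, selected by a switch on `Dependee` (extended later in this file). A miniature of that per-kind dispatch, using `std.AutoHashMapUnmanaged` and plain `u32` keys as stand-ins for the real array hash maps and index types:

```zig
const std = @import("std");

// Plain u32 stand-ins for InternPool.Index, Zcu.EmbedFile.Index, and DepEntry.Index.
const Dependee = union(enum) {
    interned: u32,
    embed_file: u32,
};

const Deps = struct {
    // The real maps are AutoArrayHashMapUnmanaged fields of InternPool.
    interned: std.AutoHashMapUnmanaged(u32, u32) = .{},
    embed_file: std.AutoHashMapUnmanaged(u32, u32) = .{},

    /// Like dependencyIterator: pick the map for the dependee's kind and look
    /// up the head of its dependency list, if any.
    fn firstDepEntry(deps: *const Deps, dependee: Dependee) ?u32 {
        return switch (dependee) {
            .interned => |x| deps.interned.get(x),
            .embed_file => |x| deps.embed_file.get(x),
        };
    }
};

test Deps {
    const gpa = std.testing.allocator;
    var deps: Deps = .{};
    defer deps.interned.deinit(gpa);
    defer deps.embed_file.deinit(gpa);

    try deps.embed_file.put(gpa, 0, 42);
    try std.testing.expectEqual(@as(?u32, 42), deps.firstDepEntry(.{ .embed_file = 0 }));
    try std.testing.expectEqual(@as(?u32, null), deps.firstDepEntry(.{ .interned = 0 }));
}
```

Keying the new map by `EmbedFile.Index` rather than by pointer lets the table entry itself be updated in place without disturbing any recorded dependencies.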
@@ -90,6 +94,7 @@ pub const empty: InternPool = .{ .nav_val_deps = .empty, .nav_ty_deps = .empty, .interned_deps = .empty, + .embed_file_deps = .empty, .namespace_deps = .empty, .namespace_name_deps = .empty, .memoized_state_main_deps = .none, @@ -824,6 +829,7 @@ pub const Dependee = union(enum) { nav_val: Nav.Index, nav_ty: Nav.Index, interned: Index, + embed_file: Zcu.EmbedFile.Index, namespace: TrackedInst.Index, namespace_name: NamespaceNameKey, memoized_state: MemoizedStateStage, @@ -875,6 +881,7 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI .nav_val => |x| ip.nav_val_deps.get(x), .nav_ty => |x| ip.nav_ty_deps.get(x), .interned => |x| ip.interned_deps.get(x), + .embed_file => |x| ip.embed_file_deps.get(x), .namespace => |x| ip.namespace_deps.get(x), .namespace_name => |x| ip.namespace_name_deps.get(x), .memoized_state => |stage| switch (stage) { @@ -945,6 +952,7 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend .nav_val => ip.nav_val_deps, .nav_ty => ip.nav_ty_deps, .interned => ip.interned_deps, + .embed_file => ip.embed_file_deps, .namespace => ip.namespace_deps, .namespace_name => ip.namespace_name_deps, .memoized_state => comptime unreachable, @@ -6612,6 +6620,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.nav_val_deps.deinit(gpa); ip.nav_ty_deps.deinit(gpa); ip.interned_deps.deinit(gpa); + ip.embed_file_deps.deinit(gpa); ip.namespace_deps.deinit(gpa); ip.namespace_name_deps.deinit(gpa); diff --git a/src/Sema.zig b/src/Sema.zig index 833b05413f..0b06eba519 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -13949,6 +13949,8 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A defer tracy.end(); const pt = sema.pt; + const zcu = pt.zcu; + const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const name = try sema.resolveConstString(block, operand_src, inst_data.operand, .{ .simple = .operand_embedFile }); @@ -13957,18 +13959,24 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A return sema.fail(block, operand_src, "file path name cannot be empty", .{}); } - const val = pt.embedFile(block.getFileScope(pt.zcu), name, operand_src) catch |err| switch (err) { + const ef_idx = pt.embedFile(block.getFileScope(zcu), name) catch |err| switch (err) { error.ImportOutsideModulePath => { return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name}); }, - else => { - // TODO: these errors are file system errors; make sure an update() will - // retry this and not cache the file system error, which may be transient. - return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ name, @errorName(err) }); + error.CurrentWorkingDirectoryUnlinked => { + // TODO: this should be some kind of retryable failure, in case the cwd is put back + return sema.fail(block, operand_src, "unable to resolve '{s}': working directory has been unlinked", .{name}); }, + error.OutOfMemory => |e| return e, }; + try sema.declareDependency(.{ .embed_file = ef_idx }); - return Air.internedToRef(val); + const result = ef_idx.get(zcu); + if (result.val == .none) { + return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ name, @errorName(result.err.?) 
}); + } + + return Air.internedToRef(result.val); } fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { diff --git a/src/Zcu.zig b/src/Zcu.zig index c75cd5d40c..120188cedf 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -143,8 +143,6 @@ compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { /// Using a map here for consistency with the other fields here. /// The ErrorMsg memory is owned by the `File`, using Module's general purpose allocator. failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .empty, -/// The ErrorMsg memory is owned by the `EmbedFile`, using Module's general purpose allocator. -failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .empty, failed_exports: std.AutoArrayHashMapUnmanaged(Export.Index, *ErrorMsg) = .empty, /// If analysis failed due to a cimport error, the corresponding Clang errors /// are stored here. @@ -893,13 +891,23 @@ pub const File = struct { }; pub const EmbedFile = struct { - /// Relative to the owning module's root directory. - sub_file_path: InternPool.NullTerminatedString, /// Module that this file is a part of, managed externally. owner: *Package.Module, - stat: Cache.File.Stat, + /// Relative to the owning module's root directory. + sub_file_path: InternPool.NullTerminatedString, + + /// `.none` means the file was not loaded, so `stat` is undefined. val: InternPool.Index, - src_loc: LazySrcLoc, + /// If this is `null` and `val` is `.none`, the file has never been loaded. + err: ?(std.fs.File.OpenError || std.fs.File.StatError || std.fs.File.ReadError || error{UnexpectedEof}), + stat: Cache.File.Stat, + + pub const Index = enum(u32) { + _, + pub fn get(idx: Index, zcu: *const Zcu) *EmbedFile { + return zcu.embed_table.values()[@intFromEnum(idx)]; + } + }; }; /// This struct holds data necessary to construct API-facing `AllErrors.Message`. 
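The reworked `EmbedFile` above encodes three states in two fields: never loaded (`val == .none` and `err == null`), loaded (`val` holds the interned pointer and `stat` is valid), and failed (`val == .none` with `err` set). A small sketch of that reading, with an optional integer standing in for `InternPool.Index`:

```zig
const std = @import("std");

const LoadState = enum { never_loaded, loaded, failed };

// `val` plays the role of EmbedFile.val (null standing in for `.none`),
// `err` the role of EmbedFile.err.
fn loadState(val: ?u32, err: ?anyerror) LoadState {
    if (val != null) return .loaded; // interned `*const [N:0]u8` is available
    if (err != null) return .failed; // last load attempt failed with this error
    return .never_loaded; // freshly created, before updateEmbedFile has run
}

test loadState {
    try std.testing.expectEqual(LoadState.never_loaded, loadState(null, null));
    try std.testing.expectEqual(LoadState.loaded, loadState(123, null));
    try std.testing.expectEqual(LoadState.failed, loadState(null, error.FileNotFound));
}
```

Sema's `zirEmbedFile` only ever observes the last two states, since `newEmbedFile` runs `updateEmbedFile` before the index is returned.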
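`EmbedFile.Index` is a handle equal to the entry's position in `zcu.embed_table`, which is what lets the Compilation.zig loop earlier recover a typed index while iterating the table's values. A minimal sketch of the pattern with hypothetical stand-in types (the real `get` resolves through the Zcu, not a slice argument):

```zig
const std = @import("std");

const EmbedFile = struct { sub_file_path: []const u8 };

// Stand-in for Zcu.EmbedFile.Index: a non-exhaustive enum wrapping the
// position of the entry in the embed table.
const EmbedFileIndex = enum(u32) {
    _,
    fn get(idx: EmbedFileIndex, table: []const *const EmbedFile) *const EmbedFile {
        return table[@intFromEnum(idx)];
    }
};

test EmbedFileIndex {
    const a: EmbedFile = .{ .sub_file_path = "string.txt" };
    const b: EmbedFile = .{ .sub_file_path = "blob.bin" };
    const table = [_]*const EmbedFile{ &a, &b };

    // The position/index equivalence used when spawning workerCheckEmbedFile:
    for (0.., &table) |i, ef| {
        const index: EmbedFileIndex = @enumFromInt(i);
        try std.testing.expectEqual(ef, index.get(&table));
    }
}
```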
@@ -2459,11 +2467,6 @@ pub fn deinit(zcu: *Zcu) void { } zcu.failed_files.deinit(gpa); - for (zcu.failed_embed_files.values()) |msg| { - msg.destroy(gpa); - } - zcu.failed_embed_files.deinit(gpa); - for (zcu.failed_exports.values()) |value| { value.destroy(gpa); } @@ -3882,6 +3885,14 @@ fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, com .func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}), else => unreachable, }, + .embed_file => |ef_idx| { + const ef = ef_idx.get(zcu); + return writer.print("embed_file('{s}')", .{std.fs.path.fmtJoin(&.{ + ef.owner.root.root_dir.path orelse "", + ef.owner.root.sub_path, + ef.sub_file_path.toSlice(ip), + })}); + }, .namespace => |ti| { const info = ti.resolveFull(ip) orelse { return writer.writeAll("namespace()"); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index c6a16ee00c..9e21b655d1 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2117,32 +2117,32 @@ pub fn embedFile( pt: Zcu.PerThread, cur_file: *Zcu.File, import_string: []const u8, - src_loc: Zcu.LazySrcLoc, -) !InternPool.Index { +) error{ + OutOfMemory, + ImportOutsideModulePath, + CurrentWorkingDirectoryUnlinked, +}!Zcu.EmbedFile.Index { const zcu = pt.zcu; const gpa = zcu.gpa; - if (cur_file.mod.deps.get(import_string)) |pkg| { + if (cur_file.mod.deps.get(import_string)) |mod| { const resolved_path = try std.fs.path.resolve(gpa, &.{ - pkg.root.root_dir.path orelse ".", - pkg.root.sub_path, - pkg.root_src_path, + mod.root.root_dir.path orelse ".", + mod.root.sub_path, + mod.root_src_path, }); - var keep_resolved_path = false; - defer if (!keep_resolved_path) gpa.free(resolved_path); + errdefer gpa.free(resolved_path); const gop = try zcu.embed_table.getOrPut(gpa, resolved_path); - errdefer { - assert(std.mem.eql(u8, zcu.embed_table.pop().key, resolved_path)); - keep_resolved_path = false; + errdefer assert(std.mem.eql(u8, zcu.embed_table.pop().key, resolved_path)); + + if (gop.found_existing) { + gpa.free(resolved_path); // we're not using this key + return @enumFromInt(gop.index); } - if (gop.found_existing) return gop.value_ptr.*.val; - keep_resolved_path = true; - const sub_file_path = try gpa.dupe(u8, pkg.root_src_path); - errdefer gpa.free(sub_file_path); - - return pt.newEmbedFile(pkg, sub_file_path, resolved_path, gop.value_ptr, src_loc); + gop.value_ptr.* = try pt.newEmbedFile(mod, mod.root_src_path, resolved_path); + return @enumFromInt(gop.index); } // The resolved path is used as the key in the table, to detect if a file @@ -2154,17 +2154,15 @@ pub fn embedFile( "..", import_string, }); - - var keep_resolved_path = false; - defer if (!keep_resolved_path) gpa.free(resolved_path); + errdefer gpa.free(resolved_path); const gop = try zcu.embed_table.getOrPut(gpa, resolved_path); - errdefer { - assert(std.mem.eql(u8, zcu.embed_table.pop().key, resolved_path)); - keep_resolved_path = false; + errdefer assert(std.mem.eql(u8, zcu.embed_table.pop().key, resolved_path)); + + if (gop.found_existing) { + gpa.free(resolved_path); // we're not using this key + return @enumFromInt(gop.index); } - if (gop.found_existing) return gop.value_ptr.*.val; - keep_resolved_path = true; const resolved_root_path = try std.fs.path.resolve(gpa, &.{ cur_file.mod.root.root_dir.path orelse ".", @@ -2172,101 +2170,156 @@ pub fn embedFile( }); defer gpa.free(resolved_root_path); - const sub_file_path = p: { - const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path); - errdefer gpa.free(relative); - - if 
(!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) { - break :p relative; - } - return error.ImportOutsideModulePath; + const sub_file_path = std.fs.path.relative(gpa, resolved_root_path, resolved_path) catch |err| switch (err) { + error.Unexpected => unreachable, + else => |e| return e, }; defer gpa.free(sub_file_path); - return pt.newEmbedFile(cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc); + if (isUpDir(sub_file_path) or std.fs.path.isAbsolute(sub_file_path)) { + return error.ImportOutsideModulePath; + } + + gop.value_ptr.* = try pt.newEmbedFile(cur_file.mod, sub_file_path, resolved_path); + return @enumFromInt(gop.index); } -/// https://github.com/ziglang/zig/issues/14307 -fn newEmbedFile( +pub fn updateEmbedFile( pt: Zcu.PerThread, - pkg: *Module, - sub_file_path: []const u8, - resolved_path: []const u8, - result: **Zcu.EmbedFile, - src_loc: Zcu.LazySrcLoc, -) !InternPool.Index { + ef: *Zcu.EmbedFile, + /// If not `null`, the interned file data is stored here, if it was loaded. + /// `newEmbedFile` uses this to add the file to the `whole` cache manifest. + ip_str_out: ?*?InternPool.String, +) Allocator.Error!void { + pt.updateEmbedFileInner(ef, ip_str_out) catch |err| switch (err) { + error.OutOfMemory => |e| return e, + else => |e| { + ef.val = .none; + ef.err = e; + ef.stat = undefined; + }, + }; +} + +fn updateEmbedFileInner( + pt: Zcu.PerThread, + ef: *Zcu.EmbedFile, + ip_str_out: ?*?InternPool.String, +) !void { + const tid = pt.tid; const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const new_file = try gpa.create(Zcu.EmbedFile); - errdefer gpa.destroy(new_file); - - var file = try pkg.root.openFile(sub_file_path, .{}); + var file = try ef.owner.root.openFile(ef.sub_file_path.toSlice(ip), .{}); defer file.close(); - const actual_stat = try file.stat(); - const stat: Cache.File.Stat = .{ - .size = actual_stat.size, - .inode = actual_stat.inode, - .mtime = actual_stat.mtime, - }; - const size = std.math.cast(usize, actual_stat.size) orelse return error.Overflow; + const stat: Cache.File.Stat = .fromFs(try file.stat()); - const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); - const bytes = try strings.addManyAsSlice(try std.math.add(usize, size, 1)); - const actual_read = try file.readAll(bytes[0][0..size]); - if (actual_read != size) return error.UnexpectedEndOfFile; - bytes[0][size] = 0; - - const comp = zcu.comp; - switch (comp.cache_use) { - .whole => |whole| if (whole.cache_manifest) |man| { - const copied_resolved_path = try gpa.dupe(u8, resolved_path); - errdefer gpa.free(copied_resolved_path); - whole.cache_manifest_mutex.lock(); - defer whole.cache_manifest_mutex.unlock(); - try man.addFilePostContents(copied_resolved_path, bytes[0][0..size], stat); - }, - .incremental => {}, + if (ef.val != .none) { + const old_stat = ef.stat; + const unchanged_metadata = + stat.size == old_stat.size and + stat.mtime == old_stat.mtime and + stat.inode == old_stat.inode; + if (unchanged_metadata) return; } - const array_ty = try pt.intern(.{ .array_type = .{ + const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig; + const size_plus_one = std.math.add(usize, size, 1) catch return error.FileTooBig; + + // The loaded bytes of the file, including a sentinel 0 byte. 
+ const ip_str: InternPool.String = str: { + const strings = ip.getLocal(tid).getMutableStrings(gpa); + const old_len = strings.mutate.len; + errdefer strings.shrinkRetainingCapacity(old_len); + const bytes = (try strings.addManyAsSlice(size_plus_one))[0]; + const actual_read = try file.readAll(bytes[0..size]); + if (actual_read != size) return error.UnexpectedEof; + bytes[size] = 0; + break :str try ip.getOrPutTrailingString(gpa, tid, @intCast(bytes.len), .maybe_embedded_nulls); + }; + if (ip_str_out) |p| p.* = ip_str; + + const array_ty = try pt.arrayType(.{ .len = size, .sentinel = .zero_u8, .child = .u8_type, - } }); - const array_val = try pt.intern(.{ .aggregate = .{ - .ty = array_ty, - .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, pt.tid, @intCast(bytes[0].len), .maybe_embedded_nulls) }, - } }); + }); + const ptr_ty = try pt.singleConstPtrType(array_ty); - const ptr_ty = (try pt.ptrType(.{ - .child = array_ty, - .flags = .{ - .alignment = .none, - .is_const = true, - .address_space = .generic, - }, - })).toIntern(); + const array_val = try pt.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .bytes = ip_str }, + } }); const ptr_val = try pt.intern(.{ .ptr = .{ - .ty = ptr_ty, + .ty = ptr_ty.toIntern(), .base_addr = .{ .uav = .{ .val = array_val, - .orig_ty = ptr_ty, + .orig_ty = ptr_ty.toIntern(), } }, .byte_offset = 0, } }); - result.* = new_file; + ef.val = ptr_val; + ef.err = null; + ef.stat = stat; +} + +fn newEmbedFile( + pt: Zcu.PerThread, + mod: *Module, + /// The path of the file to embed relative to the root of `mod`. + sub_file_path: []const u8, + /// The resolved path of the file to embed. + resolved_path: []const u8, +) !*Zcu.EmbedFile { + const zcu = pt.zcu; + const comp = zcu.comp; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + if (comp.file_system_inputs) |fsi| + try comp.appendFileSystemInput(fsi, mod.root, sub_file_path); + + const new_file = try gpa.create(Zcu.EmbedFile); + errdefer gpa.destroy(new_file); + new_file.* = .{ + .owner = mod, .sub_file_path = try ip.getOrPutString(gpa, pt.tid, sub_file_path, .no_embedded_nulls), - .owner = pkg, - .stat = stat, - .val = ptr_val, - .src_loc = src_loc, + .val = .none, + .err = null, + .stat = undefined, }; - return ptr_val; + + var opt_ip_str: ?InternPool.String = null; + try pt.updateEmbedFile(new_file, &opt_ip_str); + + // Add the file contents to the `whole` cache manifest if necessary. 
+ cache: { + const whole = switch (zcu.comp.cache_use) { + .whole => |whole| whole, + .incremental => break :cache, + }; + const man = whole.cache_manifest orelse break :cache; + const ip_str = opt_ip_str orelse break :cache; + + const copied_resolved_path = try gpa.dupe(u8, resolved_path); + errdefer gpa.free(copied_resolved_path); + + const array_len = Value.fromInterned(new_file.val).typeOf(zcu).childType(zcu).arrayLen(zcu); + + whole.cache_manifest_mutex.lock(); + defer whole.cache_manifest_mutex.unlock(); + + man.addFilePostContents(copied_resolved_path, ip_str.toSlice(array_len, ip), new_file.stat) catch |err| switch (err) { + error.Unexpected => unreachable, + else => |e| return e, + }; + } + + return new_file; } pub fn scanNamespace( diff --git a/test/incremental/change_embed_file b/test/incremental/change_embed_file new file mode 100644 index 0000000000..3cb29be2ed --- /dev/null +++ b/test/incremental/change_embed_file @@ -0,0 +1,46 @@ +#target=x86_64-linux-selfhosted +#target=x86_64-linux-cbe +#target=x86_64-windows-cbe +#target=wasm32-wasi-selfhosted +#update=initial version +#file=main.zig +const std = @import("std"); +const string = @embedFile("string.txt"); +pub fn main() !void { + try std.io.getStdOut().writeAll(string); +} +#file=string.txt +Hello, World! +#expect_stdout="Hello, World!\n" + +#update=change file contents +#file=string.txt +Hello again, World! +#expect_stdout="Hello again, World!\n" + +#update=delete file +#rm_file=string.txt +#expect_error=ignored + +#update=remove reference to file +#file=main.zig +const std = @import("std"); +const string = @embedFile("string.txt"); +pub fn main() !void { + try std.io.getStdOut().writeAll("a hardcoded string\n"); +} +#expect_stdout="a hardcoded string\n" + +#update=re-introduce reference to file +#file=main.zig +const std = @import("std"); +const string = @embedFile("string.txt"); +pub fn main() !void { + try std.io.getStdOut().writeAll(string); +} +#expect_error=ignore + +#update=recreate file +#file=string.txt +We're back, World! 
+#expect_stdout="We're back, World!\n" diff --git a/tools/incr-check.zig b/tools/incr-check.zig index 144e05ccc4..bbdef19043 100644 --- a/tools/incr-check.zig +++ b/tools/incr-check.zig @@ -608,6 +608,7 @@ const Case = struct { var targets: std.ArrayListUnmanaged(Target) = .empty; var updates: std.ArrayListUnmanaged(Update) = .empty; var changes: std.ArrayListUnmanaged(FullContents) = .empty; + var deletes: std.ArrayListUnmanaged([]const u8) = .empty; var it = std.mem.splitScalar(u8, bytes, '\n'); var line_n: usize = 1; var root_source_file: ?[]const u8 = null; @@ -647,13 +648,14 @@ const Case = struct { if (updates.items.len > 0) { const last_update = &updates.items[updates.items.len - 1]; last_update.changes = try changes.toOwnedSlice(arena); + last_update.deletes = try deletes.toOwnedSlice(arena); } try updates.append(arena, .{ .name = val, .outcome = .unknown, }); } else if (std.mem.eql(u8, key, "file")) { - if (updates.items.len == 0) fatal("line {d}: expect directive before update", .{line_n}); + if (updates.items.len == 0) fatal("line {d}: file directive before update", .{line_n}); if (root_source_file == null) root_source_file = val; @@ -674,6 +676,9 @@ const Case = struct { .name = val, .bytes = src, }); + } else if (std.mem.eql(u8, key, "rm_file")) { + if (updates.items.len == 0) fatal("line {d}: rm_file directive before update", .{line_n}); + try deletes.append(arena, val); } else if (std.mem.eql(u8, key, "expect_stdout")) { if (updates.items.len == 0) fatal("line {d}: expect directive before update", .{line_n}); const last_update = &updates.items[updates.items.len - 1]; @@ -701,6 +706,7 @@ const Case = struct { if (changes.items.len > 0) { const last_update = &updates.items[updates.items.len - 1]; last_update.changes = changes.items; // arena so no need for toOwnedSlice + last_update.deletes = deletes.items; } return .{