compiler: rework inferred error sets

* move inferred error sets into InternPool.
   - they are now represented by pointing directly at the corresponding
     function body value.
 * inferred error set working memory is now in Sema and expires once Sema
   finishes analyzing the body of the function corresponding to the
   inferred error set.
 * error sets use an InternPool.Index.Slice rather than an actual slice
   to avoid lifetime issues.
This commit is contained in:
Andrew Kelley 2023-07-08 23:39:37 -07:00
parent 55e89255e1
commit f3dc53f6b5
7 changed files with 1038 additions and 740 deletions

View file

@ -1669,8 +1669,9 @@ pub fn ArrayHashMapUnmanaged(
inline fn checkedHash(ctx: anytype, key: anytype) u32 { inline fn checkedHash(ctx: anytype, key: anytype) u32 {
comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(key), K, u32, true); comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(key), K, u32, true);
// If you get a compile error on the next line, it means that // If you get a compile error on the next line, it means that your
const hash = ctx.hash(key); // your generic hash function doesn't accept your key // generic hash function doesn't accept your key.
const hash = ctx.hash(key);
if (@TypeOf(hash) != u32) { if (@TypeOf(hash) != u32) {
@compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type!\n" ++ @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type!\n" ++
@typeName(u32) ++ " was expected, but found " ++ @typeName(@TypeOf(hash))); @typeName(u32) ++ " was expected, but found " ++ @typeName(@TypeOf(hash)));
@ -1679,8 +1680,9 @@ pub fn ArrayHashMapUnmanaged(
} }
inline fn checkedEql(ctx: anytype, a: anytype, b: K, b_index: usize) bool { inline fn checkedEql(ctx: anytype, a: anytype, b: K, b_index: usize) bool {
comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(a), K, u32, true); comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(a), K, u32, true);
// If you get a compile error on the next line, it means that // If you get a compile error on the next line, it means that your
const eql = ctx.eql(a, b, b_index); // your generic eql function doesn't accept (self, adapt key, K, index) // generic eql function doesn't accept (self, adapt key, K, index).
const eql = ctx.eql(a, b, b_index);
if (@TypeOf(eql) != bool) { if (@TypeOf(eql) != bool) {
@compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type!\n" ++ @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type!\n" ++
@typeName(bool) ++ " was expected, but found " ++ @typeName(@TypeOf(eql))); @typeName(bool) ++ " was expected, but found " ++ @typeName(@TypeOf(eql)));

File diff suppressed because it is too large Load diff

View file

@ -1297,98 +1297,6 @@ pub const Union = struct {
} }
}; };
/// Some extern function struct memory is owned by the Decl's TypedValue.Managed
/// arena allocator.
pub const ExternFn = struct {
/// The Decl that corresponds to the function itself.
owner_decl: Decl.Index,
/// Library name if specified.
/// For example `extern "c" fn write(...) usize` would have 'c' as library name.
/// Allocated with Module's allocator; outlives the ZIR code.
lib_name: ?[*:0]const u8,
pub fn deinit(extern_fn: *ExternFn, gpa: Allocator) void {
if (extern_fn.lib_name) |lib_name| {
gpa.free(mem.sliceTo(lib_name, 0));
}
}
};
/// This struct is used to keep track of any dependencies related to functions instances
/// that return inferred error sets. Note that a function may be associated to
/// multiple different error sets, for example an inferred error set which
/// this function returns, but also any inferred error sets of called inline
/// or comptime functions.
pub const InferredErrorSet = struct {
/// The function from which this error set originates.
func: InternPool.Index,
/// All currently known errors that this error set contains. This includes
/// direct additions via `return error.Foo;`, and possibly also errors that
/// are returned from any dependent functions. When the inferred error set is
/// fully resolved, this map contains all the errors that the function might return.
errors: NameMap = .{},
/// Other inferred error sets which this inferred error set should include.
inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{},
/// Whether the function returned anyerror. This is true if either of
/// the dependent functions returns anyerror.
is_anyerror: bool = false,
/// Whether this error set is already fully resolved. If true, resolving
/// can skip resolving any dependents of this inferred error set.
is_resolved: bool = false,
pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
pub const Index = enum(u32) {
_,
pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i)));
}
};
pub const OptionalIndex = enum(u32) {
none = std.math.maxInt(u32),
_,
pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
}
pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index {
if (oi == .none) return null;
return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi)));
}
};
pub fn addErrorSet(
self: *InferredErrorSet,
err_set_ty: Type,
ip: *InternPool,
gpa: Allocator,
) !void {
switch (err_set_ty.toIntern()) {
.anyerror_type => {
self.is_anyerror = true;
},
else => switch (ip.indexToKey(err_set_ty.toIntern())) {
.error_set_type => |error_set_type| {
for (error_set_type.names) |name| {
try self.errors.put(gpa, name, {});
}
},
.inferred_error_set_type => |ies_index| {
try self.inferred_error_sets.put(gpa, ies_index, {});
},
else => unreachable,
},
}
}
};
pub const DeclAdapter = struct { pub const DeclAdapter = struct {
mod: *Module, mod: *Module,
@ -3220,10 +3128,6 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
return mod.intern_pool.structPtr(index); return mod.intern_pool.structPtr(index);
} }
pub fn inferredErrorSetPtr(mod: *Module, index: InferredErrorSet.Index) *InferredErrorSet {
return mod.intern_pool.inferredErrorSetPtr(index);
}
pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace { pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace {
return mod.namespacePtr(index.unwrap() orelse return null); return mod.namespacePtr(index.unwrap() orelse return null);
} }
@ -4261,6 +4165,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.owner_decl_index = new_decl_index, .owner_decl_index = new_decl_index,
.func_index = .none, .func_index = .none,
.fn_ret_ty = Type.void, .fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none, .owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls, .comptime_mutable_decls = &comptime_mutable_decls,
}; };
@ -4342,6 +4247,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
.owner_decl_index = decl_index, .owner_decl_index = decl_index,
.func_index = .none, .func_index = .none,
.fn_ret_ty = Type.void, .fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none, .owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls, .comptime_mutable_decls = &comptime_mutable_decls,
}; };
@ -5289,12 +5195,19 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
.owner_decl_index = decl_index, .owner_decl_index = decl_index,
.func_index = func_index, .func_index = func_index,
.fn_ret_ty = fn_ty_info.return_type.toType(), .fn_ret_ty = fn_ty_info.return_type.toType(),
.fn_ret_ty_ies = null,
.owner_func_index = func_index, .owner_func_index = func_index,
.branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota), .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota),
.comptime_mutable_decls = &comptime_mutable_decls, .comptime_mutable_decls = &comptime_mutable_decls,
}; };
defer sema.deinit(); defer sema.deinit();
if (func.analysis(ip).inferred_error_set) {
const ies = try arena.create(Sema.InferredErrorSet);
ies.* = .{ .func = func_index };
sema.fn_ret_ty_ies = ies;
}
// reset in case calls to errorable functions are removed. // reset in case calls to errorable functions are removed.
func.analysis(ip).calls_or_awaits_errorable_fn = false; func.analysis(ip).calls_or_awaits_errorable_fn = false;
@ -5433,7 +5346,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
inner_block.instructions.items.len); inner_block.instructions.items.len);
const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ const main_block_index = sema.addExtraAssumeCapacity(Air.Block{
.body_len = @as(u32, @intCast(inner_block.instructions.items.len)), .body_len = @intCast(inner_block.instructions.items.len),
}); });
sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items);
sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index; sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index;
@ -5445,7 +5358,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
// Crucially, this happens *after* we set the function state to success above, // Crucially, this happens *after* we set the function state to success above,
// so that dependencies on the function body will now be satisfied rather than // so that dependencies on the function body will now be satisfied rather than
// result in circular dependency errors. // result in circular dependency errors.
sema.resolveFnTypes(fn_ty) catch |err| switch (err) { sema.resolveFnTypes(&inner_block, LazySrcLoc.nodeOffset(0), fn_ty) catch |err| switch (err) {
error.NeededSourceLocation => unreachable, error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable, error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable, error.ComptimeReturn => unreachable,
@ -6595,7 +6508,8 @@ pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Alloca
pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type { pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type {
const names: *const [1]InternPool.NullTerminatedString = &name; const names: *const [1]InternPool.NullTerminatedString = &name;
return (try mod.intern_pool.get(mod.gpa, .{ .error_set_type = .{ .names = names } })).toType(); const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
return new_ty.toType();
} }
/// Sorts `names` in place. /// Sorts `names` in place.
@ -6609,7 +6523,7 @@ pub fn errorSetFromUnsortedNames(
{}, {},
InternPool.NullTerminatedString.indexLessThan, InternPool.NullTerminatedString.indexLessThan,
); );
const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } }); const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
return new_ty.toType(); return new_ty.toType();
} }
@ -6956,16 +6870,6 @@ pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
return mod.intern_pool.indexToFuncType(ty.toIntern()); return mod.intern_pool.indexToFuncType(ty.toIntern());
} }
pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*InferredErrorSet {
const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null;
return mod.inferredErrorSetPtr(index);
}
pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) InferredErrorSet.OptionalIndex {
if (ty.ip_index == .none) return .none;
return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern());
}
pub fn funcOwnerDeclPtr(mod: *Module, func_index: InternPool.Index) *Decl { pub fn funcOwnerDeclPtr(mod: *Module, func_index: InternPool.Index) *Decl {
return mod.declPtr(mod.funcOwnerDeclIndex(func_index)); return mod.declPtr(mod.funcOwnerDeclIndex(func_index));
} }
@ -6974,6 +6878,10 @@ pub fn funcOwnerDeclIndex(mod: *Module, func_index: InternPool.Index) Decl.Index
return mod.funcInfo(func_index).owner_decl; return mod.funcInfo(func_index).owner_decl;
} }
pub fn iesFuncIndex(mod: *const Module, ies_index: InternPool.Index) InternPool.Index {
return mod.intern_pool.iesFuncIndex(ies_index);
}
pub fn funcInfo(mod: *Module, func_index: InternPool.Index) InternPool.Key.Func { pub fn funcInfo(mod: *Module, func_index: InternPool.Index) InternPool.Key.Func {
return mod.intern_pool.indexToKey(func_index).func; return mod.intern_pool.indexToKey(func_index).func;
} }
@ -7040,19 +6948,3 @@ pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0]
else => unreachable, else => unreachable,
}; };
} }
pub fn hasInferredErrorSet(mod: *Module, func: InternPool.Key.Func) bool {
const owner_decl = mod.declPtr(func.owner_decl);
const zir = owner_decl.getFileScope(mod).zir;
const zir_tags = zir.instructions.items(.tag);
switch (zir_tags[func.zir_body_inst]) {
.func => return false,
.func_inferred => return true,
.func_fancy => {
const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node;
const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
return extra.data.bits.is_inferred_error;
},
else => unreachable,
}
}

File diff suppressed because it is too large Load diff

View file

@ -6061,8 +6061,6 @@ pub const FuncGen = struct {
.is_var_args = false, .is_var_args = false,
.is_generic = false, .is_generic = false,
.is_noinline = false, .is_noinline = false,
.align_is_generic = false,
.cc_is_generic = false,
.section_is_generic = false, .section_is_generic = false,
.addrspace_is_generic = false, .addrspace_is_generic = false,
}); });
@ -10657,30 +10655,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
} }
fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool { fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false; const return_type = fn_info.return_type.toType();
if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
const target = mod.getTarget(); const target = mod.getTarget();
switch (fn_info.cc) { switch (fn_info.cc) {
.Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod), .Unspecified, .Inline => return isByRef(return_type, mod),
.C => switch (target.cpu.arch) { .C => switch (target.cpu.arch) {
.mips, .mipsel => return false, .mips, .mipsel => return false,
.x86_64 => switch (target.os.tag) { .x86_64 => switch (target.os.tag) {
.windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, .windows => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), else => return firstParamSRetSystemV(return_type, mod),
}, },
.wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect, .wasm32 => return wasm_c_abi.classifyType(return_type, mod)[0] == .indirect,
.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(return_type, mod) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) { .arm, .armeb => switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
.memory, .i64_array => return true, .memory, .i64_array => return true,
.i32_array => |size| return size != 1, .i32_array => |size| return size != 1,
.byval => return false, .byval => return false,
}, },
.riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, .riscv32, .riscv64 => return riscv_c_abi.classifyType(return_type, mod) == .memory,
else => return false, // TODO investigate C ABI for other architectures else => return false, // TODO investigate C ABI for other architectures
}, },
.SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), .SysV => return firstParamSRetSystemV(return_type, mod),
.Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, .Win64 => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
.Stdcall => return !isScalar(mod, fn_info.return_type.toType()), .Stdcall => return !isScalar(mod, return_type),
else => return false, else => return false,
} }
} }

View file

@ -1043,6 +1043,7 @@ pub fn commitDeclState(
var dbg_line_buffer = &decl_state.dbg_line; var dbg_line_buffer = &decl_state.dbg_line;
var dbg_info_buffer = &decl_state.dbg_info; var dbg_info_buffer = &decl_state.dbg_info;
const decl = mod.declPtr(decl_index); const decl = mod.declPtr(decl_index);
const ip = &mod.intern_pool;
const target_endian = self.target.cpu.arch.endian(); const target_endian = self.target.cpu.arch.endian();
@ -1241,20 +1242,9 @@ pub fn commitDeclState(
while (sym_index < decl_state.abbrev_table.items.len) : (sym_index += 1) { while (sym_index < decl_state.abbrev_table.items.len) : (sym_index += 1) {
const symbol = &decl_state.abbrev_table.items[sym_index]; const symbol = &decl_state.abbrev_table.items[sym_index];
const ty = symbol.type; const ty = symbol.type;
const deferred: bool = blk: { if (ip.isErrorSetType(ty.toIntern())) continue;
if (ty.isAnyError(mod)) break :blk true;
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.inferred_error_set_type => |ies_index| {
const ies = mod.inferredErrorSetPtr(ies_index);
if (!ies.is_resolved) break :blk true;
},
else => {},
}
break :blk false;
};
if (deferred) continue;
symbol.offset = @as(u32, @intCast(dbg_info_buffer.items.len)); symbol.offset = @intCast(dbg_info_buffer.items.len);
try decl_state.addDbgInfoType(mod, di_atom_index, ty); try decl_state.addDbgInfoType(mod, di_atom_index, ty);
} }
} }
@ -1265,18 +1255,7 @@ pub fn commitDeclState(
if (reloc.target) |target| { if (reloc.target) |target| {
const symbol = decl_state.abbrev_table.items[target]; const symbol = decl_state.abbrev_table.items[target];
const ty = symbol.type; const ty = symbol.type;
const deferred: bool = blk: { if (ip.isErrorSetType(ty.toIntern())) {
if (ty.isAnyError(mod)) break :blk true;
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.inferred_error_set_type => |ies_index| {
const ies = mod.inferredErrorSetPtr(ies_index);
if (!ies.is_resolved) break :blk true;
},
else => {},
}
break :blk false;
};
if (deferred) {
log.debug("resolving %{d} deferred until flush", .{target}); log.debug("resolving %{d} deferred until flush", .{target});
try self.global_abbrev_relocs.append(gpa, .{ try self.global_abbrev_relocs.append(gpa, .{
.target = null, .target = null,
@ -2505,18 +2484,18 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
defer arena_alloc.deinit(); defer arena_alloc.deinit();
const arena = arena_alloc.allocator(); const arena = arena_alloc.allocator();
// TODO: don't create a zig type for this, just make the dwarf info
// without touching the zig type system.
const names = try arena.dupe(InternPool.NullTerminatedString, module.global_error_set.keys());
std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan);
const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } });
var dbg_info_buffer = std.ArrayList(u8).init(arena); var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSet(module, error_ty.toType(), self.target, &dbg_info_buffer); try addDbgInfoErrorSetNames(
module,
Type.anyerror,
module.global_error_set.keys(),
self.target,
&dbg_info_buffer,
);
const di_atom_index = try self.createAtom(.di_atom); const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{}); log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len))); try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len));
log.debug("writeDeclDebugInfo in flushModule", .{}); log.debug("writeDeclDebugInfo in flushModule", .{});
try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items); try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
@ -2633,6 +2612,17 @@ fn addDbgInfoErrorSet(
ty: Type, ty: Type,
target: std.Target, target: std.Target,
dbg_info_buffer: *std.ArrayList(u8), dbg_info_buffer: *std.ArrayList(u8),
) !void {
return addDbgInfoErrorSetNames(mod, ty, ty.errorSetNames(mod), target, dbg_info_buffer);
}
fn addDbgInfoErrorSetNames(
mod: *Module,
/// Used for printing the type name only.
ty: Type,
error_names: []const InternPool.NullTerminatedString,
target: std.Target,
dbg_info_buffer: *std.ArrayList(u8),
) !void { ) !void {
const target_endian = target.cpu.arch.endian(); const target_endian = target.cpu.arch.endian();
@ -2655,7 +2645,6 @@ fn addDbgInfoErrorSet(
// DW.AT.const_value, DW.FORM.data8 // DW.AT.const_value, DW.FORM.data8
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian); mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);
const error_names = ty.errorSetNames(mod);
for (error_names) |error_name_ip| { for (error_names) |error_name_ip| {
const int = try mod.getErrorValue(error_name_ip); const int = try mod.getErrorValue(error_name_ip);
const error_name = mod.intern_pool.stringToSlice(error_name_ip); const error_name = mod.intern_pool.stringToSlice(error_name_ip);

View file

@ -251,20 +251,19 @@ pub const Type = struct {
return; return;
}, },
.inferred_error_set_type => |index| { .inferred_error_set_type => |index| {
const ies = mod.inferredErrorSetPtr(index); const func = mod.iesFuncIndex(index);
const func = ies.func;
try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); try writer.writeAll("@typeInfo(@typeInfo(@TypeOf(");
const owner_decl = mod.funcOwnerDeclPtr(func); const owner_decl = mod.funcOwnerDeclPtr(func);
try owner_decl.renderFullyQualifiedName(mod, writer); try owner_decl.renderFullyQualifiedName(mod, writer);
try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
}, },
.error_set_type => |error_set_type| { .error_set_type => |error_set_type| {
const ip = &mod.intern_pool;
const names = error_set_type.names; const names = error_set_type.names;
try writer.writeAll("error{"); try writer.writeAll("error{");
for (names, 0..) |name, i| { for (names.get(ip), 0..) |name, i| {
if (i != 0) try writer.writeByte(','); if (i != 0) try writer.writeByte(',');
try writer.print("{}", .{name.fmt(&mod.intern_pool)}); try writer.print("{}", .{name.fmt(ip)});
} }
try writer.writeAll("}"); try writer.writeAll("}");
}, },
@ -2051,21 +2050,19 @@ pub const Type = struct {
/// Asserts that the type is an error union. /// Asserts that the type is an error union.
pub fn errorUnionSet(ty: Type, mod: *Module) Type { pub fn errorUnionSet(ty: Type, mod: *Module) Type {
return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.error_set_type.toType(); return mod.intern_pool.errorUnionSet(ty.toIntern()).toType();
} }
/// Returns false for unresolved inferred error sets. /// Returns false for unresolved inferred error sets.
pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ty.toIntern()) { return switch (ty.toIntern()) {
.anyerror_type => false, .anyerror_type => false,
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { else => switch (ip.indexToKey(ty.toIntern())) {
.error_set_type => |error_set_type| error_set_type.names.len == 0, .error_set_type => |error_set_type| error_set_type.names.len == 0,
.inferred_error_set_type => |index| { .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
const inferred_error_set = mod.inferredErrorSetPtr(index); .none, .anyerror_type => false,
// Can't know for sure. else => |t| ip.indexToKey(t).error_set_type.names.len == 0,
if (!inferred_error_set.is_resolved) return false;
if (inferred_error_set.is_anyerror) return false;
return inferred_error_set.errors.count() == 0;
}, },
else => unreachable, else => unreachable,
}, },
@ -2076,10 +2073,11 @@ pub const Type = struct {
/// Note that the result may be a false negative if the type did not get error set /// Note that the result may be a false negative if the type did not get error set
/// resolution prior to this call. /// resolution prior to this call.
pub fn isAnyError(ty: Type, mod: *Module) bool { pub fn isAnyError(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ty.toIntern()) { return switch (ty.toIntern()) {
.anyerror_type => true, .anyerror_type => true,
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror, .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type,
else => false, else => false,
}, },
}; };
@ -2103,13 +2101,11 @@ pub const Type = struct {
return switch (ty) { return switch (ty) {
.anyerror_type => true, .anyerror_type => true,
else => switch (ip.indexToKey(ty)) { else => switch (ip.indexToKey(ty)) {
.error_set_type => |error_set_type| { .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null,
return error_set_type.nameIndex(ip, name) != null; .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
}, .anyerror_type => true,
.inferred_error_set_type => |index| { .none => false,
const ies = ip.inferredErrorSetPtrConst(index); else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null,
if (ies.is_anyerror) return true;
return ies.errors.contains(name);
}, },
else => unreachable, else => unreachable,
}, },
@ -2129,12 +2125,14 @@ pub const Type = struct {
const field_name_interned = ip.getString(name).unwrap() orelse return false; const field_name_interned = ip.getString(name).unwrap() orelse return false;
return error_set_type.nameIndex(ip, field_name_interned) != null; return error_set_type.nameIndex(ip, field_name_interned) != null;
}, },
.inferred_error_set_type => |index| { .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
const ies = ip.inferredErrorSetPtr(index); .anyerror_type => true,
if (ies.is_anyerror) return true; .none => false,
// If the string is not interned, then the field certainly is not present. else => |t| {
const field_name_interned = ip.getString(name).unwrap() orelse return false; // If the string is not interned, then the field certainly is not present.
return ies.errors.contains(field_name_interned); const field_name_interned = ip.getString(name).unwrap() orelse return false;
return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null;
},
}, },
else => unreachable, else => unreachable,
}, },
@ -2943,14 +2941,15 @@ pub const Type = struct {
} }
// Asserts that `ty` is an error set and not `anyerror`. // Asserts that `ty` is an error set and not `anyerror`.
// Asserts that `ty` is resolved if it is an inferred error set.
pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) { const ip = &mod.intern_pool;
.error_set_type => |x| x.names, return switch (ip.indexToKey(ty.toIntern())) {
.inferred_error_set_type => |index| { .error_set_type => |x| x.names.get(ip),
const inferred_error_set = mod.inferredErrorSetPtr(index); .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
assert(inferred_error_set.is_resolved); .none => unreachable, // unresolved inferred error set
assert(!inferred_error_set.is_anyerror); .anyerror_type => unreachable,
return inferred_error_set.errors.keys(); else => |t| ip.indexToKey(t).error_set_type.names.get(ip),
}, },
else => unreachable, else => unreachable,
}; };