compiler: rework inferred error sets

* move inferred error sets into InternPool.
   - they are now represented by pointing directly at the corresponding
     function body value.
 * inferred error set working memory is now in Sema and expires once
   the Sema of the function corresponding to the inferred error set
   finishes analyzing that function's body.
 * error sets use an InternPool.Index.Slice rather than an actual slice
   to avoid lifetime issues.
This commit is contained in:
Andrew Kelley 2023-07-08 23:39:37 -07:00
parent 55e89255e1
commit f3dc53f6b5
7 changed files with 1038 additions and 740 deletions

View file

@ -1669,8 +1669,9 @@ pub fn ArrayHashMapUnmanaged(
inline fn checkedHash(ctx: anytype, key: anytype) u32 {
comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(key), K, u32, true);
// If you get a compile error on the next line, it means that
const hash = ctx.hash(key); // your generic hash function doesn't accept your key
// If you get a compile error on the next line, it means that your
// generic hash function doesn't accept your key.
const hash = ctx.hash(key);
if (@TypeOf(hash) != u32) {
@compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type!\n" ++
@typeName(u32) ++ " was expected, but found " ++ @typeName(@TypeOf(hash)));
@ -1679,8 +1680,9 @@ pub fn ArrayHashMapUnmanaged(
}
inline fn checkedEql(ctx: anytype, a: anytype, b: K, b_index: usize) bool {
comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(a), K, u32, true);
// If you get a compile error on the next line, it means that
const eql = ctx.eql(a, b, b_index); // your generic eql function doesn't accept (self, adapt key, K, index)
// If you get a compile error on the next line, it means that your
// generic eql function doesn't accept (self, adapt key, K, index).
const eql = ctx.eql(a, b, b_index);
if (@TypeOf(eql) != bool) {
@compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type!\n" ++
@typeName(bool) ++ " was expected, but found " ++ @typeName(@TypeOf(eql)));

File diff suppressed because it is too large Load diff

View file

@ -1297,98 +1297,6 @@ pub const Union = struct {
}
};
/// Some extern function struct memory is owned by the Decl's TypedValue.Managed
/// arena allocator.
pub const ExternFn = struct {
    /// The Decl that corresponds to the function itself.
    owner_decl: Decl.Index,
    /// Library name if specified.
    /// For example `extern "c" fn write(...) usize` would have 'c' as library name.
    /// Allocated with Module's allocator; outlives the ZIR code.
    lib_name: ?[*:0]const u8,

    /// Frees `lib_name` if present; a no-op otherwise. `gpa` must be the
    /// allocator that `lib_name` was allocated with (the Module's allocator,
    /// per the field doc above).
    pub fn deinit(extern_fn: *ExternFn, gpa: Allocator) void {
        if (extern_fn.lib_name) |lib_name| {
            gpa.free(mem.sliceTo(lib_name, 0));
        }
    }
};
/// This struct is used to keep track of any dependencies related to functions instances
/// that return inferred error sets. Note that a function may be associated to
/// multiple different error sets, for example an inferred error set which
/// this function returns, but also any inferred error sets of called inline
/// or comptime functions.
pub const InferredErrorSet = struct {
    /// The function from which this error set originates.
    func: InternPool.Index,
    /// All currently known errors that this error set contains. This includes
    /// direct additions via `return error.Foo;`, and possibly also errors that
    /// are returned from any dependent functions. When the inferred error set is
    /// fully resolved, this map contains all the errors that the function might return.
    errors: NameMap = .{},
    /// Other inferred error sets which this inferred error set should include.
    inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{},
    /// Whether the function returned anyerror. This is true if either of
    /// the dependent functions returns anyerror.
    is_anyerror: bool = false,
    /// Whether this error set is already fully resolved. If true, resolving
    /// can skip resolving any dependents of this inferred error set.
    is_resolved: bool = false,

    /// Insertion-ordered set of error names (the `void` values carry no data).
    pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);

    /// Typed 32-bit handle identifying an `InferredErrorSet`.
    pub const Index = enum(u32) {
        _,

        pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
            return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i)));
        }
    };

    /// Like `Index`, but reserves `maxInt(u32)` to encode "none" without
    /// widening the representation.
    pub const OptionalIndex = enum(u32) {
        none = std.math.maxInt(u32),
        _,

        pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
            return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
        }

        pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index {
            if (oi == .none) return null;
            return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi)));
        }
    };

    /// Folds the error set type `err_set_ty` into this inferred error set:
    /// - `anyerror` sets `is_anyerror` (subsumes all individual names);
    /// - a concrete error set contributes each of its names to `errors`;
    /// - another inferred error set is recorded as a dependency in
    ///   `inferred_error_sets` rather than being expanded now.
    /// Asserts (via `unreachable`) that `err_set_ty` is an error set type.
    pub fn addErrorSet(
        self: *InferredErrorSet,
        err_set_ty: Type,
        ip: *InternPool,
        gpa: Allocator,
    ) !void {
        switch (err_set_ty.toIntern()) {
            .anyerror_type => {
                self.is_anyerror = true;
            },
            else => switch (ip.indexToKey(err_set_ty.toIntern())) {
                .error_set_type => |error_set_type| {
                    for (error_set_type.names) |name| {
                        try self.errors.put(gpa, name, {});
                    }
                },
                .inferred_error_set_type => |ies_index| {
                    try self.inferred_error_sets.put(gpa, ies_index, {});
                },
                else => unreachable,
            },
        }
    }
};
pub const DeclAdapter = struct {
mod: *Module,
@ -3220,10 +3128,6 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
return mod.intern_pool.structPtr(index);
}
/// Returns a pointer to the `InferredErrorSet` identified by `index`.
/// Thin convenience wrapper over the intern pool's accessor of the same name.
pub fn inferredErrorSetPtr(mod: *Module, index: InferredErrorSet.Index) *InferredErrorSet {
    return mod.intern_pool.inferredErrorSetPtr(index);
}
pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace {
return mod.namespacePtr(index.unwrap() orelse return null);
}
@ -4261,6 +4165,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.owner_decl_index = new_decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@ -4342,6 +4247,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@ -5289,12 +5195,19 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
.owner_decl_index = decl_index,
.func_index = func_index,
.fn_ret_ty = fn_ty_info.return_type.toType(),
.fn_ret_ty_ies = null,
.owner_func_index = func_index,
.branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota),
.comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
if (func.analysis(ip).inferred_error_set) {
const ies = try arena.create(Sema.InferredErrorSet);
ies.* = .{ .func = func_index };
sema.fn_ret_ty_ies = ies;
}
// reset in case calls to errorable functions are removed.
func.analysis(ip).calls_or_awaits_errorable_fn = false;
@ -5433,7 +5346,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
inner_block.instructions.items.len);
const main_block_index = sema.addExtraAssumeCapacity(Air.Block{
.body_len = @as(u32, @intCast(inner_block.instructions.items.len)),
.body_len = @intCast(inner_block.instructions.items.len),
});
sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items);
sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index;
@ -5445,7 +5358,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
// Crucially, this happens *after* we set the function state to success above,
// so that dependencies on the function body will now be satisfied rather than
// result in circular dependency errors.
sema.resolveFnTypes(fn_ty) catch |err| switch (err) {
sema.resolveFnTypes(&inner_block, LazySrcLoc.nodeOffset(0), fn_ty) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable,
@ -6595,7 +6508,8 @@ pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Alloca
pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type {
const names: *const [1]InternPool.NullTerminatedString = &name;
return (try mod.intern_pool.get(mod.gpa, .{ .error_set_type = .{ .names = names } })).toType();
const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
return new_ty.toType();
}
/// Sorts `names` in place.
@ -6609,7 +6523,7 @@ pub fn errorSetFromUnsortedNames(
{},
InternPool.NullTerminatedString.indexLessThan,
);
const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } });
const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
return new_ty.toType();
}
@ -6956,16 +6870,6 @@ pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
return mod.intern_pool.indexToFuncType(ty.toIntern());
}
/// If `ty` is an inferred error set type, returns a pointer to its working
/// data; otherwise returns null. Combines `typeToInferredErrorSetIndex` with
/// `inferredErrorSetPtr`.
pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*InferredErrorSet {
    const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null;
    return mod.inferredErrorSetPtr(index);
}
/// Returns the `InferredErrorSet` index for `ty`, or `.none` when `ty` is
/// not interned (`ip_index == .none`) or is not an inferred error set type
/// (the intern pool lookup decides the latter).
pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) InferredErrorSet.OptionalIndex {
    if (ty.ip_index == .none) return .none;
    return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern());
}
pub fn funcOwnerDeclPtr(mod: *Module, func_index: InternPool.Index) *Decl {
return mod.declPtr(mod.funcOwnerDeclIndex(func_index));
}
@ -6974,6 +6878,10 @@ pub fn funcOwnerDeclIndex(mod: *Module, func_index: InternPool.Index) Decl.Index
return mod.funcInfo(func_index).owner_decl;
}
/// Given the intern pool index of an inferred error set type, returns the
/// index of the function body value it originates from. Thin wrapper over
/// the intern pool's accessor of the same name.
pub fn iesFuncIndex(mod: *const Module, ies_index: InternPool.Index) InternPool.Index {
    return mod.intern_pool.iesFuncIndex(ies_index);
}
pub fn funcInfo(mod: *Module, func_index: InternPool.Index) InternPool.Key.Func {
return mod.intern_pool.indexToKey(func_index).func;
}
@ -7040,19 +6948,3 @@ pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0]
else => unreachable,
};
}
/// Reports whether `func` returns an inferred error set, determined from the
/// ZIR instruction that declared the function:
/// - `.func` never infers its error set;
/// - `.func_inferred` always does;
/// - `.func_fancy` stores the answer in its `is_inferred_error` bit.
/// Asserts (via `unreachable`) that `zir_body_inst` is one of those tags.
pub fn hasInferredErrorSet(mod: *Module, func: InternPool.Key.Func) bool {
    const owner_decl = mod.declPtr(func.owner_decl);
    const zir = owner_decl.getFileScope(mod).zir;
    const zir_tags = zir.instructions.items(.tag);
    switch (zir_tags[func.zir_body_inst]) {
        .func => return false,
        .func_inferred => return true,
        .func_fancy => {
            // The flag lives in the FuncFancy extra data, reached through
            // the instruction's pl_node payload index.
            const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node;
            const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
            return extra.data.bits.is_inferred_error;
        },
        else => unreachable,
    }
}

View file

@ -38,6 +38,10 @@ error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
/// generic function which uses a type expression for the return type.
/// The type will be `void` in the case that `func` is `null`.
fn_ret_ty: Type,
/// In case of the return type being an error union with an inferred error
/// set, this is the inferred error set. `null` otherwise. Allocated with
/// `Sema.arena`.
fn_ret_ty_ies: ?*InferredErrorSet,
branch_quota: u32 = default_branch_quota,
branch_count: u32 = 0,
/// Populated when returning `error.ComptimeBreak`. Used to communicate the
@ -128,6 +132,46 @@ const Alignment = InternPool.Alignment;
pub const default_branch_quota = 1000;
pub const default_reference_trace_len = 2;
/// Working memory for an inferred error set while the corresponding function
/// body is being analyzed. Allocated from `Sema.arena`, so it expires when
/// that Sema is finished (see `fn_ret_ty_ies`).
pub const InferredErrorSet = struct {
    /// The function body from which this error set originates.
    func: InternPool.Index,
    /// All currently known errors that this error set contains. This includes
    /// direct additions via `return error.Foo;`, and possibly also errors that
    /// are returned from any dependent functions. When the inferred error set is
    /// fully resolved, this map contains all the errors that the function might return.
    errors: NameMap = .{},
    /// Other inferred error sets which this inferred error set should include.
    inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},

    /// Insertion-ordered set of error names (the `void` values carry no data).
    pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);

    /// Folds the error set type `err_set_ty` into this inferred error set:
    /// - `anyerror` marks the owning function's resolved IES as
    ///   `anyerror_type` directly in the intern pool;
    /// - a concrete error set contributes each of its names to `errors`;
    /// - another inferred error set is recorded as a dependency in
    ///   `inferred_error_sets` rather than being expanded now.
    /// Asserts (via `unreachable`) that `err_set_ty` is an error set type.
    /// `arena` should be the Sema arena that owns this struct.
    pub fn addErrorSet(
        self: *InferredErrorSet,
        err_set_ty: Type,
        ip: *InternPool,
        arena: Allocator,
    ) !void {
        switch (err_set_ty.toIntern()) {
            .anyerror_type => {
                ip.funcIesResolved(self.func).* = .anyerror_type;
            },
            else => switch (ip.indexToKey(err_set_ty.toIntern())) {
                .error_set_type => |error_set_type| {
                    for (error_set_type.names.get(ip)) |name| {
                        try self.errors.put(arena, name, {});
                    }
                },
                .inferred_error_set_type => {
                    try self.inferred_error_sets.put(arena, err_set_ty.toIntern(), {});
                },
                else => unreachable,
            },
        }
    }
};
/// Stores the mapping from `Zir.Inst.Index -> Air.Inst.Ref`, which is used by sema to resolve
/// instructions during analysis.
/// Instead of a hash table approach, InstMap is simply a slice that is indexed into using the
@ -1120,7 +1164,7 @@ fn analyzeBodyInner(
.shl_sat => try sema.zirShl(block, inst, .shl_sat),
.ret_ptr => try sema.zirRetPtr(block),
.ret_type => try sema.addType(sema.fn_ret_ty),
.ret_type => Air.internedToRef(sema.fn_ret_ty.toIntern()),
// Instructions that we know to *always* be noreturn based solely on their tag.
// These functions match the return type of analyzeBody so that we can
@ -3392,7 +3436,7 @@ fn zirErrorSetDecl(
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);
var names: Module.InferredErrorSet.NameMap = .{};
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len);
var extra_index = @as(u32, @intCast(extra.end));
@ -6933,12 +6977,10 @@ fn analyzeCall(
.return_type = owner_info.return_type,
.comptime_bits = 0,
.noalias_bits = owner_info.noalias_bits,
.alignment = owner_info.alignment,
.cc = owner_info.cc,
.alignment = if (owner_info.align_is_generic) null else owner_info.alignment,
.cc = if (owner_info.cc_is_generic) null else owner_info.cc,
.is_var_args = owner_info.is_var_args,
.is_noinline = owner_info.is_noinline,
.align_is_generic = owner_info.align_is_generic,
.cc_is_generic = owner_info.cc_is_generic,
.section_is_generic = owner_info.section_is_generic,
.addrspace_is_generic = owner_info.addrspace_is_generic,
.is_generic = owner_info.is_generic,
@ -7001,21 +7043,25 @@ fn analyzeCall(
try sema.resolveInst(fn_info.ret_ty_ref);
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
// Create a fresh inferred error set type for inline/comptime calls.
const fn_ret_ty = blk: {
if (mod.hasInferredErrorSet(module_fn)) {
const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
.func = module_fn_index,
});
const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
}
break :blk bare_return_type;
};
new_fn_info.return_type = fn_ret_ty.toIntern();
const parent_fn_ret_ty = sema.fn_ret_ty;
sema.fn_ret_ty = fn_ret_ty;
const parent_fn_ret_ty_ies = sema.fn_ret_ty_ies;
sema.fn_ret_ty = bare_return_type;
sema.fn_ret_ty_ies = null;
defer sema.fn_ret_ty = parent_fn_ret_ty;
defer sema.fn_ret_ty_ies = parent_fn_ret_ty_ies;
if (module_fn.analysis(ip).inferred_error_set) {
// Create a fresh inferred error set type for inline/comptime calls.
const error_set_ty = try mod.intern(.{ .inferred_error_set_type = module_fn_index });
const ies = try sema.arena.create(InferredErrorSet);
ies.* = .{ .func = module_fn_index };
sema.fn_ret_ty_ies = ies;
sema.fn_ret_ty = (try ip.get(gpa, .{ .error_union_type = .{
.error_set_type = error_set_ty,
.payload_type = bare_return_type.toIntern(),
} })).toType();
ip.funcIesResolved(module_fn_index).* = .none;
}
// This `res2` is here instead of directly breaking from `res` due to a stage1
// bug generating invalid LLVM IR.
@ -7059,7 +7105,7 @@ fn analyzeCall(
}
if (is_comptime_call and ensure_result_used) {
try sema.ensureResultUsed(block, fn_ret_ty, call_src);
try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src);
}
const result = result: {
@ -7089,7 +7135,7 @@ fn analyzeCall(
if (should_memoize and is_comptime_call) {
const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, "");
const result_interned = try result_val.intern(fn_ret_ty, mod);
const result_interned = try result_val.intern(sema.fn_ret_ty, mod);
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
@ -7114,7 +7160,7 @@ fn analyzeCall(
if (i < fn_params_len) {
const opts: CoerceOpts = .{ .param_src = .{
.func_inst = func,
.param_i = @as(u32, @intCast(i)),
.param_i = @intCast(i),
} };
const param_ty = func_ty_info.param_types.get(ip)[i].toType();
args[i] = sema.analyzeCallArg(
@ -7433,6 +7479,7 @@ fn instantiateGenericCall(
.owner_decl_index = sema.owner_decl_index,
.func_index = sema.owner_func_index,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_args = comptime_args,
.generic_owner = generic_owner,
@ -7769,6 +7816,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
defer tracy.end();
const mod = sema.mod;
const ip = &mod.intern_pool;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@ -7779,7 +7827,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
if (val.isUndef(mod)) {
return sema.addConstUndef(Type.err_int);
}
const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
const err_name = ip.indexToKey(val.toIntern()).err.name;
return sema.addConstant(try mod.intValue(
Type.err_int,
try mod.getErrorValue(err_name),
@ -7787,17 +7835,19 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
}
const op_ty = sema.typeOf(uncasted_operand);
try sema.resolveInferredErrorSetTy(block, src, op_ty);
if (!op_ty.isAnyError(mod)) {
const names = op_ty.errorSetNames(mod);
switch (try sema.resolveInferredErrorSetTy(block, src, op_ty.toIntern())) {
.anyerror_type => {},
else => |err_set_ty_index| {
const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
switch (names.len) {
0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)),
1 => {
const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(names[0]).?));
const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?);
return sema.addIntUnsigned(Type.err_int, int);
},
else => {},
}
},
}
try sema.requireRuntimeBlock(block, src, operand_src);
@ -7846,6 +7896,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
defer tracy.end();
const mod = sema.mod;
const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
@ -7874,23 +7925,25 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
return Air.Inst.Ref.anyerror_type;
}
if (mod.typeToInferredErrorSetIndex(lhs_ty).unwrap()) |ies_index| {
try sema.resolveInferredErrorSet(block, src, ies_index);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (lhs_ty.isAnyError(mod)) {
return Air.Inst.Ref.anyerror_type;
if (ip.isInferredErrorSetType(lhs_ty.toIntern())) {
switch (try sema.resolveInferredErrorSet(block, src, lhs_ty.toIntern())) {
// isAnyError might have changed from a false negative to a true
// positive after resolution.
.anyerror_type => return .anyerror_type,
else => {},
}
}
if (mod.typeToInferredErrorSetIndex(rhs_ty).unwrap()) |ies_index| {
try sema.resolveInferredErrorSet(block, src, ies_index);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (rhs_ty.isAnyError(mod)) {
return Air.Inst.Ref.anyerror_type;
if (ip.isInferredErrorSetType(rhs_ty.toIntern())) {
switch (try sema.resolveInferredErrorSet(block, src, rhs_ty.toIntern())) {
// isAnyError might have changed from a false negative to a true
// positive after resolution.
.anyerror_type => return .anyerror_type,
else => {},
}
}
const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty);
return sema.addType(err_set_ty);
return Air.internedToRef(err_set_ty.toIntern());
}
fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -8569,6 +8622,12 @@ fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc:
}
}
/// Resolved `linksection` specification for a function declaration:
/// generic (depends on comptime state), the default section, or an
/// explicitly named section.
const Section = union(enum) {
    generic,
    default,
    explicit: InternPool.NullTerminatedString,
};
fn funcCommon(
sema: *Sema,
block: *Block,
@ -8578,7 +8637,7 @@ fn funcCommon(
alignment: ?Alignment,
/// null means generic poison
address_space: ?std.builtin.AddressSpace,
section: InternPool.GetFuncDeclKey.Section,
section: Section,
/// null means generic poison
cc: ?std.builtin.CallingConvention,
/// this might be Type.generic_poison
@ -8709,6 +8768,36 @@ fn funcCommon(
const param_types = block.params.items(.ty);
const opt_func_index: InternPool.Index = i: {
if (!is_source_decl) {
assert(has_body);
assert(!is_generic);
assert(comptime_bits == 0);
assert(cc != null);
assert(section != .generic);
assert(address_space != null);
assert(!var_args);
break :i try ip.getFuncInstance(gpa, .{
.param_types = param_types,
.noalias_bits = noalias_bits,
.bare_return_type = bare_return_type.toIntern(),
.cc = cc_resolved,
.alignment = alignment.?,
.is_noinline = is_noinline,
.inferred_error_set = inferred_error_set,
.generic_owner = sema.generic_owner,
});
}
// extern_func and func_decl functions take ownership of `sema.owner_decl`.
sema.owner_decl.@"linksection" = switch (section) {
.generic => .none,
.default => .none,
.explicit => |section_name| section_name.toOptional(),
};
sema.owner_decl.alignment = alignment orelse .none;
sema.owner_decl.@"addrspace" = address_space orelse .generic;
if (is_extern) {
assert(comptime_bits == 0);
assert(cc != null);
@ -8734,26 +8823,19 @@ fn funcCommon(
if (!has_body) break :i .none;
if (is_source_decl) {
if (inferred_error_set)
if (inferred_error_set) {
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
break :i try ip.getFuncDeclIes(gpa, .{
.owner_decl = sema.owner_decl_index,
const fn_owner_decl = if (sema.generic_owner != .none)
mod.funcOwnerDeclIndex(sema.generic_owner)
else
sema.owner_decl_index;
break :i try ip.getFuncDecl(gpa, .{
.fn_owner_decl = fn_owner_decl,
.param_types = param_types,
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
.return_type = bare_return_type.toIntern(),
.inferred_error_set = inferred_error_set,
.bare_return_type = bare_return_type.toIntern(),
.cc = cc,
.alignment = alignment,
.section = section,
.address_space = address_space,
.section_is_generic = section == .generic,
.addrspace_is_generic = address_space == null,
.is_var_args = var_args,
.is_generic = final_is_generic,
.is_noinline = is_noinline,
@ -8766,22 +8848,30 @@ fn funcCommon(
});
}
assert(!is_generic);
assert(comptime_bits == 0);
assert(cc != null);
assert(section != .generic);
assert(address_space != null);
assert(!var_args);
break :i try ip.getFuncInstance(gpa, .{
const func_ty = try ip.getFuncType(gpa, .{
.param_types = param_types,
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
.return_type = bare_return_type.toIntern(),
.cc = cc_resolved,
.alignment = alignment.?,
.cc = cc,
.alignment = alignment,
.section_is_generic = section == .generic,
.addrspace_is_generic = address_space == null,
.is_var_args = var_args,
.is_generic = final_is_generic,
.is_noinline = is_noinline,
});
.generic_owner = sema.generic_owner,
break :i try ip.getFuncDecl(gpa, .{
.owner_decl = sema.owner_decl_index,
.ty = func_ty,
.cc = cc,
.is_noinline = is_noinline,
.zir_body_inst = func_inst,
.lbrace_line = src_locs.lbrace_line,
.rbrace_line = src_locs.rbrace_line,
.lbrace_column = @as(u16, @truncate(src_locs.columns)),
.rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
});
};
@ -8913,10 +9003,8 @@ fn funcCommon(
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
.return_type = return_type.toIntern(),
.cc = cc_resolved,
.cc_is_generic = cc == null,
.alignment = alignment orelse .none,
.align_is_generic = alignment == null,
.cc = cc,
.alignment = alignment,
.section_is_generic = section == .generic,
.addrspace_is_generic = address_space == null,
.is_var_args = var_args,
@ -10254,7 +10342,7 @@ const SwitchProngAnalysis = struct {
return sema.bitCast(block, item_ty, spa.operand, operand_src, null);
}
var names: Module.InferredErrorSet.NameMap = .{};
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, case_vals.len);
for (case_vals) |err| {
const err_val = sema.resolveConstValue(block, .unneeded, err, "") catch unreachable;
@ -10622,9 +10710,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
}
}
try sema.resolveInferredErrorSetTy(block, src, operand_ty);
if (operand_ty.isAnyError(mod)) {
switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) {
.anyerror_type => {
if (special_prong != .@"else") {
return sema.fail(
block,
@ -10634,11 +10721,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
);
}
else_error_ty = Type.anyerror;
} else else_validation: {
},
else => |err_set_ty_index| else_validation: {
const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
var maybe_msg: ?*Module.ErrorMsg = null;
errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
for (operand_ty.errorSetNames(mod)) |error_name| {
for (error_names.get(ip)) |error_name| {
if (!seen_errors.contains(error_name) and special_prong != .@"else") {
const msg = maybe_msg orelse blk: {
maybe_msg = try sema.errMsg(
@ -10666,7 +10755,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
return sema.failWithOwnedErrorMsg(msg);
}
if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames(mod).len) {
if (special_prong == .@"else" and
seen_errors.count() == error_names.len)
{
// In order to enable common patterns for generic code allow simple else bodies
// else => unreachable,
// else => return,
@ -10702,10 +10793,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
);
}
const error_names = operand_ty.errorSetNames(mod);
var names: Module.InferredErrorSet.NameMap = .{};
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, error_names.len);
for (error_names) |error_name| {
for (error_names.get(ip)) |error_name| {
if (seen_errors.contains(error_name)) continue;
names.putAssumeCapacityNoClobber(error_name, {});
@ -10713,6 +10803,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
// No need to keep the hash map metadata correct; here we
// extract the (sorted) keys only.
else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
},
}
},
.Int, .ComptimeInt => {
@ -16444,17 +16535,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.queueFullTypeResolution(error_field_ty);
// If the error set is inferred it must be resolved at this point
try sema.resolveInferredErrorSetTy(block, src, ty);
// Build our list of Error values
// Optional value is only null if anyerror
// Value can be zero-length slice otherwise
const error_field_vals = if (ty.isAnyError(mod)) null else blk: {
const vals = try sema.arena.alloc(InternPool.Index, ty.errorSetNames(mod).len);
const error_field_vals = switch (try sema.resolveInferredErrorSetTy(block, src, ty.toIntern())) {
.anyerror_type => null,
else => |err_set_ty_index| blk: {
const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
const vals = try sema.arena.alloc(InternPool.Index, names.len);
for (vals, 0..) |*field_val, i| {
// TODO: write something like getCoercedInts to avoid needing to dupe
const name = try sema.arena.dupe(u8, ip.stringToSlice(ty.errorSetNames(mod)[i]));
const name = try sema.arena.dupe(u8, ip.stringToSlice(names.get(ip)[i]));
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
@ -16488,6 +16579,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
break :blk vals;
},
};
// Build our ?[]const Error value
@ -18055,7 +18147,9 @@ fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
const ip = &mod.intern_pool;
assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| {
if (ip.isInferredErrorSetType(sema.fn_ret_ty.errorUnionSet(mod).toIntern())) {
const ies = sema.fn_ret_ty_ies.?;
assert(ies.func == sema.func_index);
const op_ty = sema.typeOf(uncasted_operand);
switch (op_ty.zigTypeTag(mod)) {
.ErrorSet => try ies.addErrorSet(op_ty, ip, gpa),
@ -19508,7 +19602,7 @@ fn zirReify(
return sema.addType(Type.anyerror);
const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
var names: Module.InferredErrorSet.NameMap = .{};
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
const elem_val = try payload_val.elemValue(mod, i);
@ -20019,8 +20113,6 @@ fn zirReify(
.is_var_args = is_var_args,
.is_generic = false,
.is_noinline = false,
.align_is_generic = false,
.cc_is_generic = false,
.section_is_generic = false,
.addrspace_is_generic = false,
});
@ -20524,8 +20616,8 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
break :disjoint true;
}
try sema.resolveInferredErrorSetTy(block, src, dest_ty);
try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty);
_ = try sema.resolveInferredErrorSetTy(block, src, dest_ty.toIntern());
_ = try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty.toIntern());
for (dest_ty.errorSetNames(mod)) |dest_err_name| {
if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
break :disjoint false;
@ -23505,7 +23597,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
} else target_util.defaultAddressSpace(target, .function);
const section: InternPool.GetFuncDeclKey.Section = if (extra.data.bits.has_section_body) blk: {
const section: Section = if (extra.data.bits.has_section_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
@ -27750,42 +27842,22 @@ fn coerceInMemoryAllowedErrorSets(
return .ok;
}
if (mod.typeToInferredErrorSetIndex(dest_ty).unwrap()) |dst_ies_index| {
const dst_ies = mod.inferredErrorSetPtr(dst_ies_index);
// We will make an effort to return `ok` without resolving either error set, to
// avoid unnecessary "unable to resolve error set" dependency loop errors.
switch (src_ty.toIntern()) {
.anyerror_type => {},
else => switch (ip.indexToKey(src_ty.toIntern())) {
.inferred_error_set_type => |src_index| {
// If both are inferred error sets of functions, and
// the dest includes the source function, the coercion is OK.
// This check is important because it works without forcing a full resolution
// of inferred error sets.
if (dst_ies.inferred_error_sets.contains(src_index)) {
return .ok;
}
},
.error_set_type => |error_set_type| {
for (error_set_type.names) |name| {
if (!dst_ies.errors.contains(name)) break;
} else return .ok;
},
else => unreachable,
},
}
if (dst_ies.func == sema.owner_func_index) {
if (ip.isInferredErrorSetType(dest_ty.toIntern())) {
const dst_ies_func_index = ip.iesFuncIndex(dest_ty.toIntern());
if (sema.fn_ret_ty_ies) |dst_ies| {
if (dst_ies.func == dst_ies_func_index) {
// We are trying to coerce an error set to the current function's
// inferred error set.
try dst_ies.addErrorSet(src_ty, ip, gpa);
return .ok;
}
}
try sema.resolveInferredErrorSet(block, dest_src, dst_ies_index);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (dest_ty.isAnyError(mod)) {
return .ok;
switch (try sema.resolveInferredErrorSet(block, dest_src, dest_ty.toIntern())) {
// isAnyError might have changed from a false negative to a true
// positive after resolution.
.anyerror_type => return .ok,
else => {},
}
}
@ -27800,17 +27872,15 @@ fn coerceInMemoryAllowedErrorSets(
},
else => switch (ip.indexToKey(src_ty.toIntern())) {
.inferred_error_set_type => |src_index| {
const src_data = mod.inferredErrorSetPtr(src_index);
try sema.resolveInferredErrorSet(block, src_src, src_index);
.inferred_error_set_type => {
const resolved_src_ty = try sema.resolveInferredErrorSet(block, src_src, src_ty.toIntern());
// src anyerror status might have changed after the resolution.
if (src_ty.isAnyError(mod)) {
if (resolved_src_ty == .anyerror_type) {
// dest_ty.isAnyError(mod) == true is already checked for at this point.
return .from_anyerror;
}
for (src_data.errors.keys()) |key| {
for (ip.indexToKey(resolved_src_ty).error_set_type.names.get(ip)) |key| {
if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) {
try missing_error_buf.append(key);
}
@ -27825,7 +27895,7 @@ fn coerceInMemoryAllowedErrorSets(
return .ok;
},
.error_set_type => |error_set_type| {
for (error_set_type.names) |name| {
for (error_set_type.names.get(ip)) |name| {
if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) {
try missing_error_buf.append(name);
}
@ -30341,73 +30411,72 @@ fn analyzeIsNonErrComptimeOnly(
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
const operand_ty = sema.typeOf(operand);
const ot = operand_ty.zigTypeTag(mod);
if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true;
if (ot == .ErrorSet) return Air.Inst.Ref.bool_false;
if (ot != .ErrorSet and ot != .ErrorUnion) return .bool_true;
if (ot == .ErrorSet) return .bool_false;
assert(ot == .ErrorUnion);
const payload_ty = operand_ty.errorUnionPayload(mod);
if (payload_ty.zigTypeTag(mod) == .NoReturn) {
return Air.Inst.Ref.bool_false;
return .bool_false;
}
if (Air.refToIndex(operand)) |operand_inst| {
switch (sema.air_instructions.items(.tag)[operand_inst]) {
.wrap_errunion_payload => return Air.Inst.Ref.bool_true,
.wrap_errunion_err => return Air.Inst.Ref.bool_false,
.wrap_errunion_payload => return .bool_true,
.wrap_errunion_err => return .bool_false,
else => {},
}
} else if (operand == .undef) {
return sema.addConstUndef(Type.bool);
} else if (@intFromEnum(operand) < InternPool.static_len) {
// None of the ref tags can be errors.
return Air.Inst.Ref.bool_true;
return .bool_true;
}
const maybe_operand_val = try sema.resolveMaybeUndefVal(operand);
// exception if the error union error set is known to be empty,
// we allow the comparison but always make it comptime-known.
const set_ty = operand_ty.errorUnionSet(mod);
switch (set_ty.toIntern()) {
const set_ty = ip.errorUnionSet(operand_ty.toIntern());
switch (set_ty) {
.anyerror_type => {},
else => switch (mod.intern_pool.indexToKey(set_ty.toIntern())) {
else => switch (ip.indexToKey(set_ty)) {
.error_set_type => |error_set_type| {
if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true;
if (error_set_type.names.len == 0) return .bool_true;
},
.inferred_error_set_type => |ies_index| blk: {
.inferred_error_set_type => |func_index| blk: {
// If the error set is empty, we must return a comptime true or false.
// However we want to avoid unnecessarily resolving an inferred error set
// in case it is already non-empty.
const ies = mod.inferredErrorSetPtr(ies_index);
if (ies.is_anyerror) break :blk;
if (ies.errors.count() != 0) break :blk;
switch (ip.funcIesResolved(func_index).*) {
.anyerror_type => break :blk,
.none => {},
else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
}
if (maybe_operand_val == null) {
if (sema.fn_ret_ty_ies) |ies| if (ies.func == func_index) {
// Try to avoid resolving inferred error set if possible.
if (ies.errors.count() != 0) break :blk;
if (ies.is_anyerror) break :blk;
for (ies.inferred_error_sets.keys()) |other_ies_index| {
if (ies_index == other_ies_index) continue;
if (set_ty == other_ies_index) continue;
const other_resolved =
try sema.resolveInferredErrorSet(block, src, other_ies_index);
const other_ies = mod.inferredErrorSetPtr(other_ies_index);
if (other_ies.is_anyerror) {
ies.is_anyerror = true;
ies.is_resolved = true;
if (other_resolved == .anyerror_type) {
ip.funcIesResolved(func_index).* = .anyerror_type;
break :blk;
}
if (other_ies.errors.count() != 0) break :blk;
if (ip.indexToKey(other_resolved).error_set_type.names.len != 0)
break :blk;
}
if (ies.func == sema.owner_func_index) {
// We're checking the inferred errorset of the current function and none of
// its child inferred error sets contained any errors meaning that any value
// so far with this type can't contain errors either.
return Air.Inst.Ref.bool_true;
}
try sema.resolveInferredErrorSet(block, src, ies_index);
if (ies.is_anyerror) break :blk;
if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true;
return .bool_true;
};
const resolved_ty = try sema.resolveInferredErrorSet(block, src, set_ty);
if (resolved_ty == .anyerror_type)
break :blk;
if (ip.indexToKey(resolved_ty).error_set_type.names.len == 0)
return .bool_true;
}
},
else => unreachable,
@ -30419,12 +30488,12 @@ fn analyzeIsNonErrComptimeOnly(
return sema.addConstUndef(Type.bool);
}
if (err_union.getErrorName(mod) == .none) {
return Air.Inst.Ref.bool_true;
return .bool_true;
} else {
return Air.Inst.Ref.bool_false;
return .bool_false;
}
}
return Air.Inst.Ref.none;
return .none;
}
fn analyzeIsNonErr(
@ -31365,16 +31434,19 @@ fn wrapErrorUnionSet(
if (error_set_type.nameIndex(ip, expected_name) != null) break :ok;
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
.inferred_error_set_type => |ies_index| ok: {
const ies = mod.inferredErrorSetPtr(ies_index);
const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
.inferred_error_set_type => |func_index| ok: {
// We carefully do this in an order that avoids unnecessarily
// resolving the destination error set type.
if (ies.is_anyerror) break :ok;
if (ies.errors.contains(expected_name)) break :ok;
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok;
const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
switch (ip.funcIesResolved(func_index).*) {
.anyerror_type => break :ok,
.none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
break :ok;
},
else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) {
break :ok;
},
}
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
@ -32862,10 +32934,13 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
};
}
pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void {
pub fn resolveFnTypes(sema: *Sema, block: *Block, src: LazySrcLoc, fn_ty: Type) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
const fn_ty_info = mod.typeToFunc(fn_ty).?;
if (sema.fn_ret_ty_ies) |ies| try sema.resolveInferredErrorSetPtr(block, src, ies);
try sema.resolveTypeFully(fn_ty_info.return_type.toType());
if (mod.comp.bin_file.options.error_return_tracing and fn_ty_info.return_type.toType().isError(mod)) {
@ -33173,6 +33248,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@ -33223,6 +33299,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = undefined,
};
@ -33797,30 +33874,31 @@ fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_obj: *Module.Union) Compi
union_obj.status = .have_field_types;
}
/// Returns a normal error set corresponding to the fully populated inferred
/// error set.
fn resolveInferredErrorSet(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ies_index: Module.InferredErrorSet.Index,
) CompileError!void {
ies_index: InternPool.Index,
) CompileError!InternPool.Index {
const mod = sema.mod;
const ip = &mod.intern_pool;
const ies = mod.inferredErrorSetPtr(ies_index);
if (ies.is_resolved) return;
const func = mod.funcInfo(ies.func);
if (func.analysis(ip).state == .in_progress) {
const func_index = ip.iesFuncIndex(ies_index);
const func = mod.funcInfo(func_index);
const resolved_ty = func.resolvedErrorSet(ip).*;
if (resolved_ty != .none) return resolved_ty;
if (func.analysis(ip).state == .in_progress)
return sema.fail(block, src, "unable to resolve inferred error set", .{});
}
// In order to ensure that all dependencies are properly added to the set, we
// need to ensure the function body is analyzed of the inferred error set.
// However, in the case of comptime/inline function calls with inferred error sets,
// each call gets a new InferredErrorSet object, which contains the same
// `InternPool.Index`. Not only is the function not relevant to the inferred error set
// in this case, it may be a generic function which would cause an assertion failure
// if we called `ensureFuncBodyAnalyzed` on it here.
// In order to ensure that all dependencies are properly added to the set,
// we need to ensure the function body is analyzed of the inferred error
// set. However, in the case of comptime/inline function calls with
// inferred error sets, each call gets a new InferredErrorSet object, which
// contains the `InternPool.Index` of the callee. Not only is the function
// not relevant to the inferred error set in this case, it may be a generic
// function which would cause an assertion failure if we called
// `ensureFuncBodyAnalyzed` on it here.
const ies_func_owner_decl = mod.declPtr(func.owner_decl);
const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
// if ies declared by a inline function with generic return type, the return_type should be generic_poison,
@ -33828,7 +33906,7 @@ fn resolveInferredErrorSet(
// so here we can simply skip this case.
if (ies_func_info.return_type == .generic_poison_type) {
assert(ies_func_info.cc == .Inline);
} else if (mod.typeToInferredErrorSet(ies_func_info.return_type.toType().errorUnionSet(mod)).? == ies) {
} else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) {
if (ies_func_info.is_generic) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{});
@ -33841,33 +33919,62 @@ fn resolveInferredErrorSet(
}
// In this case we are dealing with the actual InferredErrorSet object that
// corresponds to the function, not one created to track an inline/comptime call.
try sema.ensureFuncBodyAnalyzed(ies.func);
try sema.ensureFuncBodyAnalyzed(func_index);
}
ies.is_resolved = true;
// This will now have been resolved by the logic at the end of `Module.analyzeFnBody`
// which calls `resolveInferredErrorSetPtr`.
const final_resolved_ty = func.resolvedErrorSet(ip).*;
assert(final_resolved_ty != .none);
return final_resolved_ty;
}
fn resolveInferredErrorSetPtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ies: *InferredErrorSet,
) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
const func = mod.funcInfo(ies.func);
if (func.resolvedErrorSet(ip).* != .none) return;
const ies_index = ip.errorUnionSet(sema.fn_ret_ty.toIntern());
for (ies.inferred_error_sets.keys()) |other_ies_index| {
if (ies_index == other_ies_index) continue;
try sema.resolveInferredErrorSet(block, src, other_ies_index);
switch (try sema.resolveInferredErrorSet(block, src, other_ies_index)) {
.anyerror_type => {
func.resolvedErrorSet(ip).* = .anyerror_type;
return;
},
else => |error_set_ty_index| {
const names = ip.indexToKey(error_set_ty_index).error_set_type.names;
for (names.get(ip)) |name| {
try ies.errors.put(sema.arena, name, {});
}
},
}
}
const other_ies = mod.inferredErrorSetPtr(other_ies_index);
for (other_ies.errors.keys()) |key| {
try ies.errors.put(sema.gpa, key, {});
}
if (other_ies.is_anyerror)
ies.is_anyerror = true;
}
const resolved_error_set_ty = try mod.errorSetFromUnsortedNames(ies.errors.keys());
func.resolvedErrorSet(ip).* = resolved_error_set_ty.toIntern();
}
fn resolveInferredErrorSetTy(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ty: Type,
) CompileError!void {
ty: InternPool.Index,
) CompileError!InternPool.Index {
const mod = sema.mod;
if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| {
try sema.resolveInferredErrorSet(block, src, ies_index);
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty)) {
.error_set_type => return ty,
.inferred_error_set_type => return sema.resolveInferredErrorSet(block, src, ty),
else => unreachable,
}
}
@ -33937,6 +34044,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@ -34282,6 +34390,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@ -34893,6 +35002,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.var_args_param_type,
.none,
=> unreachable,
_ => switch (mod.intern_pool.items.items(.tag)[@intFromEnum(ty.toIntern())]) {
.type_int_signed, // i0 handled above
.type_int_unsigned, // u0 handled above
@ -34901,6 +35011,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.type_optional, // ?noreturn handled above
.type_anyframe,
.type_error_union,
.type_anyerror_union,
.type_error_set,
.type_inferred_error_set,
.type_opaque,
@ -36354,7 +36465,7 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
const arena = sema.arena;
const lhs_names = lhs.errorSetNames(mod);
const rhs_names = rhs.errorSetNames(mod);
var names: Module.InferredErrorSet.NameMap = .{};
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(arena, lhs_names.len);
for (lhs_names) |name| {

View file

@ -6061,8 +6061,6 @@ pub const FuncGen = struct {
.is_var_args = false,
.is_generic = false,
.is_noinline = false,
.align_is_generic = false,
.cc_is_generic = false,
.section_is_generic = false,
.addrspace_is_generic = false,
});
@ -10657,30 +10655,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
}
fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false;
const return_type = fn_info.return_type.toType();
if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
const target = mod.getTarget();
switch (fn_info.cc) {
.Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod),
.Unspecified, .Inline => return isByRef(return_type, mod),
.C => switch (target.cpu.arch) {
.mips, .mipsel => return false,
.x86_64 => switch (target.os.tag) {
.windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
.windows => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
else => return firstParamSRetSystemV(return_type, mod),
},
.wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect,
.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) {
.wasm32 => return wasm_c_abi.classifyType(return_type, mod)[0] == .indirect,
.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(return_type, mod) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
.memory, .i64_array => return true,
.i32_array => |size| return size != 1,
.byval => return false,
},
.riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
.riscv32, .riscv64 => return riscv_c_abi.classifyType(return_type, mod) == .memory,
else => return false, // TODO investigate C ABI for other architectures
},
.SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
.Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
.Stdcall => return !isScalar(mod, fn_info.return_type.toType()),
.SysV => return firstParamSRetSystemV(return_type, mod),
.Win64 => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
.Stdcall => return !isScalar(mod, return_type),
else => return false,
}
}

View file

@ -1043,6 +1043,7 @@ pub fn commitDeclState(
var dbg_line_buffer = &decl_state.dbg_line;
var dbg_info_buffer = &decl_state.dbg_info;
const decl = mod.declPtr(decl_index);
const ip = &mod.intern_pool;
const target_endian = self.target.cpu.arch.endian();
@ -1241,20 +1242,9 @@ pub fn commitDeclState(
while (sym_index < decl_state.abbrev_table.items.len) : (sym_index += 1) {
const symbol = &decl_state.abbrev_table.items[sym_index];
const ty = symbol.type;
const deferred: bool = blk: {
if (ty.isAnyError(mod)) break :blk true;
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.inferred_error_set_type => |ies_index| {
const ies = mod.inferredErrorSetPtr(ies_index);
if (!ies.is_resolved) break :blk true;
},
else => {},
}
break :blk false;
};
if (deferred) continue;
if (ip.isErrorSetType(ty.toIntern())) continue;
symbol.offset = @as(u32, @intCast(dbg_info_buffer.items.len));
symbol.offset = @intCast(dbg_info_buffer.items.len);
try decl_state.addDbgInfoType(mod, di_atom_index, ty);
}
}
@ -1265,18 +1255,7 @@ pub fn commitDeclState(
if (reloc.target) |target| {
const symbol = decl_state.abbrev_table.items[target];
const ty = symbol.type;
const deferred: bool = blk: {
if (ty.isAnyError(mod)) break :blk true;
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.inferred_error_set_type => |ies_index| {
const ies = mod.inferredErrorSetPtr(ies_index);
if (!ies.is_resolved) break :blk true;
},
else => {},
}
break :blk false;
};
if (deferred) {
if (ip.isErrorSetType(ty.toIntern())) {
log.debug("resolving %{d} deferred until flush", .{target});
try self.global_abbrev_relocs.append(gpa, .{
.target = null,
@ -2505,18 +2484,18 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
defer arena_alloc.deinit();
const arena = arena_alloc.allocator();
// TODO: don't create a zig type for this, just make the dwarf info
// without touching the zig type system.
const names = try arena.dupe(InternPool.NullTerminatedString, module.global_error_set.keys());
std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan);
const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } });
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSet(module, error_ty.toType(), self.target, &dbg_info_buffer);
try addDbgInfoErrorSetNames(
module,
Type.anyerror,
module.global_error_set.keys(),
self.target,
&dbg_info_buffer,
);
const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len)));
try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len));
log.debug("writeDeclDebugInfo in flushModule", .{});
try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
@ -2633,6 +2612,17 @@ fn addDbgInfoErrorSet(
ty: Type,
target: std.Target,
dbg_info_buffer: *std.ArrayList(u8),
) !void {
return addDbgInfoErrorSetNames(mod, ty, ty.errorSetNames(mod), target, dbg_info_buffer);
}
fn addDbgInfoErrorSetNames(
mod: *Module,
/// Used for printing the type name only.
ty: Type,
error_names: []const InternPool.NullTerminatedString,
target: std.Target,
dbg_info_buffer: *std.ArrayList(u8),
) !void {
const target_endian = target.cpu.arch.endian();
@ -2655,7 +2645,6 @@ fn addDbgInfoErrorSet(
// DW.AT.const_value, DW.FORM.data8
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);
const error_names = ty.errorSetNames(mod);
for (error_names) |error_name_ip| {
const int = try mod.getErrorValue(error_name_ip);
const error_name = mod.intern_pool.stringToSlice(error_name_ip);

View file

@ -251,20 +251,19 @@ pub const Type = struct {
return;
},
.inferred_error_set_type => |index| {
const ies = mod.inferredErrorSetPtr(index);
const func = ies.func;
const func = mod.iesFuncIndex(index);
try writer.writeAll("@typeInfo(@typeInfo(@TypeOf(");
const owner_decl = mod.funcOwnerDeclPtr(func);
try owner_decl.renderFullyQualifiedName(mod, writer);
try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
},
.error_set_type => |error_set_type| {
const ip = &mod.intern_pool;
const names = error_set_type.names;
try writer.writeAll("error{");
for (names, 0..) |name, i| {
for (names.get(ip), 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.print("{}", .{name.fmt(&mod.intern_pool)});
try writer.print("{}", .{name.fmt(ip)});
}
try writer.writeAll("}");
},
@ -2051,21 +2050,19 @@ pub const Type = struct {
/// Asserts that the type is an error union.
pub fn errorUnionSet(ty: Type, mod: *Module) Type {
return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.error_set_type.toType();
return mod.intern_pool.errorUnionSet(ty.toIntern()).toType();
}
/// Returns false for unresolved inferred error sets.
pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ty.toIntern()) {
.anyerror_type => false,
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
else => switch (ip.indexToKey(ty.toIntern())) {
.error_set_type => |error_set_type| error_set_type.names.len == 0,
.inferred_error_set_type => |index| {
const inferred_error_set = mod.inferredErrorSetPtr(index);
// Can't know for sure.
if (!inferred_error_set.is_resolved) return false;
if (inferred_error_set.is_anyerror) return false;
return inferred_error_set.errors.count() == 0;
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.none, .anyerror_type => false,
else => |t| ip.indexToKey(t).error_set_type.names.len == 0,
},
else => unreachable,
},
@ -2076,10 +2073,11 @@ pub const Type = struct {
/// Note that the result may be a false negative if the type did not get error set
/// resolution prior to this call.
pub fn isAnyError(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ty.toIntern()) {
.anyerror_type => true,
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror,
.inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type,
else => false,
},
};
@ -2103,13 +2101,11 @@ pub const Type = struct {
return switch (ty) {
.anyerror_type => true,
else => switch (ip.indexToKey(ty)) {
.error_set_type => |error_set_type| {
return error_set_type.nameIndex(ip, name) != null;
},
.inferred_error_set_type => |index| {
const ies = ip.inferredErrorSetPtrConst(index);
if (ies.is_anyerror) return true;
return ies.errors.contains(name);
.error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null,
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.anyerror_type => true,
.none => false,
else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null,
},
else => unreachable,
},
@ -2129,12 +2125,14 @@ pub const Type = struct {
const field_name_interned = ip.getString(name).unwrap() orelse return false;
return error_set_type.nameIndex(ip, field_name_interned) != null;
},
.inferred_error_set_type => |index| {
const ies = ip.inferredErrorSetPtr(index);
if (ies.is_anyerror) return true;
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.anyerror_type => true,
.none => false,
else => |t| {
// If the string is not interned, then the field certainly is not present.
const field_name_interned = ip.getString(name).unwrap() orelse return false;
return ies.errors.contains(field_name_interned);
return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null;
},
},
else => unreachable,
},
@ -2943,14 +2941,15 @@ pub const Type = struct {
}
// Asserts that `ty` is an error set and not `anyerror`.
// Asserts that `ty` is resolved if it is an inferred error set.
pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.error_set_type => |x| x.names,
.inferred_error_set_type => |index| {
const inferred_error_set = mod.inferredErrorSetPtr(index);
assert(inferred_error_set.is_resolved);
assert(!inferred_error_set.is_anyerror);
return inferred_error_set.errors.keys();
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.error_set_type => |x| x.names.get(ip),
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.none => unreachable, // unresolved inferred error set
.anyerror_type => unreachable,
else => |t| ip.indexToKey(t).error_set_type.names.get(ip),
},
else => unreachable,
};