Sema: move inferred_alloc_const/mut_type to InternPool

Now, all types are migrated to use `InternPool`. The `Type.Tag` enum is
deleted in this commit.
Andrew Kelley 2023-05-20 17:55:40 -07:00
parent 65d65f5dda
commit 7e19c95668
11 changed files with 601 additions and 873 deletions
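
The pattern repeated throughout the diff below: a `Type` is now just a wrapper around an `InternPool.Index`, so the inferred-alloc marker types are constructed as index literals (`.{ .ip_index = .inferred_alloc_const_type }`) instead of `Type.initTag(...)` payloads, and consumers switch on `ty.toIntern()` instead of `ty.tag()`. A minimal sketch of the new shape, using hypothetical stand-in declarations for `InternPool.Index` and `Type` (the real ones live in the compiler and carry many more members):

```zig
const std = @import("std");

// Stand-in for InternPool.Index: interned types are plain enum(u32) handles.
const Index = enum(u32) {
    inferred_alloc_const_type,
    inferred_alloc_mut_type,
    _,
};

// Stand-in for the new Type: the interned index is the whole representation;
// the old `legacy` tag/payload field is gone.
const Type = struct {
    ip_index: Index,

    fn toIntern(ty: Type) Index {
        return ty.ip_index;
    }
};

test "switch on the interned index instead of a Type.Tag" {
    const ty = Type{ .ip_index = .inferred_alloc_const_type };
    const var_is_mut = switch (ty.toIntern()) {
        .inferred_alloc_const_type => false,
        .inferred_alloc_mut_type => true,
        else => unreachable,
    };
    try std.testing.expect(!var_is_mut);
}
```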


@@ -905,6 +905,8 @@ pub const Inst = struct {
const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type),
anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type),
inferred_alloc_mut_type = @enumToInt(InternPool.Index.inferred_alloc_mut_type),
empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
undef = @enumToInt(InternPool.Index.undef),
zero = @enumToInt(InternPool.Index.zero),


@@ -959,6 +959,8 @@ pub const Index = enum(u32) {
const_slice_u8_sentinel_0_type,
anyerror_void_error_union_type,
generic_poison_type,
inferred_alloc_const_type,
inferred_alloc_mut_type,
/// `@TypeOf(.{})`
empty_struct_type,
@@ -1009,10 +1011,7 @@ pub const Index = enum(u32) {
pub fn toType(i: Index) @import("type.zig").Type {
assert(i != .none);
return .{
.ip_index = i,
.legacy = undefined,
};
return .{ .ip_index = i };
}
pub fn toValue(i: Index) @import("value.zig").Value {
@@ -1195,6 +1194,10 @@ pub const static_keys = [_]Key{
// generic_poison_type
.{ .simple_type = .generic_poison },
// inferred_alloc_const_type
.{ .simple_type = .inferred_alloc_const },
// inferred_alloc_mut_type
.{ .simple_type = .inferred_alloc_mut },
// empty_struct_type
.{ .anon_struct_type = .{
@@ -1568,6 +1571,12 @@ pub const SimpleType = enum(u32) {
type_info,
generic_poison,
/// TODO: remove this from `SimpleType`; instead make it only a special `Index` tag like
/// `var_args_param_type`.
inferred_alloc_const,
/// TODO: remove this from `SimpleType`; instead make it only a special `Index` tag like
/// `var_args_param_type`.
inferred_alloc_mut,
};
pub const SimpleValue = enum(u32) {


@@ -6818,7 +6818,7 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
}
pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
const info = ptr_ty.ptrInfoIp(mod.intern_pool);
const info = Type.ptrInfoIp(mod.intern_pool, ptr_ty.toIntern());
return mod.ptrType(.{
.elem_type = new_child.toIntern(),


@@ -904,10 +904,10 @@ fn analyzeBodyInner(
const air_inst: Air.Inst.Ref = switch (tags[inst]) {
// zig fmt: off
.alloc => try sema.zirAlloc(block, inst),
.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)),
.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)),
.alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_const)),
.alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_mut)),
.alloc_inferred => try sema.zirAllocInferred(block, inst, .{ .ip_index = .inferred_alloc_const_type }),
.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, .{ .ip_index = .inferred_alloc_mut_type }),
.alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, .{ .ip_index = .inferred_alloc_const_type }),
.alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, .{ .ip_index = .inferred_alloc_mut_type }),
.alloc_mut => try sema.zirAllocMut(block, inst),
.alloc_comptime_mut => try sema.zirAllocComptime(block, inst),
.make_ptr_const => try sema.zirMakePtrConst(block, inst),
@@ -3471,9 +3471,9 @@ fn zirAllocExtended(
} else 0;
const inferred_alloc_ty = if (small.is_const)
Type.initTag(.inferred_alloc_const)
Type{ .ip_index = .inferred_alloc_const_type }
else
Type.initTag(.inferred_alloc_mut);
Type{ .ip_index = .inferred_alloc_mut_type };
if (block.is_comptime or small.is_comptime) {
if (small.has_type) {
@@ -3707,9 +3707,10 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload;
const ptr_val = sema.air_values.items[value_index];
const var_is_mut = switch (sema.typeOf(ptr).tag()) {
.inferred_alloc_const => false,
.inferred_alloc_mut => true,
const var_is_mut = switch (sema.typeOf(ptr).toIntern()) {
.inferred_alloc_const_type => false,
.inferred_alloc_mut_type => true,
else => unreachable,
};
const target = sema.mod.getTarget();
@@ -7451,7 +7452,7 @@ fn instantiateGenericCall(
};
arg_val.hashUncoerced(arg_ty, &hasher, mod);
if (is_anytype) {
arg_ty.hashWithHasher(&hasher, mod);
std.hash.autoHash(&hasher, arg_ty.toIntern());
generic_args[i] = .{
.ty = arg_ty,
.val = arg_val,
@@ -7465,7 +7466,7 @@
};
}
} else if (is_anytype) {
arg_ty.hashWithHasher(&hasher, mod);
std.hash.autoHash(&hasher, arg_ty.toIntern());
generic_args[i] = .{
.ty = arg_ty,
.val = Value.generic_poison,
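
For `anytype` arguments in `instantiateGenericCall`, hashing a type no longer needs the bespoke `Type.hashWithHasher`: two interned types are identical exactly when their indices are equal, so `std.hash.autoHash` over `arg_ty.toIntern()` suffices. A small sketch of that property, with a stand-in `Index` type:

```zig
const std = @import("std");

const Index = enum(u32) { _ }; // stand-in for InternPool.Index

// Hashing a type is now just hashing its 32-bit interned index.
fn hashType(hasher: *std.hash.Wyhash, ty: Index) void {
    std.hash.autoHash(hasher, ty);
}

test "identical interned indices produce identical hashes" {
    var a = std.hash.Wyhash.init(0);
    var b = std.hash.Wyhash.init(0);
    hashType(&a, @intToEnum(Index, 7));
    hashType(&b, @intToEnum(Index, 7));
    try std.testing.expectEqual(a.final(), b.final());
}
```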
@@ -8233,7 +8234,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code));
return sema.addConstant(
.{ .ip_index = .enum_literal_type, .legacy = undefined },
.{ .ip_index = .enum_literal_type },
try Value.Tag.enum_literal.create(sema.arena, duped_name),
);
}
@@ -13278,9 +13279,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const rhs_val = maybe_rhs_val orelse unreachable;
const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
if (!rem.compareAllWithZero(.eq, mod)) {
return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{
@tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod),
});
return sema.fail(
block,
src,
"ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'",
.{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(resolved_type, mod) },
);
}
}
@@ -13386,7 +13390,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const air_tag = if (is_int) blk: {
if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) });
return sema.fail(
block,
src,
"division with '{}' and '{}': signed integers must use @divTrunc, @divFloor, or @divExact",
.{ lhs_ty.fmt(mod), rhs_ty.fmt(mod) },
);
}
break :blk Air.Inst.Tag.div_trunc;
} else switch (block.float_mode) {
@@ -23367,7 +23376,7 @@ fn validateRunTimeType(
};
}
const TypeSet = std.HashMapUnmanaged(Type, void, Type.HashContext64, std.hash_map.default_max_load_percentage);
const TypeSet = std.AutoHashMapUnmanaged(InternPool.Index, void);
fn explainWhyTypeIsComptime(
sema: *Sema,
@@ -23453,7 +23462,7 @@ fn explainWhyTypeIsComptimeInner(
},
.Struct => {
if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;
if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
if (mod.typeToStruct(ty)) |struct_obj| {
for (struct_obj.fields.values(), 0..) |field, i| {
@@ -23472,7 +23481,7 @@
},
.Union => {
if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;
if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
if (mod.typeToUnion(ty)) |union_obj| {
for (union_obj.fields.values(), 0..) |field, i| {
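
The `TypeSet` used for cycle detection gets the same treatment: keyed by `InternPool.Index`, it needs no hash context, and the `getOrPutContext` calls shrink to plain `getOrPut`. A sketch of the visited-set idiom, again with a stand-in `Index`:

```zig
const std = @import("std");

const Index = enum(u32) { _ }; // stand-in for InternPool.Index

const TypeSet = std.AutoHashMapUnmanaged(Index, void);

test "getOrPut doubles as a visited check" {
    const gpa = std.testing.allocator;
    var type_set = TypeSet{};
    defer type_set.deinit(gpa);

    const ty = @intToEnum(Index, 42);
    // First visit: not yet present, so recursion into the type's fields proceeds.
    try std.testing.expect(!(try type_set.getOrPut(gpa, ty)).found_existing);
    // Revisit: already present, so the walk stops and cycles terminate.
    try std.testing.expect((try type_set.getOrPut(gpa, ty)).found_existing);
}
```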
@@ -27459,8 +27468,8 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
// different behavior depending on whether the types were inferred.
// Something seems wrong here.
if (prev_ptr_ty.ip_index == .none) {
if (prev_ptr_ty.tag() == .inferred_alloc_mut) return null;
if (prev_ptr_ty.tag() == .inferred_alloc_const) return null;
if (prev_ptr_ty.ip_index == .inferred_alloc_mut_type) return null;
if (prev_ptr_ty.ip_index == .inferred_alloc_const_type) return null;
}
const prev_ptr_child_ty = prev_ptr_ty.childType(mod);
@@ -31677,6 +31686,9 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.enum_literal,
.type_info,
=> true,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
@@ -31931,6 +31943,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
.bool_false => unreachable,
.empty_struct => unreachable,
.generic_poison => unreachable,
.inferred_alloc_const_type => unreachable,
.inferred_alloc_mut_type => unreachable,
.type_info_type => return sema.getBuiltinType("Type"),
.extern_options_type => return sema.getBuiltinType("ExternOptions"),
@@ -33032,16 +33046,9 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
/// TODO assert the return value matches `ty.onePossibleValue`
pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
const mod = sema.mod;
switch (ty.ip_index) {
.empty_struct_type => return Value.empty_struct,
.none => switch (ty.tag()) {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
return switch (ty.ip_index) {
.empty_struct_type => Value.empty_struct,
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => |int_type| {
if (int_type.bits == 0) {
return try mod.intValue(ty, 0);
@@ -33123,6 +33130,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.undefined => Value.undef,
.generic_poison => return error.GenericPoison,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
const resolved_ty = try sema.resolveTypeFields(ty);
@@ -33245,7 +33254,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.enum_tag => unreachable,
.aggregate => unreachable,
},
}
};
}
/// Returns the type of the AIR instruction.
@@ -33563,16 +33572,15 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError
/// This logic must be kept in sync with `Type.isPtrLikeOptional`.
fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
const mod = sema.mod;
if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice => return null,
.C => return ptr_type.elem_type.toType(),
.One, .Many => return ty,
.Slice => null,
.C => ptr_type.elem_type.toType(),
.One, .Many => ty,
},
.opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice, .C => return null,
.Slice, .C => null,
.Many, .One => {
if (ptr_type.is_allowzero) return null;
@@ -33585,15 +33593,10 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
return payload_ty;
},
},
else => return null,
else => null,
},
else => return null,
else => null,
};
switch (ty.tag()) {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
}
}
/// `generic_poison` will return false.
@@ -33677,6 +33680,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.enum_literal,
.type_info,
=> true,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;


@@ -2112,6 +2112,8 @@ pub const Inst = struct {
const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type),
anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type),
inferred_alloc_mut_type = @enumToInt(InternPool.Index.inferred_alloc_mut_type),
empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
undef = @enumToInt(InternPool.Index.undef),
zero = @enumToInt(InternPool.Index.zero),


@@ -5367,116 +5367,111 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
// Ensure complete type definition is visible before accessing fields.
_ = try f.typeToIndex(struct_ty, .complete);
const field_name: CValue = switch (struct_ty.ip_index) {
.none => switch (struct_ty.tag()) {
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.struct_type => switch (struct_ty.containerLayout(mod)) {
.Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
.{ .field = extra.field_index }
else
.{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
.Packed => {
const struct_obj = mod.typeToStruct(struct_ty).?;
const int_info = struct_ty.intInfo(mod);
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
const field_int_signedness = if (inst_ty.isAbiInt(mod))
inst_ty.intInfo(mod).signedness
else
.unsigned;
const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
try writer.writeAll(" = zig_wrap_");
try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty);
try writer.writeAll("((");
try f.renderType(writer, field_int_ty);
try writer.writeByte(')');
const cant_cast = int_info.bits > 64;
if (cant_cast) {
if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
if (bit_offset > 0) {
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
try f.writeCValue(writer, struct_byval, .Other);
if (bit_offset > 0) {
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
}
if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
try writer.writeAll(");\n");
if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
const local = try f.allocLocal(inst, inst_ty);
try writer.writeAll("memcpy(");
try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument);
try writer.writeAll(", sizeof(");
try f.renderType(writer, inst_ty);
try writer.writeAll("));\n");
try freeLocal(f, inst, temp_local.new_local, 0);
return local;
},
},
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0)
const field_name: CValue = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.struct_type => switch (struct_ty.containerLayout(mod)) {
.Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
.{ .field = extra.field_index }
else
.{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
.Packed => {
const struct_obj = mod.typeToStruct(struct_ty).?;
const int_info = struct_ty.intInfo(mod);
.union_type => |union_type| field_name: {
const union_obj = mod.unionPtr(union_type.index);
if (union_obj.layout == .Packed) {
const operand_lval = if (struct_byval == .constant) blk: {
const operand_local = try f.allocLocal(inst, struct_ty);
try f.writeCValue(writer, operand_local, .Other);
try writer.writeAll(" = ");
try f.writeCValue(writer, struct_byval, .Initializer);
try writer.writeAll(";\n");
break :blk operand_local;
} else struct_byval;
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
const local = try f.allocLocal(inst, inst_ty);
try writer.writeAll("memcpy(&");
try f.writeCValue(writer, local, .Other);
try writer.writeAll(", &");
try f.writeCValue(writer, operand_lval, .Other);
try writer.writeAll(", sizeof(");
try f.renderType(writer, inst_ty);
try writer.writeAll("));\n");
const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
if (struct_byval == .constant) {
try freeLocal(f, inst, operand_lval.new_local, 0);
}
const field_int_signedness = if (inst_ty.isAbiInt(mod))
inst_ty.intInfo(mod).signedness
else
.unsigned;
const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
return local;
} else {
const name = union_obj.fields.keys()[extra.field_index];
break :field_name if (union_type.hasTag()) .{
.payload_identifier = name,
} else .{
.identifier = name,
};
const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
try writer.writeAll(" = zig_wrap_");
try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty);
try writer.writeAll("((");
try f.renderType(writer, field_int_ty);
try writer.writeByte(')');
const cant_cast = int_info.bits > 64;
if (cant_cast) {
if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
if (bit_offset > 0) {
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
try f.writeCValue(writer, struct_byval, .Other);
if (bit_offset > 0) {
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
}
if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
try writer.writeAll(");\n");
if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
const local = try f.allocLocal(inst, inst_ty);
try writer.writeAll("memcpy(");
try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument);
try writer.writeAll(", sizeof(");
try f.renderType(writer, inst_ty);
try writer.writeAll("));\n");
try freeLocal(f, inst, temp_local.new_local, 0);
return local;
},
else => unreachable,
},
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0)
.{ .field = extra.field_index }
else
.{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
.union_type => |union_type| field_name: {
const union_obj = mod.unionPtr(union_type.index);
if (union_obj.layout == .Packed) {
const operand_lval = if (struct_byval == .constant) blk: {
const operand_local = try f.allocLocal(inst, struct_ty);
try f.writeCValue(writer, operand_local, .Other);
try writer.writeAll(" = ");
try f.writeCValue(writer, struct_byval, .Initializer);
try writer.writeAll(";\n");
break :blk operand_local;
} else struct_byval;
const local = try f.allocLocal(inst, inst_ty);
try writer.writeAll("memcpy(&");
try f.writeCValue(writer, local, .Other);
try writer.writeAll(", &");
try f.writeCValue(writer, operand_lval, .Other);
try writer.writeAll(", sizeof(");
try f.renderType(writer, inst_ty);
try writer.writeAll("));\n");
if (struct_byval == .constant) {
try freeLocal(f, inst, operand_lval.new_local, 0);
}
return local;
} else {
const name = union_obj.fields.keys()[extra.field_index];
break :field_name if (union_type.hasTag()) .{
.payload_identifier = name,
} else .{
.identifier = name,
};
}
},
else => unreachable,
};
const local = try f.allocLocal(inst, inst_ty);


@@ -381,12 +381,7 @@ pub const Object = struct {
/// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we
/// want to iterate over it while adding entries to it.
pub const DITypeMap = std.ArrayHashMapUnmanaged(
Type,
AnnotatedDITypePtr,
Type.HashContext32,
true,
);
pub const DITypeMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, AnnotatedDITypePtr);
pub fn create(gpa: Allocator, options: link.Options) !*Object {
const obj = try gpa.create(Object);
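
`DITypeMap` keeps its array-map flavor for the reason the doc comment gives: `flushModule` iterates it while appending, and an array hash map supports stable index-based iteration during inserts. Only the key type and hash context change. A sketch of that iteration pattern, with a stand-in `Index` and `u32` standing in for `AnnotatedDITypePtr`:

```zig
const std = @import("std");

const Index = enum(u32) { _ }; // stand-in for InternPool.Index

test "array hash maps allow appending while iterating by index" {
    const gpa = std.testing.allocator;
    var map = std.AutoArrayHashMapUnmanaged(Index, u32){};
    defer map.deinit(gpa);

    try map.put(gpa, @intToEnum(Index, 0), 0);
    try map.put(gpa, @intToEnum(Index, 1), 1);

    // Walk by position, not by pointer: entries appended mid-walk land at
    // the end of the array and are visited before the loop finishes.
    var i: usize = 0;
    while (i < map.count()) : (i += 1) {
        if (i == 0) try map.put(gpa, @intToEnum(Index, 2), 2);
    }
    try std.testing.expectEqual(@as(usize, 3), map.count());
}
```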
@@ -1437,7 +1432,7 @@
const gpa = o.gpa;
// Be careful not to reference this `gop` variable after any recursive calls
// to `lowerDebugType`.
const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .mod = o.module });
const gop = try o.di_type_map.getOrPut(gpa, ty.toIntern());
if (gop.found_existing) {
const annotated = gop.value_ptr.*;
const di_type = annotated.toDIType();
@@ -1450,7 +1445,7 @@
};
return o.lowerDebugTypeImpl(entry, resolve, di_type);
}
errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .mod = o.module }));
errdefer assert(o.di_type_map.orderedRemove(ty.toIntern()));
const entry: Object.DITypeMap.Entry = .{
.key_ptr = gop.key_ptr,
.value_ptr = gop.value_ptr,
@@ -1465,7 +1460,7 @@
resolve: DebugResolveStatus,
opt_fwd_decl: ?*llvm.DIType,
) Allocator.Error!*llvm.DIType {
const ty = gop.key_ptr.*;
const ty = gop.key_ptr.toType();
const gpa = o.gpa;
const target = o.target;
const dib = o.di_builder.?;
@@ -1498,7 +1493,7 @@
const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
return enum_di_ty;
}
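
The recurring "can't use `gop` anymore" comments name a real invalidation hazard: `getOrPut` returns pointers into the map's backing storage, and any insertion performed by a recursive `lowerDebugType` call can reallocate that storage. Hence the pattern above: after recursing, the code stores the result with `put` by key rather than through the saved `gop.value_ptr`. A sketch of the hazard and the safe pattern, with stand-in types:

```zig
const std = @import("std");

const Index = enum(u32) { _ }; // stand-in for InternPool.Index

test "store by key after inserts that may invalidate gop pointers" {
    const gpa = std.testing.allocator;
    var map = std.AutoArrayHashMapUnmanaged(Index, u32){};
    defer map.deinit(gpa);

    const key = @intToEnum(Index, 1);
    const gop = try map.getOrPut(gpa, key);
    try std.testing.expect(!gop.found_existing);
    gop.value_ptr.* = 0; // placeholder, like the temporary fwd_decl

    // Simulate recursive lowering inserting more entries; the backing array
    // may grow and move, leaving `gop.value_ptr` dangling.
    var i: u32 = 2;
    while (i < 100) : (i += 1) try map.put(gpa, @intToEnum(Index, i), i);

    // Safe: address the entry by key again instead of through `gop`.
    try map.put(gpa, key, 0xf00d);
    try std.testing.expectEqual(@as(u32, 0xf00d), map.get(key).?);
}
```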
@@ -1558,7 +1553,7 @@
"",
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
return enum_di_ty;
},
.Float => {
@@ -1577,7 +1572,7 @@
},
.Pointer => {
// Normalize everything that the debug info does not represent.
const ptr_info = ty.ptrInfoIp(mod.intern_pool);
const ptr_info = Type.ptrInfoIp(mod.intern_pool, ty.toIntern());
if (ptr_info.sentinel != .none or
ptr_info.address_space != .generic or
@@ -1603,7 +1598,7 @@
});
const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
return ptr_di_ty;
}
@@ -1682,7 +1677,7 @@
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
}
@@ -1696,7 +1691,7 @@
name,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(ptr_di_ty));
return ptr_di_ty;
},
.Opaque => {
@@ -1718,7 +1713,7 @@
);
// The recursive call to `lowerDebugType` via `namespaceToDebugScope`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(opaque_di_ty));
return opaque_di_ty;
},
.Array => {
@@ -1729,7 +1724,7 @@
@intCast(c_int, ty.arrayLen(mod)),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty));
return array_di_ty;
},
.Vector => {
@ -1761,7 +1756,7 @@ pub const Object = struct {
ty.vectorLen(mod),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(vector_di_ty));
return vector_di_ty;
},
.Optional => {
@@ -1777,7 +1772,7 @@
if (ty.optionalReprIsPayload(mod)) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
return ptr_di_ty;
}
@@ -1850,7 +1845,7 @@
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.ErrorUnion => {
@ -1858,7 +1853,7 @@ pub const Object = struct {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(err_set_di_ty));
return err_set_di_ty;
}
const name = try ty.nameAlloc(gpa, o.module);
@@ -1941,7 +1936,7 @@
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.ErrorSet => {
@@ -2038,7 +2033,7 @@
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.struct_type => |struct_type| s: {
@@ -2057,7 +2052,7 @@
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty));
return struct_di_ty;
}
},
@@ -2070,7 +2065,7 @@
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty));
return struct_di_ty;
}
@@ -2126,7 +2121,7 @@
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.Union => {
@@ -2155,7 +2150,7 @@
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty));
return union_di_ty;
}
@@ -2182,7 +2177,7 @@
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
}
@@ -2241,7 +2236,7 @@
if (layout.tag_size == 0) {
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty));
return union_di_ty;
}
@@ -2302,7 +2297,7 @@
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.Fn => {
@@ -2349,7 +2344,7 @@
0,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(fn_di_ty));
return fn_di_ty;
},
.ComptimeInt => unreachable,


@@ -87,12 +87,7 @@ pub const DeclState = struct {
dbg_info: std.ArrayList(u8),
abbrev_type_arena: std.heap.ArenaAllocator,
abbrev_table: std.ArrayListUnmanaged(AbbrevEntry) = .{},
abbrev_resolver: std.HashMapUnmanaged(
Type,
u32,
Type.HashContext64,
std.hash_map.default_max_load_percentage,
) = .{},
abbrev_resolver: std.AutoHashMapUnmanaged(InternPool.Index, u32) = .{},
abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{},
@@ -142,9 +137,7 @@
/// @symbol signifies a type abbreviation positioned somewhere in the .debug_abbrev section
/// which we use as our target of the relocation.
fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
const resolv = self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}) orelse blk: {
const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: {
const sym_index = @intCast(u32, self.abbrev_table.items.len);
try self.abbrev_table.append(self.gpa, .{
.atom_index = atom_index,
@@ -152,12 +145,8 @@
.offset = undefined,
});
log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.mod) });
try self.abbrev_resolver.putNoClobberContext(self.gpa, ty, sym_index, .{
.mod = self.mod,
});
break :blk self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}).?;
try self.abbrev_resolver.putNoClobber(self.gpa, ty.toIntern(), sym_index);
break :blk sym_index;
};
log.debug("{x}: %{d} + 0", .{ offset, resolv });
try self.abbrev_relocs.append(self.gpa, .{


@@ -366,13 +366,7 @@ const Writer = struct {
}
fn writeType(w: *Writer, s: anytype, ty: Type) !void {
switch (ty.ip_index) {
.none => switch (ty.tag()) {
.inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"),
.inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"),
},
else => try ty.print(s, w.module),
}
return ty.print(s, w.module);
}
fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {

File diff suppressed because it is too large.


@@ -2159,9 +2159,7 @@ pub const Value = struct {
.Null,
=> {},
.Type => {
return val.toType().hashWithHasher(hasher, mod);
},
.Type => unreachable, // handled via ip_index check above
.Float => {
// For hash/eql purposes, we treat floats as their IEEE integer representation.
switch (ty.floatBits(mod.getTarget())) {
@@ -2310,9 +2308,7 @@
.Null,
.Struct, // It sure would be nice to do something clever with structs.
=> |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag),
.Type => {
val.toType().hashWithHasher(hasher, mod);
},
.Type => unreachable, // handled above with the ip_index check
.Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))),
.Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
.slice => {