Sema: move inferred_alloc_const/mut_type to InternPool

All types are now migrated to use `InternPool`, and the `Type.Tag` enum is
deleted in this commit.
Andrew Kelley 2023-05-20 17:55:40 -07:00
parent 65d65f5dda
commit 7e19c95668
11 changed files with 601 additions and 873 deletions
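
The motivation is easiest to see in miniature. Below is a toy sketch of the interning idea (hypothetical `MiniPool` and `IntTypeKey`, not the compiler's actual `InternPool`): each distinct key is stored exactly once and identified by a stable 32-bit index, so structural equality and hashing collapse to integer operations on that index.

const std = @import("std");

// Toy key: just enough structure to deduplicate integer types.
const IntTypeKey = struct {
    signedness: std.builtin.Signedness,
    bits: u16,
};

const MiniPool = struct {
    map: std.AutoArrayHashMapUnmanaged(IntTypeKey, void) = .{},

    // Returns the existing index for `key`, or appends the key and returns
    // its new index. Structurally equal keys always yield the same index.
    fn intern(pool: *MiniPool, gpa: std.mem.Allocator, key: IntTypeKey) !u32 {
        const gop = try pool.map.getOrPut(gpa, key);
        return @intCast(u32, gop.index);
    }
};

test "interning deduplicates structurally equal keys" {
    const gpa = std.testing.allocator;
    var pool: MiniPool = .{};
    defer pool.map.deinit(gpa);

    const a = try pool.intern(gpa, .{ .signedness = .unsigned, .bits = 32 });
    const b = try pool.intern(gpa, .{ .signedness = .unsigned, .bits = 32 });
    const c = try pool.intern(gpa, .{ .signedness = .signed, .bits = 32 });
    try std.testing.expect(a == b); // same key, same index
    try std.testing.expect(a != c); // different key, different index
}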

@ -905,6 +905,8 @@ pub const Inst = struct {
const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type),
anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type),
inferred_alloc_mut_type = @enumToInt(InternPool.Index.inferred_alloc_mut_type),
empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
undef = @enumToInt(InternPool.Index.undef),
zero = @enumToInt(InternPool.Index.zero),

@ -959,6 +959,8 @@ pub const Index = enum(u32) {
const_slice_u8_sentinel_0_type,
anyerror_void_error_union_type,
generic_poison_type,
inferred_alloc_const_type,
inferred_alloc_mut_type,
/// `@TypeOf(.{})`
empty_struct_type,
@ -1009,10 +1011,7 @@ pub const Index = enum(u32) {
pub fn toType(i: Index) @import("type.zig").Type {
assert(i != .none);
return .{
.ip_index = i,
.legacy = undefined,
};
return .{ .ip_index = i };
}
pub fn toValue(i: Index) @import("value.zig").Value {
@ -1195,6 +1194,10 @@ pub const static_keys = [_]Key{
// generic_poison_type
.{ .simple_type = .generic_poison },
// inferred_alloc_const_type
.{ .simple_type = .inferred_alloc_const },
// inferred_alloc_mut_type
.{ .simple_type = .inferred_alloc_mut },
// empty_struct_type
.{ .anon_struct_type = .{
@ -1568,6 +1571,12 @@ pub const SimpleType = enum(u32) {
type_info,
generic_poison,
/// TODO: remove this from `SimpleType`; instead make it only a special `Index` tag like
/// `var_args_param_type`.
inferred_alloc_const,
/// TODO: remove this from `SimpleType`; instead make it only a special `Index` tag like
/// `var_args_param_type`.
inferred_alloc_mut,
};
pub const SimpleValue = enum(u32) {

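Note the paired additions above: each new `Index` tag (`inferred_alloc_const_type`, `inferred_alloc_mut_type`) gets a matching `static_keys` entry at the same position. A toy illustration of keeping two such lists in lockstep (hypothetical names, not the real `InternPool` machinery), which turns a missed update into a compile error:

const std = @import("std");

const Index = enum(u32) {
    generic_poison_type,
    inferred_alloc_const_type,
    inferred_alloc_mut_type,
    empty_struct_type,
};

// One key per statically known index, in enum order.
const static_keys = [_][]const u8{
    "generic_poison",
    "inferred_alloc_const",
    "inferred_alloc_mut",
    "empty_struct",
};

comptime {
    // Adding an Index tag without its key (or vice versa) fails here.
    std.debug.assert(static_keys.len == @typeInfo(Index).Enum.fields.len);
}
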
@ -6818,7 +6818,7 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
}
pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
const info = ptr_ty.ptrInfoIp(mod.intern_pool);
const info = Type.ptrInfoIp(mod.intern_pool, ptr_ty.toIntern());
return mod.ptrType(.{
.elem_type = new_child.toIntern(),

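The `ptrInfoIp` hunk above changes a `Type` method into a free function taking the pool and a raw `InternPool.Index`. A minimal sketch of that API shape (toy `Index`/`PtrType`, with a slice standing in for the pool):

const std = @import("std");

const Index = enum(u32) { _ };
const PtrType = struct { size: enum { One, Many, Slice, C } };

// Keyed by pool + index, the query is usable from call sites that hold
// only an Index and never materialize a wrapper Type.
fn ptrInfoIp(pool: []const PtrType, ty: Index) PtrType {
    return pool[@enumToInt(ty)];
}

test {
    const pool = [_]PtrType{.{ .size = .One }};
    try std.testing.expect(ptrInfoIp(&pool, @intToEnum(Index, 0)).size == .One);
}
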
@ -904,10 +904,10 @@ fn analyzeBodyInner(
const air_inst: Air.Inst.Ref = switch (tags[inst]) {
// zig fmt: off
.alloc => try sema.zirAlloc(block, inst),
.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)),
.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)),
.alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_const)),
.alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_mut)),
.alloc_inferred => try sema.zirAllocInferred(block, inst, .{ .ip_index = .inferred_alloc_const_type }),
.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, .{ .ip_index = .inferred_alloc_mut_type }),
.alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, .{ .ip_index = .inferred_alloc_const_type }),
.alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, .{ .ip_index = .inferred_alloc_mut_type }),
.alloc_mut => try sema.zirAllocMut(block, inst),
.alloc_comptime_mut => try sema.zirAllocComptime(block, inst),
.make_ptr_const => try sema.zirMakePtrConst(block, inst),
@ -3471,9 +3471,9 @@ fn zirAllocExtended(
} else 0;
const inferred_alloc_ty = if (small.is_const)
Type.initTag(.inferred_alloc_const)
Type{ .ip_index = .inferred_alloc_const_type }
else
Type.initTag(.inferred_alloc_mut);
Type{ .ip_index = .inferred_alloc_mut_type };
if (block.is_comptime or small.is_comptime) {
if (small.has_type) {
@ -3707,9 +3707,10 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload;
const ptr_val = sema.air_values.items[value_index];
const var_is_mut = switch (sema.typeOf(ptr).tag()) {
.inferred_alloc_const => false,
.inferred_alloc_mut => true,
const var_is_mut = switch (sema.typeOf(ptr).toIntern()) {
.inferred_alloc_const_type => false,
.inferred_alloc_mut_type => true,
else => unreachable,
};
const target = sema.mod.getTarget();
@ -7451,7 +7452,7 @@ fn instantiateGenericCall(
};
arg_val.hashUncoerced(arg_ty, &hasher, mod);
if (is_anytype) {
arg_ty.hashWithHasher(&hasher, mod);
std.hash.autoHash(&hasher, arg_ty.toIntern());
generic_args[i] = .{
.ty = arg_ty,
.val = arg_val,
@ -7465,7 +7466,7 @@ fn instantiateGenericCall(
};
}
} else if (is_anytype) {
arg_ty.hashWithHasher(&hasher, mod);
std.hash.autoHash(&hasher, arg_ty.toIntern());
generic_args[i] = .{
.ty = arg_ty,
.val = Value.generic_poison,
@ -8233,7 +8234,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code));
return sema.addConstant(
.{ .ip_index = .enum_literal_type, .legacy = undefined },
.{ .ip_index = .enum_literal_type },
try Value.Tag.enum_literal.create(sema.arena, duped_name),
);
}
@ -13278,9 +13279,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const rhs_val = maybe_rhs_val orelse unreachable;
const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
if (!rem.compareAllWithZero(.eq, mod)) {
return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{
@tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod),
});
return sema.fail(
block,
src,
"ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'",
.{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(resolved_type, mod) },
);
}
}
@ -13386,7 +13390,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const air_tag = if (is_int) blk: {
if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) });
return sema.fail(
block,
src,
"division with '{}' and '{}': signed integers must use @divTrunc, @divFloor, or @divExact",
.{ lhs_ty.fmt(mod), rhs_ty.fmt(mod) },
);
}
break :blk Air.Inst.Tag.div_trunc;
} else switch (block.float_mode) {
@ -23367,7 +23376,7 @@ fn validateRunTimeType(
};
}
const TypeSet = std.HashMapUnmanaged(Type, void, Type.HashContext64, std.hash_map.default_max_load_percentage);
const TypeSet = std.AutoHashMapUnmanaged(InternPool.Index, void);
fn explainWhyTypeIsComptime(
sema: *Sema,
@ -23453,7 +23462,7 @@ fn explainWhyTypeIsComptimeInner(
},
.Struct => {
if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;
if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
if (mod.typeToStruct(ty)) |struct_obj| {
for (struct_obj.fields.values(), 0..) |field, i| {
@ -23472,7 +23481,7 @@ fn explainWhyTypeIsComptimeInner(
},
.Union => {
if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;
if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
if (mod.typeToUnion(ty)) |union_obj| {
for (union_obj.fields.values(), 0..) |field, i| {
@ -27459,8 +27468,8 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
// different behavior depending on whether the types were inferred.
// Something seems wrong here.
if (prev_ptr_ty.ip_index == .none) {
if (prev_ptr_ty.tag() == .inferred_alloc_mut) return null;
if (prev_ptr_ty.tag() == .inferred_alloc_const) return null;
if (prev_ptr_ty.ip_index == .inferred_alloc_mut_type) return null;
if (prev_ptr_ty.ip_index == .inferred_alloc_const_type) return null;
}
const prev_ptr_child_ty = prev_ptr_ty.childType(mod);
@ -31677,6 +31686,9 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.enum_literal,
.type_info,
=> true,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
@ -31931,6 +31943,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
.bool_false => unreachable,
.empty_struct => unreachable,
.generic_poison => unreachable,
.inferred_alloc_const_type => unreachable,
.inferred_alloc_mut_type => unreachable,
.type_info_type => return sema.getBuiltinType("Type"),
.extern_options_type => return sema.getBuiltinType("ExternOptions"),
@ -33032,16 +33046,9 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
/// TODO assert the return value matches `ty.onePossibleValue`
pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
const mod = sema.mod;
switch (ty.ip_index) {
.empty_struct_type => return Value.empty_struct,
.none => switch (ty.tag()) {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
return switch (ty.ip_index) {
.empty_struct_type => Value.empty_struct,
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => |int_type| {
if (int_type.bits == 0) {
return try mod.intValue(ty, 0);
@ -33123,6 +33130,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.undefined => Value.undef,
.generic_poison => return error.GenericPoison,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
const resolved_ty = try sema.resolveTypeFields(ty);
@ -33245,7 +33254,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.enum_tag => unreachable,
.aggregate => unreachable,
},
}
};
}
/// Returns the type of the AIR instruction.
@ -33563,16 +33572,15 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError
/// This logic must be kept in sync with `Type.isPtrLikeOptional`.
fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
const mod = sema.mod;
if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice => return null,
.C => return ptr_type.elem_type.toType(),
.One, .Many => return ty,
.Slice => null,
.C => ptr_type.elem_type.toType(),
.One, .Many => ty,
},
.opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice, .C => return null,
.Slice, .C => null,
.Many, .One => {
if (ptr_type.is_allowzero) return null;
@ -33585,15 +33593,10 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
return payload_ty;
},
},
else => return null,
else => null,
},
else => return null,
else => null,
};
switch (ty.tag()) {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
}
}
/// `generic_poison` will return false.
@ -33677,6 +33680,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.enum_literal,
.type_info,
=> true,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;

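The `TypeSet` rewrite above is representative of this commit as a whole: once a type's identity is its 32-bit index, containers no longer need custom hash/eql contexts. A self-contained sketch (toy `Index` enum standing in for `InternPool.Index`):

const std = @import("std");

const Index = enum(u32) { none = std.math.maxInt(u32), _ };

// The auto context hashes the enum's integer value directly; compare the
// deleted std.HashMapUnmanaged(Type, void, Type.HashContext64, ...) form.
const TypeSet = std.AutoHashMapUnmanaged(Index, void);

test "index-keyed set needs no custom context" {
    const gpa = std.testing.allocator;
    var set: TypeSet = .{};
    defer set.deinit(gpa);

    const ty = @intToEnum(Index, 42);
    try std.testing.expect(!(try set.getOrPut(gpa, ty)).found_existing);
    try std.testing.expect((try set.getOrPut(gpa, ty)).found_existing);
}
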
@ -2112,6 +2112,8 @@ pub const Inst = struct {
const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type),
anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type),
inferred_alloc_mut_type = @enumToInt(InternPool.Index.inferred_alloc_mut_type),
empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
undef = @enumToInt(InternPool.Index.undef),
zero = @enumToInt(InternPool.Index.zero),

@ -5367,11 +5367,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
// Ensure complete type definition is visible before accessing fields.
_ = try f.typeToIndex(struct_ty, .complete);
const field_name: CValue = switch (struct_ty.ip_index) {
.none => switch (struct_ty.tag()) {
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
const field_name: CValue = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.struct_type => switch (struct_ty.containerLayout(mod)) {
.Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
.{ .field = extra.field_index }
@ -5476,7 +5472,6 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
},
else => unreachable,
},
};
const local = try f.allocLocal(inst, inst_ty);

@ -381,12 +381,7 @@ pub const Object = struct {
/// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we
/// want to iterate over it while adding entries to it.
pub const DITypeMap = std.ArrayHashMapUnmanaged(
Type,
AnnotatedDITypePtr,
Type.HashContext32,
true,
);
pub const DITypeMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, AnnotatedDITypePtr);
pub fn create(gpa: Allocator, options: link.Options) !*Object {
const obj = try gpa.create(Object);
@ -1437,7 +1432,7 @@ pub const Object = struct {
const gpa = o.gpa;
// Be careful not to reference this `gop` variable after any recursive calls
// to `lowerDebugType`.
const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .mod = o.module });
const gop = try o.di_type_map.getOrPut(gpa, ty.toIntern());
if (gop.found_existing) {
const annotated = gop.value_ptr.*;
const di_type = annotated.toDIType();
@ -1450,7 +1445,7 @@ pub const Object = struct {
};
return o.lowerDebugTypeImpl(entry, resolve, di_type);
}
errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .mod = o.module }));
errdefer assert(o.di_type_map.orderedRemove(ty.toIntern()));
const entry: Object.DITypeMap.Entry = .{
.key_ptr = gop.key_ptr,
.value_ptr = gop.value_ptr,
@ -1465,7 +1460,7 @@ pub const Object = struct {
resolve: DebugResolveStatus,
opt_fwd_decl: ?*llvm.DIType,
) Allocator.Error!*llvm.DIType {
const ty = gop.key_ptr.*;
const ty = gop.key_ptr.toType();
const gpa = o.gpa;
const target = o.target;
const dib = o.di_builder.?;
@ -1498,7 +1493,7 @@ pub const Object = struct {
const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
return enum_di_ty;
}
@ -1558,7 +1553,7 @@ pub const Object = struct {
"",
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
return enum_di_ty;
},
.Float => {
@ -1577,7 +1572,7 @@ pub const Object = struct {
},
.Pointer => {
// Normalize everything that the debug info does not represent.
const ptr_info = ty.ptrInfoIp(mod.intern_pool);
const ptr_info = Type.ptrInfoIp(mod.intern_pool, ty.toIntern());
if (ptr_info.sentinel != .none or
ptr_info.address_space != .generic or
@ -1603,7 +1598,7 @@ pub const Object = struct {
});
const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
return ptr_di_ty;
}
@ -1682,7 +1677,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
}
@ -1696,7 +1691,7 @@ pub const Object = struct {
name,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(ptr_di_ty));
return ptr_di_ty;
},
.Opaque => {
@ -1718,7 +1713,7 @@ pub const Object = struct {
);
// The recursive call to `lowerDebugType` via `namespaceToDebugScope`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(opaque_di_ty));
return opaque_di_ty;
},
.Array => {
@ -1729,7 +1724,7 @@ pub const Object = struct {
@intCast(c_int, ty.arrayLen(mod)),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty));
return array_di_ty;
},
.Vector => {
@ -1761,7 +1756,7 @@ pub const Object = struct {
ty.vectorLen(mod),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(vector_di_ty));
return vector_di_ty;
},
.Optional => {
@ -1777,7 +1772,7 @@ pub const Object = struct {
if (ty.optionalReprIsPayload(mod)) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
return ptr_di_ty;
}
@ -1850,7 +1845,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.ErrorUnion => {
@ -1858,7 +1853,7 @@ pub const Object = struct {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(err_set_di_ty));
return err_set_di_ty;
}
const name = try ty.nameAlloc(gpa, o.module);
@ -1941,7 +1936,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.ErrorSet => {
@ -2038,7 +2033,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.struct_type => |struct_type| s: {
@ -2057,7 +2052,7 @@ pub const Object = struct {
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty));
return struct_di_ty;
}
},
@ -2070,7 +2065,7 @@ pub const Object = struct {
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty));
return struct_di_ty;
}
@ -2126,7 +2121,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.Union => {
@ -2155,7 +2150,7 @@ pub const Object = struct {
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty));
return union_di_ty;
}
@ -2182,7 +2177,7 @@ pub const Object = struct {
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
}
@ -2241,7 +2236,7 @@ pub const Object = struct {
if (layout.tag_size == 0) {
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty));
return union_di_ty;
}
@ -2302,7 +2297,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.Fn => {
@ -2349,7 +2344,7 @@ pub const Object = struct {
0,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .mod = o.module });
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(fn_di_ty));
return fn_di_ty;
},
.ComptimeInt => unreachable,

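A pattern repeated through the hunks above: values are stored with a fresh `put` keyed by `ty.toIntern()` after each recursive `lowerDebugType` call, because an insertion during recursion may grow the map and invalidate previously obtained entry pointers. A sketch of why the re-`put` is the safe choice (toy map; the invalidation hazard is the point):

const std = @import("std");

const Index = enum(u32) { _ };

test "store by key after the map may have grown" {
    const gpa = std.testing.allocator;
    var map: std.AutoArrayHashMapUnmanaged(Index, u32) = .{};
    defer map.deinit(gpa);

    const key = @intToEnum(Index, 7);
    _ = try map.getOrPut(gpa, key); // reserve a slot; value still undefined

    // A recursive call may insert more entries, potentially reallocating
    // the backing storage and invalidating any saved value_ptr...
    try map.put(gpa, @intToEnum(Index, 8), 0);

    // ...so the finished value is stored by key, mirroring the
    // `o.di_type_map.put(gpa, ty.toIntern(), ...)` calls above.
    try map.put(gpa, key, 1234);
    try std.testing.expectEqual(@as(u32, 1234), map.get(key).?);
}
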
@ -87,12 +87,7 @@ pub const DeclState = struct {
dbg_info: std.ArrayList(u8),
abbrev_type_arena: std.heap.ArenaAllocator,
abbrev_table: std.ArrayListUnmanaged(AbbrevEntry) = .{},
abbrev_resolver: std.HashMapUnmanaged(
Type,
u32,
Type.HashContext64,
std.hash_map.default_max_load_percentage,
) = .{},
abbrev_resolver: std.AutoHashMapUnmanaged(InternPool.Index, u32) = .{},
abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{},
@ -142,9 +137,7 @@ pub const DeclState = struct {
/// @symbol signifies a type abbreviation positioned somewhere in the .debug_abbrev section
/// which we use as our target of the relocation.
fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
const resolv = self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}) orelse blk: {
const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: {
const sym_index = @intCast(u32, self.abbrev_table.items.len);
try self.abbrev_table.append(self.gpa, .{
.atom_index = atom_index,
@ -152,12 +145,8 @@ pub const DeclState = struct {
.offset = undefined,
});
log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.mod) });
try self.abbrev_resolver.putNoClobberContext(self.gpa, ty, sym_index, .{
.mod = self.mod,
});
break :blk self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}).?;
try self.abbrev_resolver.putNoClobber(self.gpa, ty.toIntern(), sym_index);
break :blk sym_index;
};
log.debug("{x}: %{d} + 0", .{ offset, resolv });
try self.abbrev_relocs.append(self.gpa, .{

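The `abbrev_resolver` hunk also drops a redundant second lookup: after `putNoClobber`, the block breaks with the value it just inserted. The same shape in miniature (toy `Index`; the next-slot computation is illustrative):

const std = @import("std");

const Index = enum(u32) { _ };

test "break with the inserted value instead of re-querying" {
    const gpa = std.testing.allocator;
    var resolver: std.AutoHashMapUnmanaged(Index, u32) = .{};
    defer resolver.deinit(gpa);

    const ty = @intToEnum(Index, 3);
    const resolv = resolver.get(ty) orelse blk: {
        const sym_index: u32 = resolver.count();
        try resolver.putNoClobber(gpa, ty, sym_index);
        break :blk sym_index; // no second hash lookup needed
    };
    try std.testing.expectEqual(@as(u32, 0), resolv);
}
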
@ -366,13 +366,7 @@ const Writer = struct {
}
fn writeType(w: *Writer, s: anytype, ty: Type) !void {
switch (ty.ip_index) {
.none => switch (ty.tag()) {
.inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"),
.inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"),
},
else => try ty.print(s, w.module),
}
return ty.print(s, w.module);
}
fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {

@ -11,36 +11,19 @@ const TypedValue = @import("TypedValue.zig");
const Sema = @import("Sema.zig");
const InternPool = @import("InternPool.zig");
const file_struct = @This();
/// Both types and values are canonically represented by a single 32-bit integer
/// which is an index into an `InternPool` data structure.
/// This struct abstracts over this storage by providing methods only
/// applicable to types rather than values in general.
pub const Type = struct {
/// We are migrating towards using this for every Type object. However, many
/// types are still represented the legacy way. This is indicated by using
/// InternPool.Index.none.
ip_index: InternPool.Index,
/// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication.
/// This union takes advantage of the fact that the first page of memory
/// is unmapped, giving us 4096 possible enum tags that have no payload.
legacy: extern union {
/// If the tag value is less than Tag.no_payload_count, then no pointer
/// dereference is needed.
tag_if_small_enough: Tag,
ptr_otherwise: *Payload,
},
pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId {
return ty.zigTypeTagOrPoison(mod) catch unreachable;
}
pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId {
switch (ty.ip_index) {
.none => switch (ty.tag()) {
.inferred_alloc_const,
.inferred_alloc_mut,
=> return .Pointer,
},
else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => .Int,
.ptr_type => .Pointer,
.array_type => .Array,
@ -105,6 +88,8 @@ pub const Type = struct {
.type_info => .Union,
.generic_poison => return error.GenericPoison,
.inferred_alloc_const, .inferred_alloc_mut => return .Pointer,
},
// values, not types
@ -118,8 +103,7 @@ pub const Type = struct {
.enum_tag => unreachable,
.simple_value => unreachable,
.aggregate => unreachable,
},
}
};
}
pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId {
@ -171,68 +155,6 @@ pub const Type = struct {
};
}
pub fn initTag(comptime small_tag: Tag) Type {
comptime assert(@enumToInt(small_tag) < Tag.no_payload_count);
return Type{
.ip_index = .none,
.legacy = .{ .tag_if_small_enough = small_tag },
};
}
pub fn initPayload(payload: *Payload) Type {
assert(@enumToInt(payload.tag) >= Tag.no_payload_count);
return Type{
.ip_index = .none,
.legacy = .{ .ptr_otherwise = payload },
};
}
pub fn tag(ty: Type) Tag {
assert(ty.ip_index == .none);
if (@enumToInt(ty.legacy.tag_if_small_enough) < Tag.no_payload_count) {
return ty.legacy.tag_if_small_enough;
} else {
return ty.legacy.ptr_otherwise.tag;
}
}
/// Prefer `castTag` to this.
pub fn cast(self: Type, comptime T: type) ?*T {
if (self.ip_index != .none) {
return null;
}
if (@hasField(T, "base_tag")) {
return self.castTag(T.base_tag);
}
if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
return null;
}
inline for (@typeInfo(Tag).Enum.fields) |field| {
if (field.value < Tag.no_payload_count)
continue;
const t = @intToEnum(Tag, field.value);
if (self.legacy.ptr_otherwise.tag == t) {
if (T == t.Type()) {
return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise);
}
return null;
}
}
unreachable;
}
pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() {
if (self.ip_index != .none) return null;
if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count)
return null;
if (self.legacy.ptr_otherwise.tag == t)
return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise);
return null;
}
/// If it is a function pointer, returns the function type. Otherwise returns null.
pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type {
if (ty.zigTypeTag(mod) != .Pointer) return null;
@ -260,8 +182,8 @@ pub const Type = struct {
};
}
pub fn ptrInfoIp(ty: Type, ip: InternPool) InternPool.Key.PtrType {
return switch (ip.indexToKey(ty.ip_index)) {
pub fn ptrInfoIp(ip: InternPool, ty: InternPool.Index) InternPool.Key.PtrType {
return switch (ip.indexToKey(ty)) {
.ptr_type => |p| p,
.opt_type => |child| switch (ip.indexToKey(child)) {
.ptr_type => |p| p,
@ -272,134 +194,27 @@ pub const Type = struct {
}
pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data {
return Payload.Pointer.Data.fromKey(ptrInfoIp(ty, mod.intern_pool));
return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.ip_index));
}
pub fn eql(a: Type, b: Type, mod: *Module) bool {
if (a.ip_index != .none or b.ip_index != .none) {
pub fn eql(a: Type, b: Type, mod: *const Module) bool {
_ = mod; // TODO: remove this parameter
assert(a.ip_index != .none);
assert(b.ip_index != .none);
// The InternPool data structure hashes based on Key to make interned objects
// unique. An Index can be treated simply as a u32 value for the
// purpose of Type/Value hashing and equality.
return a.ip_index == b.ip_index;
}
// As a shortcut, if the small tags / addresses match, we're done.
if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true;
switch (a.tag()) {
.inferred_alloc_const,
.inferred_alloc_mut,
=> {
if (b.zigTypeTag(mod) != .Pointer) return false;
const info_a = a.ptrInfo(mod);
const info_b = b.ptrInfo(mod);
if (!info_a.pointee_type.eql(info_b.pointee_type, mod))
return false;
if (info_a.@"align" != info_b.@"align")
return false;
if (info_a.@"addrspace" != info_b.@"addrspace")
return false;
if (info_a.bit_offset != info_b.bit_offset)
return false;
if (info_a.host_size != info_b.host_size)
return false;
if (info_a.vector_index != info_b.vector_index)
return false;
if (info_a.@"allowzero" != info_b.@"allowzero")
return false;
if (info_a.mutable != info_b.mutable)
return false;
if (info_a.@"volatile" != info_b.@"volatile")
return false;
if (info_a.size != info_b.size)
return false;
const sentinel_a = info_a.sentinel;
const sentinel_b = info_b.sentinel;
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
if (!sa.eql(sb, info_a.pointee_type, mod))
return false;
} else {
return false;
}
} else {
if (sentinel_b != null)
return false;
}
return true;
},
}
}
pub fn hash(self: Type, mod: *Module) u64 {
var hasher = std.hash.Wyhash.init(0);
self.hashWithHasher(&hasher, mod);
return hasher.final();
}
pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
if (ty.ip_index != .none) {
pub fn hash(ty: Type, mod: *const Module) u32 {
_ = mod; // TODO: remove this parameter
assert(ty.ip_index != .none);
// The InternPool data structure hashes based on Key to make interned objects
// unique. An Index can be treated simply as a u32 value for the
// purpose of Type/Value hashing and equality.
std.hash.autoHash(hasher, ty.ip_index);
return;
return std.hash.uint32(@enumToInt(ty.ip_index));
}
switch (ty.tag()) {
.inferred_alloc_const,
.inferred_alloc_mut,
=> {
std.hash.autoHash(hasher, std.builtin.TypeId.Pointer);
const info = ty.ptrInfo(mod);
hashWithHasher(info.pointee_type, hasher, mod);
hashSentinel(info.sentinel, info.pointee_type, hasher, mod);
std.hash.autoHash(hasher, info.@"align");
std.hash.autoHash(hasher, info.@"addrspace");
std.hash.autoHash(hasher, info.bit_offset);
std.hash.autoHash(hasher, info.host_size);
std.hash.autoHash(hasher, info.vector_index);
std.hash.autoHash(hasher, info.@"allowzero");
std.hash.autoHash(hasher, info.mutable);
std.hash.autoHash(hasher, info.@"volatile");
std.hash.autoHash(hasher, info.size);
},
}
}
fn hashSentinel(opt_val: ?Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
if (opt_val) |s| {
std.hash.autoHash(hasher, true);
s.hash(ty, hasher, mod);
} else {
std.hash.autoHash(hasher, false);
}
}
pub const HashContext64 = struct {
mod: *Module,
pub fn hash(self: @This(), t: Type) u64 {
return t.hash(self.mod);
}
pub fn eql(self: @This(), a: Type, b: Type) bool {
return a.eql(b, self.mod);
}
};
pub const HashContext32 = struct {
mod: *Module,
pub fn hash(self: @This(), t: Type) u32 {
return @truncate(u32, t.hash(self.mod));
}
pub fn eql(self: @This(), a: Type, b: Type, b_index: usize) bool {
_ = b_index;
return a.eql(b, self.mod);
}
};
pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = ty;
@ -460,12 +275,7 @@ pub const Type = struct {
/// Prints a name suitable for `@typeName`.
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
switch (ty.ip_index) {
.none => switch (ty.tag()) {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => |int_type| {
const sign_char: u8 = switch (int_type.signedness) {
.signed => 'i',
@ -667,7 +477,6 @@ pub const Type = struct {
.opt => unreachable,
.enum_tag => unreachable,
.aggregate => unreachable,
},
}
}
@ -699,15 +508,10 @@ pub const Type = struct {
ignore_comptime_only: bool,
strat: AbiAlignmentAdvancedStrat,
) RuntimeBitsError!bool {
switch (ty.ip_index) {
return switch (ty.ip_index) {
// False because it is a comptime-only type.
.empty_struct_type => return false,
.none => switch (ty.tag()) {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.empty_struct_type => false,
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => |int_type| int_type.bits != 0,
.ptr_type => |ptr_type| {
// Pointers to zero-bit types still have a runtime address; however, pointers
@ -802,6 +606,8 @@ pub const Type = struct {
=> false,
.generic_poison => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
@ -880,7 +686,7 @@ pub const Type = struct {
.enum_tag => unreachable,
.aggregate => unreachable,
},
}
};
}
/// true if and only if the type has a well-defined memory layout
@ -950,6 +756,9 @@ pub const Type = struct {
.type_info,
.generic_poison,
=> false,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
@ -1167,10 +976,7 @@ pub const Type = struct {
.f80 => switch (target.c_type_bit_size(.longdouble)) {
80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
else => {
const u80_ty: Type = .{
.ip_index = .u80_type,
.legacy = undefined,
};
const u80_ty: Type = .{ .ip_index = .u80_type };
return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) };
},
},
@ -1194,6 +1000,8 @@ pub const Type = struct {
.noreturn => unreachable,
.generic_poison => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
@ -1562,10 +1370,7 @@ pub const Type = struct {
.f80 => switch (target.c_type_bit_size(.longdouble)) {
80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
else => {
const u80_ty: Type = .{
.ip_index = .u80_type,
.legacy = undefined,
};
const u80_ty: Type = .{ .ip_index = .u80_type };
return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) };
},
},
@ -1605,6 +1410,8 @@ pub const Type = struct {
.type_info => unreachable,
.noreturn => unreachable,
.generic_poison => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| switch (ty.containerLayout(mod)) {
.Packed => {
@ -1835,6 +1642,8 @@ pub const Type = struct {
.undefined => unreachable,
.enum_literal => unreachable,
.generic_poison => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
.atomic_order => unreachable, // missing call to resolveTypeFields
.atomic_rmw_op => unreachable, // missing call to resolveTypeFields
@ -1927,17 +1736,13 @@ pub const Type = struct {
}
pub fn isSinglePointer(ty: Type, mod: *const Module) bool {
switch (ty.ip_index) {
.none => return switch (ty.tag()) {
.inferred_alloc_const,
.inferred_alloc_mut,
=> true,
},
else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
return switch (ty.ip_index) {
.inferred_alloc_const_type, .inferred_alloc_mut_type => true,
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.ptr_type => |ptr_info| ptr_info.size == .One,
else => false,
},
}
};
}
/// Asserts `ty` is a pointer.
@ -1948,11 +1753,7 @@ pub const Type = struct {
/// Returns `null` if `ty` is not a pointer.
pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size {
return switch (ty.ip_index) {
.none => switch (ty.tag()) {
.inferred_alloc_const,
.inferred_alloc_mut,
=> .One,
},
.inferred_alloc_const_type, .inferred_alloc_mut_type => .One,
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.ptr_type => |ptr_info| ptr_info.size,
else => null,
@ -2625,10 +2426,6 @@ pub const Type = struct {
while (true) switch (ty.ip_index) {
.empty_struct_type => return Value.empty_struct,
.none => switch (ty.tag()) {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => |int_type| {
if (int_type.bits == 0) {
@ -2710,6 +2507,8 @@ pub const Type = struct {
.undefined => return Value.undef,
.generic_poison => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
if (mod.structPtrUnwrap(struct_type.index)) |s| {
@ -2888,6 +2687,9 @@ pub const Type = struct {
.enum_literal,
.type_info,
=> true,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
.struct_type => |struct_type| {
// A struct with no fields is not comptime-only.
@ -3343,11 +3145,7 @@ pub const Type = struct {
/// Supports structs and unions.
pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
switch (ty.ip_index) {
.none => switch (ty.tag()) {
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveLayout());
@ -3397,7 +3195,6 @@ pub const Type = struct {
},
else => unreachable,
},
}
}
@ -3445,25 +3242,6 @@ pub const Type = struct {
return ty.ip_index == .generic_poison_type;
}
/// This enum does not directly correspond to `std.builtin.TypeId` because
/// it has extra enum tags in it, as a way of using less memory. For example,
/// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
/// but with different alignment values, in this data structure they are represented
/// with different enum tags, because the the former requires more payload data than the latter.
/// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`.
pub const Tag = enum(usize) {
/// This is a special value that tracks a set of types that have been stored
/// to an inferred allocation. It does not support most of the normal type queries.
/// However it does respond to `isConstPtr`, `ptrSize`, `zigTypeTag`, etc.
inferred_alloc_mut,
/// Same as `inferred_alloc_mut` but the local is `const` not `var`.
inferred_alloc_const, // See last_no_payload_tag below.
// After this, the tag requires a payload.
pub const last_no_payload_tag = Tag.inferred_alloc_const;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
};
pub fn isTuple(ty: Type, mod: *Module) bool {
return switch (ty.ip_index) {
.none => false,
@ -3511,14 +3289,9 @@ pub const Type = struct {
};
}
/// The sub-types are named after what fields they contain.
pub const Payload = struct {
tag: Tag,
/// TODO: remove this data structure since we have `InternPool.Key.PtrType`.
pub const Pointer = struct {
data: Data,
pub const Data = struct {
pointee_type: Type,
sentinel: ?Value = null,
@ -3568,64 +3341,60 @@ pub const Type = struct {
};
};
pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined };
pub const @"u8": Type = .{ .ip_index = .u8_type, .legacy = undefined };
pub const @"u16": Type = .{ .ip_index = .u16_type, .legacy = undefined };
pub const @"u29": Type = .{ .ip_index = .u29_type, .legacy = undefined };
pub const @"u32": Type = .{ .ip_index = .u32_type, .legacy = undefined };
pub const @"u64": Type = .{ .ip_index = .u64_type, .legacy = undefined };
pub const @"u128": Type = .{ .ip_index = .u128_type, .legacy = undefined };
pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type };
pub const @"u29": Type = .{ .ip_index = .u29_type };
pub const @"u32": Type = .{ .ip_index = .u32_type };
pub const @"u64": Type = .{ .ip_index = .u64_type };
pub const @"u128": Type = .{ .ip_index = .u128_type };
pub const @"i8": Type = .{ .ip_index = .i8_type, .legacy = undefined };
pub const @"i16": Type = .{ .ip_index = .i16_type, .legacy = undefined };
pub const @"i32": Type = .{ .ip_index = .i32_type, .legacy = undefined };
pub const @"i64": Type = .{ .ip_index = .i64_type, .legacy = undefined };
pub const @"i128": Type = .{ .ip_index = .i128_type, .legacy = undefined };
pub const @"i8": Type = .{ .ip_index = .i8_type };
pub const @"i16": Type = .{ .ip_index = .i16_type };
pub const @"i32": Type = .{ .ip_index = .i32_type };
pub const @"i64": Type = .{ .ip_index = .i64_type };
pub const @"i128": Type = .{ .ip_index = .i128_type };
pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined };
pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined };
pub const @"f64": Type = .{ .ip_index = .f64_type, .legacy = undefined };
pub const @"f80": Type = .{ .ip_index = .f80_type, .legacy = undefined };
pub const @"f128": Type = .{ .ip_index = .f128_type, .legacy = undefined };
pub const @"f16": Type = .{ .ip_index = .f16_type };
pub const @"f32": Type = .{ .ip_index = .f32_type };
pub const @"f64": Type = .{ .ip_index = .f64_type };
pub const @"f80": Type = .{ .ip_index = .f80_type };
pub const @"f128": Type = .{ .ip_index = .f128_type };
pub const @"bool": Type = .{ .ip_index = .bool_type, .legacy = undefined };
pub const @"usize": Type = .{ .ip_index = .usize_type, .legacy = undefined };
pub const @"isize": Type = .{ .ip_index = .isize_type, .legacy = undefined };
pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type, .legacy = undefined };
pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type, .legacy = undefined };
pub const @"void": Type = .{ .ip_index = .void_type, .legacy = undefined };
pub const @"type": Type = .{ .ip_index = .type_type, .legacy = undefined };
pub const @"anyerror": Type = .{ .ip_index = .anyerror_type, .legacy = undefined };
pub const @"anyopaque": Type = .{ .ip_index = .anyopaque_type, .legacy = undefined };
pub const @"anyframe": Type = .{ .ip_index = .anyframe_type, .legacy = undefined };
pub const @"null": Type = .{ .ip_index = .null_type, .legacy = undefined };
pub const @"undefined": Type = .{ .ip_index = .undefined_type, .legacy = undefined };
pub const @"noreturn": Type = .{ .ip_index = .noreturn_type, .legacy = undefined };
pub const @"bool": Type = .{ .ip_index = .bool_type };
pub const @"usize": Type = .{ .ip_index = .usize_type };
pub const @"isize": Type = .{ .ip_index = .isize_type };
pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type };
pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type };
pub const @"void": Type = .{ .ip_index = .void_type };
pub const @"type": Type = .{ .ip_index = .type_type };
pub const @"anyerror": Type = .{ .ip_index = .anyerror_type };
pub const @"anyopaque": Type = .{ .ip_index = .anyopaque_type };
pub const @"anyframe": Type = .{ .ip_index = .anyframe_type };
pub const @"null": Type = .{ .ip_index = .null_type };
pub const @"undefined": Type = .{ .ip_index = .undefined_type };
pub const @"noreturn": Type = .{ .ip_index = .noreturn_type };
pub const @"c_char": Type = .{ .ip_index = .c_char_type, .legacy = undefined };
pub const @"c_short": Type = .{ .ip_index = .c_short_type, .legacy = undefined };
pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type, .legacy = undefined };
pub const @"c_int": Type = .{ .ip_index = .c_int_type, .legacy = undefined };
pub const @"c_uint": Type = .{ .ip_index = .c_uint_type, .legacy = undefined };
pub const @"c_long": Type = .{ .ip_index = .c_long_type, .legacy = undefined };
pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type, .legacy = undefined };
pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type, .legacy = undefined };
pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined };
pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined };
pub const @"c_char": Type = .{ .ip_index = .c_char_type };
pub const @"c_short": Type = .{ .ip_index = .c_short_type };
pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type };
pub const @"c_int": Type = .{ .ip_index = .c_int_type };
pub const @"c_uint": Type = .{ .ip_index = .c_uint_type };
pub const @"c_long": Type = .{ .ip_index = .c_long_type };
pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type };
pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type };
pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type };
pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type };
pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type, .legacy = undefined };
pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type, .legacy = undefined };
pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type };
pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type };
pub const single_const_pointer_to_comptime_int: Type = .{
.ip_index = .single_const_pointer_to_comptime_int_type,
.legacy = undefined,
};
pub const const_slice_u8_sentinel_0: Type = .{
.ip_index = .const_slice_u8_sentinel_0_type,
.legacy = undefined,
};
pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type, .legacy = undefined };
pub const const_slice_u8_sentinel_0: Type = .{ .ip_index = .const_slice_u8_sentinel_0_type };
pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type };
pub const generic_poison: Type = .{ .ip_index = .generic_poison_type, .legacy = undefined };
pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };
pub const err_int = Type.u16;
@ -3709,33 +3478,4 @@ pub const Type = struct {
/// This is only used for comptime asserts. Bump this number when you make a change
/// to packed struct layout to find out all the places in the codebase you need to edit!
pub const packed_struct_layout_version = 2;
/// This function is used in the debugger pretty formatters in tools/ to fetch the
/// Tag to Payload mapping to facilitate fancy debug printing for this type.
fn dbHelper(self: *Type, tag_to_payload_map: *map: {
const tags = @typeInfo(Tag).Enum.fields;
var fields: [tags.len]std.builtin.Type.StructField = undefined;
for (&fields, tags) |*field, t| field.* = .{
.name = t.name,
.type = *if (t.value < Tag.no_payload_count) void else @field(Tag, t.name).Type(),
.default_value = null,
.is_comptime = false,
.alignment = 0,
};
break :map @Type(.{ .Struct = .{
.layout = .Extern,
.fields = &fields,
.decls = &.{},
.is_tuple = false,
} });
}) void {
_ = self;
_ = tag_to_payload_map;
}
comptime {
if (builtin.mode == .Debug) {
_ = &dbHelper;
}
}
};

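With the legacy representation gone, `eql` and `hash` above reduce to integer operations on the index. Stripped to its essentials (toy `Index` and `Type`):

const std = @import("std");

const Index = enum(u32) { none = std.math.maxInt(u32), _ };
const Type = struct { ip_index: Index };

// Interned objects are unique per Key, so index equality is type equality.
fn eql(a: Type, b: Type) bool {
    return a.ip_index == b.ip_index;
}

fn hash(ty: Type) u32 {
    return std.hash.uint32(@enumToInt(ty.ip_index));
}

test "identity via index" {
    const a = Type{ .ip_index = @intToEnum(Index, 1) };
    const b = Type{ .ip_index = @intToEnum(Index, 1) };
    try std.testing.expect(eql(a, b) and hash(a) == hash(b));
}
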
@ -2159,9 +2159,7 @@ pub const Value = struct {
.Null,
=> {},
.Type => {
return val.toType().hashWithHasher(hasher, mod);
},
.Type => unreachable, // handled via ip_index check above
.Float => {
// For hash/eql purposes, we treat floats as their IEEE integer representation.
switch (ty.floatBits(mod.getTarget())) {
@ -2310,9 +2308,7 @@ pub const Value = struct {
.Null,
.Struct, // It sure would be nice to do something clever with structs.
=> |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag),
.Type => {
val.toType().hashWithHasher(hasher, mod);
},
.Type => unreachable, // handled above with the ip_index check
.Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))),
.Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
.slice => {