stage2: move struct types and aggregate values to InternPool

Andrew Kelley 2023-05-10 12:16:24 -07:00
parent 275652f620
commit 8297f28546
22 changed files with 1573 additions and 1283 deletions

View file

@@ -1,5 +1,10 @@
 //! All interned objects have both a value and a type.
+//! This data structure is self-contained, with the following exceptions:
+//! * type_struct via Module.Struct.Index
+//! * type_opaque via Module.Namespace.Index and Module.Decl.Index
 
+/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are
+/// constructed lazily.
 map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
 items: std.MultiArrayList(Item) = .{},
 extra: std.ArrayListUnmanaged(u32) = .{},
@@ -9,6 +14,13 @@ extra: std.ArrayListUnmanaged(u32) = .{},
 /// violate the above mechanism.
 limbs: std.ArrayListUnmanaged(u64) = .{},
 
+/// Struct objects are stored in this data structure because:
+/// * They contain pointers such as the field maps.
+/// * They need to be mutated after creation.
+allocated_structs: std.SegmentedList(Module.Struct, 0) = .{},
+/// When a Struct object is freed from `allocated_structs`, it is pushed into this stack.
+structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{},
+
 const std = @import("std");
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
@@ -17,8 +29,7 @@ const BigIntMutable = std.math.big.int.Mutable;
 const Limb = std.math.big.Limb;
 
 const InternPool = @This();
-const DeclIndex = @import("Module.zig").Decl.Index;
-const NamespaceIndex = @import("Module.zig").Namespace.Index;
+const Module = @import("Module.zig");
 
 const KeyAdapter = struct {
     intern_pool: *const InternPool,
@@ -45,11 +56,20 @@ pub const Key = union(enum) {
         payload_type: Index,
     },
     simple_type: SimpleType,
+    /// If `empty_struct_type` is handled separately, then this value may be
+    /// safely assumed to never be `none`.
+    struct_type: StructType,
+    union_type: struct {
+        fields_len: u32,
+        // TODO move Module.Union data to InternPool
+    },
+    opaque_type: OpaqueType,
     simple_value: SimpleValue,
     extern_func: struct {
         ty: Index,
         /// The Decl that corresponds to the function itself.
-        decl: DeclIndex,
+        decl: Module.Decl.Index,
         /// Library name if specified.
         /// For example `extern "c" fn write(...) usize` would have 'c' as library name.
         /// Index into the string table bytes.
@@ -62,13 +82,11 @@
         ty: Index,
         tag: BigIntConst,
     },
-    struct_type: StructType,
-    opaque_type: OpaqueType,
-    union_type: struct {
-        fields_len: u32,
-        // TODO move Module.Union data to InternPool
-    },
+    /// An instance of a struct, array, or vector.
+    /// Each element/field stored as an `Index`.
+    /// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
+    /// so the slice length will be one more than the type's array length.
+    aggregate: Aggregate,
 
     pub const IntType = std.builtin.Type.Int;
@@ -113,16 +131,27 @@
         child: Index,
     };
 
-    pub const StructType = struct {
-        fields_len: u32,
-        // TODO move Module.Struct data to InternPool
-    };
-
     pub const OpaqueType = struct {
         /// The Decl that corresponds to the opaque itself.
-        decl: DeclIndex,
+        decl: Module.Decl.Index,
         /// Represents the declarations inside this opaque.
-        namespace: NamespaceIndex,
+        namespace: Module.Namespace.Index,
     };
 
+    /// There are three possibilities here:
+    /// * `@TypeOf(.{})` (untyped empty struct literal)
+    ///   - namespace == .none, index == .none
+    /// * A struct which has a namespace, but no fields.
+    ///   - index == .none
+    /// * A struct which has fields as well as a namespace.
+    pub const StructType = struct {
+        /// This will be `none` only in the case of `@TypeOf(.{})`
+        /// (`Index.empty_struct_type`).
+        namespace: Module.Namespace.OptionalIndex,
+        /// The `none` tag is used to represent two cases:
+        /// * `@TypeOf(.{})`, in which case `namespace` will also be `none`.
+        /// * A struct with no fields, in which case `namespace` will be populated.
+        index: Module.Struct.OptionalIndex,
+    };
+
     pub const Int = struct {
@@ -156,18 +185,24 @@
         addr: Addr,
 
         pub const Addr = union(enum) {
-            decl: DeclIndex,
+            decl: Module.Decl.Index,
             int: Index,
         };
     };
 
     /// `null` is represented by the `val` field being `none`.
     pub const Opt = struct {
+        /// This is the optional type; not the payload type.
         ty: Index,
         /// This could be `none`, indicating the optional is `null`.
         val: Index,
     };
 
+    pub const Aggregate = struct {
+        ty: Index,
+        fields: []const Index,
+    };
+
     pub fn hash32(key: Key) u32 {
         return @truncate(u32, key.hash64());
     }
@@ -193,8 +228,15 @@
             .simple_value,
             .extern_func,
             .opt,
+            .struct_type,
             => |info| std.hash.autoHash(hasher, info),
 
+            .union_type => |union_type| {
+                _ = union_type;
+                @panic("TODO");
+            },
+
+            .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),
+
             .int => |int| {
                 // Canonicalize all integers by converting them to BigIntConst.
                 var buffer: Key.Int.Storage.BigIntSpace = undefined;
@@ -221,16 +263,10 @@
                 for (enum_tag.tag.limbs) |limb| std.hash.autoHash(hasher, limb);
             },
 
-            .struct_type => |struct_type| {
-                if (struct_type.fields_len != 0) {
-                    @panic("TODO");
-                }
-            },
-
-            .union_type => |union_type| {
-                _ = union_type;
-                @panic("TODO");
-            },
-
-            .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),
+            .aggregate => |aggregate| {
+                std.hash.autoHash(hasher, aggregate.ty);
+                for (aggregate.fields) |field| std.hash.autoHash(hasher, field);
+            },
         }
     }
@@ -280,6 +316,10 @@
                 const b_info = b.opt;
                 return std.meta.eql(a_info, b_info);
             },
+            .struct_type => |a_info| {
+                const b_info = b.struct_type;
+                return std.meta.eql(a_info, b_info);
+            },
 
             .ptr => |a_info| {
                 const b_info = b.ptr;
@@ -331,16 +371,6 @@
                 @panic("TODO");
             },
 
-            .struct_type => |a_info| {
-                const b_info = b.struct_type;
-
-                // TODO: remove this special case for empty_struct
-                if (a_info.fields_len == 0 and b_info.fields_len == 0)
-                    return true;
-
-                @panic("TODO");
-            },
-
             .union_type => |a_info| {
                 const b_info = b.union_type;
@@ -353,6 +383,11 @@
                 const b_info = b.opaque_type;
                 return a_info.decl == b_info.decl;
             },
+            .aggregate => |a_info| {
+                const b_info = b.aggregate;
+                if (a_info.ty != b_info.ty) return false;
+                return std.mem.eql(Index, a_info.fields, b_info.fields);
+            },
         }
     }
@@ -375,6 +410,7 @@
             .opt,
             .extern_func,
             .enum_tag,
+            .aggregate,
             => |x| return x.ty,
 
             .simple_value => |s| switch (s) {
@@ -471,6 +507,7 @@ pub const Index = enum(u32) {
     anyerror_void_error_union_type,
     generic_poison_type,
     var_args_param_type,
+    /// `@TypeOf(.{})`
     empty_struct_type,
 
     /// `undefined` (untyped)
@@ -691,7 +728,8 @@ pub const static_keys = [_]Key{
     // empty_struct_type
     .{ .struct_type = .{
-        .fields_len = 0,
+        .namespace = .none,
+        .index = .none,
     } },
 
     .{ .simple_value = .undefined },
@@ -792,16 +830,18 @@ pub const Tag = enum(u8) {
     /// An opaque type.
     /// data is index of Key.OpaqueType in extra.
     type_opaque,
+    /// A struct type.
+    /// data is Module.Struct.OptionalIndex
+    /// The `none` tag is used to represent `@TypeOf(.{})`.
+    type_struct,
+    /// A struct type that has only a namespace; no fields, and there is no
+    /// Module.Struct object allocated for it.
+    /// data is Module.Namespace.Index.
+    type_struct_ns,
     /// A value that can be represented with only an enum tag.
     /// data is SimpleValue enum value.
     simple_value,
-    /// The SimpleType and SimpleValue enums are exposed via the InternPool API using
-    /// SimpleType and SimpleValue as the Key data themselves.
-    /// This tag is for miscellaneous types and values that can be represented with
-    /// only an enum tag, but will be presented via the API with a different Key.
-    /// data is SimpleInternal enum value.
-    simple_internal,
     /// A pointer to an integer value.
     /// data is extra index of PtrInt, which contains the type and address.
     /// Only pointer types are allowed to have this encoding. Optional types must use
@@ -809,6 +849,8 @@ pub const Tag = enum(u8) {
     ptr_int,
     /// An optional value that is non-null.
     /// data is Index of the payload value.
+    /// In order to use this encoding, one must ensure that the `InternPool`
+    /// already contains the optional type corresponding to this payload.
     opt_payload,
     /// An optional value that is null.
     /// data is Index of the payload type.
@@ -859,6 +901,13 @@ pub const Tag = enum(u8) {
     extern_func,
     /// A regular function.
     func,
+    /// This represents the only possible value for *some* types which have
+    /// only one possible value. Not all only-possible-values are encoded this way;
+    /// for example structs which have all comptime fields are not encoded this way.
+    /// The set of values that are encoded this way is:
+    /// * A struct which has 0 fields.
+    /// data is Index of the type, which is known to be zero bits at runtime.
+    only_possible_value,
 };
 
 /// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to
@@ -912,9 +961,12 @@ pub const SimpleType = enum(u32) {
 };
 
 pub const SimpleValue = enum(u32) {
+    /// This is untyped `undefined`.
     undefined,
     void,
+    /// This is untyped `null`.
     null,
+    /// This is the untyped empty struct literal: `.{}`
     empty_struct,
     true,
     false,
@@ -923,12 +975,6 @@
     generic_poison,
 };
 
-pub const SimpleInternal = enum(u32) {
-    /// This is the empty struct type. Note that empty_struct value is exposed
-    /// via SimpleValue.
-    type_empty_struct,
-};
-
 pub const Pointer = struct {
     child: Index,
     sentinel: Index,
@@ -1005,7 +1051,7 @@ pub const ErrorUnion = struct {
 /// 0. field name: null-terminated string index for each fields_len; declaration order
 pub const EnumSimple = struct {
     /// The Decl that corresponds to the enum itself.
-    decl: DeclIndex,
+    decl: Module.Decl.Index,
     /// An integer type which is used for the numerical value of the enum. This
     /// is inferred by Zig to be the smallest power of two unsigned int that
     /// fits the number of fields. It is stored here to avoid unnecessary
@@ -1091,6 +1137,10 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
     ip.items.deinit(gpa);
     ip.extra.deinit(gpa);
     ip.limbs.deinit(gpa);
+
+    ip.structs_free_list.deinit(gpa);
+    ip.allocated_structs.deinit(gpa);
+
     ip.* = undefined;
 }
@@ -1167,20 +1217,38 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
         .type_enum_simple => @panic("TODO"),
         .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) },
-        .simple_internal => switch (@intToEnum(SimpleInternal, data)) {
-            .type_empty_struct => .{ .struct_type = .{
-                .fields_len = 0,
-            } },
+        .type_struct => {
+            const struct_index = @intToEnum(Module.Struct.OptionalIndex, data);
+            const namespace = if (struct_index.unwrap()) |i|
+                ip.structPtrConst(i).namespace.toOptional()
+            else
+                .none;
+            return .{ .struct_type = .{
+                .index = struct_index,
+                .namespace = namespace,
+            } };
         },
+        .type_struct_ns => .{ .struct_type = .{
+            .index = .none,
+            .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(),
+        } },
 
         .opt_null => .{ .opt = .{
             .ty = @intToEnum(Index, data),
             .val = .none,
         } },
-        .opt_payload => .{ .opt = .{
-            .ty = indexToKey(ip, @intToEnum(Index, data)).typeOf(),
-            .val = @intToEnum(Index, data),
-        } },
+        .opt_payload => {
+            const payload_val = @intToEnum(Index, data);
+            // The existence of `opt_payload` guarantees that the optional type will be
+            // stored in the `InternPool`.
+            const opt_ty = ip.getAssumeExists(.{
+                .opt_type = indexToKey(ip, payload_val).typeOf(),
+            });
+            return .{ .opt = .{
+                .ty = opt_ty,
+                .val = payload_val,
+            } };
+        },
         .ptr_int => {
             const info = ip.extraData(PtrInt, data);
             return .{ .ptr = .{
@@ -1225,6 +1293,16 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
         .float_f128 => @panic("TODO"),
         .extern_func => @panic("TODO"),
        .func => @panic("TODO"),
+        .only_possible_value => {
+            const ty = @intToEnum(Index, data);
+            return switch (ip.indexToKey(ty)) {
+                .struct_type => .{ .aggregate = .{
+                    .ty = ty,
+                    .fields = &.{},
+                } },
+                else => unreachable,
+            };
+        },
     };
 }
@@ -1359,12 +1437,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
         },
         .struct_type => |struct_type| {
-            if (struct_type.fields_len != 0) {
-                @panic("TODO"); // handle structs other than empty_struct
-            }
-            ip.items.appendAssumeCapacity(.{
-                .tag = .simple_internal,
-                .data = @enumToInt(SimpleInternal.type_empty_struct),
+            ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{
+                .tag = .type_struct,
+                .data = @enumToInt(i),
+            } else if (struct_type.namespace.unwrap()) |i| .{
+                .tag = .type_struct_ns,
+                .data = @enumToInt(i),
+            } else .{
+                .tag = .type_struct,
+                .data = @enumToInt(Module.Struct.OptionalIndex.none),
             });
         },
@@ -1398,6 +1479,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
         .opt => |opt| {
             assert(opt.ty != .none);
+            assert(ip.isOptionalType(opt.ty));
             ip.items.appendAssumeCapacity(if (opt.val == .none) .{
                 .tag = .opt_null,
                 .data = @enumToInt(opt.ty),
@@ -1549,10 +1631,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative;
             try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs);
         },
+        .aggregate => |aggregate| {
+            if (aggregate.fields.len == 0) {
+                ip.items.appendAssumeCapacity(.{
+                    .tag = .only_possible_value,
+                    .data = @enumToInt(aggregate.ty),
+                });
+                return @intToEnum(Index, ip.items.len - 1);
+            }
+            @panic("TODO");
+        },
     }
     return @intToEnum(Index, ip.items.len - 1);
 }
 
+pub fn getAssumeExists(ip: InternPool, key: Key) Index {
+    const adapter: KeyAdapter = .{ .intern_pool = &ip };
+    const index = ip.map.getIndexAdapted(key, adapter).?;
+    return @intToEnum(Index, index);
+}
+
+/// This operation only happens under compile error conditions.
+/// Leak the index until the next garbage collection.
+pub fn remove(ip: *InternPool, index: Index) void {
+    _ = ip;
+    _ = index;
+    @panic("TODO this is a bit problematic to implement, could we maybe just never support a remove() operation on InternPool?");
+}
+
 fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void {
     const limbs_len = @intCast(u32, limbs.len);
     try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len);
@@ -1578,8 +1685,8 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
         ip.extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
             Index => @enumToInt(@field(extra, field.name)),
-            DeclIndex => @enumToInt(@field(extra, field.name)),
-            NamespaceIndex => @enumToInt(@field(extra, field.name)),
+            Module.Decl.Index => @enumToInt(@field(extra, field.name)),
+            Module.Namespace.Index => @enumToInt(@field(extra, field.name)),
             i32 => @bitCast(u32, @field(extra, field.name)),
             Pointer.Flags => @bitCast(u32, @field(extra, field.name)),
             Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
@@ -1635,8 +1742,8 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T {
         @field(result, field.name) = switch (field.type) {
             u32 => int32,
             Index => @intToEnum(Index, int32),
-            DeclIndex => @intToEnum(DeclIndex, int32),
-            NamespaceIndex => @intToEnum(NamespaceIndex, int32),
+            Module.Decl.Index => @intToEnum(Module.Decl.Index, int32),
+            Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32),
             i32 => @bitCast(i32, int32),
             Pointer.Flags => @bitCast(Pointer.Flags, int32),
             Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32),
@@ -1808,6 +1915,20 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
     }
 }
 
+pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex {
+    const tags = ip.items.items(.tag);
+    if (val == .none) return .none;
+    if (tags[@enumToInt(val)] != .type_struct) return .none;
+    const datas = ip.items.items(.data);
+    return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional();
+}
+
+pub fn isOptionalType(ip: InternPool, ty: Index) bool {
+    const tags = ip.items.items(.tag);
+    if (ty == .none) return false;
+    return tags[@enumToInt(ty)] == .type_optional;
+}
+
 pub fn dump(ip: InternPool) void {
     dumpFallible(ip, std.heap.page_allocator) catch return;
 }
@@ -1859,9 +1980,10 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
             .type_error_union => @sizeOf(ErrorUnion),
             .type_enum_simple => @sizeOf(EnumSimple),
             .type_opaque => @sizeOf(Key.OpaqueType),
+            .type_struct => 0,
+            .type_struct_ns => 0,
             .simple_type => 0,
             .simple_value => 0,
-            .simple_internal => 0,
             .ptr_int => @sizeOf(PtrInt),
             .opt_null => 0,
             .opt_payload => 0,
@@ -1887,6 +2009,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
             .float_f128 => @sizeOf(Float128),
             .extern_func => @panic("TODO"),
             .func => @panic("TODO"),
+            .only_possible_value => 0,
         });
     }
     const SortContext = struct {
@@ -1905,3 +2028,34 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
         });
     }
 }
+
+pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct {
+    return ip.allocated_structs.at(@enumToInt(index));
+}
+
+pub fn structPtrConst(ip: InternPool, index: Module.Struct.Index) *const Module.Struct {
+    return ip.allocated_structs.at(@enumToInt(index));
+}
+
+pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct {
+    return structPtrConst(ip, index.unwrap() orelse return null);
+}
+
+pub fn createStruct(
+    ip: *InternPool,
+    gpa: Allocator,
+    initialization: Module.Struct,
+) Allocator.Error!Module.Struct.Index {
+    if (ip.structs_free_list.popOrNull()) |index| return index;
+    const ptr = try ip.allocated_structs.addOne(gpa);
+    ptr.* = initialization;
+    return @intToEnum(Module.Struct.Index, ip.allocated_structs.len - 1);
+}
+
+pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void {
+    ip.structPtr(index).* = undefined;
+    ip.structs_free_list.append(gpa, index) catch {
+        // In order to keep `destroyStruct` a non-fallible function, we ignore memory
+        // allocation failures here, instead leaking the Struct until garbage collection.
+    };
+}
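Aside: `createStruct`/`destroyStruct` above are a classic index-based free list over a `std.SegmentedList` (which, unlike an array list, never moves existing elements, so `*Module.Struct` pointers stay stable across growth). Below is a minimal standalone sketch of that pattern, not part of the commit; the `Pool` name and `u32` index type are illustrative assumptions, and unlike the committed `createStruct` this sketch also writes `init` into a recycled slot:

    const std = @import("std");

    fn Pool(comptime T: type) type {
        return struct {
            // SegmentedList keeps element pointers stable as it grows.
            allocated: std.SegmentedList(T, 0) = .{},
            // Indexes of destroyed slots, recycled LIFO.
            free_list: std.ArrayListUnmanaged(u32) = .{},

            const Self = @This();

            pub fn create(self: *Self, gpa: std.mem.Allocator, init: T) !u32 {
                if (self.free_list.popOrNull()) |index| {
                    self.allocated.at(index).* = init;
                    return index;
                }
                const ptr = try self.allocated.addOne(gpa);
                ptr.* = init;
                return @intCast(u32, self.allocated.len - 1);
            }

            pub fn destroy(self: *Self, gpa: std.mem.Allocator, index: u32) void {
                self.allocated.at(index).* = undefined;
                // Swallow allocation failure: the slot is merely leaked until
                // the next garbage collection, mirroring destroyStruct above.
                self.free_list.append(gpa, index) catch {};
            }
        };
    }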

View file

@@ -839,11 +839,14 @@ pub const Decl = struct {
     /// If the Decl has a value and it is a struct, return it,
     /// otherwise null.
-    pub fn getStruct(decl: *Decl) ?*Struct {
-        if (!decl.owns_tv) return null;
-        const ty = (decl.val.castTag(.ty) orelse return null).data;
-        const struct_obj = (ty.castTag(.@"struct") orelse return null).data;
-        return struct_obj;
+    pub fn getStruct(decl: *Decl, mod: *Module) ?*Struct {
+        return mod.structPtrUnwrap(getStructIndex(decl, mod));
+    }
+
+    pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex {
+        if (!decl.owns_tv) return .none;
+        const ty = (decl.val.castTag(.ty) orelse return .none).data;
+        return mod.intern_pool.indexToStruct(ty.ip_index);
     }
 
     /// If the Decl has a value and it is a union, return it,
@@ -884,32 +887,29 @@
     /// Only returns it if the Decl is the owner.
     pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex {
         if (!decl.owns_tv) return .none;
-        if (decl.val.ip_index == .none) {
-            const ty = (decl.val.castTag(.ty) orelse return .none).data;
-            switch (ty.tag()) {
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    return struct_obj.namespace.toOptional();
-                },
-                .enum_full, .enum_nonexhaustive => {
-                    const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
-                    return enum_obj.namespace.toOptional();
-                },
-                .empty_struct => {
-                    @panic("TODO");
-                },
-                .@"union", .union_safety_tagged, .union_tagged => {
-                    const union_obj = ty.cast(Type.Payload.Union).?.data;
-                    return union_obj.namespace.toOptional();
-                },
-                else => return .none,
-            }
-        }
-        return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
-            .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
-            else => .none,
-        };
+        switch (decl.val.ip_index) {
+            .empty_struct_type => return .none,
+            .none => {
+                const ty = (decl.val.castTag(.ty) orelse return .none).data;
+                switch (ty.tag()) {
+                    .enum_full, .enum_nonexhaustive => {
+                        const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
+                        return enum_obj.namespace.toOptional();
+                    },
+                    .@"union", .union_safety_tagged, .union_tagged => {
+                        const union_obj = ty.cast(Type.Payload.Union).?.data;
+                        return union_obj.namespace.toOptional();
+                    },
+                    else => return .none,
+                }
+            },
+            else => return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+                .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
+                .struct_type => |struct_type| struct_type.namespace,
+                else => .none,
+            },
+        }
     }
/// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer.
@@ -1046,6 +1046,28 @@ pub const Struct = struct {
     is_tuple: bool,
     assumed_runtime_bits: bool = false,
 
+    pub const Index = enum(u32) {
+        _,
+
+        pub fn toOptional(i: Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(i));
+        }
+    };
+
+    pub const OptionalIndex = enum(u32) {
+        none = std.math.maxInt(u32),
+        _,
+
+        pub fn init(oi: ?Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+        }
+
+        pub fn unwrap(oi: OptionalIndex) ?Index {
+            if (oi == .none) return null;
+            return @intToEnum(Index, @enumToInt(oi));
+        }
+    };
+
     pub const Fields = std.StringArrayHashMapUnmanaged(Field);
 
     /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
@@ -1111,12 +1133,7 @@
     }
 
     pub fn srcLoc(s: Struct, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(s.owner_decl);
-        return .{
-            .file_scope = owner_decl.getFileScope(mod),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
-        };
+        return mod.declPtr(s.owner_decl).srcLoc(mod);
     }
 
     pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc {
@@ -3622,6 +3639,16 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace {
     return mod.allocated_namespaces.at(@enumToInt(index));
 }
 
+pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
+    return mod.intern_pool.structPtr(index);
+}
+
+/// This one accepts an index from the InternPool and asserts that it is not
+/// the anonymous empty struct type.
+pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
+    return structPtr(mod, index.unwrap() orelse return null);
+}
+
 /// Returns true if and only if the Decl is the top level struct associated with a File.
 pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
     const decl = mod.declPtr(decl_index);
@@ -4078,7 +4105,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
         if (!decl.owns_tv) continue;
 
-        if (decl.getStruct()) |struct_obj| {
+        if (decl.getStruct(mod)) |struct_obj| {
             struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse {
                 try file.deleted_decls.append(gpa, decl_index);
                 continue;
@@ -4597,36 +4624,50 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
     errdefer new_decl_arena.deinit();
     const new_decl_arena_allocator = new_decl_arena.allocator();
 
-    const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
-    const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
-    const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
-    const ty_ty = comptime Type.type;
-    struct_obj.* = .{
-        .owner_decl = undefined, // set below
+    // Because these three things each reference each other, `undefined`
+    // placeholders are used before being set after the struct type gains an
+    // InternPool index.
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = .none,
+        .ty = undefined,
+        .file_scope = file,
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0, null);
+    const new_decl = mod.declPtr(new_decl_index);
+    errdefer @panic("TODO error handling");
+
+    const struct_index = try mod.createStruct(.{
+        .owner_decl = new_decl_index,
         .fields = .{},
         .zir_index = undefined, // set below
         .layout = .Auto,
         .status = .none,
         .known_non_opv = undefined,
         .is_tuple = undefined, // set below
-        .namespace = try mod.createNamespace(.{
-            .parent = .none,
-            .ty = struct_ty,
-            .file_scope = file,
-        }),
-    };
-    const new_decl_index = try mod.allocateNewDecl(struct_obj.namespace, 0, null);
-    const new_decl = mod.declPtr(new_decl_index);
+        .namespace = new_namespace_index,
+    });
+    errdefer mod.destroyStruct(struct_index);
+
+    const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
+        .index = struct_index.toOptional(),
+        .namespace = new_namespace_index.toOptional(),
+    } });
+    errdefer mod.intern_pool.remove(struct_ty);
+
+    new_namespace.ty = struct_ty.toType();
     file.root_decl = new_decl_index.toOptional();
-    struct_obj.owner_decl = new_decl_index;
     new_decl.name = try file.fullyQualifiedNameZ(gpa);
     new_decl.src_line = 0;
     new_decl.is_pub = true;
     new_decl.is_exported = false;
     new_decl.has_align = false;
     new_decl.has_linksection_or_addrspace = false;
-    new_decl.ty = ty_ty;
-    new_decl.val = struct_val;
+    new_decl.ty = Type.type;
+    new_decl.val = struct_ty.toValue();
     new_decl.@"align" = 0;
     new_decl.@"linksection" = null;
     new_decl.has_tv = true;
@@ -4639,6 +4680,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
     if (file.status == .success_zir) {
         assert(file.zir_loaded);
         const main_struct_inst = Zir.main_struct_inst;
+        const struct_obj = mod.structPtr(struct_index);
         struct_obj.zir_index = main_struct_inst;
         const extended = file.zir.instructions.items(.data)[main_struct_inst].extended;
         const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
@@ -4665,7 +4707,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
         var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null);
         defer wip_captures.deinit();
 
-        if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_obj)) |_| {
+        if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| {
             try wip_captures.finalize();
             new_decl.analysis = .complete;
         } else |err| switch (err) {
@@ -4761,11 +4803,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
     if (mod.declIsRoot(decl_index)) {
         log.debug("semaDecl root {*} ({s})", .{ decl, decl.name });
         const main_struct_inst = Zir.main_struct_inst;
-        const struct_obj = decl.getStruct().?;
+        const struct_index = decl.getStructIndex(mod).unwrap().?;
+        const struct_obj = mod.structPtr(struct_index);
         // This might not have gotten set in `semaFile` if the first time had
         // a ZIR failure, so we set it here in case.
         struct_obj.zir_index = main_struct_inst;
-        try sema.analyzeStructDecl(decl, main_struct_inst, struct_obj);
+        try sema.analyzeStructDecl(decl, main_struct_inst, struct_index);
         decl.analysis = .complete;
         decl.generation = mod.generation;
         return false;
@@ -5970,6 +6013,14 @@ pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
     };
 }
 
+pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index {
+    return mod.intern_pool.createStruct(mod.gpa, initialization);
+}
+
+pub fn destroyStruct(mod: *Module, index: Struct.Index) void {
+    return mod.intern_pool.destroyStruct(mod.gpa, index);
+}
+
 pub fn allocateNewDecl(
     mod: *Module,
     namespace: Namespace.Index,
@@ -7202,12 +7253,7 @@ pub fn atomicPtrAlignment(
 }
 
 pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc {
-    const owner_decl = mod.declPtr(opaque_type.decl);
-    return .{
-        .file_scope = owner_decl.getFileScope(mod),
-        .parent_decl_node = owner_decl.src_node,
-        .lazy = LazySrcLoc.nodeOffset(0),
-    };
+    return mod.declPtr(opaque_type.decl).srcLoc(mod);
 }
 
 pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) ![:0]u8 {
@@ -7221,3 +7267,12 @@ pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File {
 pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.Index {
     return mod.namespacePtr(namespace_index).getDeclIndex(mod);
 }
+
+/// Returns null in the following cases:
+/// * `@TypeOf(.{})`
+/// * A struct which has no fields (`struct {}`).
+/// * Not a struct.
+pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
+    const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null;
+    return mod.structPtr(struct_index);
+}
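Aside: the `Struct.Index`/`Struct.OptionalIndex` pair added in this file encodes an optional 32-bit handle without widening it: `none` is the `maxInt(u32)` sentinel, so optional semantics cost zero extra bits in packed `extra` arrays. A minimal standalone sketch of the pattern (the test is illustrative, not from the commit):

    const std = @import("std");

    pub const Index = enum(u32) {
        _,

        pub fn toOptional(i: Index) OptionalIndex {
            return @intToEnum(OptionalIndex, @enumToInt(i));
        }
    };

    pub const OptionalIndex = enum(u32) {
        // The all-ones bit pattern is reserved as the null sentinel.
        none = std.math.maxInt(u32),
        _,

        pub fn unwrap(oi: OptionalIndex) ?Index {
            if (oi == .none) return null;
            return @intToEnum(Index, @enumToInt(oi));
        }
    };

    test "sentinel-encoded optional index" {
        const i = @intToEnum(Index, 42);
        try std.testing.expect(i.toOptional().unwrap().? == i);
        try std.testing.expect(OptionalIndex.none.unwrap() == null);
    }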

File diff suppressed because it is too large

View file

@@ -180,7 +180,7 @@ pub fn print(
             switch (field_ptr.container_ty.tag()) {
                 .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}),
                 else => {
-                    const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index);
+                    const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod);
                     return writer.print(".{s}", .{field_name});
                 },
             }
@@ -381,21 +381,27 @@ fn printAggregate(
     }
     if (ty.zigTypeTag(mod) == .Struct) {
         try writer.writeAll(".{");
-        const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items);
+        const max_len = std.math.min(ty.structFieldCount(mod), max_aggregate_items);
 
         var i: u32 = 0;
         while (i < max_len) : (i += 1) {
             if (i != 0) try writer.writeAll(", ");
-            switch (ty.tag()) {
-                .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}),
-                else => {},
+            switch (ty.ip_index) {
+                .none => switch (ty.tag()) {
+                    .anon_struct => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
+                    else => {},
+                },
+                else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                    .struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
+                    else => {},
+                },
             }
             try print(.{
-                .ty = ty.structFieldType(i),
+                .ty = ty.structFieldType(i, mod),
                 .val = try val.fieldValue(ty, mod, i),
             }, writer, level - 1, mod);
         }
-        if (ty.structFieldCount() > max_aggregate_items) {
+        if (ty.structFieldCount(mod) > max_aggregate_items) {
             try writer.writeAll(", ...");
         }
         return writer.writeAll("}");

View file

@@ -4119,7 +4119,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const mcv = try self.resolveInst(operand);
     const struct_ty = self.typeOf(operand);
-    const struct_field_ty = struct_ty.structFieldType(index);
+    const struct_field_ty = struct_ty.structFieldType(index, mod);
     const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 
     switch (mcv) {
@@ -5466,10 +5466,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(rwo.reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
 
-            const wrapped_ty = ty.structFieldType(0);
+            const wrapped_ty = ty.structFieldType(0, mod);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
-            const overflow_bit_ty = ty.structFieldType(1);
+            const overflow_bit_ty = ty.structFieldType(1, mod);
             const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
             const raw_cond_reg = try self.register_manager.allocReg(null, gp);
             const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);

View file

@@ -21,7 +21,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
     var maybe_float_bits: ?u16 = null;
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
-            if (ty.containerLayout() == .Packed) return .byval;
+            if (ty.containerLayout(mod) == .Packed) return .byval;
             const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
@@ -31,7 +31,7 @@
             return .integer;
         },
         .Union => {
-            if (ty.containerLayout() == .Packed) return .byval;
+            if (ty.containerLayout(mod) == .Packed) return .byval;
             const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
@@ -90,11 +90,11 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
             return max_count;
         },
         .Struct => {
-            const fields_len = ty.structFieldCount();
+            const fields_len = ty.structFieldCount(mod);
             var count: u8 = 0;
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
+                const field_ty = ty.structFieldType(i, mod);
                 const field_count = countFloats(field_ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 count += field_count;
@@ -125,10 +125,10 @@ pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
             return null;
         },
         .Struct => {
-            const fields_len = ty.structFieldCount();
+            const fields_len = ty.structFieldCount(mod);
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
+                const field_ty = ty.structFieldType(i, mod);
                 if (getFloatArrayType(field_ty, mod)) |some| return some;
             }
             return null;

View file

@@ -2910,7 +2910,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const mcv = try self.resolveInst(operand);
     const struct_ty = self.typeOf(operand);
     const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
-    const struct_field_ty = struct_ty.structFieldType(index);
+    const struct_field_ty = struct_ty.structFieldType(index, mod);
 
     switch (mcv) {
         .dead, .unreach => unreachable,
@@ -5404,10 +5404,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
 
-            const wrapped_ty = ty.structFieldType(0);
+            const wrapped_ty = ty.structFieldType(0, mod);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
 
-            const overflow_bit_ty = ty.structFieldType(1);
+            const overflow_bit_ty = ty.structFieldType(1, mod);
             const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
             const cond_reg = try self.register_manager.allocReg(null, gp);

View file

@@ -32,7 +32,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
             const bit_size = ty.bitSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > 64) return .memory;
                 return .byval;
             }
@@ -40,10 +40,10 @@
             const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= byval_float_count) return .byval;
 
-            const fields = ty.structFieldCount();
+            const fields = ty.structFieldCount(mod);
             var i: u32 = 0;
             while (i < fields) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
+                const field_ty = ty.structFieldType(i, mod);
                 const field_alignment = ty.structFieldAlign(i, mod);
                 const field_size = field_ty.bitSize(mod);
                 if (field_size > 32 or field_alignment > 32) {
@@ -54,7 +54,7 @@
         },
         .Union => {
             const bit_size = ty.bitSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > 64) return .memory;
                 return .byval;
             }
@@ -132,11 +132,11 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
             return max_count;
         },
         .Struct => {
-            const fields_len = ty.structFieldCount();
+            const fields_len = ty.structFieldCount(mod);
             var count: u32 = 0;
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
+                const field_ty = ty.structFieldType(i, mod);
                 const field_count = countFloats(field_ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 count += field_count;

View file

@@ -15,7 +15,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
             const bit_size = ty.bitSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > max_byval_size) return .memory;
                 return .byval;
             }
@@ -26,7 +26,7 @@
         },
         .Union => {
            const bit_size = ty.bitSize(mod);
-            if (ty.containerLayout() == .Packed) {
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > max_byval_size) return .memory;
                 return .byval;
             }

View file

@@ -3993,10 +3993,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(rwo.reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
 
-            const wrapped_ty = ty.structFieldType(0);
+            const wrapped_ty = ty.structFieldType(0, mod);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
-            const overflow_bit_ty = ty.structFieldType(1);
+            const overflow_bit_ty = ty.structFieldType(1, mod);
             const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
             const cond_reg = try self.register_manager.allocReg(null, gp);

View file

@@ -1006,9 +1006,9 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
             if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
             break :blk wasm.Valtype.i32; // represented as pointer to stack
         },
-        .Struct => switch (ty.containerLayout()) {
+        .Struct => switch (ty.containerLayout(mod)) {
             .Packed => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+                const struct_obj = mod.typeToStruct(ty).?;
                 return typeToValtype(struct_obj.backing_int_ty, mod);
             },
             else => wasm.Valtype.i32,
@@ -1017,7 +1017,7 @@
             .direct => wasm.Valtype.v128,
             .unrolled => wasm.Valtype.i32,
         },
-        .Union => switch (ty.containerLayout()) {
+        .Union => switch (ty.containerLayout(mod)) {
             .Packed => {
                 const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory");
                 return typeToValtype(int_ty, mod);
@@ -1747,8 +1747,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
             return ty.hasRuntimeBitsIgnoreComptime(mod);
         },
         .Struct => {
-            if (ty.castTag(.@"struct")) |struct_ty| {
-                const struct_obj = struct_ty.data;
+            if (mod.typeToStruct(ty)) |struct_obj| {
                 if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
                     return isByRef(struct_obj.backing_int_ty, mod);
                 }
@@ -2954,11 +2953,11 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
             const parent_ty = field_ptr.container_ty;
 
             const field_offset = switch (parent_ty.zigTypeTag(mod)) {
-                .Struct => switch (parent_ty.containerLayout()) {
+                .Struct => switch (parent_ty.containerLayout(mod)) {
                     .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod),
                     else => parent_ty.structFieldOffset(field_ptr.field_index, mod),
                 },
-                .Union => switch (parent_ty.containerLayout()) {
+                .Union => switch (parent_ty.containerLayout(mod)) {
                     .Packed => 0,
                     else => blk: {
                         const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod);
@@ -3158,7 +3157,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
             return WValue{ .imm32 = @boolToInt(is_pl) };
         },
         .Struct => {
-            const struct_obj = ty.castTag(.@"struct").?.data;
+            const struct_obj = mod.typeToStruct(ty).?;
             assert(struct_obj.layout == .Packed);
             var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
             val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
@@ -3225,7 +3224,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
             return WValue{ .imm32 = 0xaaaaaaaa };
         },
         .Struct => {
-            const struct_obj = ty.castTag(.@"struct").?.data;
+            const struct_obj = mod.typeToStruct(ty).?;
             assert(struct_obj.layout == .Packed);
             return func.emitUndefined(struct_obj.backing_int_ty);
         },
@@ -3635,7 +3634,7 @@ fn structFieldPtr(
 ) InnerError!WValue {
     const mod = func.bin_file.base.options.module.?;
     const result_ty = func.typeOfIndex(inst);
-    const offset = switch (struct_ty.containerLayout()) {
+    const offset = switch (struct_ty.containerLayout(mod)) {
         .Packed => switch (struct_ty.zigTypeTag(mod)) {
             .Struct => offset: {
                 if (result_ty.ptrInfo(mod).host_size != 0) {
@@ -3668,13 +3667,13 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const struct_ty = func.typeOf(struct_field.struct_operand);
     const operand = try func.resolveInst(struct_field.struct_operand);
     const field_index = struct_field.field_index;
-    const field_ty = struct_ty.structFieldType(field_index);
+    const field_ty = struct_ty.structFieldType(field_index, mod);
     if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
 
-    const result = switch (struct_ty.containerLayout()) {
+    const result = switch (struct_ty.containerLayout(mod)) {
         .Packed => switch (struct_ty.zigTypeTag(mod)) {
             .Struct => result: {
-                const struct_obj = struct_ty.castTag(.@"struct").?.data;
+                const struct_obj = mod.typeToStruct(struct_ty).?;
                 const offset = struct_obj.packedFieldBitOffset(mod, field_index);
                 const backing_ty = struct_obj.backing_int_ty;
                 const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
@@ -4998,12 +4997,12 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             }
             break :result_value result;
         },
-        .Struct => switch (result_ty.containerLayout()) {
+        .Struct => switch (result_ty.containerLayout(mod)) {
            .Packed => {
                 if (isByRef(result_ty, mod)) {
                     return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
                 }
-                const struct_obj = result_ty.castTag(.@"struct").?.data;
+                const struct_obj = mod.typeToStruct(result_ty).?;
                 const fields = struct_obj.fields.values();
                 const backing_type = struct_obj.backing_int_ty;
@@ -5051,7 +5050,7 @@
         for (elements, 0..) |elem, elem_index| {
             if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
 
-            const elem_ty = result_ty.structFieldType(elem_index);
+            const elem_ty = result_ty.structFieldType(elem_index, mod);
             const elem_size = @intCast(u32, elem_ty.abiSize(mod));
             const value = try func.resolveInst(elem);
             try func.store(offset, value, elem_ty, 0);

View file

@ -26,14 +26,14 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none; if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
switch (ty.zigTypeTag(mod)) { switch (ty.zigTypeTag(mod)) {
.Struct => { .Struct => {
if (ty.containerLayout() == .Packed) { if (ty.containerLayout(mod) == .Packed) {
if (ty.bitSize(mod) <= 64) return direct; if (ty.bitSize(mod) <= 64) return direct;
return .{ .direct, .direct }; return .{ .direct, .direct };
} }
// When the struct type is non-scalar // When the struct type is non-scalar
if (ty.structFieldCount() > 1) return memory; if (ty.structFieldCount(mod) > 1) return memory;
// When the struct's alignment is non-natural // When the struct's alignment is non-natural
const field = ty.structFields().values()[0]; const field = ty.structFields(mod).values()[0];
if (field.abi_align != 0) { if (field.abi_align != 0) {
if (field.abi_align > field.ty.abiAlignment(mod)) { if (field.abi_align > field.ty.abiAlignment(mod)) {
return memory; return memory;
@ -64,7 +64,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
return direct; return direct;
}, },
.Union => { .Union => {
if (ty.containerLayout() == .Packed) { if (ty.containerLayout(mod) == .Packed) {
if (ty.bitSize(mod) <= 64) return direct; if (ty.bitSize(mod) <= 64) return direct;
return .{ .direct, .direct }; return .{ .direct, .direct };
} }
@@ -96,19 +96,19 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
pub fn scalarType(ty: Type, mod: *Module) Type { pub fn scalarType(ty: Type, mod: *Module) Type {
switch (ty.zigTypeTag(mod)) { switch (ty.zigTypeTag(mod)) {
.Struct => { .Struct => {
switch (ty.containerLayout()) { switch (ty.containerLayout(mod)) {
.Packed => { .Packed => {
const struct_obj = ty.castTag(.@"struct").?.data; const struct_obj = mod.typeToStruct(ty).?;
return scalarType(struct_obj.backing_int_ty, mod); return scalarType(struct_obj.backing_int_ty, mod);
}, },
else => { else => {
std.debug.assert(ty.structFieldCount() == 1); std.debug.assert(ty.structFieldCount(mod) == 1);
return scalarType(ty.structFieldType(0), mod); return scalarType(ty.structFieldType(0, mod), mod);
}, },
} }
}, },
.Union => { .Union => {
if (ty.containerLayout() != .Packed) { if (ty.containerLayout(mod) != .Packed) {
const layout = ty.unionGetLayout(mod); const layout = ty.unionGetLayout(mod);
if (layout.payload_size == 0 and layout.tag_size != 0) { if (layout.payload_size == 0 and layout.tag_size != 0) {
return scalarType(ty.unionTagTypeSafety().?, mod); return scalarType(ty.unionTagTypeSafety().?, mod);
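
Note why even the ABI classifier needs the `mod` parameter now: `containerLayout` is answered by the struct object, which is reachable only through the Module, so the Type alone can no longer answer it. The packed case of `scalarType` reduces to the sketch below, with all names taken from the hunk above:

    // Packed structs are ABI-equivalent to their backing integer, so
    // scalarization recurses into it.
    fn packedStructScalarType(ty: Type, mod: *Module) Type {
        const struct_obj = mod.typeToStruct(ty).?;
        return scalarType(struct_obj.backing_int_ty, mod);
    }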


@@ -3252,13 +3252,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetMem( try self.genSetMem(
.{ .frame = frame_index }, .{ .frame = frame_index },
@intCast(i32, tuple_ty.structFieldOffset(1, mod)), @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
tuple_ty.structFieldType(1), tuple_ty.structFieldType(1, mod),
.{ .eflags = cc }, .{ .eflags = cc },
); );
try self.genSetMem( try self.genSetMem(
.{ .frame = frame_index }, .{ .frame = frame_index },
@intCast(i32, tuple_ty.structFieldOffset(0, mod)), @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
tuple_ty.structFieldType(0), tuple_ty.structFieldType(0, mod),
partial_mcv, partial_mcv,
); );
break :result .{ .load_frame = .{ .index = frame_index } }; break :result .{ .load_frame = .{ .index = frame_index } };
@@ -3289,7 +3289,7 @@ fn genSetFrameTruncatedOverflowCompare(
}; };
defer if (src_lock) |lock| self.register_manager.unlockReg(lock); defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
const ty = tuple_ty.structFieldType(0); const ty = tuple_ty.structFieldType(0, mod);
const int_info = ty.intInfo(mod); const int_info = ty.intInfo(mod);
const hi_limb_bits = (int_info.bits - 1) % 64 + 1; const hi_limb_bits = (int_info.bits - 1) % 64 + 1;
@@ -3336,7 +3336,7 @@ fn genSetFrameTruncatedOverflowCompare(
try self.genSetMem( try self.genSetMem(
.{ .frame = frame_index }, .{ .frame = frame_index },
@intCast(i32, tuple_ty.structFieldOffset(1, mod)), @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
tuple_ty.structFieldType(1), tuple_ty.structFieldType(1, mod),
if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
); );
} }
@@ -3393,13 +3393,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetMem( try self.genSetMem(
.{ .frame = frame_index }, .{ .frame = frame_index },
@intCast(i32, tuple_ty.structFieldOffset(0, mod)), @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
tuple_ty.structFieldType(0), tuple_ty.structFieldType(0, mod),
partial_mcv, partial_mcv,
); );
try self.genSetMem( try self.genSetMem(
.{ .frame = frame_index }, .{ .frame = frame_index },
@intCast(i32, tuple_ty.structFieldOffset(1, mod)), @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
tuple_ty.structFieldType(1), tuple_ty.structFieldType(1, mod),
.{ .immediate = 0 }, // cc being set is impossible .{ .immediate = 0 }, // cc being set is impossible
); );
} else try self.genSetFrameTruncatedOverflowCompare( } else try self.genSetFrameTruncatedOverflowCompare(
@@ -5563,7 +5563,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
const ptr_field_ty = self.typeOfIndex(inst); const ptr_field_ty = self.typeOfIndex(inst);
const ptr_container_ty = self.typeOf(operand); const ptr_container_ty = self.typeOf(operand);
const container_ty = ptr_container_ty.childType(mod); const container_ty = ptr_container_ty.childType(mod);
const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) {
.Auto, .Extern => container_ty.structFieldOffset(index, mod), .Auto, .Extern => container_ty.structFieldOffset(index, mod),
.Packed => if (container_ty.zigTypeTag(mod) == .Struct and .Packed => if (container_ty.zigTypeTag(mod) == .Struct and
ptr_field_ty.ptrInfo(mod).host_size == 0) ptr_field_ty.ptrInfo(mod).host_size == 0)
@@ -5591,16 +5591,16 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const container_ty = self.typeOf(operand); const container_ty = self.typeOf(operand);
const container_rc = regClassForType(container_ty, mod); const container_rc = regClassForType(container_ty, mod);
const field_ty = container_ty.structFieldType(index); const field_ty = container_ty.structFieldType(index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
const field_rc = regClassForType(field_ty, mod); const field_rc = regClassForType(field_ty, mod);
const field_is_gp = field_rc.supersetOf(gp); const field_is_gp = field_rc.supersetOf(gp);
const src_mcv = try self.resolveInst(operand); const src_mcv = try self.resolveInst(operand);
const field_off = switch (container_ty.containerLayout()) { const field_off = switch (container_ty.containerLayout(mod)) {
.Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8), .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8),
.Packed => if (container_ty.castTag(.@"struct")) |struct_obj| .Packed => if (mod.typeToStruct(container_ty)) |struct_obj|
struct_obj.data.packedFieldBitOffset(mod, index) struct_obj.packedFieldBitOffset(mod, index)
else else
0, 0,
}; };
@@ -10036,13 +10036,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
try self.genSetMem( try self.genSetMem(
base, base,
disp + @intCast(i32, ty.structFieldOffset(0, mod)), disp + @intCast(i32, ty.structFieldOffset(0, mod)),
ty.structFieldType(0), ty.structFieldType(0, mod),
.{ .register = ro.reg }, .{ .register = ro.reg },
); );
try self.genSetMem( try self.genSetMem(
base, base,
disp + @intCast(i32, ty.structFieldOffset(1, mod)), disp + @intCast(i32, ty.structFieldOffset(1, mod)),
ty.structFieldType(1), ty.structFieldType(1, mod),
.{ .eflags = ro.eflags }, .{ .eflags = ro.eflags },
); );
}, },
@@ -11259,8 +11259,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
.Struct => { .Struct => {
const frame_index = const frame_index =
try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
if (result_ty.containerLayout() == .Packed) { if (result_ty.containerLayout(mod) == .Packed) {
const struct_obj = result_ty.castTag(.@"struct").?.data; const struct_obj = mod.typeToStruct(result_ty).?;
try self.genInlineMemset( try self.genInlineMemset(
.{ .lea_frame = .{ .index = frame_index } }, .{ .lea_frame = .{ .index = frame_index } },
.{ .immediate = 0 }, .{ .immediate = 0 },
@@ -11269,7 +11269,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
for (elements, 0..) |elem, elem_i| { for (elements, 0..) |elem, elem_i| {
if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i); const elem_ty = result_ty.structFieldType(elem_i, mod);
const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod));
if (elem_bit_size > 64) { if (elem_bit_size > 64) {
return self.fail( return self.fail(
@@ -11341,7 +11341,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
} else for (elements, 0..) |elem, elem_i| { } else for (elements, 0..) |elem, elem_i| {
if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i); const elem_ty = result_ty.structFieldType(elem_i, mod);
const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod));
const elem_mcv = try self.resolveInst(elem); const elem_mcv = try self.resolveInst(elem);
const mat_elem_mcv = switch (elem_mcv) { const mat_elem_mcv = switch (elem_mcv) {
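
The overflow instructions above all store through a two-field result tuple: field 0 is the wrapped value, field 1 the overflow flag, and both the offset and the type query now thread `mod`. Bundled into one hypothetical helper for clarity (the function is not in the diff, only the calls it wraps are):

    // Everything needed to address one field of the (value, overflow) tuple.
    fn tupleField(tuple_ty: Type, index: usize, mod: *Module) struct { off: i32, ty: Type } {
        return .{
            .off = @intCast(i32, tuple_ty.structFieldOffset(index, mod)),
            .ty = tuple_ty.structFieldType(index, mod),
        };
    }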


@@ -41,7 +41,7 @@ pub fn classifyWindows(ty: Type, mod: *Module) Class {
1, 2, 4, 8 => return .integer, 1, 2, 4, 8 => return .integer,
else => switch (ty.zigTypeTag(mod)) { else => switch (ty.zigTypeTag(mod)) {
.Int => return .win_i128, .Int => return .win_i128,
.Struct, .Union => if (ty.containerLayout() == .Packed) { .Struct, .Union => if (ty.containerLayout(mod) == .Packed) {
return .win_i128; return .win_i128;
} else { } else {
return .memory; return .memory;
@@ -210,7 +210,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
// "If the size of the aggregate exceeds a single eightbyte, each is classified // "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.". // separately.".
const ty_size = ty.abiSize(mod); const ty_size = ty.abiSize(mod);
if (ty.containerLayout() == .Packed) { if (ty.containerLayout(mod) == .Packed) {
assert(ty_size <= 128); assert(ty_size <= 128);
result[0] = .integer; result[0] = .integer;
if (ty_size > 64) result[1] = .integer; if (ty_size > 64) result[1] = .integer;
@@ -221,7 +221,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
var result_i: usize = 0; // out of 8 var result_i: usize = 0; // out of 8
var byte_i: usize = 0; // out of 8 var byte_i: usize = 0; // out of 8
const fields = ty.structFields(); const fields = ty.structFields(mod);
for (fields.values()) |field| { for (fields.values()) |field| {
if (field.abi_align != 0) { if (field.abi_align != 0) {
if (field.abi_align < field.ty.abiAlignment(mod)) { if (field.abi_align < field.ty.abiAlignment(mod)) {
@@ -329,7 +329,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
// "If the size of the aggregate exceeds a single eightbyte, each is classified // "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.". // separately.".
const ty_size = ty.abiSize(mod); const ty_size = ty.abiSize(mod);
if (ty.containerLayout() == .Packed) { if (ty.containerLayout(mod) == .Packed) {
assert(ty_size <= 128); assert(ty_size <= 128);
result[0] = .integer; result[0] = .integer;
if (ty_size > 64) result[1] = .integer; if (ty_size > 64) result[1] = .integer;
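
The SysV classifier also consults per-field alignment overrides through `structFields(mod)`. The consequent of that check is truncated in this view; returning the memory class is the SysV rule for under-aligned fields and is assumed in this sketch:

    // An explicit alignment below the field's natural alignment makes the
    // aggregate ineligible for register passing (abi_align of 0 means natural).
    fn hasUnderAlignedField(ty: Type, mod: *Module) bool {
        const fields = ty.structFields(mod);
        for (fields.values()) |field| {
            if (field.abi_align != 0 and field.abi_align < field.ty.abiAlignment(mod))
                return true;
        }
        return false;
    }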


@@ -503,8 +503,8 @@ pub fn generateSymbol(
return Result.ok; return Result.ok;
}, },
.Struct => { .Struct => {
if (typed_value.ty.containerLayout() == .Packed) { if (typed_value.ty.containerLayout(mod) == .Packed) {
const struct_obj = typed_value.ty.castTag(.@"struct").?.data; const struct_obj = mod.typeToStruct(typed_value.ty).?;
const fields = struct_obj.fields.values(); const fields = struct_obj.fields.values();
const field_vals = typed_value.val.castTag(.aggregate).?.data; const field_vals = typed_value.val.castTag(.aggregate).?.data;
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
@@ -539,7 +539,7 @@ pub fn generateSymbol(
const struct_begin = code.items.len; const struct_begin = code.items.len;
const field_vals = typed_value.val.castTag(.aggregate).?.data; const field_vals = typed_value.val.castTag(.aggregate).?.data;
for (field_vals, 0..) |field_val, index| { for (field_vals, 0..) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index); const field_ty = typed_value.ty.structFieldType(index, mod);
if (!field_ty.hasRuntimeBits(mod)) continue; if (!field_ty.hasRuntimeBits(mod)) continue;
switch (try generateSymbol(bin_file, src_loc, .{ switch (try generateSymbol(bin_file, src_loc, .{
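
`generateSymbol` walks field values and emits bytes only for fields that exist at runtime; both the field count and the per-index type lookup now go through `mod`. A hypothetical helper sketching the skip logic, using only calls visible in this commit:

    // Count the fields that actually contribute bytes to the symbol.
    fn runtimeFieldCount(ty: Type, mod: *Module) usize {
        var n: usize = 0;
        for (0..ty.structFieldCount(mod)) |i| {
            if (ty.structFieldType(i, mod).hasRuntimeBits(mod)) n += 1;
        }
        return n;
    }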


@@ -820,7 +820,7 @@ pub const DeclGen = struct {
try dg.renderValue(writer, Type.bool, val, initializer_type); try dg.renderValue(writer, Type.bool, val, initializer_type);
return writer.writeAll(" }"); return writer.writeAll(" }");
}, },
.Struct => switch (ty.containerLayout()) { .Struct => switch (ty.containerLayout(mod)) {
.Auto, .Extern => { .Auto, .Extern => {
if (!location.isInitializer()) { if (!location.isInitializer()) {
try writer.writeByte('('); try writer.writeByte('(');
@@ -830,9 +830,9 @@ pub const DeclGen = struct {
try writer.writeByte('{'); try writer.writeByte('{');
var empty = true; var empty = true;
for (0..ty.structFieldCount()) |field_i| { for (0..ty.structFieldCount(mod)) |field_i| {
if (ty.structFieldIsComptime(field_i)) continue; if (ty.structFieldIsComptime(field_i, mod)) continue;
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBits(mod)) continue; if (!field_ty.hasRuntimeBits(mod)) continue;
if (!empty) try writer.writeByte(','); if (!empty) try writer.writeByte(',');
@@ -1328,7 +1328,7 @@ pub const DeclGen = struct {
}, },
else => unreachable, else => unreachable,
}, },
.Struct => switch (ty.containerLayout()) { .Struct => switch (ty.containerLayout(mod)) {
.Auto, .Extern => { .Auto, .Extern => {
const field_vals = val.castTag(.aggregate).?.data; const field_vals = val.castTag(.aggregate).?.data;
@@ -1341,8 +1341,8 @@ pub const DeclGen = struct {
try writer.writeByte('{'); try writer.writeByte('{');
var empty = true; var empty = true;
for (field_vals, 0..) |field_val, field_i| { for (field_vals, 0..) |field_val, field_i| {
if (ty.structFieldIsComptime(field_i)) continue; if (ty.structFieldIsComptime(field_i, mod)) continue;
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeByte(','); if (!empty) try writer.writeByte(',');
@@ -1363,8 +1363,8 @@ pub const DeclGen = struct {
var eff_num_fields: usize = 0; var eff_num_fields: usize = 0;
for (0..field_vals.len) |field_i| { for (0..field_vals.len) |field_i| {
if (ty.structFieldIsComptime(field_i)) continue; if (ty.structFieldIsComptime(field_i, mod)) continue;
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
eff_num_fields += 1; eff_num_fields += 1;
@@ -1386,8 +1386,8 @@ pub const DeclGen = struct {
var eff_index: usize = 0; var eff_index: usize = 0;
var needs_closing_paren = false; var needs_closing_paren = false;
for (field_vals, 0..) |field_val, field_i| { for (field_vals, 0..) |field_val, field_i| {
if (ty.structFieldIsComptime(field_i)) continue; if (ty.structFieldIsComptime(field_i, mod)) continue;
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const cast_context = IntCastContext{ .value = .{ .value = field_val } }; const cast_context = IntCastContext{ .value = .{ .value = field_val } };
@@ -1416,8 +1416,8 @@ pub const DeclGen = struct {
// a << a_off | b << b_off | c << c_off // a << a_off | b << b_off | c << c_off
var empty = true; var empty = true;
for (field_vals, 0..) |field_val, field_i| { for (field_vals, 0..) |field_val, field_i| {
if (ty.structFieldIsComptime(field_i)) continue; if (ty.structFieldIsComptime(field_i, mod)) continue;
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(" | "); if (!empty) try writer.writeAll(" | ");
@@ -1453,7 +1453,7 @@ pub const DeclGen = struct {
const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?; const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?;
const field_ty = ty.unionFields().values()[field_i].ty; const field_ty = ty.unionFields().values()[field_i].ty;
const field_name = ty.unionFields().keys()[field_i]; const field_name = ty.unionFields().keys()[field_i];
if (ty.containerLayout() == .Packed) { if (ty.containerLayout(mod) == .Packed) {
if (field_ty.hasRuntimeBits(mod)) { if (field_ty.hasRuntimeBits(mod)) {
if (field_ty.isPtrAtRuntime(mod)) { if (field_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('('); try writer.writeByte('(');
@@ -5218,25 +5218,25 @@ fn fieldLocation(
end: void, end: void,
} { } {
return switch (container_ty.zigTypeTag(mod)) { return switch (container_ty.zigTypeTag(mod)) {
.Struct => switch (container_ty.containerLayout()) { .Struct => switch (container_ty.containerLayout(mod)) {
.Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| { .Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| {
if (container_ty.structFieldIsComptime(next_field_index)) continue; if (container_ty.structFieldIsComptime(next_field_index, mod)) continue;
const field_ty = container_ty.structFieldType(next_field_index); const field_ty = container_ty.structFieldType(next_field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
break .{ .field = if (container_ty.isSimpleTuple()) break .{ .field = if (container_ty.isSimpleTuple())
.{ .field = next_field_index } .{ .field = next_field_index }
else else
.{ .identifier = container_ty.structFieldName(next_field_index) } }; .{ .identifier = container_ty.structFieldName(next_field_index, mod) } };
} else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin, } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
.Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0) .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0)
.{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) } .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) }
else else
.begin, .begin,
}, },
.Union => switch (container_ty.containerLayout()) { .Union => switch (container_ty.containerLayout(mod)) {
.Auto, .Extern => { .Auto, .Extern => {
const field_ty = container_ty.structFieldType(field_index); const field_ty = container_ty.structFieldType(field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
return if (container_ty.unionTagTypeSafety() != null and return if (container_ty.unionTagTypeSafety() != null and
!container_ty.unionHasAllZeroBitFieldTypes(mod)) !container_ty.unionHasAllZeroBitFieldTypes(mod))
@@ -5417,101 +5417,111 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
// Ensure complete type definition is visible before accessing fields. // Ensure complete type definition is visible before accessing fields.
_ = try f.typeToIndex(struct_ty, .complete); _ = try f.typeToIndex(struct_ty, .complete);
const field_name: CValue = switch (struct_ty.tag()) { const field_name: CValue = switch (struct_ty.ip_index) {
.tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { .none => switch (struct_ty.tag()) {
.Auto, .Extern => if (struct_ty.isSimpleTuple()) .tuple, .anon_struct => if (struct_ty.isSimpleTuple())
.{ .field = extra.field_index } .{ .field = extra.field_index }
else else
.{ .identifier = struct_ty.structFieldName(extra.field_index) }, .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
.Packed => {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const int_info = struct_ty.intInfo(mod);
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout(mod) == .Packed) {
const operand_lval = if (struct_byval == .constant) blk: {
const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); const operand_local = try f.allocLocal(inst, struct_ty);
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try f.writeCValue(writer, operand_local, .Other);
try writer.writeAll(" = ");
const field_int_signedness = if (inst_ty.isAbiInt(mod)) try f.writeCValue(writer, struct_byval, .Initializer);
inst_ty.intInfo(mod).signedness try writer.writeAll(";\n");
else break :blk operand_local;
.unsigned; } else struct_byval;
const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
try writer.writeAll(" = zig_wrap_");
try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty);
try writer.writeAll("((");
try f.renderType(writer, field_int_ty);
try writer.writeByte(')');
const cant_cast = int_info.bits > 64;
if (cant_cast) {
if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
if (bit_offset > 0) {
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
try f.writeCValue(writer, struct_byval, .Other);
if (bit_offset > 0) {
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
}
if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
try writer.writeAll(");\n");
if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
const local = try f.allocLocal(inst, inst_ty); const local = try f.allocLocal(inst, inst_ty);
try writer.writeAll("memcpy("); try writer.writeAll("memcpy(&");
try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); try f.writeCValue(writer, local, .Other);
try writer.writeAll(", "); try writer.writeAll(", &");
try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); try f.writeCValue(writer, operand_lval, .Other);
try writer.writeAll(", sizeof("); try writer.writeAll(", sizeof(");
try f.renderType(writer, inst_ty); try f.renderType(writer, inst_ty);
try writer.writeAll("));\n"); try writer.writeAll("));\n");
try freeLocal(f, inst, temp_local.new_local, 0);
if (struct_byval == .constant) {
try freeLocal(f, inst, operand_lval.new_local, 0);
}
return local; return local;
} else field_name: {
const name = struct_ty.unionFields().keys()[extra.field_index];
break :field_name if (struct_ty.unionTagTypeSafety()) |_|
.{ .payload_identifier = name }
else
.{ .identifier = name };
}, },
else => unreachable,
}, },
.@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout() == .Packed) { else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
const operand_lval = if (struct_byval == .constant) blk: { .struct_type => switch (struct_ty.containerLayout(mod)) {
const operand_local = try f.allocLocal(inst, struct_ty); .Auto, .Extern => if (struct_ty.isSimpleTuple())
try f.writeCValue(writer, operand_local, .Other); .{ .field = extra.field_index }
try writer.writeAll(" = "); else
try f.writeCValue(writer, struct_byval, .Initializer); .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
try writer.writeAll(";\n"); .Packed => {
break :blk operand_local; const struct_obj = mod.typeToStruct(struct_ty).?;
} else struct_byval; const int_info = struct_ty.intInfo(mod);
const local = try f.allocLocal(inst, inst_ty); const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
try writer.writeAll("memcpy(&");
try f.writeCValue(writer, local, .Other);
try writer.writeAll(", &");
try f.writeCValue(writer, operand_lval, .Other);
try writer.writeAll(", sizeof(");
try f.renderType(writer, inst_ty);
try writer.writeAll("));\n");
if (struct_byval == .constant) { const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
try freeLocal(f, inst, operand_lval.new_local, 0); const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
}
return local; const field_int_signedness = if (inst_ty.isAbiInt(mod))
} else field_name: { inst_ty.intInfo(mod).signedness
const name = struct_ty.unionFields().keys()[extra.field_index]; else
break :field_name if (struct_ty.unionTagTypeSafety()) |_| .unsigned;
.{ .payload_identifier = name } const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
else
.{ .identifier = name }; const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
try writer.writeAll(" = zig_wrap_");
try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty);
try writer.writeAll("((");
try f.renderType(writer, field_int_ty);
try writer.writeByte(')');
const cant_cast = int_info.bits > 64;
if (cant_cast) {
if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
if (bit_offset > 0) {
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
try f.writeCValue(writer, struct_byval, .Other);
if (bit_offset > 0) {
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
}
if (cant_cast) try writer.writeByte(')');
try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits);
try writer.writeAll(");\n");
if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local;
const local = try f.allocLocal(inst, inst_ty);
try writer.writeAll("memcpy(");
try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument);
try writer.writeAll(", sizeof(");
try f.renderType(writer, inst_ty);
try writer.writeAll("));\n");
try freeLocal(f, inst, temp_local.new_local, 0);
return local;
},
},
else => unreachable,
}, },
else => unreachable,
}; };
const local = try f.allocLocal(inst, inst_ty); const local = try f.allocLocal(inst, inst_ty);
@@ -6805,17 +6815,17 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try a.end(f, writer); try a.end(f, writer);
} }
}, },
.Struct => switch (inst_ty.containerLayout()) { .Struct => switch (inst_ty.containerLayout(mod)) {
.Auto, .Extern => for (resolved_elements, 0..) |element, field_i| { .Auto, .Extern => for (resolved_elements, 0..) |element, field_i| {
if (inst_ty.structFieldIsComptime(field_i)) continue; if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
const field_ty = inst_ty.structFieldType(field_i); const field_ty = inst_ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const a = try Assignment.start(f, writer, field_ty); const a = try Assignment.start(f, writer, field_ty);
try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple()) try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple())
.{ .field = field_i } .{ .field = field_i }
else else
.{ .identifier = inst_ty.structFieldName(field_i) }); .{ .identifier = inst_ty.structFieldName(field_i, mod) });
try a.assign(f, writer); try a.assign(f, writer);
try f.writeCValue(writer, element, .Other); try f.writeCValue(writer, element, .Other);
try a.end(f, writer); try a.end(f, writer);
@@ -6831,8 +6841,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
var empty = true; var empty = true;
for (0..elements.len) |field_i| { for (0..elements.len) |field_i| {
if (inst_ty.structFieldIsComptime(field_i)) continue; if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
const field_ty = inst_ty.structFieldType(field_i); const field_ty = inst_ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) { if (!empty) {
@@ -6844,8 +6854,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
} }
empty = true; empty = true;
for (resolved_elements, 0..) |element, field_i| { for (resolved_elements, 0..) |element, field_i| {
if (inst_ty.structFieldIsComptime(field_i)) continue; if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
const field_ty = inst_ty.structFieldType(field_i); const field_ty = inst_ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(", "); if (!empty) try writer.writeAll(", ");
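
The large `airStructFieldVal` rewrite above is transitional machinery: a type whose `ip_index` is `.none` still carries a legacy payload tag and takes the old path, while a migrated type is classified through `mod.intern_pool.indexToKey`. A self-contained toy of that sentinel dispatch; the `Index` enum here is illustrative, not the compiler's:

    const std = @import("std");

    // 0 means "not interned yet, consult the legacy representation";
    // any other value is an index into the pool.
    const Index = enum(u32) { none = 0, _ };

    fn representation(idx: Index) []const u8 {
        return switch (idx) {
            .none => "legacy Type payload",
            else => "InternPool key",
        };
    }

    test "sentinel dispatch" {
        try std.testing.expectEqualStrings("legacy Type payload", representation(.none));
        try std.testing.expectEqualStrings("InternPool key", representation(@intToEnum(Index, 7)));
    }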


@@ -299,7 +299,7 @@ pub const CType = extern union {
pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs { pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs {
return init( return init(
struct_ty.structFieldAlign(field_i, mod), struct_ty.structFieldAlign(field_i, mod),
struct_ty.structFieldType(field_i).abiAlignment(mod), struct_ty.structFieldType(field_i, mod).abiAlignment(mod),
); );
} }
pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs { pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs {
@@ -1486,23 +1486,23 @@ pub const CType = extern union {
} }
}, },
.Struct, .Union => |zig_ty_tag| if (ty.containerLayout() == .Packed) { .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
if (ty.castTag(.@"struct")) |struct_obj| { if (mod.typeToStruct(ty)) |struct_obj| {
try self.initType(struct_obj.data.backing_int_ty, kind, lookup); try self.initType(struct_obj.backing_int_ty, kind, lookup);
} else { } else {
const bits = @intCast(u16, ty.bitSize(mod)); const bits = @intCast(u16, ty.bitSize(mod));
const int_ty = try mod.intType(.unsigned, bits); const int_ty = try mod.intType(.unsigned, bits);
try self.initType(int_ty, kind, lookup); try self.initType(int_ty, kind, lookup);
} }
} else if (ty.isTupleOrAnonStruct()) { } else if (ty.isTupleOrAnonStruct(mod)) {
if (lookup.isMutable()) { if (lookup.isMutable()) {
for (0..switch (zig_ty_tag) { for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(), .Struct => ty.structFieldCount(mod),
.Union => ty.unionFields().count(), .Union => ty.unionFields().count(),
else => unreachable, else => unreachable,
}) |field_i| { }) |field_i| {
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
_ = try lookup.typeToIndex(field_ty, switch (kind) { _ = try lookup.typeToIndex(field_ty, switch (kind) {
.forward, .forward_parameter => .forward, .forward, .forward_parameter => .forward,
@@ -1579,11 +1579,11 @@ pub const CType = extern union {
} else { } else {
var is_packed = false; var is_packed = false;
for (0..switch (zig_ty_tag) { for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(), .Struct => ty.structFieldCount(mod),
.Union => ty.unionFields().count(), .Union => ty.unionFields().count(),
else => unreachable, else => unreachable,
}) |field_i| { }) |field_i| {
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = AlignAs.fieldAlign(ty, field_i, mod); const field_align = AlignAs.fieldAlign(ty, field_i, mod);
@@ -1929,15 +1929,15 @@ pub const CType = extern union {
=> { => {
const zig_ty_tag = ty.zigTypeTag(mod); const zig_ty_tag = ty.zigTypeTag(mod);
const fields_len = switch (zig_ty_tag) { const fields_len = switch (zig_ty_tag) {
.Struct => ty.structFieldCount(), .Struct => ty.structFieldCount(mod),
.Union => ty.unionFields().count(), .Union => ty.unionFields().count(),
else => unreachable, else => unreachable,
}; };
var c_fields_len: usize = 0; var c_fields_len: usize = 0;
for (0..fields_len) |field_i| { for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
c_fields_len += 1; c_fields_len += 1;
} }
@@ -1945,8 +1945,8 @@ pub const CType = extern union {
const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
var c_field_i: usize = 0; var c_field_i: usize = 0;
for (0..fields_len) |field_i| { for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1; defer c_field_i += 1;
@@ -1955,7 +1955,7 @@ pub const CType = extern union {
std.fmt.allocPrintZ(arena, "f{}", .{field_i}) std.fmt.allocPrintZ(arena, "f{}", .{field_i})
else else
arena.dupeZ(u8, switch (zig_ty_tag) { arena.dupeZ(u8, switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i), .Struct => ty.structFieldName(field_i, mod),
.Union => ty.unionFields().keys()[field_i], .Union => ty.unionFields().keys()[field_i],
else => unreachable, else => unreachable,
}), }),
@@ -2074,7 +2074,7 @@ pub const CType = extern union {
.fwd_anon_struct, .fwd_anon_struct,
.fwd_anon_union, .fwd_anon_union,
=> { => {
if (!ty.isTupleOrAnonStruct()) return false; if (!ty.isTupleOrAnonStruct(mod)) return false;
var name_buf: [ var name_buf: [
std.fmt.count("f{}", .{std.math.maxInt(usize)}) std.fmt.count("f{}", .{std.math.maxInt(usize)})
@@ -2084,12 +2084,12 @@ pub const CType = extern union {
const zig_ty_tag = ty.zigTypeTag(mod); const zig_ty_tag = ty.zigTypeTag(mod);
var c_field_i: usize = 0; var c_field_i: usize = 0;
for (0..switch (zig_ty_tag) { for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(), .Struct => ty.structFieldCount(mod),
.Union => ty.unionFields().count(), .Union => ty.unionFields().count(),
else => unreachable, else => unreachable,
}) |field_i| { }) |field_i| {
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1; defer c_field_i += 1;
@@ -2105,7 +2105,7 @@ pub const CType = extern union {
if (ty.isSimpleTuple()) if (ty.isSimpleTuple())
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else switch (zig_ty_tag) { else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i), .Struct => ty.structFieldName(field_i, mod),
.Union => ty.unionFields().keys()[field_i], .Union => ty.unionFields().keys()[field_i],
else => unreachable, else => unreachable,
}, },
@@ -2210,12 +2210,12 @@ pub const CType = extern union {
const zig_ty_tag = ty.zigTypeTag(mod); const zig_ty_tag = ty.zigTypeTag(mod);
for (0..switch (ty.zigTypeTag(mod)) { for (0..switch (ty.zigTypeTag(mod)) {
.Struct => ty.structFieldCount(), .Struct => ty.structFieldCount(mod),
.Union => ty.unionFields().count(), .Union => ty.unionFields().count(),
else => unreachable, else => unreachable,
}) |field_i| { }) |field_i| {
const field_ty = ty.structFieldType(field_i); const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
self.updateHasherRecurse(hasher, field_ty, switch (self.kind) { self.updateHasherRecurse(hasher, field_ty, switch (self.kind) {
@@ -2227,7 +2227,7 @@ pub const CType = extern union {
hasher.update(if (ty.isSimpleTuple()) hasher.update(if (ty.isSimpleTuple())
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else switch (zig_ty_tag) { else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i), .Struct => ty.structFieldName(field_i, mod),
.Union => ty.unionFields().keys()[field_i], .Union => ty.unionFields().keys()[field_i],
else => unreachable, else => unreachable,
}); });
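
One switch repeats throughout this file: the field count of a container, which for structs is now answered through the Module while unions still use the legacy fields map (unions are not migrated in this commit). Condensed into the helper the repetition suggests; the function itself is hypothetical, its prongs are copied from the hunks above:

    fn containerFieldCount(ty: Type, mod: *Module) usize {
        return switch (ty.zigTypeTag(mod)) {
            .Struct => ty.structFieldCount(mod),
            .Union => ty.unionFields().count(),
            else => unreachable,
        };
    }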


@@ -1986,8 +1986,7 @@ pub const Object = struct {
const name = try ty.nameAlloc(gpa, o.module); const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name); defer gpa.free(name);
if (ty.castTag(.@"struct")) |payload| { if (mod.typeToStruct(ty)) |struct_obj| {
const struct_obj = payload.data;
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
assert(struct_obj.haveLayout()); assert(struct_obj.haveLayout());
const info = struct_obj.backing_int_ty.intInfo(mod); const info = struct_obj.backing_int_ty.intInfo(mod);
@@ -2075,8 +2074,7 @@ pub const Object = struct {
return full_di_ty; return full_di_ty;
} }
if (ty.castTag(.@"struct")) |payload| { if (mod.typeToStruct(ty)) |struct_obj| {
const struct_obj = payload.data;
if (!struct_obj.haveFieldTypes()) { if (!struct_obj.haveFieldTypes()) {
// This can happen if a struct type makes it all the way to // This can happen if a struct type makes it all the way to
// flush() without ever being instantiated or referenced (even // flush() without ever being instantiated or referenced (even
@@ -2105,8 +2103,8 @@ pub const Object = struct {
return struct_di_ty; return struct_di_ty;
} }
const fields = ty.structFields(); const fields = ty.structFields(mod);
const layout = ty.containerLayout(); const layout = ty.containerLayout(mod);
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa); defer di_fields.deinit(gpa);
@@ -2116,7 +2114,7 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2); comptime assert(struct_layout_version == 2);
var offset: u64 = 0; var offset: u64 = 0;
var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| { while (it.next()) |field_and_index| {
const field = field_and_index.field; const field = field_and_index.field;
const field_size = field.ty.abiSize(mod); const field_size = field.ty.abiSize(mod);
@@ -2990,7 +2988,7 @@ pub const DeclGen = struct {
return llvm_struct_ty; return llvm_struct_ty;
} }
const struct_obj = t.castTag(.@"struct").?.data; const struct_obj = mod.typeToStruct(t).?;
if (struct_obj.layout == .Packed) { if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout()); assert(struct_obj.haveLayout());
@@ -3696,7 +3694,7 @@ pub const DeclGen = struct {
} }
} }
const struct_obj = tv.ty.castTag(.@"struct").?.data; const struct_obj = mod.typeToStruct(tv.ty).?;
if (struct_obj.layout == .Packed) { if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout()); assert(struct_obj.haveLayout());
@@ -4043,7 +4041,7 @@ pub const DeclGen = struct {
const llvm_u32 = dg.context.intType(32); const llvm_u32 = dg.context.intType(32);
switch (parent_ty.zigTypeTag(mod)) { switch (parent_ty.zigTypeTag(mod)) {
.Union => { .Union => {
if (parent_ty.containerLayout() == .Packed) { if (parent_ty.containerLayout(mod) == .Packed) {
return parent_llvm_ptr; return parent_llvm_ptr;
} }
@@ -4065,14 +4063,14 @@ pub const DeclGen = struct {
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
}, },
.Struct => { .Struct => {
if (parent_ty.containerLayout() == .Packed) { if (parent_ty.containerLayout(mod) == .Packed) {
if (!byte_aligned) return parent_llvm_ptr; if (!byte_aligned) return parent_llvm_ptr;
const llvm_usize = dg.context.intType(target.ptrBitWidth()); const llvm_usize = dg.context.intType(target.ptrBitWidth());
const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
// count bits of fields before this one // count bits of fields before this one
const prev_bits = b: { const prev_bits = b: {
var b: usize = 0; var b: usize = 0;
for (parent_ty.structFields().values()[0..field_index]) |field| { for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
b += @intCast(usize, field.ty.bitSize(mod)); b += @intCast(usize, field.ty.bitSize(mod));
} }
@@ -5983,7 +5981,7 @@ pub const FuncGen = struct {
const struct_ty = self.typeOf(struct_field.struct_operand); const struct_ty = self.typeOf(struct_field.struct_operand);
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index; const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index); const field_ty = struct_ty.structFieldType(field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return null; return null;
} }
@@ -5991,9 +5989,9 @@ pub const FuncGen = struct {
if (!isByRef(struct_ty, mod)) { if (!isByRef(struct_ty, mod)) {
assert(!isByRef(field_ty, mod)); assert(!isByRef(field_ty, mod));
switch (struct_ty.zigTypeTag(mod)) { switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout()) { .Struct => switch (struct_ty.containerLayout(mod)) {
.Packed => { .Packed => {
const struct_obj = struct_ty.castTag(.@"struct").?.data; const struct_obj = mod.typeToStruct(struct_ty).?;
const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index); const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
const containing_int = struct_llvm_val; const containing_int = struct_llvm_val;
const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
@@ -6019,7 +6017,7 @@ pub const FuncGen = struct {
}, },
}, },
.Union => { .Union => {
assert(struct_ty.containerLayout() == .Packed); assert(struct_ty.containerLayout(mod) == .Packed);
const containing_int = struct_llvm_val; const containing_int = struct_llvm_val;
const elem_llvm_ty = try self.dg.lowerType(field_ty); const elem_llvm_ty = try self.dg.lowerType(field_ty);
if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
@@ -6041,7 +6039,7 @@ pub const FuncGen = struct {
switch (struct_ty.zigTypeTag(mod)) { switch (struct_ty.zigTypeTag(mod)) {
.Struct => { .Struct => {
assert(struct_ty.containerLayout() != .Packed); assert(struct_ty.containerLayout(mod) != .Packed);
var ptr_ty_buf: Type.Payload.Pointer = undefined; var ptr_ty_buf: Type.Payload.Pointer = undefined;
const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
const struct_llvm_ty = try self.dg.lowerType(struct_ty); const struct_llvm_ty = try self.dg.lowerType(struct_ty);
@@ -9289,8 +9287,8 @@ pub const FuncGen = struct {
return vector; return vector;
}, },
.Struct => { .Struct => {
if (result_ty.containerLayout() == .Packed) { if (result_ty.containerLayout(mod) == .Packed) {
const struct_obj = result_ty.castTag(.@"struct").?.data; const struct_obj = mod.typeToStruct(result_ty).?;
assert(struct_obj.haveLayout()); assert(struct_obj.haveLayout());
const big_bits = struct_obj.backing_int_ty.bitSize(mod); const big_bits = struct_obj.backing_int_ty.bitSize(mod);
const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
@@ -9795,7 +9793,7 @@ pub const FuncGen = struct {
const mod = self.dg.module; const mod = self.dg.module;
const struct_ty = struct_ptr_ty.childType(mod); const struct_ty = struct_ptr_ty.childType(mod);
switch (struct_ty.zigTypeTag(mod)) { switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout()) { .Struct => switch (struct_ty.containerLayout(mod)) {
.Packed => { .Packed => {
const result_ty = self.typeOfIndex(inst); const result_ty = self.typeOfIndex(inst);
const result_ty_info = result_ty.ptrInfo(mod); const result_ty_info = result_ty.ptrInfo(mod);
@@ -9838,7 +9836,7 @@ pub const FuncGen = struct {
}, },
.Union => { .Union => {
const layout = struct_ty.unionGetLayout(mod); const layout = struct_ty.unionGetLayout(mod);
if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr; if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr;
const payload_index = @boolToInt(layout.tag_align >= layout.payload_align); const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
const union_llvm_ty = try self.dg.lowerType(struct_ty); const union_llvm_ty = try self.dg.lowerType(struct_ty);
const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, ""); const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, "");
@@ -10530,11 +10528,11 @@ fn llvmFieldIndex(
} }
return null; return null;
} }
const layout = ty.containerLayout(); const layout = ty.containerLayout(mod);
assert(layout != .Packed); assert(layout != .Packed);
var llvm_field_index: c_uint = 0; var llvm_field_index: c_uint = 0;
var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| { while (it.next()) |field_and_index| {
const field = field_and_index.field; const field = field_and_index.field;
const field_align = field.alignment(mod, layout); const field_align = field.alignment(mod, layout);
@@ -11113,7 +11111,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
.Array, .Frame => return ty.hasRuntimeBits(mod), .Array, .Frame => return ty.hasRuntimeBits(mod),
.Struct => { .Struct => {
// Packed structs are represented to LLVM as integers. // Packed structs are represented to LLVM as integers.
if (ty.containerLayout() == .Packed) return false; if (ty.containerLayout(mod) == .Packed) return false;
if (ty.isSimpleTupleOrAnonStruct()) { if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields(); const tuple = ty.tupleFields();
var count: usize = 0; var count: usize = 0;
@@ -11127,7 +11125,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
return false; return false;
} }
var count: usize = 0; var count: usize = 0;
const fields = ty.structFields(); const fields = ty.structFields(mod);
for (fields.values()) |field| { for (fields.values()) |field| {
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
@@ -11137,7 +11135,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
} }
return false; return false;
}, },
.Union => switch (ty.containerLayout()) { .Union => switch (ty.containerLayout(mod)) {
.Packed => return false, .Packed => return false,
else => return ty.hasRuntimeBits(mod), else => return ty.hasRuntimeBits(mod),
}, },
@@ -11176,8 +11174,8 @@ fn isScalar(mod: *Module, ty: Type) bool {
.Vector, .Vector,
=> true, => true,
.Struct => ty.containerLayout() == .Packed, .Struct => ty.containerLayout(mod) == .Packed,
.Union => ty.containerLayout() == .Packed, .Union => ty.containerLayout(mod) == .Packed,
else => false, else => false,
}; };
} }
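
For non-packed structs the LLVM backend iterates only runtime fields, with the iterator now obtained from the InternPool-backed struct object. A sketch built from the calls above; the sum it computes ignores padding and is purely illustrative:

    // Sum the ABI sizes of a struct's runtime fields.
    fn runtimeFieldsAbiSize(ty: Type, mod: *Module) u64 {
        var total: u64 = 0;
        var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
        while (it.next()) |field_and_index| {
            total += field_and_index.field.ty.abiSize(mod);
        }
        return total;
    }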


@@ -685,7 +685,7 @@ pub const DeclGen = struct {
if (ty.isSimpleTupleOrAnonStruct()) { if (ty.isSimpleTupleOrAnonStruct()) {
unreachable; // TODO unreachable; // TODO
} else { } else {
const struct_ty = ty.castTag(.@"struct").?.data; const struct_ty = mod.typeToStruct(ty).?;
if (struct_ty.layout == .Packed) { if (struct_ty.layout == .Packed) {
return dg.todo("packed struct constants", .{}); return dg.todo("packed struct constants", .{});
@@ -1306,7 +1306,7 @@ pub const DeclGen = struct {
} }); } });
} }
const struct_ty = ty.castTag(.@"struct").?.data; const struct_ty = mod.typeToStruct(ty).?;
if (struct_ty.layout == .Packed) { if (struct_ty.layout == .Packed) {
return try self.resolveType(struct_ty.backing_int_ty, .direct); return try self.resolveType(struct_ty.backing_int_ty, .direct);
@@ -2576,7 +2576,7 @@ pub const DeclGen = struct {
const struct_ty = self.typeOf(struct_field.struct_operand); const struct_ty = self.typeOf(struct_field.struct_operand);
const object_id = try self.resolve(struct_field.struct_operand); const object_id = try self.resolve(struct_field.struct_operand);
const field_index = struct_field.field_index; const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index); const field_ty = struct_ty.structFieldType(field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
@@ -2595,7 +2595,7 @@ pub const DeclGen = struct {
const mod = self.module; const mod = self.module;
const object_ty = object_ptr_ty.childType(mod); const object_ty = object_ptr_ty.childType(mod);
switch (object_ty.zigTypeTag(mod)) { switch (object_ty.zigTypeTag(mod)) {
.Struct => switch (object_ty.containerLayout()) { .Struct => switch (object_ty.containerLayout(mod)) {
.Packed => unreachable, // TODO .Packed => unreachable, // TODO
else => { else => {
const field_index_ty_ref = try self.intType(.unsigned, 32); const field_index_ty_ref = try self.intType(.unsigned, 32);


@@ -360,13 +360,13 @@ pub const DeclState = struct {
dbg_info_buffer.appendSliceAssumeCapacity(struct_name); dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
dbg_info_buffer.appendAssumeCapacity(0); dbg_info_buffer.appendAssumeCapacity(0);
const struct_obj = ty.castTag(.@"struct").?.data; const struct_obj = mod.typeToStruct(ty).?;
if (struct_obj.layout == .Packed) { if (struct_obj.layout == .Packed) {
log.debug("TODO implement .debug_info for packed structs", .{}); log.debug("TODO implement .debug_info for packed structs", .{});
break :blk; break :blk;
} }
const fields = ty.structFields(); const fields = ty.structFields(mod);
for (fields.keys(), 0..) |field_name, field_index| { for (fields.keys(), 0..) |field_name, field_index| {
const field = fields.get(field_name).?; const field = fields.get(field_name).?;
if (!field.ty.hasRuntimeBits(mod)) continue; if (!field.ty.hasRuntimeBits(mod)) continue;
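
Since `structFields(mod)` returns an ordered map, the by-name `fields.get(field_name)` above could pair keys and values directly. A sketch of the equivalent loop, assuming array-hash-map semantics for the fields map:

    const fields = ty.structFields(mod);
    for (fields.keys(), fields.values()) |field_name, field| {
        if (!field.ty.hasRuntimeBits(mod)) continue;
        _ = field_name; // emit one DW_TAG_member here
    }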

File diff suppressed because it is too large


@@ -996,10 +996,10 @@ pub const Value = struct {
const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
}, },
.Struct => switch (ty.containerLayout()) { .Struct => switch (ty.containerLayout(mod)) {
.Auto => return error.IllDefinedMemoryLayout, .Auto => return error.IllDefinedMemoryLayout,
.Extern => { .Extern => {
const fields = ty.structFields().values(); const fields = ty.structFields(mod).values();
const field_vals = val.castTag(.aggregate).?.data; const field_vals = val.castTag(.aggregate).?.data;
for (fields, 0..) |field, i| { for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, mod)); const off = @intCast(usize, ty.structFieldOffset(i, mod));
@@ -1017,7 +1017,7 @@ pub const Value = struct {
const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?; const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?;
std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian);
}, },
.Union => switch (ty.containerLayout()) { .Union => switch (ty.containerLayout(mod)) {
.Auto => return error.IllDefinedMemoryLayout, .Auto => return error.IllDefinedMemoryLayout,
.Extern => return error.Unimplemented, .Extern => return error.Unimplemented,
.Packed => { .Packed => {
@@ -1119,12 +1119,12 @@ pub const Value = struct {
bits += elem_bit_size; bits += elem_bit_size;
} }
}, },
.Struct => switch (ty.containerLayout()) { .Struct => switch (ty.containerLayout(mod)) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already .Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => unreachable, // Handled in non-packed writeToMemory .Extern => unreachable, // Handled in non-packed writeToMemory
.Packed => { .Packed => {
var bits: u16 = 0; var bits: u16 = 0;
const fields = ty.structFields().values(); const fields = ty.structFields(mod).values();
const field_vals = val.castTag(.aggregate).?.data; const field_vals = val.castTag(.aggregate).?.data;
for (fields, 0..) |field, i| { for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(mod)); const field_bits = @intCast(u16, field.ty.bitSize(mod));
@@ -1133,7 +1133,7 @@ pub const Value = struct {
} }
}, },
}, },
.Union => switch (ty.containerLayout()) { .Union => switch (ty.containerLayout(mod)) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already .Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => unreachable, // Handled in non-packed writeToMemory .Extern => unreachable, // Handled in non-packed writeToMemory
.Packed => { .Packed => {
@@ -1236,14 +1236,14 @@ pub const Value = struct {
const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
}, },
.Struct => switch (ty.containerLayout()) { .Struct => switch (ty.containerLayout(mod)) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already .Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => { .Extern => {
const fields = ty.structFields().values(); const fields = ty.structFields(mod).values();
const field_vals = try arena.alloc(Value, fields.len); const field_vals = try arena.alloc(Value, fields.len);
for (fields, 0..) |field, i| { for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, mod)); const off = @intCast(usize, ty.structFieldOffset(i, mod));
const sz = @intCast(usize, ty.structFieldType(i).abiSize(mod)); const sz = @intCast(usize, ty.structFieldType(i, mod).abiSize(mod));
field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena); field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
} }
return Tag.aggregate.create(arena, field_vals); return Tag.aggregate.create(arena, field_vals);
@@ -1346,12 +1346,12 @@ pub const Value = struct {
} }
return Tag.aggregate.create(arena, elems); return Tag.aggregate.create(arena, elems);
}, },
.Struct => switch (ty.containerLayout()) { .Struct => switch (ty.containerLayout(mod)) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already .Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => unreachable, // Handled by non-packed readFromMemory .Extern => unreachable, // Handled by non-packed readFromMemory
.Packed => { .Packed => {
var bits: u16 = 0; var bits: u16 = 0;
const fields = ty.structFields().values(); const fields = ty.structFields(mod).values();
const field_vals = try arena.alloc(Value, fields.len); const field_vals = try arena.alloc(Value, fields.len);
for (fields, 0..) |field, i| { for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(mod)); const field_bits = @intCast(u16, field.ty.bitSize(mod));
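Note: the .Packed arm instead walks a running bit offset: every field occupies
exactly bitSize(mod) bits directly after the previous one, and the caller sizes
the buffer as (bitSize + 7) / 8 bytes. A sketch of that bookkeeping on a
hypothetical packed struct:

    const std = @import("std");

    // Hypothetical type, not taken from this diff.
    const P = packed struct { a: u3, b: u5 };

    test "packed fields live at accumulated bit offsets" {
        // `a` occupies bits 0..3 and `b` bits 3..8; no padding is inserted.
        try std.testing.expectEqual(@as(usize, 8), @bitSizeOf(P));
        // Matches byte_count = (bitSize + 7) / 8 computed before readFromPackedMemory:
        try std.testing.expectEqual(@as(usize, 1), (@bitSizeOf(P) + 7) / 8);
    }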
@@ -1996,7 +1996,7 @@ pub const Value = struct {
         }
         if (ty.zigTypeTag(mod) == .Struct) {
-            const fields = ty.structFields().values();
+            const fields = ty.structFields(mod).values();
             assert(fields.len == a_field_vals.len);
             for (fields, 0..) |field, i| {
                 if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
@@ -2019,7 +2019,7 @@ pub const Value = struct {
         .@"union" => {
             const a_union = a.castTag(.@"union").?.data;
             const b_union = b.castTag(.@"union").?.data;
-            switch (ty.containerLayout()) {
+            switch (ty.containerLayout(mod)) {
                 .Packed, .Extern => {
                     const tag_ty = ty.unionTagTypeHypothetical();
                     if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) {
@@ -2252,7 +2252,7 @@ pub const Value = struct {
         .aggregate => {
             const field_values = val.castTag(.aggregate).?.data;
             for (field_values, 0..) |field_val, i| {
-                const field_ty = ty.structFieldType(i);
+                const field_ty = ty.structFieldType(i, mod);
                 field_val.hash(field_ty, hasher, mod);
             }
         },
@@ -2623,7 +2623,7 @@ pub const Value = struct {
             const data = val.castTag(.field_ptr).?.data;
             if (data.container_ptr.pointerDecl()) |decl_index| {
                 const container_decl = mod.declPtr(decl_index);
-                const field_type = data.container_ty.structFieldType(data.field_index);
+                const field_type = data.container_ty.structFieldType(data.field_index, mod);
                 const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index);
                 return field_val.elemValue(mod, index);
             } else unreachable;
@@ -2758,16 +2758,6 @@ pub const Value = struct {
     pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value {
         switch (val.ip_index) {
             .undef => return Value.undef,
-            .empty_struct => {
-                if (ty.isSimpleTupleOrAnonStruct()) {
-                    const tuple = ty.tupleFields();
-                    return tuple.values[index];
-                }
-                if (try ty.structFieldValueComptime(mod, index)) |some| {
-                    return some;
-                }
-                unreachable;
-            },
             .none => switch (val.tag()) {
                 .aggregate => {
@@ -2784,7 +2774,10 @@ pub const Value = struct {
                 else => unreachable,
             },
-            else => unreachable,
+            else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .aggregate => |aggregate| aggregate.fields[index].toValue(),
+                else => unreachable,
+            },
         }
     }
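Note: the deleted .empty_struct arm is the point of the last two hunks: such
values are now interned, so fieldValue resolves any ip_index that is neither
.undef nor .none through the pool and indexes into the aggregate's fields. A
condensed restatement of the new else branch, wrapped in a hypothetical
standalone function for illustration:

    // Sketch only; the calls match the diff, but the wrapper name does not exist.
    fn fieldValueInterned(mod: *Module, val: Value, index: usize) Value {
        return switch (mod.intern_pool.indexToKey(val.ip_index)) {
            // Aggregate keys carry their elements/fields as InternPool indexes.
            .aggregate => |aggregate| aggregate.fields[index].toValue(),
            else => unreachable, // other keys have no addressable fields
        };
    }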