stage2: move function types to InternPool
parent 6a9a918fbe
commit 17882162b3

23 changed files with 822 additions and 792 deletions
@@ -143,7 +143,7 @@ pub const Mode = OptimizeMode;
 /// This data structure is used by the Zig language code generation and
 /// therefore must be kept in sync with the compiler implementation.
-pub const CallingConvention = enum {
+pub const CallingConvention = enum(u8) {
     /// This is the default Zig calling convention used when not using `export` on `fn`
     /// and no other calling convention is specified.
    Unspecified,
@@ -845,7 +845,6 @@ pub const Inst = struct {
     pub const Ref = enum(u32) {
         u1_type = @enumToInt(InternPool.Index.u1_type),
-        u5_type = @enumToInt(InternPool.Index.u5_type),
         u8_type = @enumToInt(InternPool.Index.u8_type),
         i8_type = @enumToInt(InternPool.Index.i8_type),
         u16_type = @enumToInt(InternPool.Index.u16_type),

@@ -914,8 +913,8 @@ pub const Inst = struct {
         zero_u8 = @enumToInt(InternPool.Index.zero_u8),
         one = @enumToInt(InternPool.Index.one),
         one_usize = @enumToInt(InternPool.Index.one_usize),
-        one_u5 = @enumToInt(InternPool.Index.one_u5),
-        four_u5 = @enumToInt(InternPool.Index.four_u5),
+        one_u8 = @enumToInt(InternPool.Index.one_u8),
+        four_u8 = @enumToInt(InternPool.Index.four_u8),
         negative_one = @enumToInt(InternPool.Index.negative_one),
         calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
         calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),

@@ -1383,7 +1382,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
        .call, .call_always_tail, .call_never_tail, .call_never_inline => {
            const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip);
-           return callee_ty.fnReturnType();
+           return callee_ty.fnReturnTypeIp(ip);
        },

        .slice_elem_val, .ptr_elem_val, .array_elem_val => {
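The `Ref` edits above track the `InternPool.Index` changes one-to-one: each statically-known `Ref` tag is defined as `@enumToInt` of the like-named `InternPool.Index` entry, so dropping `u5_type` and swapping `one_u5`/`four_u5` for `one_u8`/`four_u8` in the pool (below) forces the same edits here and in `Zir.Inst.Ref`. A minimal sketch, mine and not part of the commit, of the invariant this encoding maintains (assuming `std`, `Air`, and `InternPool` are imported):

comptime {
    // A typed Ref must encode exactly the integer of its InternPool twin,
    // so converting between the two enums is a free @enumToInt/@intToEnum.
    std.debug.assert(@enumToInt(Air.Inst.Ref.u8_type) ==
        @enumToInt(InternPool.Index.u8_type));
}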
@@ -148,6 +148,7 @@ pub const Key = union(enum) {
     union_type: UnionType,
     opaque_type: OpaqueType,
     enum_type: EnumType,
+    func_type: FuncType,

     /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
     /// via `simple_value` and has a named `Index` tag for it.

@@ -185,6 +186,13 @@ pub const Key = union(enum) {
         /// If zero use pointee_type.abiAlignment()
         /// When creating pointer types, if alignment is equal to pointee type
         /// abi alignment, this value should be set to 0 instead.
+        ///
+        /// Please don't change this to u32 or u29. If you want to save bits,
+        /// migrate the rest of the codebase to use the `Alignment` type rather
+        /// than using byte units. The LLVM backend can only handle `c_uint`
+        /// byte units; we can emit a semantic analysis error if alignment that
+        /// overflows that amount is attempted to be used, but it shouldn't
+        /// affect the other backends.
         alignment: u64 = 0,
         /// If this is non-zero it means the pointer points to a sub-byte
         /// range of data, which is backed by a "host integer" with this

@@ -358,6 +366,44 @@ pub const Key = union(enum) {
         }
     };

+    pub const FuncType = struct {
+        param_types: []Index,
+        return_type: Index,
+        /// Tells whether a parameter is comptime. See `paramIsComptime` helper
+        /// method for accessing this.
+        comptime_bits: u32,
+        /// Tells whether a parameter is noalias. See `paramIsNoalias` helper
+        /// method for accessing this.
+        noalias_bits: u32,
+        /// If zero use default target function code alignment.
+        ///
+        /// Please don't change this to u32 or u29. If you want to save bits,
+        /// migrate the rest of the codebase to use the `Alignment` type rather
+        /// than using byte units. The LLVM backend can only handle `c_uint`
+        /// byte units; we can emit a semantic analysis error if alignment that
+        /// overflows that amount is attempted to be used, but it shouldn't
+        /// affect the other backends.
+        alignment: u64,
+        cc: std.builtin.CallingConvention,
+        is_var_args: bool,
+        is_generic: bool,
+        is_noinline: bool,
+        align_is_generic: bool,
+        cc_is_generic: bool,
+        section_is_generic: bool,
+        addrspace_is_generic: bool,
+
+        pub fn paramIsComptime(self: @This(), i: u5) bool {
+            assert(i < self.param_types.len);
+            return @truncate(u1, self.comptime_bits >> i) != 0;
+        }
+
+        pub fn paramIsNoalias(self: @This(), i: u5) bool {
+            assert(i < self.param_types.len);
+            return @truncate(u1, self.noalias_bits >> i) != 0;
+        }
+    };
+
     pub const Int = struct {
         ty: Index,
         storage: Storage,
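`comptime_bits` and `noalias_bits` pack one flag per parameter into a `u32`, which is why the index parameter is a `u5` (at most 32 parameters get a bit). A standalone restatement of the bit test, mine rather than the commit's code, runnable with `zig test`:

const std = @import("std");

// Parameter i is comptime iff bit i of comptime_bits is set; this mirrors
// FuncType.paramIsComptime above without the InternPool plumbing.
fn paramIsComptime(comptime_bits: u32, i: u5) bool {
    return @truncate(u1, comptime_bits >> i) != 0;
}

test "only parameter index 2 is marked comptime" {
    const bits: u32 = 0b100;
    try std.testing.expect(paramIsComptime(bits, 2));
    try std.testing.expect(!paramIsComptime(bits, 0));
}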
@@ -512,6 +558,18 @@ pub const Key = union(enum) {
             for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem);
             for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem);
         },
+
+        .func_type => |func_type| {
+            for (func_type.param_types) |param_type| std.hash.autoHash(hasher, param_type);
+            std.hash.autoHash(hasher, func_type.return_type);
+            std.hash.autoHash(hasher, func_type.comptime_bits);
+            std.hash.autoHash(hasher, func_type.noalias_bits);
+            std.hash.autoHash(hasher, func_type.alignment);
+            std.hash.autoHash(hasher, func_type.cc);
+            std.hash.autoHash(hasher, func_type.is_var_args);
+            std.hash.autoHash(hasher, func_type.is_generic);
+            std.hash.autoHash(hasher, func_type.is_noinline);
+        },
     }
 }

@@ -670,6 +728,20 @@ pub const Key = union(enum) {
                 std.mem.eql(Index, a_info.values, b_info.values) and
                 std.mem.eql(NullTerminatedString, a_info.names, b_info.names);
         },
+
+        .func_type => |a_info| {
+            const b_info = b.func_type;
+
+            return std.mem.eql(Index, a_info.param_types, b_info.param_types) and
+                a_info.return_type == b_info.return_type and
+                a_info.comptime_bits == b_info.comptime_bits and
+                a_info.noalias_bits == b_info.noalias_bits and
+                a_info.alignment == b_info.alignment and
+                a_info.cc == b_info.cc and
+                a_info.is_var_args == b_info.is_var_args and
+                a_info.is_generic == b_info.is_generic and
+                a_info.is_noinline == b_info.is_noinline;
+        },
     }
 }

@@ -687,6 +759,7 @@ pub const Key = union(enum) {
         .opaque_type,
         .enum_type,
         .anon_struct_type,
+        .func_type,
         => .type_type,

         inline .ptr,

@@ -734,7 +807,6 @@ pub const Index = enum(u32) {
     pub const last_value: Index = .empty_struct;

     u1_type,
-    u5_type,
     u8_type,
     i8_type,
     u16_type,

@@ -811,10 +883,10 @@ pub const Index = enum(u32) {
     one,
     /// `1` (usize)
     one_usize,
-    /// `1` (u5)
-    one_u5,
-    /// `4` (u5)
-    four_u5,
+    /// `1` (u8)
+    one_u8,
+    /// `4` (u8)
+    four_u8,
     /// `-1` (comptime_int)
     negative_one,
     /// `std.builtin.CallingConvention.C`

@@ -880,12 +952,6 @@ pub const static_keys = [_]Key{
         .bits = 1,
     } },
-    // u5_type
-    .{ .int_type = .{
-        .signedness = .unsigned,
-        .bits = 5,
-    } },
     .{ .int_type = .{
         .signedness = .unsigned,
         .bits = 8,

@@ -1074,14 +1140,14 @@ pub const static_keys = [_]Key{
         .storage = .{ .u64 = 1 },
     } },
-    // one_u5
+    // one_u8
     .{ .int = .{
-        .ty = .u5_type,
+        .ty = .u8_type,
         .storage = .{ .u64 = 1 },
     } },
-    // four_u5
+    // four_u8
     .{ .int = .{
-        .ty = .u5_type,
+        .ty = .u8_type,
         .storage = .{ .u64 = 4 },
     } },
     // negative_one

@@ -1092,12 +1158,12 @@ pub const static_keys = [_]Key{
     // calling_convention_c
     .{ .enum_tag = .{
         .ty = .calling_convention_type,
-        .int = .one_u5,
+        .int = .one_u8,
     } },
     // calling_convention_inline
     .{ .enum_tag = .{
         .ty = .calling_convention_type,
-        .int = .four_u5,
+        .int = .four_u8,
     } },

     .{ .simple_value = .void },

@@ -1181,6 +1247,9 @@ pub const Tag = enum(u8) {
     /// An untagged union type which has a safety tag.
     /// `data` is `Module.Union.Index`.
     type_union_safety,
+    /// A function body type.
+    /// `data` is extra index to `TypeFunction`.
+    type_function,

     /// Typed `undefined`.
     /// `data` is `Index` of the type.
@@ -1283,6 +1352,29 @@ pub const Tag = enum(u8) {
     aggregate,
 };

+/// Trailing:
+/// 0. param_type: Index for each params_len
+pub const TypeFunction = struct {
+    params_len: u32,
+    return_type: Index,
+    comptime_bits: u32,
+    noalias_bits: u32,
+    flags: Flags,
+
+    pub const Flags = packed struct(u32) {
+        alignment: Alignment,
+        cc: std.builtin.CallingConvention,
+        is_var_args: bool,
+        is_generic: bool,
+        is_noinline: bool,
+        align_is_generic: bool,
+        cc_is_generic: bool,
+        section_is_generic: bool,
+        addrspace_is_generic: bool,
+        _: u11 = 0,
+    };
+};
+
 /// Trailing:
 /// 0. element: Index for each len
 /// len is determined by the aggregate type.
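This `Flags` layout is the reason `CallingConvention` became `enum(u8)` in the `builtin.zig` hunk at the top: `Alignment` takes 6 bits, `cc` takes 8, the seven `bool`s take 7, and the `u11` padding brings the total to exactly 32, so the whole value round-trips through one `u32` in `extra`. A hedged sketch, not from the commit, of a check one could add next to the declaration:

comptime {
    // 6 (Alignment) + 8 (CallingConvention) + 7 bools + 11 padding = 32.
    std.debug.assert(@bitSizeOf(TypeFunction.Flags) == 32);
}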
@@ -1371,24 +1463,6 @@ pub const Pointer = struct {
     flags: Flags,
     packed_offset: PackedOffset,

-    /// Stored as a power-of-two, with one special value to indicate none.
-    pub const Alignment = enum(u6) {
-        none = std.math.maxInt(u6),
-        _,
-
-        pub fn toByteUnits(a: Alignment, default: u64) u64 {
-            return switch (a) {
-                .none => default,
-                _ => @as(u64, 1) << @enumToInt(a),
-            };
-        }
-
-        pub fn fromByteUnits(n: u64) Alignment {
-            if (n == 0) return .none;
-            return @intToEnum(Alignment, @ctz(n));
-        }
-    };
-
     pub const Flags = packed struct(u32) {
         size: Size,
         alignment: Alignment,

@@ -1409,6 +1483,24 @@ pub const Pointer = struct {
     pub const VectorIndex = Key.PtrType.VectorIndex;
 };

+/// Stored as a power-of-two, with one special value to indicate none.
+pub const Alignment = enum(u6) {
+    none = std.math.maxInt(u6),
+    _,
+
+    pub fn toByteUnits(a: Alignment, default: u64) u64 {
+        return switch (a) {
+            .none => default,
+            _ => @as(u64, 1) << @enumToInt(a),
+        };
+    }
+
+    pub fn fromByteUnits(n: u64) Alignment {
+        if (n == 0) return .none;
+        return @intToEnum(Alignment, @ctz(n));
+    }
+};
+
 /// Used for non-sentineled arrays that have length fitting in u32, as well as
 /// vectors.
 pub const Vector = struct {
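`Alignment` (hoisted out of `Pointer` so `TypeFunction.Flags` can reuse it) packs a byte alignment into six bits by storing its log2, with the all-ones pattern reserved for `none`; `fromByteUnits` relies on the argument being a power of two, so `@ctz` recovers the exponent. A standalone restatement of the encoding, mine and runnable with `zig test`:

const std = @import("std");

// Mirror of the Alignment encoding above: log2(alignment) in a u6,
// maxInt(u6) meaning "no explicit alignment".
const Alignment = enum(u6) {
    none = std.math.maxInt(u6),
    _,

    fn toByteUnits(a: Alignment, default: u64) u64 {
        return switch (a) {
            .none => default,
            _ => @as(u64, 1) << @enumToInt(a),
        };
    }

    fn fromByteUnits(n: u64) Alignment {
        if (n == 0) return .none;
        return @intToEnum(Alignment, @ctz(n));
    }
};

test "power-of-two alignments round-trip; 0 falls back to the default" {
    try std.testing.expectEqual(@as(u64, 16), Alignment.fromByteUnits(16).toByteUnits(0));
    try std.testing.expectEqual(@as(u64, 8), Alignment.fromByteUnits(0).toByteUnits(8));
}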
@@ -1765,6 +1857,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
         },
         .type_enum_explicit => indexToKeyEnum(ip, data, .explicit),
         .type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive),
+        .type_function => .{ .func_type = indexToKeyFuncType(ip, data) },

         .undef => .{ .undef = @intToEnum(Index, data) },
         .opt_null => .{ .opt = .{

@@ -1896,6 +1989,29 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
     };
 }

+fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType {
+    const type_function = ip.extraDataTrail(TypeFunction, data);
+    const param_types = @ptrCast(
+        []Index,
+        ip.extra.items[type_function.end..][0..type_function.data.params_len],
+    );
+    return .{
+        .param_types = param_types,
+        .return_type = type_function.data.return_type,
+        .comptime_bits = type_function.data.comptime_bits,
+        .noalias_bits = type_function.data.noalias_bits,
+        .alignment = type_function.data.flags.alignment.toByteUnits(0),
+        .cc = type_function.data.flags.cc,
+        .is_var_args = type_function.data.flags.is_var_args,
+        .is_generic = type_function.data.flags.is_generic,
+        .is_noinline = type_function.data.flags.is_noinline,
+        .align_is_generic = type_function.data.flags.align_is_generic,
+        .cc_is_generic = type_function.data.flags.cc_is_generic,
+        .section_is_generic = type_function.data.flags.section_is_generic,
+        .addrspace_is_generic = type_function.data.flags.addrspace_is_generic,
+    };
+}
+
 /// Asserts the integer tag type is already present in the InternPool.
 fn getEnumIntTagType(ip: InternPool, fields_len: u32) Index {
     return ip.getAssumeExists(.{ .int_type = .{

@@ -1977,7 +2093,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             .child = ptr_type.elem_type,
             .sentinel = ptr_type.sentinel,
             .flags = .{
-                .alignment = Pointer.Alignment.fromByteUnits(ptr_type.alignment),
+                .alignment = Alignment.fromByteUnits(ptr_type.alignment),
                 .is_const = ptr_type.is_const,
                 .is_volatile = ptr_type.is_volatile,
                 .is_allowzero = ptr_type.is_allowzero,

@@ -2163,6 +2279,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             }
         },

+        .func_type => |func_type| {
+            assert(func_type.return_type != .none);
+            for (func_type.param_types) |param_type| assert(param_type != .none);
+
+            const params_len = @intCast(u32, func_type.param_types.len);
+
+            try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len +
+                params_len);
+            ip.items.appendAssumeCapacity(.{
+                .tag = .type_function,
+                .data = ip.addExtraAssumeCapacity(TypeFunction{
+                    .params_len = params_len,
+                    .return_type = func_type.return_type,
+                    .comptime_bits = func_type.comptime_bits,
+                    .noalias_bits = func_type.noalias_bits,
+                    .flags = .{
+                        .alignment = Alignment.fromByteUnits(func_type.alignment),
+                        .cc = func_type.cc,
+                        .is_var_args = func_type.is_var_args,
+                        .is_generic = func_type.is_generic,
+                        .is_noinline = func_type.is_noinline,
+                        .align_is_generic = func_type.align_is_generic,
+                        .cc_is_generic = func_type.cc_is_generic,
+                        .section_is_generic = func_type.section_is_generic,
+                        .addrspace_is_generic = func_type.addrspace_is_generic,
+                    },
+                }),
+            });
+            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types));
+        },
+
         .extern_func => @panic("TODO"),

         .ptr => |ptr| switch (ptr.addr) {
@@ -2736,6 +2883,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
             OptionalMapIndex => @enumToInt(@field(extra, field.name)),
             i32 => @bitCast(u32, @field(extra, field.name)),
             Pointer.Flags => @bitCast(u32, @field(extra, field.name)),
+            TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)),
             Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
             Pointer.VectorIndex => @enumToInt(@field(extra, field.name)),
             else => @compileError("bad field type: " ++ @typeName(field.type)),

@@ -2797,6 +2945,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data:
             OptionalMapIndex => @intToEnum(OptionalMapIndex, int32),
             i32 => @bitCast(i32, int32),
             Pointer.Flags => @bitCast(Pointer.Flags, int32),
+            TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32),
             Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32),
             Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32),
             else => @compileError("bad field type: " ++ @typeName(field.type)),

@@ -2988,17 +3137,17 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind
     }
 }

-pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex {
+pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex {
+    assert(val != .none);
     const tags = ip.items.items(.tag);
-    if (val == .none) return .none;
     if (tags[@enumToInt(val)] != .type_struct) return .none;
     const datas = ip.items.items(.data);
     return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional();
 }

-pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex {
+pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex {
+    assert(val != .none);
     const tags = ip.items.items(.tag);
-    if (val == .none) return .none;
     switch (tags[@enumToInt(val)]) {
         .type_union_tagged, .type_union_untagged, .type_union_safety => {},
         else => return .none,

@@ -3007,6 +3156,16 @@ pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex {
     return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional();
 }

+pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType {
+    assert(val != .none);
+    const tags = ip.items.items(.tag);
+    const datas = ip.items.items(.data);
+    switch (tags[@enumToInt(val)]) {
+        .type_function => return indexToKeyFuncType(ip, datas[@enumToInt(val)]),
+        else => return null,
+    }
+}
+
 pub fn isOptionalType(ip: InternPool, ty: Index) bool {
     const tags = ip.items.items(.tag);
     if (ty == .none) return false;

@@ -3092,6 +3251,11 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
             .type_union_safety,
             => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl),

+            .type_function => b: {
+                const info = ip.extraData(TypeFunction, data);
+                break :b @sizeOf(TypeFunction) + (@sizeOf(u32) * info.params_len);
+            },
+
             .undef => 0,
             .simple_type => 0,
             .simple_value => 0,
@@ -846,7 +846,7 @@ pub const Decl = struct {
     pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex {
         if (!decl.owns_tv) return .none;
         const ty = (decl.val.castTag(.ty) orelse return .none).data;
-        return mod.intern_pool.indexToStruct(ty.ip_index);
+        return mod.intern_pool.indexToStructType(ty.ip_index);
     }

     /// If the Decl has a value and it is a union, return it,

@@ -4764,7 +4764,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
     decl.analysis = .complete;
     decl.generation = mod.generation;

-    const is_inline = decl.ty.fnCallingConvention() == .Inline;
+    const is_inline = decl.ty.fnCallingConvention(mod) == .Inline;
     if (decl.is_exported) {
         const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) };
         if (is_inline) {

@@ -5617,6 +5617,9 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
     const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena);
     defer decl.value_arena.?.release(&decl_arena);

+    const fn_ty = decl.ty;
+    const fn_ty_info = mod.typeToFunc(fn_ty).?;
+
     var sema: Sema = .{
         .mod = mod,
         .gpa = gpa,

@@ -5626,7 +5629,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
         .owner_decl = decl,
         .owner_decl_index = decl_index,
         .func = func,
-        .fn_ret_ty = decl.ty.fnReturnType(),
+        .fn_ret_ty = fn_ty_info.return_type.toType(),
         .owner_func = func,
         .branch_quota = @max(func.branch_quota, Sema.default_branch_quota),
     };

@@ -5664,8 +5667,6 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
     // This could be a generic function instantiation, however, in which case we need to
     // map the comptime parameters to constant values and only emit arg AIR instructions
     // for the runtime ones.
-    const fn_ty = decl.ty;
-    const fn_ty_info = fn_ty.fnInfo();
     const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len);
     try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len);
     try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType`

@@ -5692,7 +5693,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
             sema.inst_map.putAssumeCapacityNoClobber(inst, arg);
             total_param_index += 1;
             continue;
-        } else fn_ty_info.param_types[runtime_param_index];
+        } else fn_ty_info.param_types[runtime_param_index].toType();

         const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) {
             error.NeededSourceLocation => unreachable,

@@ -6864,6 +6865,10 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
     return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true });
 }

+pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type {
+    return (try intern(mod, .{ .func_type = info })).toType();
+}
+
 /// Supports optionals in addition to pointers.
 pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
     if (ty.isPtrLikeOptional(mod)) {

@@ -6996,6 +7001,16 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
     return i.toValue();
 }

+pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value {
+    const ip = &mod.intern_pool;
+    assert(ip.isOptionalType(opt_ty.ip_index));
+    const result = try ip.get(mod.gpa, .{ .opt = .{
+        .ty = opt_ty.ip_index,
+        .val = .none,
+    } });
+    return result.toValue();
+}
+
 pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
     return intType(mod, .unsigned, Type.smallestUnsignedBits(max));
 }
@@ -7201,15 +7216,22 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I
 /// * A struct which has no fields (`struct {}`).
 /// * Not a struct.
 pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
-    const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null;
+    if (ty.ip_index == .none) return null;
+    const struct_index = mod.intern_pool.indexToStructType(ty.ip_index).unwrap() orelse return null;
     return mod.structPtr(struct_index);
 }

 pub fn typeToUnion(mod: *Module, ty: Type) ?*Union {
-    const union_index = mod.intern_pool.indexToUnion(ty.ip_index).unwrap() orelse return null;
+    if (ty.ip_index == .none) return null;
+    const union_index = mod.intern_pool.indexToUnionType(ty.ip_index).unwrap() orelse return null;
     return mod.unionPtr(union_index);
 }

+pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
+    if (ty.ip_index == .none) return null;
+    return mod.intern_pool.indexToFuncType(ty.ip_index);
+}
+
 pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc {
     @setCold(true);
     const owner_decl = mod.declPtr(owner_decl_index);
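`typeToFunc` becomes the front door for function-type information: code that used to call `Type.fnInfo()` now asks the InternPool and gets back `InternPool.Index` values that need `.toType()` before use. A short sketch, mine and not from the commit, of the migration pattern every backend hunk below repeats (assuming `mod: *Module` and a function type `fn_ty` in scope):

// Before: const fn_info = fn_ty.fnInfo();
const fn_info = mod.typeToFunc(fn_ty).?; // null when fn_ty has no interned index
const ret_ty = fn_info.return_type.toType(); // return_type is an InternPool.Index
for (fn_info.param_types) |param_ip| {
    _ = param_ip.toType(); // each parameter type is an Index as well
}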
src/Sema.zig (334 additions, 334 deletions): file diff suppressed because it is too large.
@@ -2052,7 +2052,6 @@ pub const Inst = struct {
     /// and `[]Ref`.
     pub const Ref = enum(u32) {
         u1_type = @enumToInt(InternPool.Index.u1_type),
-        u5_type = @enumToInt(InternPool.Index.u5_type),
         u8_type = @enumToInt(InternPool.Index.u8_type),
         i8_type = @enumToInt(InternPool.Index.i8_type),
         u16_type = @enumToInt(InternPool.Index.u16_type),

@@ -2121,8 +2120,8 @@ pub const Inst = struct {
         zero_u8 = @enumToInt(InternPool.Index.zero_u8),
         one = @enumToInt(InternPool.Index.one),
         one_usize = @enumToInt(InternPool.Index.one_usize),
-        one_u5 = @enumToInt(InternPool.Index.one_u5),
-        four_u5 = @enumToInt(InternPool.Index.four_u5),
+        one_u8 = @enumToInt(InternPool.Index.one_u8),
+        four_u8 = @enumToInt(InternPool.Index.four_u8),
         negative_one = @enumToInt(InternPool.Index.negative_one),
         calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
         calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
@@ -472,7 +472,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {

 fn gen(self: *Self) !void {
     const mod = self.bin_file.options.module.?;
-    const cc = self.fn_type.fnCallingConvention();
+    const cc = self.fn_type.fnCallingConvention(mod);
     if (cc != .Naked) {
         // stp fp, lr, [sp, #-16]!
         _ = try self.addInst(.{

@@ -1146,7 +1146,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
-            const ret_ty = self.fn_type.fnReturnType();
+            const ret_ty = self.fn_type.fnReturnType(mod);
             const ptr_ty = try mod.singleMutPtrType(ret_ty);

             // addr_reg will contain the address of where to store the

@@ -4271,7 +4271,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier

     if (info.return_value == .stack_offset) {
         log.debug("airCall: return by reference", .{});
-        const ret_ty = fn_ty.fnReturnType();
+        const ret_ty = fn_ty.fnReturnType(mod);
         const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
         const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);

@@ -4428,10 +4428,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 }

 fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const operand = try self.resolveInst(un_op);
-    const ret_ty = self.fn_type.fnReturnType();
-    const mod = self.bin_file.options.module.?;
+    const ret_ty = self.fn_type.fnReturnType(mod);

     switch (self.ret_mcv) {
         .none => {},

@@ -4460,10 +4460,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const ptr = try self.resolveInst(un_op);
     const ptr_ty = self.typeOf(un_op);
-    const ret_ty = self.fn_type.fnReturnType();
+    const ret_ty = self.fn_type.fnReturnType(mod);

     switch (self.ret_mcv) {
         .none => {},

@@ -4483,7 +4484,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
             // location.
             const op_inst = Air.refToIndex(un_op).?;
             if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
-                const mod = self.bin_file.options.module.?;
                 const abi_size = @intCast(u32, ret_ty.abiSize(mod));
                 const abi_align = ret_ty.abiAlignment(mod);

@@ -6226,12 +6226,11 @@ const CallMCValues = struct {

 /// Caller must call `CallMCValues.deinit`.
 fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
-    const cc = fn_ty.fnCallingConvention();
-    const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
-    defer self.gpa.free(param_types);
-    fn_ty.fnParamTypes(param_types);
+    const mod = self.bin_file.options.module.?;
+    const fn_info = mod.typeToFunc(fn_ty).?;
+    const cc = fn_info.cc;
     var result: CallMCValues = .{
-        .args = try self.gpa.alloc(MCValue, param_types.len),
+        .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
         // These undefined values must be populated before returning from this function.
         .return_value = undefined,
         .stack_byte_count = undefined,

@@ -6239,8 +6238,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     };
     errdefer self.gpa.free(result.args);

-    const ret_ty = fn_ty.fnReturnType();
-    const mod = self.bin_file.options.module.?;
+    const ret_ty = fn_ty.fnReturnType(mod);

     switch (cc) {
         .Naked => {

@@ -6271,8 +6269,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                 }
             }

-            for (param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.abiSize(mod));
+            for (fn_info.param_types, 0..) |ty, i| {
+                const param_size = @intCast(u32, ty.toType().abiSize(mod));
                 if (param_size == 0) {
                     result.args[i] = .{ .none = {} };
                     continue;

@@ -6280,14 +6278,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {

                 // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
                 // values to spread across odd-numbered registers.
-                if (ty.abiAlignment(mod) == 16 and !self.target.isDarwin()) {
+                if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) {
                     // Round up NCRN to the next even number
                     ncrn += ncrn % 2;
                 }

                 if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) {
                     if (param_size <= 8) {
-                        result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty) };
+                        result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) };
                         ncrn += 1;
                     } else {
                         return self.fail("TODO MCValues with multiple registers", .{});

@@ -6298,7 +6296,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     ncrn = 8;
                     // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
                     // that the entire stack space consumed by the arguments is 8-byte aligned.
-                    if (ty.abiAlignment(mod) == 8) {
+                    if (ty.toType().abiAlignment(mod) == 8) {
                         if (nsaa % 8 != 0) {
                             nsaa += 8 - (nsaa % 8);
                         }

@@ -6336,10 +6334,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {

             var stack_offset: u32 = 0;

-            for (param_types, 0..) |ty, i| {
-                if (ty.abiSize(mod) > 0) {
-                    const param_size = @intCast(u32, ty.abiSize(mod));
-                    const param_alignment = ty.abiAlignment(mod);
+            for (fn_info.param_types, 0..) |ty, i| {
+                if (ty.toType().abiSize(mod) > 0) {
+                    const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                    const param_alignment = ty.toType().abiAlignment(mod);

                     stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
                     result.args[i] = .{ .stack_argument_offset = stack_offset };
@@ -478,7 +478,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {

 fn gen(self: *Self) !void {
     const mod = self.bin_file.options.module.?;
-    const cc = self.fn_type.fnCallingConvention();
+    const cc = self.fn_type.fnCallingConvention(mod);
     if (cc != .Naked) {
         // push {fp, lr}
         const push_reloc = try self.addNop();

@@ -1123,7 +1123,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
-            const ret_ty = self.fn_type.fnReturnType();
+            const ret_ty = self.fn_type.fnReturnType(mod);
             const ptr_ty = try mod.singleMutPtrType(ret_ty);

             // addr_reg will contain the address of where to store the

@@ -4250,7 +4250,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     // untouched by the parameter passing code
     const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
         log.debug("airCall: return by reference", .{});
-        const ret_ty = fn_ty.fnReturnType();
+        const ret_ty = fn_ty.fnReturnType(mod);
         const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
         const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);

@@ -4350,7 +4350,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 if (RegisterManager.indexOfRegIntoTracked(reg) == null) {
                     // Save function return value into a tracked register
                     log.debug("airCall: copying {} as it is not tracked", .{reg});
-                    const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(), info.return_value);
+                    const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(mod), info.return_value);
                     break :result MCValue{ .register = new_reg };
                 }
             },

@@ -4374,10 +4374,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 }

 fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const operand = try self.resolveInst(un_op);
-    const ret_ty = self.fn_type.fnReturnType();
-    const mod = self.bin_file.options.module.?;
+    const ret_ty = self.fn_type.fnReturnType(mod);

     switch (self.ret_mcv) {
         .none => {},

@@ -4406,10 +4406,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const ptr = try self.resolveInst(un_op);
     const ptr_ty = self.typeOf(un_op);
-    const ret_ty = self.fn_type.fnReturnType();
+    const ret_ty = self.fn_type.fnReturnType(mod);

     switch (self.ret_mcv) {
         .none => {},

@@ -4429,7 +4430,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
             // location.
             const op_inst = Air.refToIndex(un_op).?;
             if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
-                const mod = self.bin_file.options.module.?;
                 const abi_size = @intCast(u32, ret_ty.abiSize(mod));
                 const abi_align = ret_ty.abiAlignment(mod);

@@ -6171,12 +6171,11 @@ const CallMCValues = struct {

 /// Caller must call `CallMCValues.deinit`.
 fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
-    const cc = fn_ty.fnCallingConvention();
-    const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
-    defer self.gpa.free(param_types);
-    fn_ty.fnParamTypes(param_types);
+    const mod = self.bin_file.options.module.?;
+    const fn_info = mod.typeToFunc(fn_ty).?;
+    const cc = fn_info.cc;
     var result: CallMCValues = .{
-        .args = try self.gpa.alloc(MCValue, param_types.len),
+        .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
         // These undefined values must be populated before returning from this function.
         .return_value = undefined,
         .stack_byte_count = undefined,

@@ -6184,8 +6183,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     };
     errdefer self.gpa.free(result.args);

-    const ret_ty = fn_ty.fnReturnType();
-    const mod = self.bin_file.options.module.?;
+    const ret_ty = fn_ty.fnReturnType(mod);

     switch (cc) {
         .Naked => {

@@ -6219,11 +6217,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                 }
             }

-            for (param_types, 0..) |ty, i| {
-                if (ty.abiAlignment(mod) == 8)
+            for (fn_info.param_types, 0..) |ty, i| {
+                if (ty.toType().abiAlignment(mod) == 8)
                     ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2);

-                const param_size = @intCast(u32, ty.abiSize(mod));
+                const param_size = @intCast(u32, ty.toType().abiSize(mod));
                 if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
                     if (param_size <= 4) {
                         result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };

@@ -6235,7 +6233,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     return self.fail("TODO MCValues split between registers and stack", .{});
                 } else {
                     ncrn = 4;
-                    if (ty.abiAlignment(mod) == 8)
+                    if (ty.toType().abiAlignment(mod) == 8)
                         nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8);

                     result.args[i] = .{ .stack_argument_offset = nsaa };

@@ -6269,10 +6267,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {

             var stack_offset: u32 = 0;

-            for (param_types, 0..) |ty, i| {
-                if (ty.abiSize(mod) > 0) {
-                    const param_size = @intCast(u32, ty.abiSize(mod));
-                    const param_alignment = ty.abiAlignment(mod);
+            for (fn_info.param_types, 0..) |ty, i| {
+                if (ty.toType().abiSize(mod) > 0) {
+                    const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                    const param_alignment = ty.toType().abiAlignment(mod);

                     stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
                     result.args[i] = .{ .stack_argument_offset = stack_offset };
@@ -347,7 +347,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }

 fn gen(self: *Self) !void {
-    const cc = self.fn_type.fnCallingConvention();
+    const mod = self.bin_file.options.module.?;
+    const cc = self.fn_type.fnCallingConvention(mod);
     if (cc != .Naked) {
         // TODO Finish function prologue and epilogue for riscv64.

@@ -1803,7 +1804,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 }

 fn ret(self: *Self, mcv: MCValue) !void {
-    const ret_ty = self.fn_type.fnReturnType();
+    const mod = self.bin_file.options.module.?;
+    const ret_ty = self.fn_type.fnReturnType(mod);
     try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
     // Just add space for an instruction, patch this later
     const index = try self.addInst(.{

@@ -2621,12 +2623,11 @@ const CallMCValues = struct {

 /// Caller must call `CallMCValues.deinit`.
 fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
-    const cc = fn_ty.fnCallingConvention();
-    const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
-    defer self.gpa.free(param_types);
-    fn_ty.fnParamTypes(param_types);
+    const mod = self.bin_file.options.module.?;
+    const fn_info = mod.typeToFunc(fn_ty).?;
+    const cc = fn_info.cc;
     var result: CallMCValues = .{
-        .args = try self.gpa.alloc(MCValue, param_types.len),
+        .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
         // These undefined values must be populated before returning from this function.
         .return_value = undefined,
         .stack_byte_count = undefined,

@@ -2634,8 +2635,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     };
     errdefer self.gpa.free(result.args);

-    const ret_ty = fn_ty.fnReturnType();
-    const mod = self.bin_file.options.module.?;
+    const ret_ty = fn_ty.fnReturnType(mod);

     switch (cc) {
         .Naked => {

@@ -2655,8 +2655,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             var next_stack_offset: u32 = 0;
             const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };

-            for (param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.abiSize(mod));
+            for (fn_info.param_types, 0..) |ty, i| {
+                const param_size = @intCast(u32, ty.toType().abiSize(mod));
                 if (param_size <= 8) {
                     if (next_register < argument_registers.len) {
                         result.args[i] = .{ .register = argument_registers[next_register] };
@@ -363,7 +363,8 @@ pub fn generate(
 }

 fn gen(self: *Self) !void {
-    const cc = self.fn_type.fnCallingConvention();
+    const mod = self.bin_file.options.module.?;
+    const cc = self.fn_type.fnCallingConvention(mod);
     if (cc != .Naked) {
         // TODO Finish function prologue and epilogue for sparc64.

@@ -4458,12 +4459,11 @@ fn realStackOffset(off: u32) u32 {

 /// Caller must call `CallMCValues.deinit`.
 fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
-    const cc = fn_ty.fnCallingConvention();
-    const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
-    defer self.gpa.free(param_types);
-    fn_ty.fnParamTypes(param_types);
+    const mod = self.bin_file.options.module.?;
+    const fn_info = mod.typeToFunc(fn_ty).?;
+    const cc = fn_info.cc;
     var result: CallMCValues = .{
-        .args = try self.gpa.alloc(MCValue, param_types.len),
+        .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
         // These undefined values must be populated before returning from this function.
         .return_value = undefined,
         .stack_byte_count = undefined,

@@ -4471,8 +4471,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
     };
     errdefer self.gpa.free(result.args);

-    const ret_ty = fn_ty.fnReturnType();
-    const mod = self.bin_file.options.module.?;
+    const ret_ty = fn_ty.fnReturnType(mod);

     switch (cc) {
         .Naked => {

@@ -4495,8 +4494,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
                 .callee => abi.c_abi_int_param_regs_callee_view,
             };

-            for (param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.abiSize(mod));
+            for (fn_info.param_types, 0..) |ty, i| {
+                const param_size = @intCast(u32, ty.toType().abiSize(mod));
                 if (param_size <= 8) {
                     if (next_register < argument_registers.len) {
                         result.args[i] = .{ .register = argument_registers[next_register] };

@@ -4580,7 +4579,8 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
 }

 fn ret(self: *Self, mcv: MCValue) !void {
-    const ret_ty = self.fn_type.fnReturnType();
+    const mod = self.bin_file.options.module.?;
+    const ret_ty = self.fn_type.fnReturnType(mod);
     try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);

     // Just add space for a branch instruction, patch this later
@@ -1145,7 +1145,7 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
 fn genFunctype(
     gpa: Allocator,
     cc: std.builtin.CallingConvention,
-    params: []const Type,
+    params: []const InternPool.Index,
     return_type: Type,
     mod: *Module,
 ) !wasm.Type {

@@ -1170,7 +1170,8 @@ fn genFunctype(
     }

     // param types
-    for (params) |param_type| {
+    for (params) |param_type_ip| {
+        const param_type = param_type_ip.toType();
         if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;

         switch (cc) {

@@ -1234,9 +1235,9 @@ pub fn generate(
 }

 fn genFunc(func: *CodeGen) InnerError!void {
-    const fn_info = func.decl.ty.fnInfo();
     const mod = func.bin_file.base.options.module.?;
-    var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod);
+    const fn_info = mod.typeToFunc(func.decl.ty).?;
+    var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod);
     defer func_type.deinit(func.gpa);
     _ = try func.bin_file.storeDeclType(func.decl_index, func_type);

@@ -1345,10 +1346,8 @@ const CallWValues = struct {

 fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
     const mod = func.bin_file.base.options.module.?;
-    const cc = fn_ty.fnCallingConvention();
-    const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen());
-    defer func.gpa.free(param_types);
-    fn_ty.fnParamTypes(param_types);
+    const fn_info = mod.typeToFunc(fn_ty).?;
+    const cc = fn_info.cc;
     var result: CallWValues = .{
         .args = &.{},
         .return_value = .none,

@@ -1360,8 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV

     // Check if we store the result as a pointer to the stack rather than
     // by value
-    const fn_info = fn_ty.fnInfo();
-    if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
+    if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
         // the sret arg will be passed as first argument, therefore we
         // set the `return_value` before allocating locals for regular args.
         result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };

@@ -1370,8 +1368,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV

     switch (cc) {
         .Unspecified => {
-            for (param_types) |ty| {
-                if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
+            for (fn_info.param_types) |ty| {
+                if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) {
                     continue;
                 }

@@ -1380,8 +1378,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
             }
         },
         .C => {
-            for (param_types) |ty| {
-                const ty_classes = abi.classifyType(ty, mod);
+            for (fn_info.param_types) |ty| {
+                const ty_classes = abi.classifyType(ty.toType(), mod);
                 for (ty_classes) |class| {
                     if (class == .none) continue;
                     try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });

@@ -2095,11 +2093,11 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
 }

 fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
-    const fn_info = func.decl.ty.fnInfo();
-    const ret_ty = fn_info.return_type;
-    const mod = func.bin_file.base.options.module.?;
+    const fn_info = mod.typeToFunc(func.decl.ty).?;
+    const ret_ty = fn_info.return_type.toType();

     // result must be stored in the stack and we return a pointer
     // to the stack instead

@@ -2146,8 +2144,8 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
            break :result try func.allocStack(Type.usize); // create pointer to void
        }

-       const fn_info = func.decl.ty.fnInfo();
-       if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
+       const fn_info = mod.typeToFunc(func.decl.ty).?;
+       if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
            break :result func.return_value;
        }

@@ -2163,12 +2161,12 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(un_op);
     const ret_ty = func.typeOf(un_op).childType(mod);

-    const fn_info = func.decl.ty.fnInfo();
+    const fn_info = mod.typeToFunc(func.decl.ty).?;
     if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         if (ret_ty.isError(mod)) {
             try func.addImm32(0);
         }
-    } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
+    } else if (!firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
         // leave on the stack
         _ = try func.load(operand, ret_ty, 0);
     }

@@ -2191,9 +2189,9 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
         .Pointer => ty.childType(mod),
         else => unreachable,
     };
-    const ret_ty = fn_ty.fnReturnType();
-    const fn_info = fn_ty.fnInfo();
-    const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod);
+    const ret_ty = fn_ty.fnReturnType(mod);
+    const fn_info = mod.typeToFunc(fn_ty).?;
+    const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod);

     const callee: ?Decl.Index = blk: {
         const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null;

@@ -2203,8 +2201,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
             break :blk function.data.owner_decl;
         } else if (func_val.castTag(.extern_fn)) |extern_fn| {
             const ext_decl = mod.declPtr(extern_fn.data.owner_decl);
-            const ext_info = ext_decl.ty.fnInfo();
-            var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod);
+            const ext_info = mod.typeToFunc(ext_decl.ty).?;
+            var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod);
             defer func_type.deinit(func.gpa);
             const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
             const atom = func.bin_file.getAtomPtr(atom_index);

@@ -2235,7 +2233,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
         const arg_ty = func.typeOf(arg);
         if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;

-        try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val);
+        try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
     }

     if (callee) |direct| {

@@ -2248,7 +2246,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
         const operand = try func.resolveInst(pl_op.operand);
         try func.emitWValue(operand);

-        var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod);
+        var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod);
         defer fn_type.deinit(func.gpa);

         const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type);

@@ -2264,7 +2262,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     } else if (first_param_sret) {
         break :result_value sret;
         // TODO: Make this less fragile and optimize
-    } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
+    } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
         const result_local = try func.allocLocal(ret_ty);
         try func.addLabel(.local_set, result_local.local.value);
         const scalar_type = abi.scalarType(ret_ty, mod);

@@ -2528,7 +2526,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const arg_index = func.arg_index;
     const arg = func.args[arg_index];
-    const cc = func.decl.ty.fnInfo().cc;
+    const cc = mod.typeToFunc(func.decl.ty).?.cc;
     const arg_ty = func.typeOfIndex(inst);
     if (cc == .C) {
         const arg_classes = abi.classifyType(arg_ty, mod);

@@ -2647,9 +2645,9 @@ fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) Inner
     }

     switch (op) {
-        .mul => return func.callIntrinsic("__multi3", &.{ ty, ty }, ty, &.{ lhs, rhs }),
-        .shr => return func.callIntrinsic("__lshrti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }),
-        .shl => return func.callIntrinsic("__ashlti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }),
+        .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+        .shr => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+        .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
        .xor => {
            const result = try func.allocStack(ty);
            try func.emitWValue(result);

@@ -2839,7 +2837,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
     };

     // fma requires three operands
-    var param_types_buffer: [3]Type = .{ ty, ty, ty };
+    var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index };
     const param_types = param_types_buffer[0..args.len];
     return func.callIntrinsic(fn_name, param_types, ty, args);
 }

@@ -5298,7 +5296,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
         // call __extendhfsf2(f16) f32
         const f32_result = try func.callIntrinsic(
             "__extendhfsf2",
-            &.{Type.f16},
+            &.{.f16_type},
             Type.f32,
             &.{operand},
         );

@@ -5316,7 +5314,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
         target_util.compilerRtFloatAbbrev(wanted_bits),
     }) catch unreachable;

-    return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand});
+    return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
 }

 fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

@@ -5347,7 +5345,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
         } else operand;

         // call __truncsfhf2(f32) f16
-        return func.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op});
+        return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op});
     }

     var fn_name_buf: [12]u8 = undefined;

@@ -5356,7 +5354,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
         target_util.compilerRtFloatAbbrev(wanted_bits),
     }) catch unreachable;

-    return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand});
+    return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
 }

 fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

@@ -5842,7 +5840,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

     const bin_op = try func.callIntrinsic(
         "__multi3",
-        &[_]Type{Type.i64} ** 4,
+        &[_]InternPool.Index{.i64_type} ** 4,
         Type.i128,
         &.{ lhs, lhs_shifted, rhs, rhs_shifted },
     );

@@ -5866,19 +5864,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

     const mul1 = try func.callIntrinsic(
         "__multi3",
-        &[_]Type{Type.i64} ** 4,
+        &[_]InternPool.Index{.i64_type} ** 4,
         Type.i128,
         &.{ lhs_lsb, zero, rhs_msb, zero },
     );
     const mul2 = try func.callIntrinsic(
         "__multi3",
-        &[_]Type{Type.i64} ** 4,
+        &[_]InternPool.Index{.i64_type} ** 4,
         Type.i128,
         &.{ rhs_lsb, zero, lhs_msb, zero },
     );
     const mul3 = try func.callIntrinsic(
         "__multi3",
-        &[_]Type{Type.i64} ** 4,
+        &[_]InternPool.Index{.i64_type} ** 4,
         Type.i128,
         &.{ lhs_msb, zero, rhs_msb, zero },
     );

@@ -5977,7 +5975,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // call to compiler-rt `fn fmaf(f32, f32, f32) f32`
     var result = try func.callIntrinsic(
         "fmaf",
-        &.{ Type.f32, Type.f32, Type.f32 },
+        &.{ .f32_type, .f32_type, .f32_type },
         Type.f32,
         &.{ rhs_ext, lhs_ext, addend_ext },
     );

@@ -6707,7 +6705,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn callIntrinsic(
     func: *CodeGen,
     name: []const u8,
-    param_types: []const Type,
+    param_types: []const InternPool.Index,
     return_type: Type,
     args: []const WValue,
 ) InnerError!WValue {
@ -6735,8 +6733,8 @@ fn callIntrinsic(
|
|||
// Lower all arguments to the stack before we call our function
|
||||
for (args, 0..) |arg, arg_i| {
|
||||
assert(!(want_sret_param and arg == .stack));
|
||||
assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod));
|
||||
try func.lowerArg(.C, param_types[arg_i], arg);
|
||||
assert(param_types[arg_i].toType().hasRuntimeBitsIgnoreComptime(mod));
|
||||
try func.lowerArg(.C, param_types[arg_i].toType(), arg);
|
||||
}
|
||||
|
||||
// Actually call our intrinsic
|
||||
|
|
@ -6938,7 +6936,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
|
|||
try writer.writeByte(std.wasm.opcode(.end));
|
||||
|
||||
const slice_ty = Type.const_slice_u8_sentinel_0;
|
||||
const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod);
|
||||
const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod);
|
||||
return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
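The wasm hunks above share one migration pattern: intrinsic helpers such as `callIntrinsic` and `genFunctype` now take `InternPool.Index` values instead of `Type` values, so call sites pass a named index (`.f32_type`, `.i64_type`) or `ty.ip_index` for a runtime-selected type, and only the callee rehydrates an index into a `Type`. A minimal sketch of a migrated call site, assuming `func: *CodeGen`, `ty: Type`, `fn_name`, and `args: []const WValue` are in scope as in the surrounding code:

    // Parameter types travel as interned indices; `callIntrinsic` calls
    // `.toType()` on each index only where ABI queries need a full Type.
    var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index };
    const result = try func.callIntrinsic(fn_name, param_types_buffer[0..args.len], ty, args);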
@ -26,6 +26,7 @@ const Liveness = @import("../../Liveness.zig");
 const Lower = @import("Lower.zig");
 const Mir = @import("Mir.zig");
 const Module = @import("../../Module.zig");
+const InternPool = @import("../../InternPool.zig");
 const Target = std.Target;
 const Type = @import("../../type.zig").Type;
 const TypedValue = @import("../../TypedValue.zig");

@ -697,7 +698,8 @@ pub fn generate(
 FrameAlloc.init(.{ .size = 0, .alignment = 1 }),
 );

-var call_info = function.resolveCallingConventionValues(fn_type, &.{}, .args_frame) catch |err| switch (err) {
+const fn_info = mod.typeToFunc(fn_type).?;
+var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) {
 error.CodegenFail => return Result{ .fail = function.err_msg.? },
 error.OutOfRegisters => return Result{
 .fail = try ErrorMsg.create(

@ -1566,7 +1568,7 @@ fn asmMemoryRegisterImmediate(

 fn gen(self: *Self) InnerError!void {
 const mod = self.bin_file.options.module.?;
-const cc = self.fn_type.fnCallingConvention();
+const cc = self.fn_type.fnCallingConvention(mod);
 if (cc != .Naked) {
 try self.asmRegister(.{ ._, .push }, .rbp);
 const backpatch_push_callee_preserved_regs = try self.asmPlaceholder();

@ -8042,7 +8044,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 else => unreachable,
 };

-var info = try self.resolveCallingConventionValues(fn_ty, args[fn_ty.fnParamLen()..], .call_frame);
+const fn_info = mod.typeToFunc(fn_ty).?;
+
+var info = try self.resolveCallingConventionValues(fn_info, args[fn_info.param_types.len..], .call_frame);
 defer info.deinit(self);

 // We need a properly aligned and sized call frame to be able to call this function.

@ -8083,7 +8087,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 const ret_lock = switch (info.return_value.long) {
 .none, .unreach => null,
 .indirect => |reg_off| lock: {
-const ret_ty = fn_ty.fnReturnType();
+const ret_ty = fn_info.return_type.toType();
 const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod));
 try self.genSetReg(reg_off.reg, Type.usize, .{
 .lea_frame = .{ .index = frame_index, .off = -reg_off.off },

@ -8199,9 +8203,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 }

 fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const operand = try self.resolveInst(un_op);
-const ret_ty = self.fn_type.fnReturnType();
+const ret_ty = self.fn_type.fnReturnType(mod);
 switch (self.ret_mcv.short) {
 .none => {},
 .register => try self.genCopy(ret_ty, self.ret_mcv.short, operand),

@ -11683,18 +11688,23 @@ const CallMCValues = struct {
 /// Caller must call `CallMCValues.deinit`.
 fn resolveCallingConventionValues(
 self: *Self,
-fn_ty: Type,
+fn_info: InternPool.Key.FuncType,
 var_args: []const Air.Inst.Ref,
 stack_frame_base: FrameIndex,
 ) !CallMCValues {
 const mod = self.bin_file.options.module.?;
-const cc = fn_ty.fnCallingConvention();
-const param_len = fn_ty.fnParamLen();
-const param_types = try self.gpa.alloc(Type, param_len + var_args.len);
+const cc = fn_info.cc;
+const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len);
 defer self.gpa.free(param_types);
-fn_ty.fnParamTypes(param_types);
+
+for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src| {
+dest.* = src.toType();
+}
 // TODO: promote var arg types
-for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.typeOf(arg);
+for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg| {
+param_ty.* = self.typeOf(arg);
+}

 var result: CallMCValues = .{
 .args = try self.gpa.alloc(MCValue, param_types.len),
 // These undefined values must be populated before returning from this function.

@ -11704,7 +11714,7 @@ fn resolveCallingConventionValues(
 };
 errdefer self.gpa.free(result.args);

-const ret_ty = fn_ty.fnReturnType();
+const ret_ty = fn_info.return_type.toType();

 switch (cc) {
 .Naked => {
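The x86_64 backend is migrated one level higher: instead of threading a `Type` through `resolveCallingConventionValues` and re-querying it with `fnCallingConvention`/`fnParamLen`/`fnReturnType`, callers now fetch the interned `InternPool.Key.FuncType` once and read its fields directly. A sketch of the new call shape, assuming `mod: *Module` and `fn_ty: Type` as in the hunks above:

    // `typeToFunc` returns null for non-function types, hence the `.?`.
    const fn_info = mod.typeToFunc(fn_ty).?;
    var info = try self.resolveCallingConventionValues(fn_info, args[fn_info.param_types.len..], .call_frame);
    defer info.deinit(self);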
@ -1081,7 +1081,7 @@ fn genDeclRef(

 // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
 if (tv.ty.castPtrToFn(mod)) |fn_ty| {
-if (fn_ty.fnInfo().is_generic) {
+if (mod.typeToFunc(fn_ty).?.is_generic) {
 return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) });
 }
 } else if (tv.ty.zigTypeTag(mod) == .Pointer) {
@ -1507,7 +1507,7 @@ pub const DeclGen = struct {
 const fn_decl = mod.declPtr(fn_decl_index);
 const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind);

-const fn_info = fn_decl.ty.fnInfo();
+const fn_info = mod.typeToFunc(fn_decl.ty).?;
 if (fn_info.cc == .Naked) {
 switch (kind) {
 .forward => try w.writeAll("zig_naked_decl "),

@ -1517,7 +1517,7 @@ pub const DeclGen = struct {
 }
 if (fn_decl.val.castTag(.function)) |func_payload|
 if (func_payload.data.is_cold) try w.writeAll("zig_cold ");
-if (fn_info.return_type.ip_index == .noreturn_type) try w.writeAll("zig_noreturn ");
+if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");

 const trailing = try renderTypePrefix(
 dg.decl_index,

@ -3455,7 +3455,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
 } else {
 try reap(f, inst, &.{un_op});
 // Not even allowed to return void in a naked function.
-if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() != .Naked else true)
+if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention(mod) != .Naked else true)
 try writer.writeAll("return;\n");
 }
 return .none;

@ -4094,7 +4094,7 @@ fn airCall(
 ) !CValue {
 const mod = f.object.dg.module;
 // Not even allowed to call panic in a naked function.
-if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none;
+if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none;

 const gpa = f.object.dg.gpa;
 const writer = f.object.writer();

@ -4143,7 +4143,7 @@ fn airCall(
 else => unreachable,
 };

-const ret_ty = fn_ty.fnReturnType();
+const ret_ty = fn_ty.fnReturnType(mod);
 const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);

 const result_local = result: {

@ -4622,8 +4622,9 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue {
 }

 fn airUnreach(f: *Function) !CValue {
+const mod = f.object.dg.module;
 // Not even allowed to call unreachable in a naked function.
-if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none;
+if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none;

 try f.object.writer().writeAll("zig_unreachable();\n");
 return .none;
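In the C backend the visible change is that `fnCallingConvention` and `fnReturnType` grow a `Module` parameter, because the answer now lives in the intern pool rather than in a payload reachable from the `Type` alone. The recurring naked-function guard, as used by `airRet`, `airCall`, and `airUnreach` above:

    // Naked functions may not emit returns, calls, or unreachable.
    if (f.object.dg.decl) |decl| {
        if (decl.ty.fnCallingConvention(mod) == .Naked) return .none;
    }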
@ -1720,7 +1720,7 @@ pub const CType = extern union {
 .Opaque => self.init(.void),

 .Fn => {
-const info = ty.fnInfo();
+const info = mod.typeToFunc(ty).?;
 if (!info.is_generic) {
 if (lookup.isMutable()) {
 const param_kind: Kind = switch (kind) {

@ -1728,10 +1728,10 @@ pub const CType = extern union {
 .complete, .parameter, .global => .parameter,
 .payload => unreachable,
 };
-_ = try lookup.typeToIndex(info.return_type, param_kind);
+_ = try lookup.typeToIndex(info.return_type.toType(), param_kind);
 for (info.param_types) |param_type| {
-if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
-_ = try lookup.typeToIndex(param_type, param_kind);
+if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+_ = try lookup.typeToIndex(param_type.toType(), param_kind);
 }
 }
 self.init(if (info.is_var_args) .varargs_function else .function);

@ -2013,7 +2013,7 @@ pub const CType = extern union {
 .function,
 .varargs_function,
 => {
-const info = ty.fnInfo();
+const info = mod.typeToFunc(ty).?;
 assert(!info.is_generic);
 const param_kind: Kind = switch (kind) {
 .forward, .forward_parameter => .forward_parameter,

@ -2023,21 +2023,21 @@ pub const CType = extern union {

 var c_params_len: usize = 0;
 for (info.param_types) |param_type| {
-if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
+if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
 c_params_len += 1;
 }

 const params_pl = try arena.alloc(Index, c_params_len);
 var c_param_i: usize = 0;
 for (info.param_types) |param_type| {
-if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
-params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?;
+if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?;
 c_param_i += 1;
 }

 const fn_pl = try arena.create(Payload.Function);
 fn_pl.* = .{ .base = .{ .tag = t }, .data = .{
-.return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?,
+.return_type = store.set.typeToIndex(info.return_type.toType(), mod, param_kind).?,
 .param_types = params_pl,
 } };
 return initPayload(fn_pl);

@ -2145,7 +2145,7 @@ pub const CType = extern union {
 => {
 if (ty.zigTypeTag(mod) != .Fn) return false;

-const info = ty.fnInfo();
+const info = mod.typeToFunc(ty).?;
 assert(!info.is_generic);
 const data = cty.cast(Payload.Function).?.data;
 const param_kind: Kind = switch (self.kind) {

@ -2154,18 +2154,18 @@ pub const CType = extern union {
 .payload => unreachable,
 };

-if (!self.eqlRecurse(info.return_type, data.return_type, param_kind))
+if (!self.eqlRecurse(info.return_type.toType(), data.return_type, param_kind))
 return false;

 var c_param_i: usize = 0;
 for (info.param_types) |param_type| {
-if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
+if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;

 if (c_param_i >= data.param_types.len) return false;
 const param_cty = data.param_types[c_param_i];
 c_param_i += 1;

-if (!self.eqlRecurse(param_type, param_cty, param_kind))
+if (!self.eqlRecurse(param_type.toType(), param_cty, param_kind))
 return false;
 }
 return c_param_i == data.param_types.len;

@ -2258,7 +2258,7 @@ pub const CType = extern union {
 .function,
 .varargs_function,
 => {
-const info = ty.fnInfo();
+const info = mod.typeToFunc(ty).?;
 assert(!info.is_generic);
 const param_kind: Kind = switch (self.kind) {
 .forward, .forward_parameter => .forward_parameter,

@ -2266,10 +2266,10 @@ pub const CType = extern union {
 .payload => unreachable,
 };

-self.updateHasherRecurse(hasher, info.return_type, param_kind);
+self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind);
 for (info.param_types) |param_type| {
-if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
-self.updateHasherRecurse(hasher, param_type, param_kind);
+if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+self.updateHasherRecurse(hasher, param_type.toType(), param_kind);
 }
 },
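Throughout the C type lowering, `info.param_types` is now a slice of `InternPool.Index`, so each use that needs ABI information converts with `toType()` first. The loop shape that repeats in the hunks above:

    for (info.param_types) |param_type| {
        // Zero-bit parameters are skipped; the query needs a full Type.
        if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
        _ = try lookup.typeToIndex(param_type.toType(), param_kind);
    }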
@ -954,17 +954,17 @@ pub const Object = struct {
 builder.positionBuilderAtEnd(entry_block);

 // This gets the LLVM values from the function and stores them in `dg.args`.
-const fn_info = decl.ty.fnInfo();
+const fn_info = mod.typeToFunc(decl.ty).?;
 const sret = firstParamSRet(fn_info, mod);
 const ret_ptr = if (sret) llvm_func.getParam(0) else null;
 const gpa = dg.gpa;

-if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type)) |s| switch (s) {
+if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) {
 .signed => dg.addAttr(llvm_func, 0, "signext"),
 .unsigned => dg.addAttr(llvm_func, 0, "zeroext"),
 };

-const err_return_tracing = fn_info.return_type.isError(mod) and
+const err_return_tracing = fn_info.return_type.toType().isError(mod) and
 mod.comp.bin_file.options.error_return_tracing;

 const err_ret_trace = if (err_return_tracing)

@ -986,7 +986,7 @@ pub const Object = struct {
 .byval => {
 assert(!it.byval_attr);
 const param_index = it.zig_index - 1;
-const param_ty = fn_info.param_types[param_index];
+const param_ty = fn_info.param_types[param_index].toType();
 const param = llvm_func.getParam(llvm_arg_i);
 try args.ensureUnusedCapacity(1);

@ -1005,7 +1005,7 @@ pub const Object = struct {
 llvm_arg_i += 1;
 },
 .byref => {
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const param_llvm_ty = try dg.lowerType(param_ty);
 const param = llvm_func.getParam(llvm_arg_i);
 const alignment = param_ty.abiAlignment(mod);

@ -1024,7 +1024,7 @@ pub const Object = struct {
 }
 },
 .byref_mut => {
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const param_llvm_ty = try dg.lowerType(param_ty);
 const param = llvm_func.getParam(llvm_arg_i);
 const alignment = param_ty.abiAlignment(mod);

@ -1044,7 +1044,7 @@ pub const Object = struct {
 },
 .abi_sized_int => {
 assert(!it.byval_attr);
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const param = llvm_func.getParam(llvm_arg_i);
 llvm_arg_i += 1;

@ -1071,7 +1071,7 @@ pub const Object = struct {
 },
 .slice => {
 assert(!it.byval_attr);
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const ptr_info = param_ty.ptrInfo(mod);

 if (math.cast(u5, it.zig_index - 1)) |i| {

@ -1104,7 +1104,7 @@ pub const Object = struct {
 .multiple_llvm_types => {
 assert(!it.byval_attr);
 const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const param_llvm_ty = try dg.lowerType(param_ty);
 const param_alignment = param_ty.abiAlignment(mod);
 const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);

@ -1135,7 +1135,7 @@ pub const Object = struct {
 args.appendAssumeCapacity(casted);
 },
 .float_array => {
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const param_llvm_ty = try dg.lowerType(param_ty);
 const param = llvm_func.getParam(llvm_arg_i);
 llvm_arg_i += 1;

@ -1153,7 +1153,7 @@ pub const Object = struct {
 }
 },
 .i32_array, .i64_array => {
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const param_llvm_ty = try dg.lowerType(param_ty);
 const param = llvm_func.getParam(llvm_arg_i);
 llvm_arg_i += 1;

@ -1182,7 +1182,7 @@ pub const Object = struct {
 const line_number = decl.src_line + 1;
 const is_internal_linkage = decl.val.tag() != .extern_fn and
 !mod.decl_exports.contains(decl_index);
-const noret_bit: c_uint = if (fn_info.return_type.isNoReturn())
+const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type)
 llvm.DIFlags.NoReturn
 else
 0;
@ -2331,26 +2331,26 @@ pub const Object = struct {
 return full_di_ty;
 },
 .Fn => {
-const fn_info = ty.fnInfo();
+const fn_info = mod.typeToFunc(ty).?;

 var param_di_types = std.ArrayList(*llvm.DIType).init(gpa);
 defer param_di_types.deinit();

 // Return type goes first.
-if (fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+if (fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
 const sret = firstParamSRet(fn_info, mod);
-const di_ret_ty = if (sret) Type.void else fn_info.return_type;
+const di_ret_ty = if (sret) Type.void else fn_info.return_type.toType();
 try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full));

 if (sret) {
-const ptr_ty = try mod.singleMutPtrType(fn_info.return_type);
+const ptr_ty = try mod.singleMutPtrType(fn_info.return_type.toType());
 try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
 }
 } else {
 try param_di_types.append(try o.lowerDebugType(Type.void, .full));
 }

-if (fn_info.return_type.isError(mod) and
+if (fn_info.return_type.toType().isError(mod) and
 o.module.comp.bin_file.options.error_return_tracing)
 {
 const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType());

@ -2358,13 +2358,13 @@ pub const Object = struct {
 }

 for (fn_info.param_types) |param_ty| {
-if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+if (!param_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;

-if (isByRef(param_ty, mod)) {
-const ptr_ty = try mod.singleMutPtrType(param_ty);
+if (isByRef(param_ty.toType(), mod)) {
+const ptr_ty = try mod.singleMutPtrType(param_ty.toType());
 try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
 } else {
-try param_di_types.append(try o.lowerDebugType(param_ty, .full));
+try param_di_types.append(try o.lowerDebugType(param_ty.toType(), .full));
 }
 }

@ -2565,7 +2565,7 @@ pub const DeclGen = struct {
 if (gop.found_existing) return gop.value_ptr.*;

 assert(decl.has_tv);
-const fn_info = zig_fn_type.fnInfo();
+const fn_info = mod.typeToFunc(zig_fn_type).?;
 const target = mod.getTarget();
 const sret = firstParamSRet(fn_info, mod);

@ -2598,11 +2598,11 @@ pub const DeclGen = struct {
 dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0
 dg.addArgAttr(llvm_fn, 0, "noalias");

-const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type);
+const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type.toType());
 llvm_fn.addSretAttr(raw_llvm_ret_ty);
 }

-const err_return_tracing = fn_info.return_type.isError(mod) and
+const err_return_tracing = fn_info.return_type.toType().isError(mod) and
 mod.comp.bin_file.options.error_return_tracing;

 if (err_return_tracing) {

@ -2626,13 +2626,13 @@ pub const DeclGen = struct {
 }

 if (fn_info.alignment != 0) {
-llvm_fn.setAlignment(fn_info.alignment);
+llvm_fn.setAlignment(@intCast(c_uint, fn_info.alignment));
 }

 // Function attributes that are independent of analysis results of the function body.
 dg.addCommonFnAttributes(llvm_fn);

-if (fn_info.return_type.isNoReturn()) {
+if (fn_info.return_type == .noreturn_type) {
 dg.addFnAttr(llvm_fn, "noreturn");
 }
@ -2645,15 +2645,15 @@ pub const DeclGen = struct {
 while (it.next()) |lowering| switch (lowering) {
 .byval => {
 const param_index = it.zig_index - 1;
-const param_ty = fn_info.param_types[param_index];
+const param_ty = fn_info.param_types[param_index].toType();
 if (!isByRef(param_ty, mod)) {
 dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1);
 }
 },
 .byref => {
 const param_ty = fn_info.param_types[it.zig_index - 1];
-const param_llvm_ty = try dg.lowerType(param_ty);
-const alignment = param_ty.abiAlignment(mod);
+const param_llvm_ty = try dg.lowerType(param_ty.toType());
+const alignment = param_ty.toType().abiAlignment(mod);
 dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
 },
 .byref_mut => {

@ -3142,7 +3142,7 @@ pub const DeclGen = struct {

 fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type {
 const mod = dg.module;
-const fn_info = fn_ty.fnInfo();
+const fn_info = mod.typeToFunc(fn_ty).?;
 const llvm_ret_ty = try lowerFnRetTy(dg, fn_info);

 var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa);

@ -3152,7 +3152,7 @@ pub const DeclGen = struct {
 try llvm_params.append(dg.context.pointerType(0));
 }

-if (fn_info.return_type.isError(mod) and
+if (fn_info.return_type.toType().isError(mod) and
 mod.comp.bin_file.options.error_return_tracing)
 {
 const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType());

@ -3163,19 +3163,19 @@ pub const DeclGen = struct {
 while (it.next()) |lowering| switch (lowering) {
 .no_bits => continue,
 .byval => {
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 try llvm_params.append(try dg.lowerType(param_ty));
 },
 .byref, .byref_mut => {
 try llvm_params.append(dg.context.pointerType(0));
 },
 .abi_sized_int => {
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
 try llvm_params.append(dg.context.intType(abi_size * 8));
 },
 .slice => {
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 var buf: Type.SlicePtrFieldTypeBuffer = undefined;
 const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional)
 param_ty.optionalChild(mod).slicePtrFieldType(&buf, mod)

@ -3195,7 +3195,7 @@ pub const DeclGen = struct {
 try llvm_params.append(dg.context.intType(16));
 },
 .float_array => |count| {
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
 const field_count = @intCast(c_uint, count);
 const arr_ty = float_ty.arrayType(field_count);

@ -3223,7 +3223,7 @@ pub const DeclGen = struct {
 const mod = dg.module;
 const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
 .Opaque => true,
-.Fn => !elem_ty.fnInfo().is_generic,
+.Fn => !mod.typeToFunc(elem_ty).?.is_generic,
 .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod),
 else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
 };
@ -4204,7 +4204,7 @@ pub const DeclGen = struct {

 const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
 if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
-(is_fn_body and decl.ty.fnInfo().is_generic))
+(is_fn_body and mod.typeToFunc(decl.ty).?.is_generic))
 {
 return self.lowerPtrToVoid(tv.ty);
 }

@ -4354,7 +4354,7 @@ pub const DeclGen = struct {
 llvm_fn: *llvm.Value,
 param_ty: Type,
 param_index: u32,
-fn_info: Type.Payload.Function.Data,
+fn_info: InternPool.Key.FuncType,
 llvm_arg_i: u32,
 ) void {
 const mod = dg.module;

@ -4774,8 +4774,8 @@ pub const FuncGen = struct {
 .Pointer => callee_ty.childType(mod),
 else => unreachable,
 };
-const fn_info = zig_fn_ty.fnInfo();
-const return_type = fn_info.return_type;
+const fn_info = mod.typeToFunc(zig_fn_ty).?;
+const return_type = fn_info.return_type.toType();
 const llvm_fn = try self.resolveInst(pl_op.operand);
 const target = mod.getTarget();
 const sret = firstParamSRet(fn_info, mod);

@ -4790,7 +4790,7 @@ pub const FuncGen = struct {
 break :blk ret_ptr;
 };

-const err_return_tracing = fn_info.return_type.isError(mod) and
+const err_return_tracing = return_type.isError(mod) and
 self.dg.module.comp.bin_file.options.error_return_tracing;
 if (err_return_tracing) {
 try llvm_args.append(self.err_ret_trace.?);

@ -4971,14 +4971,14 @@ pub const FuncGen = struct {
 while (it.next()) |lowering| switch (lowering) {
 .byval => {
 const param_index = it.zig_index - 1;
-const param_ty = fn_info.param_types[param_index];
+const param_ty = fn_info.param_types[param_index].toType();
 if (!isByRef(param_ty, mod)) {
 self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
 }
 },
 .byref => {
 const param_index = it.zig_index - 1;
-const param_ty = fn_info.param_types[param_index];
+const param_ty = fn_info.param_types[param_index].toType();
 const param_llvm_ty = try self.dg.lowerType(param_ty);
 const alignment = param_ty.abiAlignment(mod);
 self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);

@ -4998,7 +4998,7 @@ pub const FuncGen = struct {

 .slice => {
 assert(!it.byval_attr);
-const param_ty = fn_info.param_types[it.zig_index - 1];
+const param_ty = fn_info.param_types[it.zig_index - 1].toType();
 const ptr_info = param_ty.ptrInfo(mod);
 const llvm_arg_i = it.llvm_index - 2;

@ -5023,7 +5023,7 @@ pub const FuncGen = struct {
 };
 }

-if (return_type.isNoReturn() and attr != .AlwaysTail) {
+if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) {
 return null;
 }

@ -5088,9 +5088,9 @@ pub const FuncGen = struct {
 _ = self.builder.buildRetVoid();
 return null;
 }
-const fn_info = self.dg.decl.ty.fnInfo();
+const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
 if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-if (fn_info.return_type.isError(mod)) {
+if (fn_info.return_type.toType().isError(mod)) {
 // Functions with an empty error set are emitted with an error code
 // return type and return zero so they can be function pointers coerced
 // to functions that return anyerror.

@ -5135,9 +5135,9 @@ pub const FuncGen = struct {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const ptr_ty = self.typeOf(un_op);
 const ret_ty = ptr_ty.childType(mod);
-const fn_info = self.dg.decl.ty.fnInfo();
+const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
 if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-if (fn_info.return_type.isError(mod)) {
+if (fn_info.return_type.toType().isError(mod)) {
 // Functions with an empty error set are emitted with an error code
 // return type and return zero so they can be function pointers coerced
 // to functions that return anyerror.
@ -6148,25 +6148,21 @@ pub const FuncGen = struct {
 defer self.gpa.free(fqn);

 const is_internal_linkage = !mod.decl_exports.contains(decl_index);
-var fn_ty_pl: Type.Payload.Function = .{
-.base = .{ .tag = .function },
-.data = .{
-.param_types = &.{},
-.comptime_params = undefined,
-.return_type = Type.void,
-.alignment = 0,
-.noalias_bits = 0,
-.cc = .Unspecified,
-.is_var_args = false,
-.is_generic = false,
-.is_noinline = false,
-.align_is_generic = false,
-.cc_is_generic = false,
-.section_is_generic = false,
-.addrspace_is_generic = false,
-},
-};
-const fn_ty = Type.initPayload(&fn_ty_pl.base);
+const fn_ty = try mod.funcType(.{
+.param_types = &.{},
+.return_type = .void_type,
+.alignment = 0,
+.noalias_bits = 0,
+.comptime_bits = 0,
+.cc = .Unspecified,
+.is_var_args = false,
+.is_generic = false,
+.is_noinline = false,
+.align_is_generic = false,
+.cc_is_generic = false,
+.section_is_generic = false,
+.addrspace_is_generic = false,
+});
 const subprogram = dib.createFunction(
 di_file.toScope(),
 decl.name,
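The debug-info path no longer builds a stack-allocated `Type.Payload.Function`; it asks the module to intern the function type. One consequence worth noting (an assumption based on how interning is used elsewhere in this commit, not something this hunk shows directly): structurally identical requests yield the same index. With `key` standing for the initializer from the hunk above:

    // Interned types are deduplicated: repeating the request allocates
    // nothing new and returns the same index (assumed behavior).
    const a = try mod.funcType(key);
    const b = try mod.funcType(key);
    assert(a.ip_index == b.ip_index);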
@ -10546,31 +10542,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
 }
 }

-fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *Module) bool {
-if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
+fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
+if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false;

 const target = mod.getTarget();
 switch (fn_info.cc) {
-.Unspecified, .Inline => return isByRef(fn_info.return_type, mod),
+.Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod),
 .C => switch (target.cpu.arch) {
 .mips, .mipsel => return false,
 .x86_64 => switch (target.os.tag) {
-.windows => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory,
-else => return firstParamSRetSystemV(fn_info.return_type, mod),
+.windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
 },
-.wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, mod)[0] == .indirect,
-.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, mod) == .memory,
-.arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) {
+.wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect,
+.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
+.arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) {
 .memory, .i64_array => return true,
 .i32_array => |size| return size != 1,
 .byval => return false,
 },
-.riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, mod) == .memory,
+.riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
 else => return false, // TODO investigate C ABI for other architectures
 },
-.SysV => return firstParamSRetSystemV(fn_info.return_type, mod),
-.Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory,
-.Stdcall => return !isScalar(mod, fn_info.return_type),
+.SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
+.Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+.Stdcall => return !isScalar(mod, fn_info.return_type.toType()),
 else => return false,
 }
 }

@ -10585,13 +10581,14 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
 /// In order to support the C calling convention, some return types need to be lowered
 /// completely differently in the function prototype to honor the C ABI, and then
 /// be effectively bitcasted to the actual return type.
-fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
+fn lowerFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
 const mod = dg.module;
-if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+const return_type = fn_info.return_type.toType();
+if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
 // If the return type is an error set or an error union, then we make this
 // anyerror return type instead, so that it can be coerced into a function
 // pointer type which has anyerror as the return type.
-if (fn_info.return_type.isError(mod)) {
+if (return_type.isError(mod)) {
 return dg.lowerType(Type.anyerror);
 } else {
 return dg.context.voidType();
@ -10600,61 +10597,61 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
 const target = mod.getTarget();
 switch (fn_info.cc) {
 .Unspecified, .Inline => {
-if (isByRef(fn_info.return_type, mod)) {
+if (isByRef(return_type, mod)) {
 return dg.context.voidType();
 } else {
-return dg.lowerType(fn_info.return_type);
+return dg.lowerType(return_type);
 }
 },
 .C => {
 switch (target.cpu.arch) {
-.mips, .mipsel => return dg.lowerType(fn_info.return_type),
+.mips, .mipsel => return dg.lowerType(return_type),
 .x86_64 => switch (target.os.tag) {
 .windows => return lowerWin64FnRetTy(dg, fn_info),
 else => return lowerSystemVFnRetTy(dg, fn_info),
 },
 .wasm32 => {
-if (isScalar(mod, fn_info.return_type)) {
-return dg.lowerType(fn_info.return_type);
+if (isScalar(mod, return_type)) {
+return dg.lowerType(return_type);
 }
-const classes = wasm_c_abi.classifyType(fn_info.return_type, mod);
+const classes = wasm_c_abi.classifyType(return_type, mod);
 if (classes[0] == .indirect or classes[0] == .none) {
 return dg.context.voidType();
 }

 assert(classes[0] == .direct and classes[1] == .none);
-const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, mod);
+const scalar_type = wasm_c_abi.scalarType(return_type, mod);
 const abi_size = scalar_type.abiSize(mod);
 return dg.context.intType(@intCast(c_uint, abi_size * 8));
 },
 .aarch64, .aarch64_be => {
-switch (aarch64_c_abi.classifyType(fn_info.return_type, mod)) {
+switch (aarch64_c_abi.classifyType(return_type, mod)) {
 .memory => return dg.context.voidType(),
-.float_array => return dg.lowerType(fn_info.return_type),
-.byval => return dg.lowerType(fn_info.return_type),
+.float_array => return dg.lowerType(return_type),
+.byval => return dg.lowerType(return_type),
 .integer => {
-const bit_size = fn_info.return_type.bitSize(mod);
+const bit_size = return_type.bitSize(mod);
 return dg.context.intType(@intCast(c_uint, bit_size));
 },
 .double_integer => return dg.context.intType(64).arrayType(2),
 }
 },
 .arm, .armeb => {
-switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) {
+switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
 .memory, .i64_array => return dg.context.voidType(),
 .i32_array => |len| if (len == 1) {
 return dg.context.intType(32);
 } else {
 return dg.context.voidType();
 },
-.byval => return dg.lowerType(fn_info.return_type),
+.byval => return dg.lowerType(return_type),
 }
 },
 .riscv32, .riscv64 => {
-switch (riscv_c_abi.classifyType(fn_info.return_type, mod)) {
+switch (riscv_c_abi.classifyType(return_type, mod)) {
 .memory => return dg.context.voidType(),
 .integer => {
-const bit_size = fn_info.return_type.bitSize(mod);
+const bit_size = return_type.bitSize(mod);
 return dg.context.intType(@intCast(c_uint, bit_size));
 },
 .double_integer => {

@ -10664,50 +10661,52 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
 };
 return dg.context.structType(&llvm_types_buffer, 2, .False);
 },
-.byval => return dg.lowerType(fn_info.return_type),
+.byval => return dg.lowerType(return_type),
 }
 },
 // TODO investigate C ABI for other architectures
-else => return dg.lowerType(fn_info.return_type),
+else => return dg.lowerType(return_type),
 }
 },
 .Win64 => return lowerWin64FnRetTy(dg, fn_info),
 .SysV => return lowerSystemVFnRetTy(dg, fn_info),
 .Stdcall => {
-if (isScalar(mod, fn_info.return_type)) {
-return dg.lowerType(fn_info.return_type);
+if (isScalar(mod, return_type)) {
+return dg.lowerType(return_type);
 } else {
 return dg.context.voidType();
 }
 },
-else => return dg.lowerType(fn_info.return_type),
+else => return dg.lowerType(return_type),
 }
 }

-fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
+fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
 const mod = dg.module;
-switch (x86_64_abi.classifyWindows(fn_info.return_type, mod)) {
+const return_type = fn_info.return_type.toType();
+switch (x86_64_abi.classifyWindows(return_type, mod)) {
 .integer => {
-if (isScalar(mod, fn_info.return_type)) {
-return dg.lowerType(fn_info.return_type);
+if (isScalar(mod, return_type)) {
+return dg.lowerType(return_type);
 } else {
-const abi_size = fn_info.return_type.abiSize(mod);
+const abi_size = return_type.abiSize(mod);
 return dg.context.intType(@intCast(c_uint, abi_size * 8));
 }
 },
 .win_i128 => return dg.context.intType(64).vectorType(2),
 .memory => return dg.context.voidType(),
-.sse => return dg.lowerType(fn_info.return_type),
+.sse => return dg.lowerType(return_type),
 else => unreachable,
 }
 }

-fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
+fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
 const mod = dg.module;
-if (isScalar(mod, fn_info.return_type)) {
-return dg.lowerType(fn_info.return_type);
+const return_type = fn_info.return_type.toType();
+if (isScalar(mod, return_type)) {
+return dg.lowerType(return_type);
 }
-const classes = x86_64_abi.classifySystemV(fn_info.return_type, mod, .ret);
+const classes = x86_64_abi.classifySystemV(return_type, mod, .ret);
 if (classes[0] == .memory) {
 return dg.context.voidType();
 }

@ -10748,7 +10747,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
 }
 }
 if (classes[0] == .integer and classes[1] == .none) {
-const abi_size = fn_info.return_type.abiSize(mod);
+const abi_size = return_type.abiSize(mod);
 return dg.context.intType(@intCast(c_uint, abi_size * 8));
 }
 return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
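Rather than sprinkle `fn_info.return_type.toType()` through every ABI branch, the return-type lowering functions above bind the rehydrated `Type` once at the top and keep the raw index only where identity comparison suffices. The idiom distilled from the three functions:

    const return_type = fn_info.return_type.toType();
    if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
        // zero-bit return: lowered as void (or anyerror for error types)
    }
    // Identity checks stay on the index itself, as in the callers:
    const returns_noreturn = fn_info.return_type == .noreturn_type;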
@ -10756,7 +10755,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm

 const ParamTypeIterator = struct {
 dg: *DeclGen,
-fn_info: Type.Payload.Function.Data,
+fn_info: InternPool.Key.FuncType,
 zig_index: u32,
 llvm_index: u32,
 llvm_types_len: u32,

@ -10781,7 +10780,7 @@ const ParamTypeIterator = struct {
 if (it.zig_index >= it.fn_info.param_types.len) return null;
 const ty = it.fn_info.param_types[it.zig_index];
 it.byval_attr = false;
-return nextInner(it, ty);
+return nextInner(it, ty.toType());
 }

 /// `airCall` uses this instead of `next` so that it can take into account variadic functions.

@ -10793,7 +10792,7 @@ const ParamTypeIterator = struct {
 return nextInner(it, fg.typeOf(args[it.zig_index]));
 }
 } else {
-return nextInner(it, it.fn_info.param_types[it.zig_index]);
+return nextInner(it, it.fn_info.param_types[it.zig_index].toType());
 }
 }

@ -11009,7 +11008,7 @@ const ParamTypeIterator = struct {
 }
 };

-fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTypeIterator {
+fn iterateParamTypes(dg: *DeclGen, fn_info: InternPool.Key.FuncType) ParamTypeIterator {
 return .{
 .dg = dg,
 .fn_info = fn_info,
@ -1227,8 +1227,9 @@ pub const DeclGen = struct {
 },
 .Fn => switch (repr) {
 .direct => {
+const fn_info = mod.typeToFunc(ty).?;
 // TODO: Put this somewhere in Sema.zig
-if (ty.fnIsVarArgs())
+if (fn_info.is_var_args)
 return self.fail("VarArgs functions are unsupported for SPIR-V", .{});

 const param_ty_refs = try self.gpa.alloc(CacheRef, ty.fnParamLen());

@ -1546,18 +1547,17 @@ pub const DeclGen = struct {
 assert(decl.ty.zigTypeTag(mod) == .Fn);
 const prototype_id = try self.resolveTypeId(decl.ty);
 try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
-.id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()),
+.id_result_type = try self.resolveTypeId(decl.ty.fnReturnType(mod)),
 .id_result = decl_id,
 .function_control = .{}, // TODO: We can set inline here if the type requires it.
 .function_type = prototype_id,
 });

-const params = decl.ty.fnParamLen();
-var i: usize = 0;
+const fn_info = mod.typeToFunc(decl.ty).?;

-try self.args.ensureUnusedCapacity(self.gpa, params);
-while (i < params) : (i += 1) {
-const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i));
+try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
+for (fn_info.param_types) |param_type| {
+const param_type_id = try self.resolveTypeId(param_type.toType());
 const arg_result_id = self.spv.allocId();
 try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
 .id_result_type = param_type_id,

@ -3338,10 +3338,10 @@ pub const DeclGen = struct {
 .Pointer => return self.fail("cannot call function pointers", .{}),
 else => unreachable,
 };
-const fn_info = zig_fn_ty.fnInfo();
+const fn_info = mod.typeToFunc(zig_fn_ty).?;
 const return_type = fn_info.return_type;

-const result_type_id = try self.resolveTypeId(return_type);
+const result_type_id = try self.resolveTypeId(return_type.toType());
 const result_id = self.spv.allocId();
 const callee_id = try self.resolve(pl_op.operand);

@ -3368,11 +3368,11 @@ pub const DeclGen = struct {
 .id_ref_3 = params[0..n_params],
 });

-if (return_type.isNoReturn()) {
+if (return_type == .noreturn_type) {
 try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
 }

-if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+if (self.liveness.isUnused(inst) or !return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
 return null;
 }
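With `param_types` available as a slice on the interned key, the SPIR-V prologue emission replaces its index-counting `while` loop with a direct `for` over the parameters, converting each index at the point of use:

    try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
    for (fn_info.param_types) |param_type| {
        const param_type_id = try self.resolveTypeId(param_type.toType());
        // ...emit OpFunctionParameter with param_type_id...
    }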
@ -1430,7 +1430,7 @@ pub fn updateDeclExports(
 .x86 => std.builtin.CallingConvention.Stdcall,
 else => std.builtin.CallingConvention.C,
 };
-const decl_cc = exported_decl.ty.fnCallingConvention();
+const decl_cc = exported_decl.ty.fnCallingConvention(mod);
 if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and
 self.base.options.link_libc)
 {
@ -1022,7 +1022,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
 const decl_name_with_null = decl_name[0 .. decl_name.len + 1];
 try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);

-const fn_ret_type = decl.ty.fnReturnType();
+const fn_ret_type = decl.ty.fnReturnType(mod);
 const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod);
 if (fn_ret_has_bits) {
 dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram));
@ -131,12 +131,12 @@ pub fn updateDecl(self: *SpirV, module: *Module, decl_index: Module.Decl.Index)

 pub fn updateDeclExports(
 self: *SpirV,
-module: *Module,
+mod: *Module,
 decl_index: Module.Decl.Index,
 exports: []const *Module.Export,
 ) !void {
-const decl = module.declPtr(decl_index);
-if (decl.val.tag() == .function and decl.ty.fnCallingConvention() == .Kernel) {
+const decl = mod.declPtr(decl_index);
+if (decl.val.tag() == .function and decl.ty.fnCallingConvention(mod) == .Kernel) {
 // TODO: Unify with resolveDecl in spirv.zig.
 const entry = try self.decl_link.getOrPut(decl_index);
 if (!entry.found_existing) {
@ -649,3 +649,14 @@ pub fn compilerRtIntAbbrev(bits: u16) []const u8 {
 else => "o", // Non-standard
 };
 }
+
+pub fn fnCallConvAllowsZigTypes(target: std.Target, cc: std.builtin.CallingConvention) bool {
+return switch (cc) {
+.Unspecified, .Async, .Inline => true,
+// For now we want to authorize PTX kernel to use zig objects, even if
+// we end up exposing the ABI. The goal is to experiment with more
+// integrated CPU/GPU code.
+.Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64,
+else => false,
+};
+}
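The new helper centralizes the question of whether Zig-only types may cross a function boundary under a given calling convention. A hypothetical caller in semantic analysis (the surrounding names are illustrative, not part of this commit):

    // Extern-incompatible parameter and return types are only legal when
    // the calling convention keeps the function internal to Zig.
    if (!fnCallConvAllowsZigTypes(target, fn_info.cc)) {
        // ...require every parameter and the return type to be extern-compatible...
    }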

src/type.zig (394 changed lines)

@ -42,8 +42,6 @@ pub const Type = struct {
 .error_set_merged,
 => return .ErrorSet,

-.function => return .Fn,
-
 .pointer,
 .inferred_alloc_const,
 .inferred_alloc_mut,

@ -66,6 +64,7 @@ pub const Type = struct {
 .union_type => return .Union,
 .opaque_type => return .Opaque,
 .enum_type => return .Enum,
+.func_type => return .Fn,
 .simple_type => |s| switch (s) {
 .f16,
 .f32,

@ -344,53 +343,6 @@ pub const Type = struct {
 return true;
 },

-.function => {
-if (b.zigTypeTag(mod) != .Fn) return false;
-
-const a_info = a.fnInfo();
-const b_info = b.fnInfo();
-
-if (!a_info.return_type.isGenericPoison() and
-!b_info.return_type.isGenericPoison() and
-!eql(a_info.return_type, b_info.return_type, mod))
-return false;
-
-if (a_info.is_var_args != b_info.is_var_args)
-return false;
-
-if (a_info.is_generic != b_info.is_generic)
-return false;
-
-if (a_info.is_noinline != b_info.is_noinline)
-return false;
-
-if (a_info.noalias_bits != b_info.noalias_bits)
-return false;
-
-if (!a_info.cc_is_generic and a_info.cc != b_info.cc)
-return false;
-
-if (!a_info.align_is_generic and a_info.alignment != b_info.alignment)
-return false;
-
-if (a_info.param_types.len != b_info.param_types.len)
-return false;
-
-for (a_info.param_types, 0..) |a_param_ty, i| {
-const b_param_ty = b_info.param_types[i];
-if (a_info.comptime_params[i] != b_info.comptime_params[i])
-return false;
-
-if (a_param_ty.isGenericPoison()) continue;
-if (b_param_ty.isGenericPoison()) continue;
-
-if (!eql(a_param_ty, b_param_ty, mod))
-return false;
-}
-
-return true;
-},
-
 .pointer,
 .inferred_alloc_const,
 .inferred_alloc_mut,
@ -501,32 +453,6 @@ pub const Type = struct {
 std.hash.autoHash(hasher, ies);
 },

-.function => {
-std.hash.autoHash(hasher, std.builtin.TypeId.Fn);
-
-const fn_info = ty.fnInfo();
-if (!fn_info.return_type.isGenericPoison()) {
-hashWithHasher(fn_info.return_type, hasher, mod);
-}
-if (!fn_info.align_is_generic) {
-std.hash.autoHash(hasher, fn_info.alignment);
-}
-if (!fn_info.cc_is_generic) {
-std.hash.autoHash(hasher, fn_info.cc);
-}
-std.hash.autoHash(hasher, fn_info.is_var_args);
-std.hash.autoHash(hasher, fn_info.is_generic);
-std.hash.autoHash(hasher, fn_info.is_noinline);
-std.hash.autoHash(hasher, fn_info.noalias_bits);
-
-std.hash.autoHash(hasher, fn_info.param_types.len);
-for (fn_info.param_types, 0..) |param_ty, i| {
-std.hash.autoHash(hasher, fn_info.paramIsComptime(i));
-if (param_ty.isGenericPoison()) continue;
-hashWithHasher(param_ty, hasher, mod);
-}
-},
-
 .pointer,
 .inferred_alloc_const,
 .inferred_alloc_mut,
@ -631,30 +557,6 @@ pub const Type = struct {
 };
 },

-.function => {
-const payload = self.castTag(.function).?.data;
-const param_types = try allocator.alloc(Type, payload.param_types.len);
-for (payload.param_types, 0..) |param_ty, i| {
-param_types[i] = try param_ty.copy(allocator);
-}
-const other_comptime_params = payload.comptime_params[0..payload.param_types.len];
-const comptime_params = try allocator.dupe(bool, other_comptime_params);
-return Tag.function.create(allocator, .{
-.return_type = try payload.return_type.copy(allocator),
-.param_types = param_types,
-.cc = payload.cc,
-.alignment = payload.alignment,
-.is_var_args = payload.is_var_args,
-.is_generic = payload.is_generic,
-.is_noinline = payload.is_noinline,
-.comptime_params = comptime_params.ptr,
-.align_is_generic = payload.align_is_generic,
-.cc_is_generic = payload.cc_is_generic,
-.section_is_generic = payload.section_is_generic,
-.addrspace_is_generic = payload.addrspace_is_generic,
-.noalias_bits = payload.noalias_bits,
-});
-},
 .pointer => {
 const payload = self.castTag(.pointer).?.data;
 const sent: ?Value = if (payload.sentinel) |some|
@ -766,32 +668,6 @@ pub const Type = struct {
 while (true) {
 const t = ty.tag();
 switch (t) {
-.function => {
-const payload = ty.castTag(.function).?.data;
-try writer.writeAll("fn(");
-for (payload.param_types, 0..) |param_type, i| {
-if (i != 0) try writer.writeAll(", ");
-try param_type.dump("", .{}, writer);
-}
-if (payload.is_var_args) {
-if (payload.param_types.len != 0) {
-try writer.writeAll(", ");
-}
-try writer.writeAll("...");
-}
-try writer.writeAll(") ");
-if (payload.alignment != 0) {
-try writer.print("align({d}) ", .{payload.alignment});
-}
-if (payload.cc != .Unspecified) {
-try writer.writeAll("callconv(.");
-try writer.writeAll(@tagName(payload.cc));
-try writer.writeAll(") ");
-}
-ty = payload.return_type;
-continue;
-},
-
 .anyframe_T => {
 const return_type = ty.castTag(.anyframe_T).?.data;
 try writer.print("anyframe->", .{});

@ -909,48 +785,6 @@ pub const Type = struct {
 try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
 },

-.function => {
-const fn_info = ty.fnInfo();
-if (fn_info.is_noinline) {
-try writer.writeAll("noinline ");
-}
-try writer.writeAll("fn(");
-for (fn_info.param_types, 0..) |param_ty, i| {
-if (i != 0) try writer.writeAll(", ");
-if (fn_info.paramIsComptime(i)) {
-try writer.writeAll("comptime ");
-}
-if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) {
-try writer.writeAll("noalias ");
-};
-if (param_ty.isGenericPoison()) {
-try writer.writeAll("anytype");
-} else {
-try print(param_ty, writer, mod);
-}
-}
-if (fn_info.is_var_args) {
-if (fn_info.param_types.len != 0) {
-try writer.writeAll(", ");
-}
-try writer.writeAll("...");
-}
-try writer.writeAll(") ");
-if (fn_info.alignment != 0) {
-try writer.print("align({d}) ", .{fn_info.alignment});
-}
-if (fn_info.cc != .Unspecified) {
-try writer.writeAll("callconv(.");
-try writer.writeAll(@tagName(fn_info.cc));
-try writer.writeAll(") ");
-}
-if (fn_info.return_type.isGenericPoison()) {
-try writer.writeAll("anytype");
-} else {
-try print(fn_info.return_type, writer, mod);
-}
-},
-
 .error_union => {
 const error_union = ty.castTag(.error_union).?.data;
 try print(error_union.error_set, writer, mod);
@@ -1158,6 +992,48 @@ pub const Type = struct {
                const decl = mod.declPtr(enum_type.decl);
                try decl.renderFullyQualifiedName(mod, writer);
            },
            .func_type => |fn_info| {
                if (fn_info.is_noinline) {
                    try writer.writeAll("noinline ");
                }
                try writer.writeAll("fn(");
                for (fn_info.param_types, 0..) |param_ty, i| {
                    if (i != 0) try writer.writeAll(", ");
                    if (std.math.cast(u5, i)) |index| {
                        if (fn_info.paramIsComptime(index)) {
                            try writer.writeAll("comptime ");
                        }
                        if (fn_info.paramIsNoalias(index)) {
                            try writer.writeAll("noalias ");
                        }
                    }
                    if (param_ty == .generic_poison_type) {
                        try writer.writeAll("anytype");
                    } else {
                        try print(param_ty.toType(), writer, mod);
                    }
                }
                if (fn_info.is_var_args) {
                    if (fn_info.param_types.len != 0) {
                        try writer.writeAll(", ");
                    }
                    try writer.writeAll("...");
                }
                try writer.writeAll(") ");
                if (fn_info.alignment != 0) {
                    try writer.print("align({d}) ", .{fn_info.alignment});
                }
                if (fn_info.cc != .Unspecified) {
                    try writer.writeAll("callconv(.");
                    try writer.writeAll(@tagName(fn_info.cc));
                    try writer.writeAll(") ");
                }
                if (fn_info.return_type == .generic_poison_type) {
                    try writer.writeAll("anytype");
                } else {
                    try print(fn_info.return_type.toType(), writer, mod);
                }
            },

            // values, not types
            .undef => unreachable,
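
Note on the `std.math.cast(u5, i)` guard in the new `.func_type` printer: per-parameter comptime/noalias flags are stored as bits of a `u32`, so only the first 32 parameters can carry them; later parameters simply print without flags, matching the legacy printer above. A minimal standalone sketch of that flag scheme (function and variable names here are illustrative, not from the commit):

    const std = @import("std");

    // Parameter i maps to bit i of a u32 mask, so the index must fit in a
    // u5; anything past parameter 31 reports "no flag".
    fn paramHasFlag(bits: u32, i: usize) bool {
        const index = std.math.cast(u5, i) orelse return false;
        return @truncate(u1, bits >> index) != 0;
    }

    test "bit i flags parameter i" {
        const noalias_bits: u32 = 0b101; // parameters 0 and 2 flagged
        try std.testing.expect(paramHasFlag(noalias_bits, 0));
        try std.testing.expect(!paramHasFlag(noalias_bits, 1));
        try std.testing.expect(paramHasFlag(noalias_bits, 2));
        try std.testing.expect(!paramHasFlag(noalias_bits, 40)); // past bit 31
    }
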
@@ -1174,6 +1050,11 @@ pub const Type = struct {
        }
    }

    pub fn toIntern(ty: Type) InternPool.Index {
        assert(ty.ip_index != .none);
        return ty.ip_index;
    }

    pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value {
        if (self.ip_index != .none) return self.ip_index.toValue();
        switch (self.tag()) {
@@ -1223,7 +1104,7 @@ pub const Type = struct {
                if (ignore_comptime_only) {
                    return true;
                } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) {
                    return !ty.childType(mod).fnInfo().is_generic;
                    return !mod.typeToFunc(ty.childType(mod)).?.is_generic;
                } else if (strat == .sema) {
                    return !(try strat.sema.typeRequiresComptime(ty));
                } else {
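
The one-line change above is the migration pattern this commit applies throughout: the legacy payload accessor becomes a `Module`-mediated InternPool lookup. A sketch of the two shapes, assuming only what the hunks themselves show (`typeToFunc` returns an optional, unwrapped with `.?` where the type is known to be a function):

    // Before: function type data lived in a heap-allocated Type payload.
    const fn_info = ty.childType(mod).fnInfo();

    // After: the data is interned; typeToFunc yields null for non-functions.
    const fn_info = mod.typeToFunc(ty.childType(mod)).?;
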
@@ -1231,12 +1112,6 @@ pub const Type = struct {
                }
            },

            // These are false because they are comptime-only types.
            // These are function *bodies*, not pointers.
            // Special exceptions have to be made when emitting functions due to
            // this returning false.
            .function => return false,

            .optional => {
                const child_ty = ty.optionalChild(mod);
                if (child_ty.isNoReturn()) {
@@ -1262,7 +1137,7 @@ pub const Type = struct {
                // to comptime-only types do not, with the exception of function pointers.
                if (ignore_comptime_only) return true;
                const child_ty = ptr_type.elem_type.toType();
                if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic;
                if (child_ty.zigTypeTag(mod) == .Fn) return !mod.typeToFunc(child_ty).?.is_generic;
                if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty));
                return !comptimeOnly(ty, mod);
            },
@@ -1293,6 +1168,13 @@ pub const Type = struct {
                }
            },
            .error_union_type => @panic("TODO"),

            // These are function *bodies*, not pointers.
            // They return false here because they are comptime-only types.
            // Special exceptions have to be made when emitting functions due to
            // this returning false.
            .func_type => false,

            .simple_type => |t| switch (t) {
                .f16,
                .f32,
@@ -1436,8 +1318,6 @@ pub const Type = struct {
            .error_set_single,
            .error_set_inferred,
            .error_set_merged,
            // These are function bodies, not function pointers.
            .function,
            .error_union,
            .anyframe_T,
            => false,
@@ -1448,12 +1328,21 @@ pub const Type = struct {
            .optional => ty.isPtrLikeOptional(mod),
        },
        else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
            .int_type => true,
            .ptr_type => true,
            .int_type,
            .ptr_type,
            .vector_type,
            => true,

            .error_union_type,
            .anon_struct_type,
            .opaque_type,
            // These are function bodies, not function pointers.
            .func_type,
            => false,

            .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod),
            .vector_type => true,
            .opt_type => |child| child.toType().isPtrLikeOptional(mod),
            .error_union_type => false,

            .simple_type => |t| switch (t) {
                .f16,
                .f32,
@@ -1509,12 +1398,10 @@ pub const Type = struct {
            };
            return struct_obj.layout != .Auto;
        },
        .anon_struct_type => false,
        .union_type => |union_type| switch (union_type.runtime_tag) {
            .none, .safety => mod.unionPtr(union_type.index).layout != .Auto,
            .tagged => false,
        },
        .opaque_type => false,
        .enum_type => |enum_type| switch (enum_type.tag_mode) {
            .auto => false,
            .explicit, .nonexhaustive => true,
@@ -1546,7 +1433,7 @@ pub const Type = struct {
    pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
        switch (ty.zigTypeTag(mod)) {
            .Fn => {
                const fn_info = ty.fnInfo();
                const fn_info = mod.typeToFunc(ty).?;
                if (fn_info.is_generic) return false;
                if (fn_info.is_var_args) return true;
                switch (fn_info.cc) {
@@ -1555,7 +1442,7 @@ pub const Type = struct {
                    .Inline => return false,
                    else => {},
                }
                if (fn_info.return_type.comptimeOnly(mod)) return false;
                if (fn_info.return_type.toType().comptimeOnly(mod)) return false;
                return true;
            },
            else => return ty.hasRuntimeBits(mod),
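
Restating the ladder in `isFnOrHasRuntimeBits` for function types, as a hedged standalone sketch (parameter names invented; it mirrors the prongs above rather than calling compiler internals): generic functions are comptime-only, var-args functions always need machine code, inline functions never do, and otherwise the return type decides.

    // Sketch: does a function type correspond to emitted machine code?
    fn fnHasRuntimeBits(
        is_generic: bool,
        is_var_args: bool,
        cc_is_inline: bool,
        return_type_comptime_only: bool,
    ) bool {
        if (is_generic) return false; // each instantiation is analyzed separately
        if (is_var_args) return true; // must exist as real code at runtime
        if (cc_is_inline) return false; // always inlined away at call sites
        return !return_type_comptime_only;
    }
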
@@ -1707,13 +1594,6 @@ pub const Type = struct {
        switch (ty.ip_index) {
            .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 },
            .none => switch (ty.tag()) {
                // represents machine code; not a pointer
                .function => {
                    const alignment = ty.castTag(.function).?.data.alignment;
                    if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment };
                    return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) };
                },

                .pointer,
                .anyframe_T,
                => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
@@ -1753,6 +1633,13 @@ pub const Type = struct {

            .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat),
            .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat),
            // represents machine code; not a pointer
            .func_type => |func_type| {
                const alignment = @intCast(u32, func_type.alignment);
                if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment };
                return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) };
            },

            .simple_type => |t| switch (t) {
                .bool,
                .atomic_order,
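
Function alignment keeps its pre-InternPool semantics in the hunk above: an explicit nonzero alignment on the type wins, otherwise the target's default function alignment applies. As a sketch of just that fallback rule (helper name invented; the real default comes from `target_util.defaultFunctionAlignment`):

    // An explicit align(N) on the fn type takes priority over the target default.
    fn functionAlignment(explicit_align: u32, target_default: u32) u32 {
        return if (explicit_align != 0) explicit_align else target_default;
    }
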
@@ -2086,7 +1973,6 @@ pub const Type = struct {
            .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 },

            .none => switch (ty.tag()) {
                .function => unreachable, // represents machine code; not a pointer
                .inferred_alloc_const => unreachable,
                .inferred_alloc_mut => unreachable,

@@ -2187,6 +2073,7 @@ pub const Type = struct {

            .opt_type => return ty.abiSizeAdvancedOptional(mod, strat),
            .error_union_type => @panic("TODO"),
            .func_type => unreachable, // represents machine code; not a pointer
            .simple_type => |t| switch (t) {
                .bool,
                .atomic_order,
@@ -2408,7 +2295,6 @@ pub const Type = struct {

        switch (ty.ip_index) {
            .none => switch (ty.tag()) {
                .function => unreachable, // represents machine code; not a pointer
                .inferred_alloc_const => unreachable,
                .inferred_alloc_mut => unreachable,

@@ -2453,6 +2339,7 @@ pub const Type = struct {
            },
            .opt_type => @panic("TODO"),
            .error_union_type => @panic("TODO"),
            .func_type => unreachable, // represents machine code; not a pointer
            .simple_type => |t| switch (t) {
                .f16 => return 16,
                .f32 => return 32,
@@ -3271,6 +3158,7 @@ pub const Type = struct {

            .opt_type => unreachable,
            .error_union_type => unreachable,
            .func_type => unreachable,
            .simple_type => unreachable, // handled via Index enum tag above

            .union_type => unreachable,
@@ -3356,54 +3244,22 @@ pub const Type = struct {
        };
    }

    /// Asserts the type is a function.
    pub fn fnParamLen(self: Type) usize {
        return self.castTag(.function).?.data.param_types.len;
    }

    /// Asserts the type is a function. The length of the slice must be at least the length
    /// given by `fnParamLen`.
    pub fn fnParamTypes(self: Type, types: []Type) void {
        const payload = self.castTag(.function).?.data;
        @memcpy(types[0..payload.param_types.len], payload.param_types);
    }

    /// Asserts the type is a function.
    pub fn fnParamType(self: Type, index: usize) Type {
        switch (self.tag()) {
            .function => {
                const payload = self.castTag(.function).?.data;
                return payload.param_types[index];
            },

            else => unreachable,
        }
    }

    /// Asserts the type is a function or a function pointer.
    pub fn fnReturnType(ty: Type) Type {
        const fn_ty = switch (ty.tag()) {
            .pointer => ty.castTag(.pointer).?.data.pointee_type,
            .function => ty,
    pub fn fnReturnType(ty: Type, mod: *Module) Type {
        return fnReturnTypeIp(ty, mod.intern_pool);
    }

    pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type {
        return switch (ip.indexToKey(ty.ip_index)) {
            .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type,
            .func_type => |func_type| func_type.return_type,
            else => unreachable,
        };
        return fn_ty.castTag(.function).?.data.return_type;
        }.toType();
    }

    /// Asserts the type is a function.
    pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention {
        return self.castTag(.function).?.data.cc;
    }

    /// Asserts the type is a function.
    pub fn fnCallingConventionAllowsZigTypes(target: Target, cc: std.builtin.CallingConvention) bool {
        return switch (cc) {
            .Unspecified, .Async, .Inline => true,
            // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI.
            // The goal is to experiment with more integrated CPU/GPU code.
            .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64,
            else => false,
        };
    pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention {
        return mod.intern_pool.indexToKey(ty.ip_index).func_type.cc;
    }

    pub fn isValidParamType(self: Type, mod: *const Module) bool {
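
The new `fnReturnTypeIp` resolves a return type in at most two key lookups: a function pointer first hops through its element type, while a function body type answers directly. A toy model of that lookup chain with a simplified stand-in for the InternPool (the `Key`/`indexToKey` shape mirrors the diff, but the data layout here is invented for illustration):

    const std = @import("std");

    const Index = u32;

    const Key = union(enum) {
        ptr_type: struct { elem_type: Index },
        func_type: struct { return_type: Index },
        simple: void,
    };

    // Mirrors fnReturnTypeIp: a pointer takes one extra indexToKey hop.
    fn returnTypeOf(keys: []const Key, ty: Index) Index {
        return switch (keys[ty]) {
            .ptr_type => |p| keys[p.elem_type].func_type.return_type,
            .func_type => |f| f.return_type,
            else => unreachable,
        };
    }

    test "fn body and fn pointer resolve the same return type" {
        const keys = [_]Key{
            .{ .simple = {} }, // 0: stand-in for the return type
            .{ .func_type = .{ .return_type = 0 } }, // 1: a function type
            .{ .ptr_type = .{ .elem_type = 1 } }, // 2: pointer to 1
        };
        try std.testing.expectEqual(@as(Index, 0), returnTypeOf(&keys, 1));
        try std.testing.expectEqual(@as(Index, 0), returnTypeOf(&keys, 2));
    }
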
@@ -3421,12 +3277,8 @@ pub const Type = struct {
    }

    /// Asserts the type is a function.
    pub fn fnIsVarArgs(self: Type) bool {
        return self.castTag(.function).?.data.is_var_args;
    }

    pub fn fnInfo(ty: Type) Payload.Function.Data {
        return ty.castTag(.function).?.data;
    pub fn fnIsVarArgs(ty: Type, mod: *Module) bool {
        return mod.intern_pool.indexToKey(ty.ip_index).func_type.is_var_args;
    }

    pub fn isNumeric(ty: Type, mod: *const Module) bool {
@@ -3474,7 +3326,6 @@ pub const Type = struct {
            .error_set_single,
            .error_set,
            .error_set_merged,
            .function,
            .error_set_inferred,
            .anyframe_T,
            .pointer,
@@ -3500,7 +3351,12 @@ pub const Type = struct {
                    return null;
                }
            },
            .ptr_type => return null,

            .ptr_type,
            .error_union_type,
            .func_type,
            => return null,

            .array_type => |array_type| {
                if (array_type.len == 0)
                    return Value.initTag(.empty_array);
@@ -3514,13 +3370,13 @@ pub const Type = struct {
                return null;
            },
            .opt_type => |child| {
                if (child.toType().isNoReturn()) {
                    return Value.null;
                if (child == .noreturn_type) {
                    return try mod.nullValue(ty);
                } else {
                    return null;
                }
            },
            .error_union_type => return null,

            .simple_type => |t| switch (t) {
                .f16,
                .f32,
@@ -3682,9 +3538,6 @@ pub const Type = struct {
            .error_set_merged,
            => false,

            // These are function bodies, not function pointers.
            .function => true,

            .inferred_alloc_mut => unreachable,
            .inferred_alloc_const => unreachable,

@@ -3721,6 +3574,9 @@ pub const Type = struct {
            .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod),
            .opt_type => |child| child.toType().comptimeOnly(mod),
            .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod),
            // These are function bodies, not function pointers.
            .func_type => true,

            .simple_type => |t| switch (t) {
                .f16,
                .f32,
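
The `.func_type => true` prong carries over the comment from the deleted `.function` case: a function *body* type stays comptime-only, while a pointer to a function is a runtime value. The same distinction in user-facing Zig (ordinary language semantics, not compiler internals):

    const std = @import("std");

    fn id(x: u32) u32 {
        return x;
    }

    test "fn body types are comptime-only; fn pointers are runtime values" {
        // `fn (u32) u32` is comptime-only, so `body` must be comptime-known.
        const body: fn (u32) u32 = id;
        // `*const fn (u32) u32` has a runtime representation (an address).
        const ptr: *const fn (u32) u32 = &id;
        try std.testing.expectEqual(@as(u32, 7), body(7));
        try std.testing.expectEqual(@as(u32, 7), ptr(7));
    }
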
@@ -4367,6 +4223,10 @@ pub const Type = struct {
        return ty.ip_index == .generic_poison_type;
    }

    pub fn isBoundFn(ty: Type) bool {
        return ty.ip_index == .none and ty.tag() == .bound_fn;
    }

    /// This enum does not directly correspond to `std.builtin.TypeId` because
    /// it has extra enum tags in it, as a way of using less memory. For example,
    /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
@@ -4383,7 +4243,6 @@ pub const Type = struct {
        // After this, the tag requires a payload.

        pointer,
        function,
        optional,
        error_union,
        anyframe_T,
@@ -4411,7 +4270,6 @@ pub const Type = struct {
            .error_set_merged => Payload.ErrorSetMerged,

            .pointer => Payload.Pointer,
            .function => Payload.Function,
            .error_union => Payload.ErrorUnion,
            .error_set_single => Payload.Name,
        };
@@ -4508,36 +4366,6 @@ pub const Type = struct {
            data: u16,
        };

        pub const Function = struct {
            pub const base_tag = Tag.function;

            base: Payload = Payload{ .tag = base_tag },
            data: Data,

            // TODO look into optimizing this memory to take fewer bytes
            pub const Data = struct {
                param_types: []Type,
                comptime_params: [*]bool,
                return_type: Type,
                /// If zero use default target function code alignment.
                alignment: u32,
                noalias_bits: u32,
                cc: std.builtin.CallingConvention,
                is_var_args: bool,
                is_generic: bool,
                is_noinline: bool,
                align_is_generic: bool,
                cc_is_generic: bool,
                section_is_generic: bool,
                addrspace_is_generic: bool,

                pub fn paramIsComptime(self: @This(), i: usize) bool {
                    assert(i < self.param_types.len);
                    return self.comptime_params[i];
                }
            };
        };

        pub const ErrorSet = struct {
            pub const base_tag = Tag.error_set;
@@ -602,6 +602,11 @@ pub const Value = struct {
        return result;
    }

    pub fn toIntern(val: Value) InternPool.Index {
        assert(val.ip_index != .none);
        return val.ip_index;
    }

    /// Asserts that the value is representable as a type.
    pub fn toType(self: Value) Type {
        if (self.ip_index != .none) return self.ip_index.toType();
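
`Value.toIntern` is the mirror of `Type.toIntern` from the earlier hunk: both assert the item is already interned before handing out its index, and an `InternPool.Index` converts back via `toValue`/`toType` as the surrounding code shows. A sketch of the intended round-trip at a call site (variable names hypothetical):

    // Asserts val.ip_index != .none, then round-trips through the pool index.
    const index = val.toIntern();
    const same_val = index.toValue();
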